This commit is contained in:
thai 2022-07-20 13:51:05 +07:00
commit 255ced9b31
101 changed files with 17828 additions and 0 deletions

View File

@ -0,0 +1,65 @@
# Following properties are mandatory when engine files are not specified:
# int8-calib-file(Only in INT8), model-file-format
# Caffemodel mandatory properties: model-file, proto-file, output-blob-names
# UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
# ONNX: onnx-file
#
# Mandatory properties for detectors:
# num-detected-classes
#
# Optional properties for detectors:
# cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
# custom-lib-path
# parse-bbox-func-name
#
# Mandatory properties for classifiers:
# classifier-threshold, is-classifier
#
# Optional properties for classifiers:
# classifier-async-mode(Secondary mode only, Default=false)
#
# Optional properties in secondary mode:
# operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
# input-object-min-width, input-object-min-height, input-object-max-width,
# input-object-max-height
#
# Following properties are always recommended:
# batch-size(Default=1)
#
# Other optional properties:
# net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
# model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
# mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
# custom-lib-path
#
# The values in the config file are overridden by values set through GObject
# properties.
[property]
gpu-id=0
# 1/255 (float32-rounded): scales 8-bit pixel values into [0,1] before inference
net-scale-factor=0.0039215697906911373
#0=RGB, 1=BGR
model-color-format=0
model-engine-file=Detect_Primary_5Classes_22_06_25epochs.engine
labelfile-path=labels.txt
# 1=primary (full-frame) inference
process-mode=1
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=0
num-detected-classes=5
gie-unique-id=1
# 0=detector (matches num-detected-classes / bbox parser below)
network-type=0
output-blob-names=prob
maintain-aspect-ratio=1
# custom YOLOv5 bounding-box parser exported by the library below
parse-bbox-func-name=NvDsInferParseCustomYoloV5
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet
#scaling-filter=0
# 1=GPU compute for scaling (see nvinfer docs for platform-specific values)
scaling-compute-hw=1
# post-processing thresholds applied to all detected classes
[class-attrs-all]
nms-iou-threshold=0.4
pre-cluster-threshold=0.6

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2019 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream: Smart recording API</b>
*/
/**
* @defgroup custom_gstreamer Custom Gstreamer APIs
*
* This section defines custom Gstreamer APIs
*
*/
#ifndef NVDSSR_H_
#define NVDSSR_H_
#include <gst/gst.h>
/**
*
* @defgroup gstreamer_nvdssr Smart Record
*
* Specifies APIs relating to smart recording.
*
* @ingroup custom_gstreamer
* @{
*/
#ifdef __cplusplus
extern "C"
{
#endif
typedef struct NvDsSRRecordingInfo NvDsSRRecordingInfo;
typedef gpointer (*NvDsSRCallbackFunc) (NvDsSRRecordingInfo *info, gpointer userData);
typedef guint32 NvDsSRSessionId;
/**
* Specifies container types.
*/
typedef enum {
NVDSSR_CONTAINER_MP4,
NVDSSR_CONTAINER_MKV
} NvDsSRContainerType;
/**
* Specifies API return status.
*/
typedef enum {
NVDSSR_STATUS_OK,
NVDSSR_STATUS_INVALID_VAL,
NVDSSR_STATUS_INVALID_OP,
NVDSSR_STATUS_ERROR,
NVDSSR_STATUS_CUSTOM1 = 100,
NVDSSR_STATUS_CUSTOM2 = 101,
NVDSSR_STATUS_CUSTOM3 = 102
} NvDsSRStatus;
/**
* Holds initialization parameters required to create \ref NvDsSRContext.
*/
typedef struct NvDsSRInitParams
{
/** callback function gets called once recording is complete */
NvDsSRCallbackFunc callback;
/** recording video container, MP4 / MKV */
NvDsSRContainerType containerType;
/** optional, recording video width, 0 means no transcode */
guint width;
/** optional, recording video height, 0 means no transcode */
guint height;
/** recorded file name prefix */
gchar *fileNamePrefix;
/** store recorded file under directory path */
gchar *dirpath;
/** default recording duration in seconds */
guint defaultDuration;
/** size of video cache in seconds. */
guint videoCacheSize;
} NvDsSRInitParams;
/**
* Holds information about smart record instance.
*/
typedef struct NvDsSRContext
{
/** parent bin element. */
GstElement *recordbin;
/** queue element to cache the content. */
GstElement *recordQue;
/** child bin to save the content to file. */
GstElement *encodebin;
/** filesink element */
GstElement *filesink;
/** flag to check the key frame. */
gboolean gotKeyFrame;
/** flag to check if recording is on */
gboolean recordOn;
/** flag to check if encodebin is reset */
gboolean resetDone;
/** flag to check if encodebin is in playing state. */
gboolean isPlaying;
/** initialization parameters */
NvDsSRInitParams initParams;
/** mutex to control the flow */
GMutex flowLock;
/** thread to reset the encodebin */
GThread *resetThread;
/** pointer to user provided data */
gpointer uData;
/** pointer to private data */
gpointer privData;
} NvDsSRContext;
/**
* Hold information about video recorded.
*/
typedef struct NvDsSRRecordingInfo
{
/** SR bin context */
NvDsSRContext *ctx;
/** recording session-id */
NvDsSRSessionId sessionId;
/** recorded file name */
gchar *filename;
/** recorded file dir path */
gchar *dirpath;
/** duration in milliseconds */
guint64 duration;
/** recorded video container, MP4 / MKV */
NvDsSRContainerType containerType;
/** recorded video width*/
guint width;
/** recorded video height*/
guint height;
} NvDsSRRecordingInfo;
/**
* \brief Creates the instance of smart record.
*
* This function creates the instance of smart record and returns the pointer
* to an allocated \ref NvDsSRContext. The \a params structure must be filled
* with initialization parameters required to create the instance.
*
* recordbin of \ref NvDsSRContext is smart record bin which must be added
* to the pipeline. It expects encoded frames which will be muxed and saved to
* the file. Add this bin after parser element in the pipeline.
*
* Call NvDsSRDestroy() to free resources allocated by this function.
*
* @param[out] ctx An indirect pointer to the smart record instance.
* @param[in] params A pointer to a \ref NvDsSRInitParams structure.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRCreate (NvDsSRContext **ctx, NvDsSRInitParams *params);
/**
* \brief Starts the video recording.
*
* This function starts writing the cached video data to a file. It returns
* the session id which later can be used in NvDsSRStop() to stop the
* corresponding recording.
*
* Here startTime specifies the seconds before the current time and duration
* specifies the seconds after the start of recording.
* If current time is t1, content from t1 - startTime to t1 + duration will
* be saved to file. Therefore a total of startTime + duration seconds of data
* will be recorded.
*
* @param[in] ctx A pointer to a \ref NvDsSRContext.
* @param[out] sessionId A pointer to a \ref NvDsSRSessionId.
* @param[in] startTime Seconds before the current time. Should be less than video cache size.
* @param[in] duration Duration value in seconds after the start of recording.
* @param[in] userData A pointer to user specified data.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRStart (NvDsSRContext *ctx, NvDsSRSessionId *sessionId,
guint startTime, guint duration, gpointer userData);
/**
* \brief Stops the previously started recording.
*
* @param[in] ctx A pointer to a \ref NvDsSRContext.
* @param[in] sessionId Id of session to stop.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRStop (NvDsSRContext *ctx, NvDsSRSessionId sessionId);
/**
* \brief Destroys the instance of smart record.
*
* This function releases the resources previously allocated by NvDsSRCreate().
*
* @param[in] ctx A pointer to a \ref NvDsSRContext to be freed.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRDestroy (NvDsSRContext *ctx);
#ifdef __cplusplus
}
#endif
#endif /* NVDSSR_H_ */
/** @} */

View File

@ -0,0 +1,158 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Custom Events</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer custom
* event functions, used to map events to individual sources which
* are batched together by Gst-nvstreammux.
*
*/
/**
* @defgroup gstreamer_nvevent Events: Custom Events API
*
* Specifies GStreamer custom event functions, used to map events
* to individual sources which are batched together by Gst-nvstreammux.
*
* @ingroup gst_mess_evnt_qry
* @{
*/
#ifndef __GST_NVEVENT_H__
#define __GST_NVEVENT_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
#define FLAG(name) GST_EVENT_TYPE_##name
/** Defines supported types of custom events. */
typedef enum {
/** Specifies a custom event to indicate Pad Added. */
GST_NVEVENT_PAD_ADDED
= GST_EVENT_MAKE_TYPE (400, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate Pad Deleted. */
GST_NVEVENT_PAD_DELETED
= GST_EVENT_MAKE_TYPE (401, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate EOS of a particular stream
in a batch. */
GST_NVEVENT_STREAM_EOS
= GST_EVENT_MAKE_TYPE (402, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate a stream segment. */
GST_NVEVENT_STREAM_SEGMENT
= GST_EVENT_MAKE_TYPE (403, FLAG(DOWNSTREAM) | FLAG(SERIALIZED))
} GstNvEventType;
#undef FLAG
/**
* Creates a "custom pad added" event for the specified source.
*
* @param[in] source_id Source ID of the stream to be added to the pipeline;
* also the pad ID of the sinkpad of the
* Gst-nvstreammux plugin for which the source
* is configured.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_pad_added (guint source_id);
/**
* Creates a "custom pad deleted" event for the specified source.
*
* @param[in] source_id Source ID of the stream to be removed
* from the pipeline; also the pad ID of the sinkpad
* of the Gst-nvstreammux plugin for which
* the source is configured.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_pad_deleted (guint source_id);
/**
* Creates a "custom EOS" event for the specified source.
*
* @param[in] source_id Source ID of the stream for which EOS is to be sent;
* also the pad ID of the sinkpad of the
* Gst-nvstreammux plugin for which
* the source is configured.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_stream_eos (guint source_id);
/**
* Creates a "custom segment" event for the specified source.
*
* @param[in] source_id Source ID of the stream for which a segment event
* is to be sent; also the pad ID of the sinkpad
* of the Gst-nvstreammux plugin for which the source
* is configured.
* @param[in] segment A pointer to a copy of the segment to be sent
* with the event; corresponds to the original segment
* for the specified source.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_stream_segment (guint source_id, GstSegment *segment);
/**
* Parses a "pad added" event received on the sinkpad.
*
* @param[in] event A pointer to the event received on the sinkpad
* when the pad is added to Gst-nvstreammux.
* @param[out] source_id A pointer to the parsed source ID for the event.
*/
void gst_nvevent_parse_pad_added (GstEvent * event, guint * source_id);
/**
* Parses a "pad deleted" event received on the sinkpad.
*
* @param[in] event A pointer to the event received on the sinkpad
* when the pad is deleted from Gst-nvstreammux.
* @param[out] source_id A pointer to the parsed source ID for the event.
*/
void gst_nvevent_parse_pad_deleted (GstEvent * event, guint * source_id);
/**
* Parses a "stream EOS" event received on the sinkpad.
*
* @param[in] event A pointer to the event received on the sinkpad
* when the source ID sends the EOS event.
* @param[out] source_id A pointer to the parsed source ID for the event.
*/
void gst_nvevent_parse_stream_eos (GstEvent * event, guint * source_id);
/**
* Parses a "stream segment" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the source ID sends a segment event.
* @param[out] source_id A pointer to the parsed source ID for which
* the event is sent.
* @param[out] segment A double pointer to the parsed segment
* corresponding to source ID for the event.
*/
void gst_nvevent_parse_stream_segment (GstEvent * event, guint * source_id,
GstSegment **segment);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Custom Message Functions</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer custom
* message functions.
*
*/
/**
* @defgroup gst_mess_evnt_qry Events, Messages and Query based APIs
*
* Defines Events, Messages and Query-based APIs
*
*/
#ifndef __GST_NVMESSAGE_H__
#define __GST_NVMESSAGE_H__
#include <gst/gst.h>
G_BEGIN_DECLS
/**
* @defgroup gst_message_plugin Messages: Custom Message Functions API
* Helper functions for custom GStreamer messages posted by DeepStream GStreamer
* plugins.
*
* DeepStream GStreamer plugins post the following custom messages:
* - Stream EOS - Posted by the `NvStreamMuxer` element when it receives EOS
* on one of its sink pads.
*
* @ingroup gst_mess_evnt_qry
* @{
*/
/**
* Creates a new Stream EOS message.
*
* @param[in] obj The GStreamer object creating the message.
* @param[in] eos_stream_id Stream ID of the stream for which EOS
* has been received.
*
* @return A pointer to the new message.
*/
GstMessage * gst_nvmessage_new_stream_eos (GstObject *obj, guint eos_stream_id);
/**
* Determines whether a message is a stream EOS message.
*
* @param[in] message A pointer to the message to be checked.
*
* @return A Boolean; true if the message is a stream EOS message.
*/
gboolean gst_nvmessage_is_stream_eos (GstMessage * message);
/**
* \brief Parses the stream ID from a stream EOS message.
*
* The stream ID is the index of the stream which sent the EOS event to
* Gst-streammux.
*
* @param[in] message A pointer to a stream EOS message.
* @param[out] eos_stream_id A pointer to an unsigned integer in which
* the stream ID is stored.
*
* @return A Boolean; true if the message was successfully parsed.
*/
gboolean gst_nvmessage_parse_stream_eos (GstMessage * message, guint * eos_stream_id);
/** @} */
G_END_DECLS
#endif

View File

@ -0,0 +1,118 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Helper Queries</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer helper
* query functions.
*
*/
#ifndef __GST_NVQUERY_H__
#define __GST_NVQUERY_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup gst_query_plugin Query Functions
* Gets information such as the batch size and the number of streams.
* @ingroup gst_mess_evnt_qry
* @{
*/
/**
* Creates a new batch-size query, which can be used by elements to query
* the number of buffers in upstream elements' batched buffers.
*
* @return A pointer to the new batch size query.
*/
GstQuery * gst_nvquery_batch_size_new (void);
/**
* Determines whether a query is a batch size query.
*
* @param[in] query A pointer to the query to be checked.
*
* @return True if the query is a batch size query.
*/
gboolean gst_nvquery_is_batch_size (GstQuery * query);
/**
* Sets the batch size, used by the elements responding to the batch size query.
*
* This function fails if the query is not a batch size query.
*
* @param[in] query A pointer to a batch size query.
* @param[in] batch_size The batch size to be set.
*/
void gst_nvquery_batch_size_set (GstQuery * query, guint batch_size);
/**
* Parses batch size from a batch size query.
*
* @param[in] query A pointer to a batch size query.
* @param[out] batch_size A pointer to an unsigned integer in which the
* batch size is stored.
*
* @return True if the query was successfully parsed.
*/
gboolean gst_nvquery_batch_size_parse (GstQuery * query, guint * batch_size);
/**
* Creates a number of streams query, used by elements to query
* upstream the number of input sources.
*
* @return A pointer to the new query.
*/
GstQuery * gst_nvquery_numStreams_size_new (void);
/**
* Determines whether a query is a number-of-streams query.
*
* @param[in] query A pointer to the query to be checked.
*
* @return A Boolean; true if the query is a number of streams query.
*/
gboolean gst_nvquery_is_numStreams_size (GstQuery * query);
/**
* \brief Sets the number of input sources.
*
* This function is used by elements responding to
* a number of streams query. It fails if the query is not of the correct type.
*
* @param[in] query A pointer to a number-of-streams query.
* @param[in] numStreams_size The number of input sources.
*/
void gst_nvquery_numStreams_size_set (GstQuery * query, guint numStreams_size);
/**
* Parses the number of streams from a number of streams query.
*
* @param[in] query A pointer to a number-of-streams query.
* @param[out] numStreams_size A pointer to an unsigned integer in which
* the number of streams is stored.
*
* @return True if the query was successfully parsed.
*/
gboolean gst_nvquery_numStreams_size_parse (GstQuery * query, guint * numStreams_size);
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GSTNVDSBUFFERPOOL_H_
#define GSTNVDSBUFFERPOOL_H_
#include <gst/gst.h>
G_BEGIN_DECLS
typedef struct _GstNvDsBufferPool GstNvDsBufferPool;
typedef struct _GstNvDsBufferPoolClass GstNvDsBufferPoolClass;
typedef struct _GstNvDsBufferPoolPrivate GstNvDsBufferPoolPrivate;
#define GST_TYPE_NVDS_BUFFER_POOL (gst_nvds_buffer_pool_get_type())
#define GST_IS_NVDS_BUFFER_POOL(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_NVDS_BUFFER_POOL))
#define GST_NVDS_BUFFER_POOL(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_NVDS_BUFFER_POOL, GstNvDsBufferPool))
#define GST_NVDS_BUFFER_POOL_CAST(obj) ((GstNvDsBufferPool*)(obj))
#define GST_NVDS_MEMORY_TYPE "nvds"
#define GST_BUFFER_POOL_OPTION_NVDS_META "GstBufferPoolOptionNvDsMeta"
struct _GstNvDsBufferPool
{
GstBufferPool bufferpool;
GstNvDsBufferPoolPrivate *priv;
};
struct _GstNvDsBufferPoolClass
{
GstBufferPoolClass parent_class;
};
GType gst_nvds_buffer_pool_get_type (void);
GstBufferPool* gst_nvds_buffer_pool_new (void);
G_END_DECLS
#endif /* GSTNVDSBUFFERPOOL_H_ */

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file gstnvdsinfer.h
* <b>NVIDIA DeepStream GStreamer NvInfer API Specification </b>
*
* @b Description: This file specifies the APIs and function definitions for
* the DeepStream GStreamer NvInfer Plugin.
*/
/**
* @defgroup gstreamer_nvinfer_api NvInfer Plugin
* Defines an API for the GStreamer NvInfer plugin.
* @ingroup custom_gstreamer
* @{
*/
G_BEGIN_DECLS
#include "nvdsinfer.h"
/**
* Function definition for the inference raw output generated callback of
* Gst-NvInfer plugin.
*
* The callback function can be registered by setting "raw-output-generated-callback"
* property on an "nvinfer" element instance. Additionally, a pointer to
* user data can be set through the "raw-output-generated-userdata" property.
* This pointer will be passed to the raw output generated callback function
* through the userdata parameter.
*
* Refer to the reference deepstream-app sources for a sample implementation
* of the callback.
*
* @param[in] buf Pointer to the GstBuffer on whose contents inference has been
* executed. The implementation should assume the buffer to be
* read-only and should not modify the buffer in any way.
* @param[in] network_info Network information for the model specified for the
* nvinfer element instance.
* @param[in] layers_info Pointer to the array containing information for all
* bound layers for the inference engine.
* @param[in] num_layers Number of layers bound for the inference engine i.e.
* number of elements in the layers_info array.
* @param[in] batch_size Number of valid input frames in the batch.
* @param[in] user_data Pointer to the user data set through the
* "raw-output-generated-userdata" property.
*/
typedef void (* gst_nvinfer_raw_output_generated_callback) (GstBuffer *buf,
NvDsInferNetworkInfo *network_info, NvDsInferLayerInfo *layers_info,
guint num_layers, guint batch_size, gpointer user_data);
/**
* Holds the raw tensor output information for one frame / one object.
*
* The "nvinfer" plugins adds this meta when the "output-tensor-meta" property
* of the element instance is set to TRUE.
*
* This meta data is added as NvDsUserMeta to the frame_user_meta_list of the
* corresponding frame_meta or object_user_meta_list of the corresponding object
* with the meta_type set to NVDSINFER_TENSOR_OUTPUT_META.
*/
typedef struct
{
/** Unique ID of the gst-nvinfer instance which attached this meta. */
guint unique_id;
/** Number of bound output layers. */
guint num_output_layers;
/** Pointer to the array containing information for the bound output layers.
* Size of the array will be equal to num_output_layers. Pointers inside
* the NvDsInferLayerInfo structure are not valid for this array. */
NvDsInferLayerInfo *output_layers_info;
/** Array of pointers to the output host buffers for the frame / object. */
void **out_buf_ptrs_host;
/** Array of pointers to the output device buffers for the frame / object. */
void **out_buf_ptrs_dev;
/** GPU device ID on which the device buffers have been allocated. */
gint gpu_id;
/** Private data used for the meta producer's internal memory management. */
void *priv_data;
/** Network information for the model specified for the nvinfer element instance. */
NvDsInferNetworkInfo network_info;
} NvDsInferTensorMeta;
/**
* Holds the segmentation model output information for one frame / one object.
*
* The "nvinfer" plugins adds this meta for segmentation models.
*
* This meta data is added as NvDsUserMeta to the frame_user_meta_list of the
* corresponding frame_meta or object_user_meta_list of the corresponding object
* with the meta_type set to NVDSINFER_SEGMENTATION_META.
*/
typedef struct
{
/** Number of classes in the segmentation output. */
guint classes;
/** Width of the segmentation output class map. */
guint width;
/** Height of the segmentation output class map. */
guint height;
/** Pointer to the array for 2D pixel class map. The output for pixel (x,y)
* will be at index (y * width + x). */
gint* class_map;
/** Pointer to the raw array containing the probabilities. The probability for
* class c and pixel (x,y) will be at index (c * width *height + y * width + x). */
gfloat *class_probabilities_map;
/** Private data used for the meta producer's internal memory management. */
void *priv_data;
} NvDsInferSegmentationMeta;
G_END_DECLS
/** @} */

View File

@ -0,0 +1,178 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Metadata Extension</b>
*
* @b Description: This file defines the Metadata structure used to
* carry DeepStream metadata or any other metadata in GStreamer pipeline.
*/
/**
* @defgroup gstreamer_metagroup_api DeepStream Metadata Extension
*
* Defines an API for managing GStreamer DeepStream metadata.
* @ingroup custom_gstreamer
* @{
*
* DeepStream Metadata is attached to a buffer with gst_buffer_add_nvds_meta().
* Its metadata type is set to @ref NVDS_BATCH_GST_META.
*
* Multiple groups of metadata may be attached by different elements.
* gst_buffer_get_nvds_meta() gets the last added @ref NvDsMeta.
*
* The NvDsMeta::meta_data structure member must be cast
* to a meaningful structure pointer based on the @a meta_type.
* For example, for @a meta_type = NVDS_BATCH_GST_META,
* @a meta_data must be cast as `(NvDsBatchMeta *)`.
*/
#ifndef GST_NVDS_META_API_H
#define GST_NVDS_META_API_H
#include <gst/gst.h>
#include <gst/video/video.h>
#include <gst/base/gstbasetransform.h>
#include "nvdsmeta.h"
#include "nvds_latency_meta.h"
#ifdef __cplusplus
extern "C"
{
#endif
GType nvds_meta_api_get_type (void);
#define NVDS_META_API_TYPE (nvds_meta_api_get_type())
const GstMetaInfo *nvds_meta_get_info (void);
#define NVDS_META_INFO (nvds_meta_get_info())
#define NVDS_META_STRING "nvdsmeta"
/**
* Defines the type of metadata. NVIDIA-defined %GstNvDsMetaType values are
* in the range from @ref NVDS_BATCH_GST_META to @ref NVDS_START_USER_META.
*/
typedef enum {
/* Indicates no valid metadata type. */
NVDS_GST_INVALID_META=-1,
/* Specifies information of a formed batch. */
NVDS_BATCH_GST_META = NVDS_GST_CUSTOM_META + 1,
/* Specifies decoder-related metadata. */
NVDS_DECODER_GST_META,
/* Specifies information of dewarped surfaces. */
NVDS_DEWARPER_GST_META,
/* Reserves a range of values (NVDS_GST_CUSTOM_META + 4096) for NVIDIA use;
   user-defined types should start above this value. */
NVDS_RESERVED_GST_META = NVDS_GST_CUSTOM_META + 4096,
/* Sentinel (0x7FFFFFFF) that forces the enum to a 32-bit storage type; not a
   valid metadata type. NOTE(review): the original comment described this as
   "the first value that may be assigned to a user-defined type", which
   appears to belong to the range above NVDS_RESERVED_GST_META — confirm
   against the DeepStream SDK documentation. */
NVDS_GST_META_FORCE32 = 0x7FFFFFFF
} GstNvDsMetaType;
/**
* Holds DeepStream metadata.
* */
typedef struct _NvDsMeta {
GstMeta meta;
/** Holds a pointer to metadata. Must be cast to another structure based
on @a meta_type. */
gpointer meta_data;
/** Holds a pointer to user-specific data. */
gpointer user_data;
/** Holds the type of metadata, one of values of enum @ref GstNvDsMetaType. */
gint meta_type;
/** A callback to be called when @a meta_data is to be copied or transformed
from one buffer to other. @a meta_data and @a user_data are passed
as arguments. */
NvDsMetaCopyFunc copyfunc;
/** A callback to be called when @a meta_data is to be destroyed.
@a meta_data and @a user_data are passed as arguments. */
NvDsMetaReleaseFunc freefunc;
/**
* A callback to be called when @a meta_data is transformed into
* @a NvDsUserMeta.
* This function must be provided by a GStreamer plugin that precedes
* @a Gst-nvstreammux in the DeepStream pipeline.
* Gst-nvstreammux copies @a meta_data to
* user meta data at frame level inside @ref NvDsFrameMeta.
* @a meta_data and @a user_data are passed as arguments.
*
* To retrieve the content of @a meta_data, iterate over
* @ref NvDsFrameMetaList. Then search for @a meta_type of @ref NvDsUserMeta
* which the user has attached. (See deepstream-gst-metadata-test source
* code for more details.)
*
* @a meta_data and @a user_data are passed as arguments.
*/
NvDsMetaCopyFunc gst_to_nvds_meta_transform_func;
/**
* A callback to be called when @a meta_data transformed into
* @a NvDsUserMeta is to be destroyed.
* This function must be provided by a GStreamer plugin that precedes
* @a Gst-nvstreammux in the DeepStream pipeline.
*/
NvDsMetaReleaseFunc gst_to_nvds_meta_release_func;
} NvDsMeta;
/**
* Adds %GstMeta of type @ref NvDsMeta to the GstBuffer and sets the @a meta_data
* member of @ref NvDsMeta.
*
* @param[in] buffer A pointer to a %GstBuffer to which the function adds
* metadata.
* @param[in] meta_data A pointer at which the function sets the @a meta_data
* member of @ref NvDsMeta.
* @param[in] user_data A pointer to the user-specific data.
* @param[in] copy_func The callback to be called when
* NvDsMeta is to be copied. The function is called with
* @a meta_data and @a user_data as parameters.
* @param[in] release_func
* The callback to be called when
* NvDsMeta is to be destroyed. The function is called with
* @a meta_data and @a user_data as parameters.
*
* @return A pointer to the attached NvDsMeta structure if successful,
* or NULL otherwise.
*/
NvDsMeta *gst_buffer_add_nvds_meta (GstBuffer *buffer, gpointer meta_data,
gpointer user_data, NvDsMetaCopyFunc copy_func,
NvDsMetaReleaseFunc release_func);
/**
* Gets the @ref NvDsMeta last added to a GstBuffer.
*
* @param[in] buffer A pointer to the GstBuffer.
*
* @return A pointer to the last added NvDsMeta structure, or NULL if no
* %NvDsMeta was attached.
*/
NvDsMeta* gst_buffer_get_nvds_meta (GstBuffer *buffer);
/**
* Gets the @ref NvDsBatchMeta added to a GstBuffer.
*
* @param[in] buffer A pointer to the GstBuffer.
*
* @return A pointer to the NvDsBatchMeta structure, or NULL if no
* NvDsMeta was attached.
*/
NvDsBatchMeta * gst_buffer_get_nvds_batch_meta (GstBuffer *buffer);
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,509 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file nvbufsurface.h
* <b>NvBufSurface Interface </b>
*
* This file specifies the NvBufSurface management API.
*
* The NvBufSurface API provides methods to allocate / deallocate, map / unmap
* and copy batched buffers.
*/
/**
* @defgroup ds_nvbuf_api Buffer Management API module
*
* This section describes types and functions of NvBufSurface application
* programming interface.
*
*/
#ifndef NVBUFSURFACE_H_
#define NVBUFSURFACE_H_
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/** @defgroup ds_aaa NvBufSurface Types and Functions
* Defines types and functions of \ref NvBufSurface application
* programming interface.
* @ingroup ds_nvbuf_api
* @{ */
/** Defines the default padding length for reserved fields of structures. */
#define STRUCTURE_PADDING 4
/** Defines the maximum number of planes. */
#define NVBUF_MAX_PLANES 4
/**
* Specifies mapping types for \ref NvBufSurface.
*/
typedef enum
{
NVBUF_MAP_READ, /**< Specifies \ref NvBufSurface mapping type "read." */
NVBUF_MAP_WRITE, /**< Specifies \ref NvBufSurface mapping type
"write." */
NVBUF_MAP_READ_WRITE, /**< Specifies \ref NvBufSurface mapping type
"read/write." */
} NvBufSurfaceMemMapFlags;
/**
 * Specifies color formats for \ref NvBufSurface.
 */
typedef enum
{
/** Specifies an invalid color format. */
NVBUF_COLOR_FORMAT_INVALID,
/** Specifies 8 bit GRAY scale - single plane */
NVBUF_COLOR_FORMAT_GRAY8,
/** Specifies BT.601 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420,
/** Specifies BT.601 colorspace - YVU420 multi-planar. */
NVBUF_COLOR_FORMAT_YVU420,
/** Specifies BT.601 colorspace - YUV420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_ER,
/** Specifies BT.601 colorspace - YVU420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YVU420_ER,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_ER,
/** Specifies BT.601 colorspace - Y/CrCb 4:2:0 multi-planar (V before U). */
NVBUF_COLOR_FORMAT_NV21,
/** Specifies BT.601 colorspace - Y/CrCb ER 4:2:0 multi-planar (V before U). */
NVBUF_COLOR_FORMAT_NV21_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 packed (U-Y-V-Y byte order). */
NVBUF_COLOR_FORMAT_UYVY,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 packed. */
NVBUF_COLOR_FORMAT_UYVY_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 packed (V-Y-U-Y byte order). */
NVBUF_COLOR_FORMAT_VYUY,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 packed. */
NVBUF_COLOR_FORMAT_VYUY_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 packed (Y-U-Y-V byte order). */
NVBUF_COLOR_FORMAT_YUYV,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 packed. */
NVBUF_COLOR_FORMAT_YUYV_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 packed (Y-V-Y-U byte order). */
NVBUF_COLOR_FORMAT_YVYU,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 packed. */
NVBUF_COLOR_FORMAT_YVYU_ER,
/** Specifies BT.601 colorspace - YUV444 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444,
/** Specifies RGBA-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGBA,
/** Specifies BGRA-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGRA,
/** Specifies ARGB-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_ARGB,
/** Specifies ABGR-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_ABGR,
/** Specifies RGBx-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGBx,
/** Specifies BGRx-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGRx,
/** Specifies xRGB-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_xRGB,
/** Specifies xBGR-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_xBGR,
/** Specifies RGB-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGB,
/** Specifies BGR-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGR,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE,
/** Specifies BT.709 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_709,
/** Specifies BT.709 colorspace - YUV420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_709_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_709_ER,
/** Specifies BT.2020 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_2020,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_2020,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_2020,
/** Specifies color format for packed 2 signed shorts. */
NVBUF_COLOR_FORMAT_SIGNED_R16G16,
/** Specifies the number of color formats (not a valid format itself). */
NVBUF_COLOR_FORMAT_LAST
} NvBufSurfaceColorFormat;
/**
* Specifies layout formats for \ref NvBufSurface video planes.
*/
typedef enum
{
/** Specifies pitch layout. */
NVBUF_LAYOUT_PITCH,
/** Specifies block linear layout. */
NVBUF_LAYOUT_BLOCK_LINEAR,
} NvBufSurfaceLayout;
/**
* Specifies memory types for \ref NvBufSurface.
*/
typedef enum
{
/** Specifies the default memory type, i.e. \ref NVBUF_MEM_CUDA_DEVICE
for dGPU, \ref NVBUF_MEM_SURFACE_ARRAY for Jetson. Use \ref NVBUF_MEM_DEFAULT
to allocate whichever type of memory is appropriate for the platform. */
NVBUF_MEM_DEFAULT,
/** Specifies CUDA Host memory type. */
NVBUF_MEM_CUDA_PINNED,
/** Specifies CUDA Device memory type. */
NVBUF_MEM_CUDA_DEVICE,
/** Specifies CUDA Unified memory type. */
NVBUF_MEM_CUDA_UNIFIED,
/** Specifies NVRM Surface Array type. Valid only for Jetson. */
NVBUF_MEM_SURFACE_ARRAY,
/** Specifies NVRM Handle type. Valid only for Jetson. */
NVBUF_MEM_HANDLE,
/** Specifies memory allocated by malloc(). */
NVBUF_MEM_SYSTEM,
} NvBufSurfaceMemType;
/**
* Holds the planewise parameters of a buffer.
*/
typedef struct NvBufSurfacePlaneParams
{
/** Holds the number of planes. */
uint32_t num_planes;
/** Holds the widths of planes. */
uint32_t width[NVBUF_MAX_PLANES];
/** Holds the heights of planes. */
uint32_t height[NVBUF_MAX_PLANES];
/** Holds the pitches of planes in bytes. */
uint32_t pitch[NVBUF_MAX_PLANES];
/** Holds the offsets of planes in bytes. */
uint32_t offset[NVBUF_MAX_PLANES];
/** Holds the sizes of planes in bytes. */
uint32_t psize[NVBUF_MAX_PLANES];
/** Holds the number of bytes occupied by a pixel in each plane. */
uint32_t bytesPerPix[NVBUF_MAX_PLANES];
void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
} NvBufSurfacePlaneParams;
/**
* Holds parameters required to allocate an \ref NvBufSurface.
*/
typedef struct NvBufSurfaceCreateParams {
/** Holds the GPU ID. Valid only for a multi-GPU system. */
uint32_t gpuId;
/** Holds the width of the buffer. */
uint32_t width;
/** Holds the height of the buffer. */
uint32_t height;
/** Holds the amount of memory to be allocated. Optional; if set, all other
parameters (width, height, etc.) are ignored. */
uint32_t size;
/** Holds a "contiguous memory" flag. If set, contiguous memory is allocated
for the batch. Valid only for CUDA memory types. */
bool isContiguous;
/** Holds the color format of the buffer. */
NvBufSurfaceColorFormat colorFormat;
/** Holds the surface layout. May be Block Linear (BL) or Pitch Linear (PL).
For a dGPU, only PL is valid. */
NvBufSurfaceLayout layout;
/** Holds the type of memory to be allocated. */
NvBufSurfaceMemType memType;
} NvBufSurfaceCreateParams;
/**
* Holds pointers for a mapped buffer.
*/
typedef struct NvBufSurfaceMappedAddr {
/** Holds planewise pointers to a CPU mapped buffer. */
void * addr[NVBUF_MAX_PLANES];
/** Holds a pointer to a mapped EGLImage. */
void *eglImage;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceMappedAddr;
/**
* Holds information about a single buffer in a batch.
*/
typedef struct NvBufSurfaceParams {
/** Holds the width of the buffer. */
uint32_t width;
/** Holds the height of the buffer. */
uint32_t height;
/** Holds the pitch of the buffer. */
uint32_t pitch;
/** Holds the color format of the buffer. */
NvBufSurfaceColorFormat colorFormat;
/** Holds BL or PL. For dGPU, only PL is valid. */
NvBufSurfaceLayout layout;
/** Holds a DMABUF FD. Valid only for \ref NVBUF_MEM_SURFACE_ARRAY and
\ref NVBUF_MEM_HANDLE type memory. */
uint64_t bufferDesc;
/** Holds the amount of allocated memory. */
uint32_t dataSize;
/** Holds a pointer to allocated memory. Not valid for
\ref NVBUF_MEM_SURFACE_ARRAY or \ref NVBUF_MEM_HANDLE. */
void * dataPtr;
/** Holds planewise information (width, height, pitch, offset, etc.). */
NvBufSurfacePlaneParams planeParams;
/** Holds pointers to mapped buffers. Initialized to NULL
when the structure is created. */
NvBufSurfaceMappedAddr mappedAddr;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceParams;
/**
 * Holds information about batched buffers.
 */
typedef struct NvBufSurface {
/** Holds a GPU ID. Valid only for a multi-GPU system. */
uint32_t gpuId;
/** Holds the batch size. */
uint32_t batchSize;
/** Holds the number of valid and filled buffers. Initialized to zero when
an instance of the structure is created. */
uint32_t numFilled;
/** Holds an "is contiguous" flag. If set, memory allocated for the batch
is contiguous. */
bool isContiguous;
/** Holds the type of memory for buffers in the batch. */
NvBufSurfaceMemType memType;
/** Holds a pointer to an array of batched buffers. */
NvBufSurfaceParams *surfaceList;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurface;
/**
* \brief Allocates a batch of buffers.
*
* Allocates memory for \a batchSize buffers and returns a pointer to an
* allocated \ref NvBufSurface. The \a params structure must have
* the allocation parameters of a single buffer. If \a params.size
* is set, a buffer of that size is allocated, and all other
* parameters (width, height, color format, etc.) are ignored.
*
* Call NvBufSurfaceDestroy() to free resources allocated by this function.
*
* @param[out] surf An indirect pointer to the allocated batched
* buffers.
* @param[in] batchSize Batch size of buffers.
* @param[in] params A pointer to an \ref NvBufSurfaceCreateParams
* structure.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceCreate (NvBufSurface **surf, uint32_t batchSize,
NvBufSurfaceCreateParams *params);
/**
* \brief Frees batched buffers previously allocated by NvBufSurfaceCreate().
*
* @param[in] surf A pointer to an \ref NvBufSurface to be freed.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceDestroy (NvBufSurface *surf);
/**
* \brief Maps hardware batched buffers to the HOST or CPU address space.
*
* Valid for \ref NVBUF_MEM_CUDA_UNIFIED type memory for dGPU and
* \ref NVBUF_MEM_SURFACE_ARRAY and \ref NVBUF_MEM_HANDLE type memory for
* Jetson.
*
* This function fills an array of pointers at
* \a surf->surfaceList->mappedAddr->addr.
* \a surf is a pointer to an \ref NvBufSurface.
* \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
* \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
* \a addr is declared as an array of pointers to void, and holds pointers
* to the buffers.
*
* The client must call NvBufSurfaceSyncForCpu() with the virtual address
* populated by this function before accessing mapped memory in the CPU.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and the hardware device as
* follows:
* - CPU: If the CPU modifies mapped memory, the client must call
* NvBufSurfaceSyncForDevice() before any hardware device accesses the memory.
* - Hardware device: If a hardware device modifies mapped memory, the client
* must call NvBufSurfaceSyncForCpu() before the CPU accesses the memory.
*
* Use NvBufSurfaceUnMap() to unmap buffer(s) and release any resource.
*
* @param[in,out] surf A pointer to an NvBufSurface structure. The function
* stores pointers to the buffers in a descendant of this
* structure; see the notes above.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in buffer. -1 refers to all planes
* in the buffer.
* @param[in] type A flag for mapping type.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceMap (NvBufSurface *surf, int index, int plane, NvBufSurfaceMemMapFlags type);
/**
* \brief Unmaps previously mapped buffer(s).
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 indicates
* all buffers in the batch.
* @param[in] plane Index of a plane in the buffer. -1 indicates
* all planes in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceUnMap (NvBufSurface *surf, int index, int plane);
/**
* \brief Copies the content of source batched buffer(s) to destination
* batched buffer(s).
*
* You can use this function to copy source buffer(s) of one memory type
* to destination buffer(s) of another memory type,
* e.g. CUDA host to CUDA device, malloc'ed memory to CUDA device, etc.
*
* The source and destination \ref NvBufSurface objects must have same
* buffer and batch size.
*
* @param[in] srcSurf A pointer to the source NvBufSurface structure.
* @param[in] dstSurf A pointer to the destination NvBufSurface structure.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
/**
* \brief Syncs the hardware memory cache for the CPU.
*
* Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
* \ref NVBUF_MEM_HANDLE.
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of the buffer in the batch. -1 refers to
* all buffers in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceSyncForCpu (NvBufSurface *surf, int index, int plane);
/**
* \brief Syncs the hardware memory cache for the device.
*
* Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
* \ref NVBUF_MEM_HANDLE.
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceSyncForDevice (NvBufSurface *surf, int index, int plane);
/**
 * \brief Gets the \ref NvBufSurface from the DMABUF FD.
 *
 * @param[in]  dmabuf_fd DMABUF FD of the buffer.
 * @param[out] buffer    A pointer to a location where the function stores a
 *                       pointer to the \ref NvBufSurface associated with
 *                       @a dmabuf_fd.
 *
 * @return 0 if successful, or -1 otherwise.
 */
int NvBufSurfaceFromFd (int dmabuf_fd, void **buffer);
/**
* \brief Fills each byte of the buffer(s) in an \ref NvBufSurface with a
* provided value.
*
* You can also use this function to reset the buffer(s) in the batch.
*
* @param[in] surf A pointer to the NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
* @param[in] value The value to be used as fill.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceMemSet (NvBufSurface *surf, int index, int plane, uint8_t value);
/**
 * \brief Creates an EGLImage from the memory of one or more
 * \ref NvBufSurface buffers.
 *
 * Only memory type \ref NVBUF_MEM_SURFACE_ARRAY is supported.
 *
 * This function returns the created EGLImage by storing its address at
 * \a surf->surfaceList->mappedAddr->eglImage. (\a surf is a pointer to
 * an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
 * \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
 * \a eglImage is declared as a pointer to void, and holds an
 * EGLImageKHR.)
 *
 * You can use this function in scenarios where a CUDA operation on Jetson
 * hardware memory (identified by \ref NVBUF_MEM_SURFACE_ARRAY) is required.
 * The EGLImageKHR struct provided by this function can then be registered
 * with CUDA for further CUDA operations.
 *
 * @param[in,out] surf A pointer to an NvBufSurface structure. The function
 *                  stores a pointer to the created EGLImage in
 *                  a descendant of this structure; see the notes above.
 * @param[in] index Index of a buffer in the batch. -1 specifies all buffers
 *                  in the batch.
 *
 * @return 0 if successful, or -1 otherwise.
 */
int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
/**
* \brief Destroys the previously created EGLImage object(s).
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index The index of a buffer in the batch. -1 specifies all
* buffers in the batch.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NVBUFSURFACE_H_ */

View File

@ -0,0 +1,282 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file nvbufsurftransform.h
* <b>NvBufSurfTransform Interface </b>
*
* This file specifies the NvBufSurfTransform image transformation APIs.
*
* The NvBufSurfTransform API provides methods to set and get session parameters
* and to transform and composite APIs.
*/
#ifndef NVBUFSURFTRANSFORM_H_
#define NVBUFSURFTRANSFORM_H_
#include <cuda.h>
#include <cuda_runtime.h>
#include "nvbufsurface.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @defgroup ds_bbb NvBufSurfTransform Types and Functions
* Defines types and functions of the \ref NvBufSurfTransform
* application programming interface.
* @ingroup ds_nvbuf_api
* @{ */
/**
 * Specifies compute devices used by \ref NvBufSurfTransform.
 */
typedef enum
{
/** Specifies VIC as the compute device for Jetson, or the dGPU for an
 x86_64 system. */
NvBufSurfTransformCompute_Default,
/** Specifies that the GPU is the compute device. */
NvBufSurfTransformCompute_GPU,
/** Specifies that VIC is the compute device. Supported only for Jetson. */
NvBufSurfTransformCompute_VIC
} NvBufSurfTransform_Compute;
/**
 * Specifies video flip methods. Supported only for Jetson.
 */
typedef enum
{
/** Specifies no video flip. */
NvBufSurfTransform_None,
/** Specifies rotating 90 degrees clockwise. */
NvBufSurfTransform_Rotate90,
/** Specifies rotating 180 degrees clockwise. */
NvBufSurfTransform_Rotate180,
/** Specifies rotating 270 degrees clockwise. */
NvBufSurfTransform_Rotate270,
/** Specifies video flip with respect to the X-axis. */
NvBufSurfTransform_FlipX,
/** Specifies video flip with respect to the Y-axis. */
NvBufSurfTransform_FlipY,
/** Specifies video flip transpose. */
NvBufSurfTransform_Transpose,
/** Specifies video flip inverse transpose. */
NvBufSurfTransform_InvTranspose,
} NvBufSurfTransform_Flip;
/**
 * Specifies video interpolation methods.
 */
typedef enum
{
/** Specifies Nearest Interpolation Method interpolation. */
NvBufSurfTransformInter_Nearest = 0,
/** Specifies Bilinear Interpolation Method interpolation. */
NvBufSurfTransformInter_Bilinear,
/** Specifies GPU-Cubic, VIC-5 Tap interpolation. */
NvBufSurfTransformInter_Algo1,
/** Specifies GPU-Super, VIC-10 Tap interpolation. */
NvBufSurfTransformInter_Algo2,
/** Specifies GPU-Lanczos, VIC-Smart interpolation. */
NvBufSurfTransformInter_Algo3,
/** Specifies GPU-Ignored, VIC-Nicest interpolation. */
NvBufSurfTransformInter_Algo4,
/** Specifies GPU-Nearest, VIC-Nearest interpolation. */
NvBufSurfTransformInter_Default
} NvBufSurfTransform_Inter;
/**
* Specifies error codes returned by \ref NvBufSurfTransform functions.
*/
typedef enum
{
/** Specifies an error in source or destination ROI. */
NvBufSurfTransformError_ROI_Error = -4,
/** Specifies invalid input parameters. */
NvBufSurfTransformError_Invalid_Params = -3,
/** Specifies a runtime execution error. */
NvBufSurfTransformError_Execution_Error = -2,
/** Specifies an unsupported feature or format. */
NvBufSurfTransformError_Unsupported = -1,
/** Specifies a successful operation. */
NvBufSurfTransformError_Success = 0
} NvBufSurfTransform_Error;
/**
* Specifies transform types.
*/
typedef enum {
/** Specifies a transform to crop the source rectangle. */
NVBUFSURF_TRANSFORM_CROP_SRC = 1,
/** Specifies a transform to crop the destination rectangle. */
NVBUFSURF_TRANSFORM_CROP_DST = 1 << 1,
/** Specifies a transform to set the filter type. */
NVBUFSURF_TRANSFORM_FILTER = 1 << 2,
/** Specifies a transform to set the flip method. */
NVBUFSURF_TRANSFORM_FLIP = 1 << 3,
} NvBufSurfTransform_Transform_Flag;
/**
* Specifies types of composition operations.
*/
typedef enum {
/** Specifies a flag to describe the requested compositing operation. */
NVBUFSURF_TRANSFORM_COMPOSITE = 1,
} NvBufSurfTransform_Composite_Flag;
/**
* Holds the coordinates of a rectangle.
*/
typedef struct
{
/** Holds the rectangle top. */
uint32_t top;
/** Holds the rectangle left side. */
uint32_t left;
/** Holds the rectangle width. */
uint32_t width;
/** Holds the rectangle height. */
uint32_t height;
}NvBufSurfTransformRect;
/**
 * Holds configuration parameters for a transform/composite session.
 */
typedef struct _NvBufSurfTransformConfigParams
{
/** Holds the mode of operation: VIC (Jetson) or GPU (iGPU + dGPU)
 If VIC is configured, \a gpu_id is ignored. */
NvBufSurfTransform_Compute compute_mode;
/** Holds the GPU ID to be used for processing. */
int32_t gpu_id;
/** Holds the user-configured CUDA stream to be used. If NULL, the default
 CUDA stream is used. Ignored if VIC is used. */
cudaStream_t cuda_stream;
} NvBufSurfTransformConfigParams;
/**
* Holds transform parameters for a transform call.
*/
typedef struct _NvBufSurfaceTransformParams
{
/** Holds a flag that indicates which transform parameters are valid. */
uint32_t transform_flag;
/** Holds the flip method. */
NvBufSurfTransform_Flip transform_flip;
/** Holds a transform filter. */
NvBufSurfTransform_Inter transform_filter;
/** Holds a pointer to a list of source rectangle coordinates for
a crop operation. */
NvBufSurfTransformRect *src_rect;
/** Holds a pointer to list of destination rectangle coordinates for
a crop operation. */
NvBufSurfTransformRect *dst_rect;
}NvBufSurfTransformParams;
/**
* Holds composite parameters for a composite call.
*/
typedef struct _NvBufSurfTransformCompositeParams
{
/** Holds a flag that indicates which composition parameters are valid. */
uint32_t composite_flag;
/** Holds the number of input buffers to be composited. */
uint32_t input_buf_count;
/** Holds source rectangle coordinates of input buffers for compositing. */
NvBufSurfTransformRect *src_comp_rect;
/** Holds destination rectangle coordinates of input buffers for
compositing. */
NvBufSurfTransformRect *dst_comp_rect;
}NvBufSurfTransformCompositeParams;
/**
* \brief Sets user-defined session parameters.
*
* If user-defined session parameters are set, they override the
* NvBufSurfTransform() function's default session.
*
* @param[in] config_params A pointer to a structure that is populated
* with the session parameters to be used.
*
* @return An \ref NvBufSurfTransform_Error value indicating
* success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransformSetSessionParams
(NvBufSurfTransformConfigParams *config_params);
/**
* \brief Gets the session parameters used by NvBufSurfTransform().
*
* @param[out] config_params A pointer to a caller-allocated structure to be
* populated with the session parameters used.
*
* @return An \ref NvBufSurfTransform_Error value indicating
* success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransformGetSessionParams
(NvBufSurfTransformConfigParams *config_params);
/**
* \brief Performs a transformation on batched input images.
*
* If user-defined session parameters are to be used, call
* NvBufSurfTransformSetSessionParams() before calling this function.
*
* @param[in] src A pointer to input batched buffers to be transformed.
* @param[out] dst A pointer to a caller-allocated location where
* transformed output is to be stored.
* @par When destination cropping is performed, memory outside
* the crop location is not touched, and may contain stale
* information. The caller must perform a memset before
* calling this function if stale information must be
* eliminated.
* @param[in] transform_params
* A pointer to an \ref NvBufSurfTransformParams structure
* which specifies the type of transform to be performed. They
* may include any combination of scaling, format conversion,
* and cropping for both source and destination.
* Flipping and rotation are supported on VIC.
* @return An \ref NvBufSurfTransform_Error value indicating
* success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransform (NvBufSurface *src, NvBufSurface *dst,
NvBufSurfTransformParams *transform_params);
/**
 * \brief Composites batched input images.
 *
 * The compositor scales and stitches
 * batched buffers indicated by \a src into a single destination buffer, \a dst.
 *
 * If user-defined session parameters are to be used, call
 * NvBufSurfTransformSetSessionParams() before calling this function.
 *
 * @param[in] src  A pointer to input batched buffers to be transformed.
 * @param[out] dst A pointer to a caller-allocated location (a single buffer)
 *                 where composited output is to be stored.
 * @param[in] composite_params
 *                 A pointer to an \ref NvBufSurfTransformCompositeParams
 *                 structure which specifies the compositing operation to be
 *                 performed, e.g., the source and destination rectangles
 *                 in \a src and \a dst.
 * @return An \ref NvBufSurfTransform_Error value indicating success or failure.
 */
NvBufSurfTransform_Error NvBufSurfTransformComposite (NvBufSurface *src,
NvBufSurface *dst, NvBufSurfTransformCompositeParams *composite_params);
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines analytics metadata </b>
*
* @b Description: This file defines metadata concerning nvdsanalytics plugin.
*/
/**
* @defgroup NvDsMetaApi Metadata APIs
*
* This section describes types and functions of Metadata APIs
* programming interface.
*
*/
#ifndef _NVDS_ANALYTICS_META_H_
#define _NVDS_ANALYTICS_META_H_
#include <gst/gst.h>
#include <vector>
#include <unordered_map>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* @defgroup ee_analytics_group Analytics Metadata
* Defines metadata concerning nvdsanalytics plugin.
* @ingroup NvDsMetaApi
* @{
*/
#define NVDS_USER_FRAME_META_NVDSANALYTICS (nvds_get_user_meta_type((gchar*)"NVIDIA.DSANALYTICSFRAME.USER_META"))
#define NVDS_USER_OBJ_META_NVDSANALYTICS (nvds_get_user_meta_type((gchar*)"NVIDIA.DSANALYTICSOBJ.USER_META"))
/**
* Holds a set of nvdsanalytics object level metadata.
*/
typedef struct
{
/** Holds the array of ROI labels in which object is present */
std::vector <std::string> roiStatus;
/** Holds the array of OverCrowding labels in which object is present */
std::vector <std::string> ocStatus;
/** Holds the array of line crossing labels which object has crossed */
std::vector <std::string> lcStatus;
/** Holds the direction string for the tracked object */
std::string dirStatus;
/** Holds unique identifier for nvdsanalytics instance */
guint unique_id;
} NvDsAnalyticsObjInfo;
/**
* Holds a set of nvdsanalytics framelevel metadata.
*/
typedef struct
{
/** Holds a map of boolean status of overcrowding for configured ROIs,
* which can be accessed using key, value pair; where key is the ROI label
*/
std::unordered_map<std::string, bool> ocStatus;
/** Holds a map of total count of valid objects in ROI for configured ROIs,
* which can be accessed using key, value pair; where key is the ROI label
*/
std::unordered_map<std::string, uint32_t> objInROIcnt;
/** Holds a map of total count of Line crossing in current frame for configured lines,
* which can be accessed using key, value pair; where key is the line crossing label
*/
std::unordered_map<std::string, uint64_t> objLCCurrCnt;
/** Holds a map of total cumulative count of Line crossing for configured lines,
* can be accessed using key, value pair; where key is the line crossing label
*/
std::unordered_map<std::string, uint64_t> objLCCumCnt;
/** Holds unique identifier for nvdsanalytics instance */
guint unique_id;
/** Holds a map of total count of objects for each class ID,
* can be accessed using key, value pair; where key is class ID
*/
std::unordered_map<int, uint32_t> objCnt;
} NvDsAnalyticsFrameMeta;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines dewarping metadata </b>
*
* @b Description: This file defines metadata concerning dewarping.
*/
/**
* @defgroup ee_dewarping_group Dewarping Metadata
* Defines metadata concerning dewarping.
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDS_DEWARPER_META_H_
#define _NVDS_DEWARPER_META_H_
#include <gst/gst.h>
#ifdef __cplusplus
extern "C"
{
#endif
/*
* Defines DeepStream Dewarper metadata.
*/
/**
* Maximum number of dewarped surfaces per frame supported
*/
#define MAX_DEWARPED_VIEWS 4
/**
 * Defines metadata surface types.
 */
typedef enum
{
/** Defines no surface type (no dewarping). */
NVDS_META_SURFACE_NONE=0,
/** Defines the pushbroom surface type. */
NVDS_META_SURFACE_FISH_PUSHBROOM=1,
/** Defines the vertical radial cylindrical surface type. */
NVDS_META_SURFACE_FISH_VERTCYL=2,
/** Defines the perspective projection with Brown distortion model surface. */
NVDS_META_SURFACE_PERSPECTIVE_PERSPECTIVE=3,
} NvDsSurfaceType;
/**
* Holds a set of dewarper metadata.
*/
typedef struct _NvDewarperSurfaceMeta {
/** Holds an array of the types of dewarped surfaces. */
guint type[MAX_DEWARPED_VIEWS];
/** Holds an array of indices of dewarped surfaces. */
guint index[MAX_DEWARPED_VIEWS];
/** Holds the source ID of the frame, e.g. the camera ID. */
guint source_id;
/** Holds the number of filled surfaces in the frame. */
guint num_filled_surfaces;
}NvDewarperSurfaceMeta;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,143 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines Latency API</b>
*
* @b Description: This file defines an API for measuring module latency.
*/
/**
* @defgroup ee_nvlatency_group Latency Measurement API
* Defines an API for measuring latency in modules.
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDSMETA_LATENCY_H_
#define _NVDSMETA_LATENCY_H_
#include "glib.h"
#include "gmodule.h"
#include "nvdsmeta.h"
#define MAX_COMPONENT_LEN 64
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Holds information about latency of the given component
*/
typedef struct _NvDsMetaCompLatency {
/** Holds the name of the component for which latency is measured. */
gchar component_name[MAX_COMPONENT_LEN];
/** Holds the system timestamp of the buffer when it arrives
at the input of the component. */
gdouble in_system_timestamp;
/** Holds the system timestamp of buffer when it
is sent to the downstream component. */
gdouble out_system_timestamp;
/** Holds the source ID of the component, e.g. the camera ID. */
guint source_id;
/** Holds the current frame number for which latency is measured. */
guint frame_num;
/** Holds the pad or port index of the stream muxer for the frame
in the batch. */
guint pad_index;
}NvDsMetaCompLatency;
/**
* Holds information about the latency of a given frame.
*/
typedef struct
{
/** Holds the source ID of the component, e.g. the camera ID. */
guint source_id;
/** Holds the current frame number for which latency is measured. */
guint frame_num;
/** Holds the system timestamp of the buffer when it arrives
at the input of the first component in the pipeline. By default,
the decoder is considered to be the first component in the pipeline. */
gdouble comp_in_timestamp;
/** Holds the latency of the frame in milliseconds. */
gdouble latency;
} NvDsFrameLatencyInfo;
/**
* Sets the system timestamp when the Gst Buffer arrives as input at the
* component.
*
* @param[in] buffer A pointer to the arriving Gst Buffer.
* @param[in] element_name A pointer to the name of the component for which
* latency is to be measured.
*
* @returns A pointer to a @ref NvDsUserMeta structure which holds an
* @ref NvDsMetaCompLatency as @a NvDsUserMeta.user_meta_data.
*/
NvDsUserMeta *nvds_set_input_system_timestamp(GstBuffer * buffer,
gchar *element_name);
/**
 * \brief Sets the system timestamp when a Gst Buffer is pushed
 * to the downstream component.
 *
 * This is a corresponding function to nvds_set_input_system_timestamp().
 *
 * @param[in] buffer A pointer to a Gst Buffer to be pushed
 * to the downstream component.
 * @param[in] element_name A pointer to the name of the component for which
 * latency is to be measured.
 *
 * @returns TRUE if the timestamp is attached successfully, or FALSE otherwise.
 */
gboolean nvds_set_output_system_timestamp(GstBuffer * buffer, gchar *element_name);
/**
 * \brief Measures the latency of all frames present in the current batch.
 *
 * The latency is computed from decoder input up to the point this API is called.
 * You can install the probe on either pad of the component and call
 * this function to measure the latency.
 *
 * @param[in] buf A pointer to a Gst Buffer to which
 * @ref NvDsBatchMeta is attached as metadata.
 * @param[out] latency_info A pointer to an NvDsFrameLatencyInfo structure
 * allocated for a batch of this size. The function
 * fills it with information about all of the sources.
 *
 * @returns The number of @ref NvDsFrameLatencyInfo entries filled in
 * @a latency_info — presumably one per source in the batch; TODO confirm
 * against the implementation.
 */
guint nvds_measure_buffer_latency(GstBuffer *buf,
NvDsFrameLatencyInfo *latency_info);
/**
* Indicates whether the environment variable
* @c NVDS_ENABLE_LATENCY_MEASUREMENT is exported.
*
* @returns True if the environment variable is exported, or false otherwise.
*/
gboolean nvds_get_enable_latency_measurement(void);
/**
* Defines a pseudo-variable whose value is the return value of
* @ref nvds_get_enable_latency_measurement(). It indicates whether latency
* measurement is enabled.
*/
#define nvds_enable_latency_measurement (nvds_get_enable_latency_measurement())
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2018-2020 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream logging API</b>
*
* @b Description: This file defines an API for logging and debugging
* DeepStream applications.
*/
/**
* @defgroup ee_logging_group Logging API
*
* Defines an API for logging and debugging DeepStream applications.
*
* @ingroup NvDsLoggerApi
* @{
*/
#ifndef NVDS_LOGGER_H
#define NVDS_LOGGER_H
#include <syslog.h>
#define DSLOG_SYSLOG_IDENT "DSLOG"
//define additional categories here
#define DSLOG_CAT_CR "CR"
#define DSLOG_CAT_SG "SG"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Opens a connection to the logger.
 *
 * This function must be called once per DeepStream application execution,
 * prior to use of the logger.
 */
void nvds_log_open(void);
/**
 * @brief Closes a connection to the logger.
 *
 * Call once at application shutdown, after all nvds_log() calls.
 */
void nvds_log_close(void);
// Category is user (application) defined; priority is based on syslog levels
// data is message to be logged
/**
 * Logs a message to a location determined by the setup script.
 *
 * @param[in] category A pointer to a string which specifies the category of
 * this message. Categories are user-defined.
 * @param[in] priority Severity of the event to be logged, based on syslog
 * levels. For more information, see
 * ./src/utils/nvds_logger/README relative to the
 * directory that contains the DeepStream SDK.
 *
 * @param[in] data A pointer to a string containing the message. The
 * message may contain the format specifiers recognized
 * by %printf() in C/C++. @a data may be followed by an
 * arbitrary number of parameters that supply values for
 * the format specifiers.
 */
void nvds_log(const char *category, int priority, const char *data, ...);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA DeepStream mask utils API</b>
*
* @b Description: This file specifies the APIs used to transform mask buffers
*/
#ifndef _NVDS_MAKS_UTILS_H_
#define _NVDS_MAKS_UTILS_H_
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * @brief resize FP32 Tensor and apply threshold to create INT32 binary tensor
 * Output INT32 tensor pixels are assumed ARGB32
 * For resized pixels > threshold, transparency is set to 50%
 * and other pixels are set to 100% transparent; RGB = 0 for all pixels
 * @param src [IN/OUT] source FP32 tensor
 * @param dst [IN/OUT] dst INT32 (ARGB32) tensor
 * @param src_width [IN] source FP32 tensor width
 * @param src_height [IN] source FP32 tensor height
 * @param dst_width [IN] dst INT32 (ARGB32) tensor width
 * @param dst_height [IN] dst INT32 (ARGB32) tensor height
 * @param channel [IN] number of channels in the source tensor — TODO confirm
 * @param threshold [IN] threshold applied to resized source pixels
 * @param argb32_px [IN] The pixel value in dst when src pixel > threshold
 * @param interpolation [IN] The NPP interpolation method to use
 * Enumeration copied below:
 * NPPI_INTER_NN =1, Nearest neighbor filtering.
 * NPPI_INTER_LINEAR Linear interpolation.
 * NPPI_INTER_CUBIC Cubic interpolation.
 * NPPI_INTER_CUBIC2P_BSPLINE Two-parameter cubic filter (B=1, C=0)
 * NPPI_INTER_CUBIC2P_CATMULLROM Two-parameter cubic filter (B=0, C=1/2)
 * NPPI_INTER_CUBIC2P_B05C03 Two-parameter cubic filter (B=1/2, C=3/10)
 * NPPI_INTER_SUPER Super sampling.
 * NPPI_INTER_LANCZOS Lanczos filtering.
 * NPPI_INTER_LANCZOS3_ADVANCED Generic Lanczos filtering with order 3.
 * NPPI_SMOOTH_EDGE Smooth edge filtering.
 * @param stream [IN] The cuda-stream to use for scaling operation on GPU
 * @return presumably true on success and false on failure — TODO confirm
 */
bool nvds_mask_utils_resize_to_binary_argb32(float *src, uint32_t* dst,
uint32_t src_width, uint32_t src_height,
uint32_t dst_width, uint32_t dst_height,
uint32_t channel, float threshold,
uint32_t argb32_px, uint32_t interpolation,
cudaStream_t stream);
/**
* @brief resize FP32 Tensor and apply threshold to create INT8 binary tensor
* Output INT8 tensor pixels are assumed INT8
* For resized pixels > threshold, pixel = 1
* and other pixels are set to 0
* @param src [IN/OUT] source FP32 tensor
* @param dst [IN/OUT] dst INT8 (binary) tensor
* @param src_width [IN] source FP32 tensor width
* @param src_height [IN] source FP32 tensor height
* @param dst_width [IN] dst INT8 (binary) tensor width
* @param dst_height [IN] dst INT8 (binary) tensor height
* @param channel [IN]
* @param threshold [IN]
* @param interpolation [IN] The NPP interpolation method to use
* Enumeration copied below:
* NPPI_INTER_NN =1, Nearest neighbor filtering.
* NPPI_INTER_LINEAR Linear interpolation.
* NPPI_INTER_CUBIC Cubic interpolation.
* NPPI_INTER_CUBIC2P_BSPLINE Two-parameter cubic filter (B=1, C=0)
* NPPI_INTER_CUBIC2P_CATMULLROM Two-parameter cubic filter (B=0, C=1/2)
* NPPI_INTER_CUBIC2P_B05C03 Two-parameter cubic filter (B=1/2, C=3/10)
* NPPI_INTER_SUPER Super sampling.
* NPPI_INTER_LANCZOS Lanczos filtering.
* NPPI_INTER_LANCZOS3_ADVANCED Generic Lanczos filtering with order 3.
* NPPI_SMOOTH_EDGE Smooth edge filtering.
* @param stream [IN] The cuda-stream to use for scaling operation on GPU
*/
bool nvds_mask_utils_resize_to_binary_uint8(float *src, uint8_t* dst,
uint32_t src_width, uint32_t src_height,
uint32_t dst_width, uint32_t dst_height,
uint32_t channel, float threshold,
uint32_t interpolation,
cudaStream_t stream);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,224 @@
/*
* Copyright (c) 2018-2020 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>DeepStream Messaging API </b>
*
* @b Description: This file defines the DeepStream API for exchanging
* messages with remote entities and services.
*/
/**
* @defgroup ee_nvmessaging_group Messaging API
*
* Defines an API for exchanging messages with remote entities and services.
*
* @ingroup NvDsMsgApi
* @{
*/
#ifndef __NVDS_MSGAPI_H__
#define __NVDS_MSGAPI_H__
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdint.h>
/** Defines the handle used by messaging API functions. */
typedef void *NvDsMsgApiHandle;
/**
* Defines events associated with connections to remote entities.
*/
typedef enum {
/** Specifies that a connection attempt was successful. */
NVDS_MSGAPI_EVT_SUCCESS,
/** Specifies disconnection of a connection handle. */
NVDS_MSGAPI_EVT_DISCONNECT,
/** Specifies that the remote service is down. */
NVDS_MSGAPI_EVT_SERVICE_DOWN
} NvDsMsgApiEventType;
/**
 * Defines completion codes for operations in the messaging API.
 */
typedef enum {
/** The operation completed successfully. */
NVDS_MSGAPI_OK,
/** The operation failed. */
NVDS_MSGAPI_ERR,
/** The specified topic is not recognized by the adapter. */
NVDS_MSGAPI_UNKNOWN_TOPIC
} NvDsMsgApiErrorType;
/**
* Type definition for a "send" callback.
*
* @param[in] user_ptr A context pointer passed by async_send. The
* pointer may refer to any type of information
* that is useful to the callback.
* @param[in] completion_flag The completion code from a send operation.
*/
typedef void (*nvds_msgapi_send_cb_t)(void *user_ptr, NvDsMsgApiErrorType completion_flag);
/**
* @brief Type definition for callback registered during subscribe.
*
* This callback reports any event (success or error)
* during message consume
* If success, this callback reports the consumed message,
* on a subscribed topic
*
* @param[in] flag Message Consume Status
* @param[in] msg Received message/payload
* @param[in] msg_len Length of message/payload
* @param[in] topic Topic name where the message was received
* @param[in] user_ptr pointer passed during subscribe() for context
*/
typedef void (*nvds_msgapi_subscribe_request_cb_t)(NvDsMsgApiErrorType flag, void *msg, int msg_len, char *topic, void *user_ptr);
/**
* @brief Type definition for a "handle" callback.
*
* This callback reports any event (success or error)
* during a call to nvds_msgapi_connect().
*
* @param[in] h_ptr A handle for the event.
* @param[in] ds_evt Type of the event.
*/
typedef void (*nvds_msgapi_connect_cb_t)(NvDsMsgApiHandle h_ptr, NvDsMsgApiEventType ds_evt);
/**
* Connects to a remote agent by calling into a protocol adapter.
*
* @param[in] connection_str A connection string with format
* `"url;port;topic"`.
* @param[in] connect_cb A callback function for events associated with
* the connection.
* @param[in] config_path A pointer to the pathname of a configuration
* file passed to the protocol adapter.
* @return A connection handle.
*/
NvDsMsgApiHandle nvds_msgapi_connect(char *connection_str, nvds_msgapi_connect_cb_t connect_cb, char *config_path);
/**
* Sends a message synchronously over a connection.
*
* @param[in] h_ptr A connection handle.
* @param[in] topic A pointer to a string which specifies the topic
* to which to send the message.
* @param[in] payload A pointer to a byte array containing the message. The
* message may but need not be a NULL-terminated string.
* @param[in] nbuf The number of bytes of data to send, including the
* terminating NULL if the message is a string.
*
* @return A completion code for the send operation.
*/
NvDsMsgApiErrorType nvds_msgapi_send(NvDsMsgApiHandle h_ptr, char *topic, const uint8_t *payload, size_t nbuf);
/**
* Sends message asynchronously over a connection.
*
* @param[in] h_ptr A connection handle.
* @param[in] topic A pointer to a string which specifies the topic
* to which to send the message.
* @param[in] payload A pointer to a byte array containing the message.
* The message may but need not be a NULL-terminated
* string.
* @param[in] nbuf The number of bytes of data to send, including the
* terminating NULL if the message is a string.
* @param[in] send_callback A callback to be invoked when operation completes.
* @param[in] user_ptr A context pointer to pass to callback.
*
* @return A completion code for the send operation.
*/
NvDsMsgApiErrorType nvds_msgapi_send_async(NvDsMsgApiHandle h_ptr, char *topic, const uint8_t *payload, size_t nbuf, nvds_msgapi_send_cb_t send_callback, void *user_ptr);
/**
* Subscribe to a remote entity for receiving messages on a particular topic(s)
*
* @param[in] h_ptr Connection handle
* @param[in] topics Array of topics to subscribe for messages
* @param[in] num_topics num of topics
* @param[in] cb A pointer to a callback function for notifying the DS event handler
* @param[in] user_ctx user ptr to be passed to callback for context
*
* @return Status of the subscribe operation.
*/
NvDsMsgApiErrorType nvds_msgapi_subscribe (NvDsMsgApiHandle h_ptr, char ** topics, int num_topics, nvds_msgapi_subscribe_request_cb_t cb, void *user_ctx);
/**
 * Calls into the adapter to allow for execution of underlying protocol logic.
 * In this call the adapter is expected to service pending incoming and
 * outgoing messages. It can also perform periodic housekeeping tasks
 * such as sending heartbeats.
 *
 * This design gives the client control over when protocol logic gets executed.
 * The client must call it periodically, according to the individual adapter's
 * requirements.
 *
 * @param[in] h_ptr A connection handle.
 */
void nvds_msgapi_do_work(NvDsMsgApiHandle h_ptr);
/**
* Terminates a connection.
*
* @param[in] h_ptr The connection handle.
*
* @return A completion code for the "terminate" operation.
*/
NvDsMsgApiErrorType nvds_msgapi_disconnect(NvDsMsgApiHandle h_ptr);
/**
* Gets the version number of the messaging API interface supported by the
* protocol adapter.
*
* @return A pointer to a string that contains version number in
* `"major.minor"` format.
*/
char *nvds_msgapi_getversion(void);
/**
* Gets the name of the protocol used in the adapter.
*
* @return A pointer to a string
*/
char *nvds_msgapi_get_protocol_name(void);
/**
 * Fetch the connection signature by parsing broker_connection string and cfg file
 *
 * A connection signature is a unique string used to identify a connection.
 * It is generated by parsing all the connection params provided in broker_str and cfg file
 *
 * Connection signature can be retrieved only if the cfg option share-connection = 1
 *
 * @param[in] broker_str Broker connection string used to create connection
 * @param[in] cfg Path to config file
 * @param[out] output_str connection signature; receives a valid signature on
 * success, or the empty string ("") on error or if the
 * share-connection cfg option is not set to 1
 * @param[in] max_len max len of output_str
 *
 * @return Completion status of the operation (the signature itself is
 * returned via @a output_str, not the return value).
 */
NvDsMsgApiErrorType nvds_msgapi_connection_signature(char *broker_str, char *cfg, char *output_str, int max_len);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream: Object Encoder</b>
*/
/**
* @defgroup ee_object_encoder Object Encoder API
*
* Defines APIs for Object Encoder.
*
* @ingroup NvllBatchJpegEncodeinterfaceApi
* @{
*/
#ifndef __NVDS_ENCODE_OBJECT_H__
#define __NVDS_ENCODE_OBJECT_H__
#ifdef __cplusplus
extern "C"
{
#endif
#define FILE_NAME_SIZE (1024)
struct _NvDsObjEncCtx;
/** Opaque Handle to the Object Encoder Context */
typedef struct _NvDsObjEncCtx * NvDsObjEncCtxHandle;
/**
* Holds output parameters. This structure is available to the user.
*/
typedef struct _NvDsObjEncOutParams
{
/** Pointer to the JPEG Encoded Object */
uint8_t *outBuffer;
/** Length of the JPEG Encoded Object */
uint64_t outLen;
} NvDsObjEncOutParams;
/**
* Holds user parameters for a nvds_obj_enc_process call.
*/
typedef struct _NvDsObjEncUsrArgs
{
/** Boolean variable to save image */
bool saveImg;
/** Variable to attach user metadata.
* Metadata Type is "NVDS_CROP_IMAGE_META".
*/
bool attachUsrMeta;
/** If user specifies the filename then it'll be used otherwise the
* following naming convention is used to create filename of the encoded
* objects -
* "frame-number_stream-number_object-number_object-type_widthxheight.jpg".
* For example - 0_2_3_PERSON_126x148.jpg
*/
char fileNameImg[FILE_NAME_SIZE];
/** Object number in the frame */
int objNum;
} NvDsObjEncUsrArgs;
/** Create context and return a handle to NvObjEncCtx */
NvDsObjEncCtxHandle nvds_obj_enc_create_context (void);
/** Enqueue an object crop for JPEG encode.
* This is a non-blocking call and user should call nvds_obj_enc_finish()
* to make sure all enqueued object crops have been processed.
*/
bool nvds_obj_enc_process (NvDsObjEncCtxHandle, NvDsObjEncUsrArgs *,
NvBufSurface *, NvDsObjectMeta *, NvDsFrameMeta *);
/** Wait for all enqueued crops to be encoded */
void nvds_obj_enc_finish (NvDsObjEncCtxHandle);
/** Destroy context */
void nvds_obj_enc_destroy_context (NvDsObjEncCtxHandle);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA Optical Flow Metadata </b>
*
* @b Description: This file defines the optical flow metadata.
*/
/**
* @defgroup ee_opticalflow_meta Optical flow metadata
*
* Defines the optical flow metadata.
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDS_OPTICALFLOW_META_H_
#define _NVDS_OPTICALFLOW_META_H_
#include <gst/gst.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Holds motion vector information about an element.
*/
typedef struct _NvOFFlowVector
{
/** Holds the motion vector X component. */
gshort flowx;
/** Holds the motion vector Y component. */
gshort flowy;
} NvOFFlowVector;
/**
* Holds optical flow metadata about a frame.
*/
typedef struct
{
/** Holds the number of rows in the frame for a given block size,
e.g. if block size is 4 and frame height is 720, then the number of
rows is (720/4) = 180. */
guint rows;
/** Holds the number of columns in the frame for given block size,
e.g. if block size is 4 and frame width is 1280, then the number of
columns is (1280/4) = 320. */
guint cols;
/** Holds the size of the motion vector. @see NvOFFlowVector. */
guint mv_size;
/** Holds the current frame number of the source. */
gulong frame_num;
/** Holds a pointer to the motion vector. */
void *data;
/** Reserved for internal use. */
void *priv;
/** Reserved for internal use. */
void *reserved;
} NvDsOpticalFlowMeta;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines Tracker Metadata</b>
*/
/**
* @defgroup ee_tracker_group Tracker Metadata
*
* Specifies metadata concerning tracking.
*
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDS_TRACKER_META_H_
#define _NVDS_TRACKER_META_H_
#include <stdint.h>
#include "nvll_osd_struct.h"
#include "nvdsmeta.h"
#ifdef __cplusplus
extern "C"
{
#endif
typedef struct _NvDsPastFrameObj
{
/** Frame number in which this object appeared. */
uint32_t frameNum;
/** Bounding box of the tracked object in this frame. */
NvOSD_RectParams tBbox;
/** Tracker confidence for this object in this frame. */
float confidence;
/** Age of the track at this frame — presumably in frames; TODO confirm. */
uint32_t age;
} NvDsPastFrameObj;
/**
* One object in several past frames
*/
typedef struct _NvDsPastFrameObjList
{
/** Pointer to past frame info of this object. */
NvDsPastFrameObj *list;
/** Number of frames this object appeared in the past. */
uint32_t numObj;
/** Object tracking id. */
uint64_t uniqueId;
/** Object class id. */
uint16_t classId;
/** An array of the string describing the object class. */
gchar objLabel[MAX_LABEL_SIZE];
} NvDsPastFrameObjList;
/**
* List of objects in each stream
* */
typedef struct _NvDsPastFrameObjStream
{
NvDsPastFrameObjList *list; /**< Pointer to objects inside this stream. */
uint32_t streamID; /**< Stream id the same as frame_meta->pad_index. */
uint64_t surfaceStreamID; /**< Stream id used inside tracker plugin. */
uint32_t numAllocated; /**< Maximum number of objects allocated. */
uint32_t numFilled; /**< Number of objects in this frame. */
} NvDsPastFrameObjStream;
/**
* Batch of lists of buffered objects
*/
typedef struct _NvDsPastFrameObjBatch
{
NvDsPastFrameObjStream *list; /**< Pointer to array of stream lists. */
uint32_t numAllocated; /**< Number of blocks allocated for the list. */
uint32_t numFilled; /**< Number of filled blocks in the list. */
} NvDsPastFrameObjBatch;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA DeepStream version API</b>
*
* @b Description: This file specifies the APIs used to view the version of
* NVIDIA DEEPSTREAM and its dependencies, such as TensorRT, CUDA and cuDNN.
*/
/**
* @defgroup ee_version Version Number API
*
* Defines the API used to get the current version number of DeepStream and
* its dependencies.
*
* @ingroup NvDsUtilsApi
* @{
*/
#ifndef _NVDS_VERSION_H_
#define _NVDS_VERSION_H_
#define NVDS_VERSION_MAJOR 5
#define NVDS_VERSION_MINOR 0
#define NVDS_VERSION_MICRO 0
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Get the DEEPSTREAM_SDK major and minor version
 * numbers and return them in major and minor variable pointers.
 *
 * @param[out] major Receives the major part of DEEPSTREAM_SDK version.
 * @param[out] minor Receives the minor part of DEEPSTREAM_SDK version.
 */
void nvds_version (unsigned int * major, unsigned int * minor);
/**
* Print the version as major.minor.
* To obtain major and minor, this function calls @ref nvds_version.
*/
void nvds_version_print (void);
/**
* Print the versions of dependencies such as Cuda, cuDNN and TensorRT.
*/
void nvds_dependencies_version_print (void);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>Defines NVIDIA DeepStream GStreamer Utilities</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer utility
* functions.
*
*/
/**
* @defgroup gstreamer_utils Utilities: Gstreamer utilities API
*
* Specifies GStreamer utilities functions, used to configure the source to generate NTP Sync values.
*
* @ingroup NvDsUtilsApi
* @{
*/
#ifndef __NVDS_GSTUTILS_H__
#define __NVDS_GSTUTILS_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <gst/gst.h>
/**
 * Configure the source to generate NTP sync values for RTSP sources.
 *
 * These values are used by the DeepStream GStreamer element NvStreamMux to
 * calculate the NTP time of the frames at the source.
 *
 * This functionality is dependent on the RTSP source sending the RTCP
 * Sender Reports.
 *
 * This function only works for RTSP sources i.e. GStreamer elements "rtspsrc"
 * or "uridecodebin" with an RTSP uri.
 *
 * @param[in] src_elem GStreamer source element to be configured.
 */
void configure_source_for_ntp_sync (GstElement *src_elem);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,293 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream inference specifications </b>
*
* @b Description: This file defines common elements used in the API
* exposed by the Gst-nvinfer plugin.
*/
/**
* @defgroup ee_nvinf Gst-infer API Common Elements
*
* Defines common elements used in the API exposed by the Gst-inference plugin.
* @ingroup NvDsInferApi
* @{
*/
#ifndef _NVDSINFER_H_
#define _NVDSINFER_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C"
{
#endif
#define NVDSINFER_MAX_DIMS 8
#define _DS_DEPRECATED_(STR) __attribute__ ((deprecated (STR)))
/**
* Holds the dimensions of a layer.
*/
typedef struct
{
/** Holds the number of dimensions in the layer.*/
unsigned int numDims;
/** Holds the size of the layer in each dimension. */
unsigned int d[NVDSINFER_MAX_DIMS];
/** Holds the number of elements in the layer, including all dimensions.*/
unsigned int numElements;
} NvDsInferDims;
/**
* Holds the dimensions of a three-dimensional layer.
*/
typedef struct
{
/** Holds the channel count of the layer.*/
unsigned int c;
/** Holds the height of the layer.*/
unsigned int h;
/** Holds the width of the layer.*/
unsigned int w;
} NvDsInferDimsCHW;
/**
* Specifies the data type of a layer.
*/
typedef enum
{
/** Specifies FP32 format. */
FLOAT = 0,
/** Specifies FP16 format. */
HALF = 1,
/** Specifies INT8 format. */
INT8 = 2,
/** Specifies INT32 format. */
INT32 = 3
} NvDsInferDataType;
/**
* Holds information about one layer in the model.
*/
typedef struct
{
/** Holds the data type of the layer. */
NvDsInferDataType dataType;
/** Holds the dimensions of the layer. */
union {
NvDsInferDims inferDims;
NvDsInferDims dims _DS_DEPRECATED_("dims is deprecated. Use inferDims instead");
};
/** Holds the TensorRT binding index of the layer. */
int bindingIndex;
/** Holds the name of the layer. */
const char* layerName;
/** Holds a pointer to the buffer for the layer data. */
void *buffer;
/** Holds a Boolean; true if the layer is an input layer,
or false if an output layer. */
int isInput;
} NvDsInferLayerInfo;
/**
* Holds information about the model network.
*/
typedef struct
{
/** Holds the input width for the model. */
unsigned int width;
/** Holds the input height for the model. */
unsigned int height;
/** Holds the number of input channels for the model. */
unsigned int channels;
} NvDsInferNetworkInfo;
/**
 * Sets values on a @ref NvDsInferDimsCHW structure from a @ref NvDsInferDims
 * structure.
 *
 * NOTE(review): reads d[0]..d[2] unconditionally, so @a dims is assumed to
 * have at least 3 dimensions laid out as C,H,W — callers should verify
 * numDims >= 3.
 */
#define getDimsCHWFromDims(dimsCHW,dims) \
do { \
(dimsCHW).c = (dims).d[0]; \
(dimsCHW).h = (dims).d[1]; \
(dimsCHW).w = (dims).d[2]; \
} while (0)
/**
* Holds information about one parsed object from a detector's output.
*/
typedef struct
{
/** Holds the ID of the class to which the object belongs. */
unsigned int classId;
/** Holds the horizontal offset of the bounding box shape for the object. */
float left;
/** Holds the vertical offset of the object's bounding box. */
float top;
/** Holds the width of the object's bounding box. */
float width;
/** Holds the height of the object's bounding box. */
float height;
/** Holds the object detection confidence level; must be in the range
[0.0,1.0]. */
float detectionConfidence;
} NvDsInferObjectDetectionInfo;
/**
* A typedef defined to maintain backward compatibility.
*/
typedef NvDsInferObjectDetectionInfo NvDsInferParseObjectInfo;
/**
* Holds information about one parsed object and instance mask from a detector's output.
*/
typedef struct
{
/** Holds the ID of the class to which the object belongs. */
unsigned int classId;
/** Holds the horizontal offset of the bounding box shape for the object. */
float left;
/** Holds the vertical offset of the object's bounding box. */
float top;
/** Holds the width of the object's bounding box. */
float width;
/** Holds the height of the object's bounding box. */
float height;
/** Holds the object detection confidence level; must be in the range
[0.0,1.0]. */
float detectionConfidence;
/** Holds the object segment mask. */
float *mask;
/** Holds the width of the mask. */
unsigned int mask_width;
/** Holds the height of the mask. */
unsigned int mask_height;
/** Holds the size of the mask in bytes. */
unsigned int mask_size;
} NvDsInferInstanceMaskInfo;
/**
 * Holds information about one classified attribute.
 */
typedef struct
{
/** Holds the index of the attribute's label. This index corresponds to
the order of output layers specified in the @a outputCoverageLayerNames
vector during initialization. */
unsigned int attributeIndex;
/** Holds the attribute's output value. */
unsigned int attributeValue;
/** Holds the attribute's confidence level. */
float attributeConfidence;
/** Holds a pointer to a string containing the attribute's label.
Memory for the string must not be freed. */
const char *attributeLabel;
} NvDsInferAttribute;
/**
 * Enum for the status codes returned by NvDsInferContext.
 */
typedef enum {
/** NvDsInferContext operation succeeded. */
NVDSINFER_SUCCESS = 0,
/** Failed to configure the NvDsInferContext instance possibly due to an
 * erroneous initialization property. */
NVDSINFER_CONFIG_FAILED,
/** Custom Library interface implementation failed. */
NVDSINFER_CUSTOM_LIB_FAILED,
/** Invalid parameters were supplied. */
NVDSINFER_INVALID_PARAMS,
/** Output parsing failed. */
NVDSINFER_OUTPUT_PARSING_FAILED,
/** CUDA error was encountered. */
NVDSINFER_CUDA_ERROR,
/** TensorRT interface failed. */
NVDSINFER_TENSORRT_ERROR,
/** Resource error was encountered. */
NVDSINFER_RESOURCE_ERROR,
/** TRT-IS (Triton Inference Server) error was encountered. */
NVDSINFER_TRTIS_ERROR,
/** Unknown error was encountered. */
NVDSINFER_UNKNOWN_ERROR
} NvDsInferStatus;
/**
 * Enum for the log levels of NvDsInferContext.
 */
typedef enum {
/** Error messages. */
NVDSINFER_LOG_ERROR = 0,
/** Warning messages. */
NVDSINFER_LOG_WARNING,
/** Informational messages. */
NVDSINFER_LOG_INFO,
/** Verbose messages useful for debugging. */
NVDSINFER_LOG_DEBUG,
} NvDsInferLogLevel;
/**
* Get the string name for the status.
*
* @param[in] status An NvDsInferStatus value.
* @return String name for the status. Memory is owned by the function. Callers
* should not free the pointer.
*/
const char* NvDsInferStatus2Str(NvDsInferStatus status);
#ifdef __cplusplus
}
#endif
/* C++ data types */
#ifdef __cplusplus
/**
 * Enum for selecting between minimum/optimal/maximum dimensions of a layer
 * in case of dynamic shape network.
 */
typedef enum
{
/** Selects the minimum profile dimensions. */
kSELECTOR_MIN = 0,
/** Selects the optimal profile dimensions. */
kSELECTOR_OPT,
/** Selects the maximum profile dimensions. */
kSELECTOR_MAX,
/** Number of selectors; used to size per-profile arrays. */
kSELECTOR_SIZE
} NvDsInferProfileSelector;
/**
 * Holds full dimensions (including batch size) for a layer.
 */
typedef struct
{
/** Holds the batch size of the layer. */
int batchSize = 0;
/** Holds the layer's dimensions (the batch dimension is held separately
in @a batchSize). */
NvDsInferDims dims = {0};
} NvDsInferBatchDims;
/**
 * Extended structure for bound layer information which additionally includes
 * min/optimal/max full dimensions of a layer in case of dynamic shape.
 */
struct NvDsInferBatchDimsLayerInfo : NvDsInferLayerInfo
{
/** Holds the min/optimal/max full dimensions of the layer, indexed by
@ref NvDsInferProfileSelector. */
NvDsInferBatchDims profileDims[kSELECTOR_SIZE];
};
#endif
#endif
/** @} */

View File

@ -0,0 +1,852 @@
/**
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file nvdsinfer_context.h
* <b>NVIDIA DeepStream Inference Interface </b>
*
* @b Description: This file specifies the DeepStream inference interface API.
*/
/**
* @defgroup gstreamer_nvinfer_context Inference Interface API
*
* Defines the DeepStream inference interface API. In C++, defines the
* NvDsInferContext class.
*
* The DeepStream inference API "NvDsInfer" provides methods to initialize and
* deinitialize the inference engine, pre-process the input frames as required
* by the network, and parse the output from the raw tensor buffers.
*
* Both C and C++ interfaces are available, with the C interface being a simple
* wrapper over the C++ interface.
*
* You can create an opaque handle to an instance of the context required by
* the API by calling the factory function createNvDsInferContext() or
* NvDsInferContext_Create(). Both functions accept an instance of
* @ref NvDsInferContextInitParams to initialize the context.
* Both let you specify a logging
* callback to get detailed information about failures and warnings.
*
* Initialization parameters allow you to configure the network data type,
* network type (Detector, Classifier, or Other), preprocessing parameters
* (mean subtraction and normalization), model-related parameters like
* Caffe/Uff/Onnx model file paths, output layer names, etc.
*
* Batches of frames can be queued for inferencing, using
* NvDsInferContext::queueInputBatch() or NvDsInferContext_QueueInputBatch().
* The input frame memories must be accessible
* to the GPU device configured during initialization. You can provide
* an asynchronous callback function to return the input buffers to the caller
* as soon as the input is consumed.
*
* Inference output can be dequeued using NvDsInferContext::dequeueOutputBatch()
* or NvDsInferContext_DequeueOutputBatch(). The order of dequeued outputs
* corresponds
* to the input queueing order. In case of failure, the output of the batch is
* lost. The dequeued output must be released back to the context using
* NvDsInferContext::releaseBatchOutput() or
* NvDsInferContext_ReleaseBatchOutput()
* to free the associated memory and return the output layer buffers for reuse
* by the context.
*
* Detectors output an array of detected objects for each frame in the batch.
* Classifiers classify entire frames and output an array of attributes for
* each frame. Segmentation classifies each pixel in the frame. A special
* network type (Other) has been provided whose output layers are not
* parsed. The caller can parse the device and host output layer buffers.
* You can also use this network type
* with the Gst-infer plugin to flow the output buffers as metadata.
*
* Other methods and functions get parsed labels from a label's
* file and properties of all layers bound by the inference engine.
*
* You can extend the Gst-nvinfer API using the custom method implementations.
* Refer to the Custom Method Implementations section for more details.
*
* @ingroup NvDsInferApi
* @{
*/
/**
* @name NvDsInferContext DeepStream Inference Interface API
*/
#ifndef __NVDSINFER_CONTEXT_H__
#define __NVDSINFER_CONTEXT_H__
#include "nvdsinfer.h"
/** @name NvDsInferContext API common types and functions.
* This section describes the common types and functions for both the C and C++
* interfaces for the NvDsInferContext class.
*/
/** @{ */
/** Maximum length of a file path parameter. */
#define _PATH_MAX 4096
/** Defines the maximum number of channels supported by the API
for image input layers. */
#define _MAX_CHANNELS 4
/** Defines the maximum length of string parameters. */
#define _MAX_STR_LENGTH 1024
/** Defines the maximum batch size supported by nvdsinfer. */
#define NVDSINFER_MAX_BATCH_SIZE 1024
/** Defines the minimum number of sets of output buffers that must be
allocated. */
#define NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE 2
/**
 * Defines internal data formats used by the inference engine.
 */
typedef enum
{
/** FP32 (full precision) inference. */
NvDsInferNetworkMode_FP32,
/** INT8 (quantized) inference; requires a calibration file. */
NvDsInferNetworkMode_INT8,
/** FP16 (half precision) inference. */
NvDsInferNetworkMode_FP16
} NvDsInferNetworkMode;
/**
 * Defines network types.
 */
typedef enum
{
/** Specifies a detector. Detectors find objects and their coordinates,
and their classes in an input frame. */
NvDsInferNetworkType_Detector,
/** Specifies a classifier. Classifiers classify an entire frame into
one of several classes. */
NvDsInferNetworkType_Classifier,
/** Specifies a segmentation network. A segmentation network classifies
each pixel into one of several classes. */
NvDsInferNetworkType_Segmentation,
/** Specifies an instance segmentation network. An instance segmentation
network detects objects, the bounding box and mask for each object, and
their classes in an input frame. */
NvDsInferNetworkType_InstanceSegmentation,
/** Specifies other. Output layers of an "other" network are not parsed by
NvDsInferContext. This is useful for networks that produce custom output.
Output can be parsed by the NvDsInferContext client or can be combined
with the Gst-nvinfer feature to flow output tensors as metadata. */
NvDsInferNetworkType_Other = 100
} NvDsInferNetworkType;
/**
 * Defines color formats.
 */
typedef enum
{
/** Specifies 24-bit interleaved R-G-B format. */
NvDsInferFormat_RGB,
/** Specifies 24-bit interleaved B-G-R format. */
NvDsInferFormat_BGR,
/** Specifies 8-bit Luma format. */
NvDsInferFormat_GRAY,
/** Specifies 32-bit interleaved R-G-B-A format. */
NvDsInferFormat_RGBA,
/** Specifies 32-bit interleaved B-G-R-x format. */
NvDsInferFormat_BGRx,
/** Specifies NCHW planar tensor format. */
NvDsInferFormat_Tensor,
/** Specifies an unknown format. */
NvDsInferFormat_Unknown = 0xFFFFFFFF,
} NvDsInferFormat;
/**
 * Defines UFF input layer orders.
 */
typedef enum {
NvDsInferTensorOrder_kNCHW,
NvDsInferTensorOrder_kNHWC,
NvDsInferTensorOrder_kNC,
/* Deprecated aliases kept for backward compatibility; the deprecation
 * messages name the replacement enumerators. */
NvDsInferUffOrder_kNCHW _DS_DEPRECATED_("Use NvDsInferTensorOrder_kNCHW instead") = NvDsInferTensorOrder_kNCHW,
NvDsInferUffOrder_kNHWC _DS_DEPRECATED_("Use NvDsInferTensorOrder_kNHWC instead") = NvDsInferTensorOrder_kNHWC,
NvDsInferUffOrder_kNC _DS_DEPRECATED_("Use NvDsInferTensorOrder_kNC instead") = NvDsInferTensorOrder_kNC
} NvDsInferTensorOrder;
/* Deprecated alias: expands to NvDsInferTensorOrder and emits a GCC
 * deprecation warning at each use site. */
#define NvDsInferUffOrder _Pragma \
("GCC warning \"'NvDsInferUffOrder' macro is deprecated. Use NvDsInferTensorOrder instead.\"") \
NvDsInferTensorOrder
/**
 * Holds detection and bounding box grouping parameters.
 */
typedef struct
{
/** Holds the bounding box detection threshold to be applied prior
 * to clustering operation. */
union {
float threshold _DS_DEPRECATED_("Use preClusterThreshold instead.");
float preClusterThreshold;
};
/** Holds the bounding box detection threshold to be applied post
 * clustering operation. */
float postClusterThreshold;
/** Holds the epsilon to control merging of overlapping boxes. Refer to OpenCV
 * groupRectangles and DBSCAN documentation for more information on epsilon. */
float eps;
/** Holds the minimum number of boxes in a cluster to be considered
an object during grouping using DBSCAN. */
int minBoxes;
/** Holds the minimum number of boxes in a cluster to be considered
an object during grouping using OpenCV groupRectangles. */
int groupThreshold;
/** Holds the minimum score in a cluster for the cluster to be considered
an object during grouping. Different clustering may cause the algorithm
to use different scores. */
float minScore;
/** Holds the IOU threshold to be used with the NMS mode of clustering. */
float nmsIOUThreshold;
/** Holds the number of top-scoring objects to be kept after clustering,
 * filtered in descending order of probability. */
int topK;
} NvDsInferDetectionParams;
/**
 * Enum for clustering mode for detectors
 */
typedef enum
{
/** Cluster boxes using OpenCV groupRectangles. */
NVDSINFER_CLUSTER_GROUP_RECTANGLES = 0,
/** Cluster boxes using DBSCAN. */
NVDSINFER_CLUSTER_DBSCAN,
/** Cluster boxes using non-maximum suppression (NMS). */
NVDSINFER_CLUSTER_NMS,
/** Cluster boxes using DBSCAN followed by NMS. */
NVDSINFER_CLUSTER_DBSCAN_NMS_HYBRID,
/** No clustering. */
NVDSINFER_CLUSTER_NONE
} NvDsInferClusterMode;
/**
 * Holds the initialization parameters required for the NvDsInferContext interface.
 */
typedef struct _NvDsInferContextInitParams
{
/** Holds a unique identifier for the instance. This can be used
to identify the instance that is generating log and error messages. */
unsigned int uniqueID;
/** Holds an internal data format specifier used by the inference engine. */
NvDsInferNetworkMode networkMode;
/** Holds the pathname of the prototxt file. */
char protoFilePath[_PATH_MAX];
/** Holds the pathname of the caffemodel file. */
char modelFilePath[_PATH_MAX];
/** Holds the pathname of the UFF model file. */
char uffFilePath[_PATH_MAX];
/** Holds the pathname of the ONNX model file. */
char onnxFilePath[_PATH_MAX];
/** Holds the pathname of the TLT encoded model file. */
char tltEncodedModelFilePath[_PATH_MAX];
/** Holds the pathname of the INT8 calibration file.
Required only when using INT8 mode. */
char int8CalibrationFilePath[_PATH_MAX];
union {
/** Holds the input dimensions for the model. */
NvDsInferDimsCHW inputDims;
/** Holds the input dimensions for the UFF model. */
NvDsInferDimsCHW uffDimsCHW;
} _DS_DEPRECATED_("Use inferInputDims instead.");
/** Holds the original input order for the UFF model. */
NvDsInferTensorOrder uffInputOrder;
/** Holds the name of the input layer for the UFF model. */
char uffInputBlobName[_MAX_STR_LENGTH];
/** Holds the string key for decoding the TLT encoded model. */
char tltModelKey[_MAX_STR_LENGTH];
/** Holds the pathname of the serialized model engine file.
When using the model engine file, other parameters required for creating
the model engine are ignored. */
char modelEngineFilePath[_PATH_MAX];
/** Holds the maximum number of frames to be inferred together in a batch.
The number of input frames in a batch must be
less than or equal to this. */
unsigned int maxBatchSize;
/** Holds the pathname of the labels file containing strings for the class
labels. The labels file is optional. The file format is described in the
custom models section of the DeepStream SDK documentation. */
char labelsFilePath[_PATH_MAX];
/** Holds the pathname of the mean image file (PPM format). File resolution
must be equal to the network input resolution. */
char meanImageFilePath[_PATH_MAX];
/** Holds the normalization factor with which to scale the input pixels. */
float networkScaleFactor;
/** Holds the network input format. */
NvDsInferFormat networkInputFormat;
/** Holds the per-channel offsets for mean subtraction. This is
an alternative to the mean image file. The number of offsets in the array
must be equal to the number of input channels. */
float offsets[_MAX_CHANNELS];
/** Holds the number of valid entries in @a offsets. */
unsigned int numOffsets;
/** Holds the network type. */
NvDsInferNetworkType networkType;
/** Holds a Boolean; true if DBScan is to be used for object clustering,
or false if OpenCV groupRectangles is to be used. */
_DS_DEPRECATED_("Use NvDsInferClusterMode instead")
int useDBScan;
/** Holds the number of classes detected by a detector network. */
unsigned int numDetectedClasses;
/** Holds per-class detection parameters. The array's size must be equal
to @a numDetectedClasses. */
NvDsInferDetectionParams *perClassDetectionParams;
/** Holds the minimum confidence threshold for the classifier to consider
a label valid. */
float classifierThreshold;
/** Holds the confidence threshold for the segmentation network to consider
a pixel classification valid. */
float segmentationThreshold;
/** Holds a pointer to an array of pointers to output layer names. */
char ** outputLayerNames;
/** Holds the number of output layer names. */
unsigned int numOutputLayers;
/** Holds the pathname of the library containing custom methods
required to support the network. */
char customLibPath[_PATH_MAX];
/** Holds the name of the custom bounding box function
in the custom library. */
char customBBoxParseFuncName[_MAX_STR_LENGTH];
/** Name of the custom classifier attribute parsing function in the custom
 * library. */
char customClassifierParseFuncName[_MAX_STR_LENGTH];
/** Holds a Boolean; true if the input layer contents are to be copied to
host memory for access by the application. */
int copyInputToHostBuffers;
/** Holds the ID of the GPU which is to run the inference. */
unsigned int gpuID;
/** Holds a Boolean; true if DLA is to be used. */
int useDLA;
/** Holds the ID of the DLA core to use. */
int dlaCore;
/** Holds the number of sets of output buffers (host and device)
to be allocated. */
unsigned int outputBufferPoolSize;
/** Holds the pathname of the configuration file
for custom network creation. This can be used to store custom properties
required by the custom network creation function. */
char customNetworkConfigFilePath[_PATH_MAX];
/** Name of the custom engine creation function in the custom library. */
char customEngineCreateFuncName[_MAX_STR_LENGTH];
/** For model parsers supporting both implicit batch dim and full dims,
 * prefer to use implicit batch dim. By default, full dims network mode is
 * used. */
int forceImplicitBatchDimension;
/** Max workspace size (unit MB) that will be used as tensorrt build
 * settings for cuda engine.
 */
unsigned int workspaceSize;
/** Inference input dimensions for runtime engine */
NvDsInferDimsCHW inferInputDims;
/** Holds the type of clustering mode */
NvDsInferClusterMode clusterMode;
/** Holds the name of the bounding box and instance mask parse function
in the custom library. */
char customBBoxInstanceMaskParseFuncName[_MAX_STR_LENGTH];
/** Can be used to specify the format and datatype for bound output layers.
 * For each layer specified the format is
 * "<layer-name>:<data-type>:<format>" */
char ** outputIOFormats;
/** Holds number of output IO formats specified. */
unsigned int numOutputIOFormats;
/** Can be used to specify the device type and inference precision of layers.
 * For each layer specified the format is
 * "<layer-name>:<device-type>:<precision>" */
char ** layerDevicePrecisions;
/** Holds number of layer device precisions specified */
unsigned int numLayerDevicePrecisions;
} NvDsInferContextInitParams;
/**
* Defines a callback function type for asynchronously returning
* the input client buffers to the NvDsInferContext client.
*
* @param[in] data An opaque pointer provided to the input queueing function
* through NvDsInferContextBatchInput.
*/
typedef void (* NvDsInferContextReturnInputAsyncFunc) (void *data);
/**
 * Holds information about one batch to be inferred.
 */
typedef struct
{
/** Holds a pointer to an array of pointers to input frame buffers.
The size of the array must be at least @a numInputFrames. */
void** inputFrames;
/** Holds the number of input frames, i.e. the size of the batch. */
unsigned int numInputFrames;
/** Holds the format of the frame contents. */
NvDsInferFormat inputFormat;
/** Holds the pitch of the input frames, in bytes. */
unsigned int inputPitch;
/** Holds a callback for returning the input buffers to the client. */
NvDsInferContextReturnInputAsyncFunc returnInputFunc;
/** Holds a pointer to the data to be supplied with the callback in
@a returnInputFunc. */
void *returnFuncData;
} NvDsInferContextBatchInput;
/**
 * Holds information about one detected object.
 */
typedef struct
{
/** Holds the object's offset from the left boundary of the frame. */
float left;
/** Holds the object's offset from the top boundary of the frame. */
float top;
/** Holds the object's width. */
float width;
/** Holds the object's height. */
float height;
/** Holds the index for the object's class. */
int classIndex;
/** Holds a pointer to a string containing a label for the object. */
char *label;
/** Holds the confidence score of the detected object. */
float confidence;
/** Holds instance mask information for the object. */
float *mask;
/** Holds width of mask */
unsigned int mask_width;
/** Holds height of mask */
unsigned int mask_height;
/** Holds size of mask in bytes*/
unsigned int mask_size;
} NvDsInferObject;
/**
* Holds information on all objects detected by a detector network in one
* frame.
*/
typedef struct
{
/** Holds a pointer to an array of objects. */
NvDsInferObject *objects;
/** Holds the number of objects in @a objects. */
unsigned int numObjects;
} NvDsInferDetectionOutput;
/**
 * Holds information on all attributes classified by a classifier network for
 * one frame.
 */
typedef struct
{
/** Holds a pointer to an array of attributes. There may be more than
one attribute, depending on the number of output coverage layers
(multi-label classifiers). */
NvDsInferAttribute *attributes;
/** Holds the size of the @a attributes array. */
unsigned int numAttributes;
/** Holds a pointer to a string containing a label for the
classified output. */
char *label;
} NvDsInferClassificationOutput;
/**
* Holds information parsed from segmentation network output for one frame.
*/
typedef struct
{
/** Holds the width of the output. Same as network width. */
unsigned int width;
/** Holds the height of the output. Same as network height. */
unsigned int height;
/** Holds the number of classes supported by the network. */
unsigned int classes;
/** Holds a pointer to an array for the 2D pixel class map.
The output for pixel (x,y) is at index (y*width+x). */
int *class_map;
/** Holds a pointer to an array containing raw probabilities.
The probability for class @a c and pixel (x,y) is at index
(c*width*height + y*width+x). */
float *class_probability_map;
} NvDsInferSegmentationOutput;
/**
 * Holds the information inferred by the network on one frame.
 */
typedef struct
{
/** Holds an output type indicating the valid member in the union
of @a detectionOutput, @a classificationOutput, and @a segmentationOutput.
This is basically the network type. */
NvDsInferNetworkType outputType;
/** Holds a union of supported outputs. The valid member is determined by
@a outputType. */
union
{
/** Holds detector output. Valid when @a outputType is
@ref NvDsInferNetworkType_Detector. */
NvDsInferDetectionOutput detectionOutput;
/** Holds classifier output. Valid when @a outputType is
@ref NvDsInferNetworkType_Classifier. */
NvDsInferClassificationOutput classificationOutput;
/** Holds segmentation output. Valid when @a outputType is
@ref NvDsInferNetworkType_Segmentation. */
NvDsInferSegmentationOutput segmentationOutput;
};
} NvDsInferFrameOutput;
/**
 * Holds the output for all of the frames in a batch (an array of frame),
 * and related buffer information.
 */
typedef struct
{
/** Holds a pointer to an array of outputs for each frame in the batch. */
NvDsInferFrameOutput *frames;
/** Holds the number of elements in @a frames. */
unsigned int numFrames;
/** Holds a pointer to an array of pointers to output device buffers
for this batch. The array elements are set by the context. */
void **outputDeviceBuffers;
/** Holds the number of elements in @a *outputDeviceBuffers. */
unsigned int numOutputDeviceBuffers;
/** Holds a pointer to an array of pointers to host buffers for this batch.
The array elements are set by the context. */
void **hostBuffers;
/** Holds the number of elements in hostBuffers. */
unsigned int numHostBuffers;
/** Holds a private context pointer for the set of output buffers. */
void* priv;
} NvDsInferContextBatchOutput;
/** An opaque pointer type to be used as a handle for a context instance. */
typedef struct INvDsInferContext * NvDsInferContextHandle;
/**
 * @brief Type declaration for a logging callback.
 *
 * The callback logs NvDsInferContext messages.
 *
 * @param[in] handle The handle of the NvDsInferContext instance that
 * generated the log.
 * @param[in] uniqueID Unique ID of the NvDsInferContext instance that
 * generated the log.
 * @param[in] logLevel Level of the log.
 * @param[in] logMessage A pointer to the log message string.
 * @param[in] userCtx An opaque pointer to the user context, supplied
 * when creating the NvDsInferContext instance.
 */
typedef void (*NvDsInferContextLoggingFunc)(NvDsInferContextHandle handle,
unsigned int uniqueID, NvDsInferLogLevel logLevel, const char* logMessage,
void* userCtx);
#ifdef __cplusplus
extern "C" {
#endif
/**
* Resets a context parameter structure to default values.
*
* @param[in] initParams A pointer to a context parameter structure.
*/
void NvDsInferContext_ResetInitParams (NvDsInferContextInitParams *initParams);
/**
* Gets the string name of the status.
*
* @param[in] status An inference status code.
* @return A pointer to a string containing the status's name, or NULL if
* the status is unrecognized. Memory is owned by the function; the caller
* may not free it.
*/
_DS_DEPRECATED_("NvDsInferContext_GetStatusName is deprecated. Use NvDsInferStatus2Str instead")
const char * NvDsInferContext_GetStatusName (NvDsInferStatus status);
#ifdef __cplusplus
}
#endif
/** @} */
/**
* @name NvDsInferContext API C++-interface
* This section describes the C++ interface for the NvDsInferContext class.
* @{
*/
#ifdef __cplusplus
#include <string>
#include <vector>
/**
 * Holds the DeepStream inference interface class.
 */
struct INvDsInferContext
{
public:
/**
 * Queues a batch of input frames for preprocessing and inferencing.
 * The input
 * frames must be in packed RGB/RGBA/GRAY UINT8 format with the same
 * resolution as the network input or preprocessed inputs so that it can be
 * fed directly to the inference engine. The frame memories should be
 * in CUDA device memory allocated on the same device that the
 * NvDsInferContext interface is configured with.
 *
 * The batch size must not exceed the maximum batch size requested during
 * initialization.
 *
 * @param[in] batchInput Reference to a batch input structure.
 * @return NVDSINFER_SUCCESS if preprocessing and queueing succeeded, or
 * an error status otherwise.
 */
virtual NvDsInferStatus queueInputBatch(NvDsInferContextBatchInput &batchInput) = 0;
/**
 * Dequeues output for a batch of frames. The batch dequeuing order is the
 * same as the input queuing order. The associated memory must be freed and
 * output buffers must be released back to the context using
 * releaseBatchOutput() so that the buffers can be reused.
 *
 * @param[out] batchOutput Reference to the batch output structure
 * to which the output is to be appended.
 * @return NVDSINFER_SUCCESS if dequeueing succeeded, or an error status
 * otherwise.
 */
virtual NvDsInferStatus dequeueOutputBatch(NvDsInferContextBatchOutput &batchOutput) = 0;
/**
 * Frees the memory associated with the batch output and releases the set of
 * output buffers back to the context for reuse.
 *
 * @param[in] batchOutput Reference to a batch output structure
 * which was filled by dequeueOutputBatch().
 */
virtual void releaseBatchOutput(NvDsInferContextBatchOutput &batchOutput) = 0;
/**
 * Fills the input vector with information on all bound layers of the
 * inference engine.
 *
 * @param[in,out] layersInfo Reference to a vector of layer info
 * structures to be filled by the function.
 */
virtual void fillLayersInfo(std::vector<NvDsInferLayerInfo> &layersInfo) = 0;
/**
 * Gets network input information.
 *
 * @param[in,out] networkInfo Reference to a network info structure.
 */
virtual void getNetworkInfo(NvDsInferNetworkInfo &networkInfo) = 0;
/**
 * \brief Gets the label strings parsed from the labels file.
 *
 * See the DeepStream NvInfer documentation for the format of the
 * labels file for detectors and classifiers.
 *
 * @return Reference to a vector of vector of string labels.
 */
virtual const std::vector< std::vector<std::string> >& getLabels() = 0;
/**
 * Deinitializes the inference engine and frees the resources it used.
 */
virtual void destroy() = 0;
/** Destructor for a C++ object. */
virtual ~INvDsInferContext() {}
};
/**
* Creates a new instance of NvDsInferContext initialized using supplied
* parameters.
*
* @param[out] handle A pointer to a NvDsInferContext handle.
* @param[in] initParams A reference to parameters to be used to initialize
* the context.
* @param[in] userCtx A pointer to an opaque user context with callbacks
* generated by the NvDsInferContext instance.
* @param[in] logFunc A log callback function for the instance.
* @return NVDSINFER_SUCCESS if the instance was created successfully,
* or an error status otherwise.
*/
NvDsInferStatus createNvDsInferContext(NvDsInferContextHandle *handle,
NvDsInferContextInitParams &initParams,
void *userCtx = nullptr,
NvDsInferContextLoggingFunc logFunc = nullptr);
#endif
/** @} */
/**
* @name NvDsInferContext API C-interface
* This section describes the C interface for the NvDsInferContext class.
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* Creates a new NvDsInferContext object with specified
* initialization parameters.
*
* @param[out] handle A pointer to an NvDsInferContext handle.
* @param[in] initParams A pointer to a parameter structure to be used to
* initialize the context.
* @param[in] userCtx A pointer to an opaque user context, with callbacks,
* generated by the NvDsInferContext instance.
* @param[in] logFunc A log callback for the instance.
* @return NVDSINFER_SUCCESS if creation was successful, or an error status
* otherwise.
*/
NvDsInferStatus NvDsInferContext_Create(NvDsInferContextHandle *handle,
NvDsInferContextInitParams *initParams, void *userCtx,
NvDsInferContextLoggingFunc logFunc);
/**
* Destroys an NvDsInferContext instance and releases its resources.
*
* @param[in] handle The handle to the NvDsInferContext instance to be
* destroyed.
*/
void NvDsInferContext_Destroy (NvDsInferContextHandle handle);
/**
* \brief Queues a batch of input frames for preprocessing and inferencing.
*
* @see NvDsInferContext::queueInputBatch() for details.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in] batchInput A reference to a batch input structure.
* @return NVDSINFER_SUCCESS if preprocessing and queueing were successful, or
* an error status otherwise.
*/
NvDsInferStatus NvDsInferContext_QueueInputBatch(NvDsInferContextHandle handle,
NvDsInferContextBatchInput *batchInput);
/**
* Dequeues output for a batch of frames.
*
* @see NvDsInferContext::dequeueOutputBatch() for details.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in,out] batchOutput A reference to the batch output structure
* to which output is to be appended.
* @return NVDSINFER_SUCCESS if dequeueing was successful, or an error status
* otherwise.
*/
NvDsInferStatus NvDsInferContext_DequeueOutputBatch(NvDsInferContextHandle handle,
NvDsInferContextBatchOutput *batchOutput);
/**
* Frees the memory associated with the batch output and releases the set of
* host buffers back to the context for reuse.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in] batchOutput A pointer to an NvDsInferContextBatchOutput
* structure filled by
* NvDsInferContext_DequeueOutputBatch().
*/
void NvDsInferContext_ReleaseBatchOutput(NvDsInferContextHandle handle,
NvDsInferContextBatchOutput *batchOutput);
/**
* Gets network input information.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in,out] networkInfo A pointer to an NvDsInferNetworkInfo structure.
*/
void NvDsInferContext_GetNetworkInfo(NvDsInferContextHandle handle,
NvDsInferNetworkInfo *networkInfo);
/**
* Gets the number of the bound layers of the inference engine in an
* NvDsInferContext instance.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @return The number of bound layers of the inference engine.
*/
unsigned int NvDsInferContext_GetNumLayersInfo(NvDsInferContextHandle handle);
/**
* Fills an input vector with information about all of the bound layers of the
* inference engine in an NvDsInferContext instance.
* The size of the array must be at least the value returned by
* NvDsInferContext_GetNumLayersInfo().
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in,out] layersInfo A pointer to an array of NvDsInferLayerInfo
structures to be filled by the function.
*/
void NvDsInferContext_FillLayersInfo(NvDsInferContextHandle handle,
NvDsInferLayerInfo *layersInfo);
/**
* Gets the string label associated with the class ID for detectors and the
* attribute ID and attribute value for classifiers. The string is owned
* by the context; the caller may not modify or free it.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in] id Class ID for detectors, or attribute ID for classifiers.
* @param[in] value Attribute value for classifiers; set to 0 for detectors.
* @return A pointer to a string label. The memory is owned by the context.
*/
const char* NvDsInferContext_GetLabel(NvDsInferContextHandle handle,
unsigned int id, unsigned int value);
#ifdef __cplusplus
}
#endif
/** @} */
#endif
/** @} */

View File

@ -0,0 +1,466 @@
/*
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file nvdsinfer_custom_impl.h
* <b>Defines specification for Custom Method Implementations for custom models </b>
*
* @b Description: This file defines the API that
* implements custom methods required by the GStreamer Gst-nvinfer plugin to
* infer using custom models.
*
* All custom functionality must be implemented in an independent shared
* library. The library is dynamically loaded (using %dlopen()) by the plugin.
* It implements custom methods which are called as required. The custom library
* can be specified in the Gst-nvinfer configuration file by the
* `custom-lib-name` property.
*
* @section customparsingfunc Custom Detector Output Parsing Function
*
* This section describes the custom bounding box parsing function for custom
* detector models.
*
* The custom parsing function should be of the type `NvDsInferParseCustomFunc`.
* The custom parsing function can be specified in the Gst-nvinfer
* configuration file by the properties `parse-bbox-func-name`
 * (name of the parsing function) and `custom-lib-name`. `parse-func` must be
 * set to 0.
*
* The Gst-nvinfer plugin loads the library and looks for the custom parsing
* function symbol. The function is called after each inference call is
* executed.
*
* You can call the macro CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE() after
* defining the function to validate the function definition.
*
*
* @section iplugininterface TensorRT Plugin Factory interface for DeepStream
*
* For the Caffe model, the library must implement
* NvDsInferPluginFactoryCaffeGet().
 * During model parsing, "nvinfer" looks for that function's symbol in the
* custom library. If symbol is found, the plugin calls that function to get a
* pointer to the PluginFactory instance required for parsing.
*
* If the IPluginFactory is needed during deserialization of CUDA engines,
* the library must implement %NvDsInferPluginFactoryRuntimeGet().
*
* Each Get function has a corresponding Destroy function which is
* called, if defined, when the returned PluginFactory is to be destroyed.
*
* A library that implements this interface must use the same function names
* as the header file. Gst-nvinfer dynamically loads the library and
* looks for the same symbol names.
*
* See the FasterRCNN sample provided with the SDK for a sample implementation
* of the interface.
*
*
* @section inputlayerinitialization Input layer initialization
*
* By default, Gst-nvinfer works with networks having only one input layer
* for video frames. If a network has more than one input layer, the custom
* library can implement the @ref NvDsInferInitializeInputLayers interface
* for initializing the other input layers. Gst-nvinfer assumes that the other
* input layers have static input information, and hence this method is called
* only once before the first inference.
*
* See the FasterRCNN sample provided with the SDK for a sample implementation
* of the interface.
*
*
* @section customnetworkbuild Interface for building Custom Networks
*
 * The "nvinfer" plugin supports two interfaces to create and build
* custom networks not directly supported by nvinfer.
* - IModelParser / NvDsInferCreateModelParser interface
* - NvDsInferEngineCreateCustomFunc interface
*
* In case of IModelParser / NvDsInferCreateModelParser interface, the custom
* library must derive and implement IModelParser, an interface to parse
* the custom networks and build the TensorRT network (nvinfer1::INetworkDefinition).
* The "nvinfer" plugin will then use this TensorRT network to build the
* inference engine. The plugin will look for the symbol "NvDsInferCreateModelParser"
* in the library and call the function to get an instance of the model parser
* implementation from the library.
*
* Alternatively, you can use the custom engine creation function to build
* networks that are not natively supported by nvinfer. The function must be
* of the type @ref NvDsInferEngineCreateCustomFunc. You can specify it
* in the nvinfer element configuration file using
* the property `engine-create-func-name` (name of the engine creation function)
* in addition to `custom-lib-name`.
*
* The nvinfer plugin loads the custom library dynamically and looks for the
* engine creation symbol. The function is called only once during
* initialization of the nvinfer plugin. The function must build and return
* the `CudaEngine` interface using the supplied nvinfer1::IBuilder instance.
* The builder instance is already configured with properties like
* MaxBatchSize, MaxWorkspaceSize, INT8/FP16 precision parameters, etc.
* The builder instance is managed by nvinfer, and the function may not destroy
* it.
*
* You can call the macro CHECK_CUSTOM_ENGINE_CREATE_FUNC_PROTOTYPE() after
* the function definition to validate the function definition.
*
* Refer to the Yolo sample provided with the SDK for sample implementation of
* both the interfaces.
*/
#ifndef _NVDSINFER_CUSTOM_IMPL_H_
#define _NVDSINFER_CUSTOM_IMPL_H_
#include <string>
#include <vector>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include "NvCaffeParser.h"
#include "NvUffParser.h"
#pragma GCC diagnostic pop
#include "nvdsinfer.h"
/*
* C++ interfaces
*/
#ifdef __cplusplus
/**
 * A model parser interface to translate a user-defined model to a TensorRT
 * network.
 *
 * Users can parse any custom model derived from this interface. An instance
 * is created by a call to @fn NvDsInferCreateModelParser.
 *
 * Implementations should make sure that all member functions are overridden.
 * This parser will be deleted after the engine (nvinfer1::ICudaEngine) is built.
 */
class IModelParser
{
public:
IModelParser() = default;
/**
 * Destructor; make sure all external resources are released here. */
virtual ~IModelParser() = default;
/**
 * Function interface for parsing the custom model and building the TensorRT
 * network.
 *
 * @param[in, out] network NvDsInfer will create the @a network and the
 *                 implementation can set up this network layer by layer.
 * @return NvDsInferStatus indicating if model parsing was successful.
 */
virtual NvDsInferStatus parseModel(
nvinfer1::INetworkDefinition& network) = 0;
/**
 * Function interface to check if the parser supports full-dimensions
 * networks.
 */
virtual bool hasFullDimsSupported() const = 0;
/**
 * Function interface to get the new model name which is to be used for
 * constructing the serialized engine file path.
 */
virtual const char* getModelName() const = 0;
};
#endif
/*
* C interfaces
*/
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Holds the detection parameters required for parsing objects.
 */
typedef struct
{
/** Holds the number of classes requested to be parsed, starting with
class ID 0. Parsing functions may only output objects with
class ID less than this value. */
unsigned int numClassesConfigured;
/** Holds a per-class vector of detection confidence thresholds
to be applied prior to the clustering operation.
Parsing functions may only output an object with detection confidence
greater than or equal to the vector element indexed by the object's
class ID. */
std::vector<float> perClassPreclusterThreshold;
/** Holds a per-class vector of detection confidence thresholds
to be applied after the clustering operation. */
std::vector<float> perClassPostclusterThreshold;
/** Deprecated. Use perClassPreclusterThreshold instead. Reference to
* maintain backward compatibility. */
std::vector<float> &perClassThreshold = perClassPreclusterThreshold;
} NvDsInferParseDetectionParams;
/**
 * Type definition for the custom bounding box parsing function.
 *
 * @param[in]  outputLayersInfo A vector containing information on the output
 *                              layers of the model.
 * @param[in]  networkInfo      Network information.
 * @param[in]  detectionParams  Detection parameters required for parsing
 *                              objects.
 * @param[out] objectList       A reference to a vector in which the function
 *                              is to add parsed objects.
 */
typedef bool (* NvDsInferParseCustomFunc) (
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferObjectDetectionInfo> &objectList);
/**
 * Validates at compile time that the custom parser function definition
 * matches NvDsInferParseCustomFunc. Must be invoked after defining the
 * function; assigning the function as the default argument of the generated
 * helper fails to compile if the prototype does not match.
 */
#define CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(customParseFunc) \
static void checkFunc_ ## customParseFunc (NvDsInferParseCustomFunc func = customParseFunc) \
{ checkFunc_ ## customParseFunc (); }; \
extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
NvDsInferNetworkInfo const &networkInfo, \
NvDsInferParseDetectionParams const &detectionParams, \
std::vector<NvDsInferObjectDetectionInfo> &objectList);
/**
 * Type definition for the custom bounding box and instance mask parsing
 * function.
 *
 * @param[in]  outputLayersInfo A vector containing information on the output
 *                              layers of the model.
 * @param[in]  networkInfo      Network information.
 * @param[in]  detectionParams  Detection parameters required for parsing
 *                              objects.
 * @param[out] objectList       A reference to a vector in which the function
 *                              is to add parsed objects and instance masks.
 */
typedef bool (* NvDsInferInstanceMaskParseCustomFunc) (
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferInstanceMaskInfo> &objectList);
/**
 * Validates at compile time that the custom instance-mask parser function
 * definition matches NvDsInferInstanceMaskParseCustomFunc. Must be invoked
 * after defining the function.
 */
#define CHECK_CUSTOM_INSTANCE_MASK_PARSE_FUNC_PROTOTYPE(customParseFunc) \
static void checkFunc_ ## customParseFunc (NvDsInferInstanceMaskParseCustomFunc func = customParseFunc) \
{ checkFunc_ ## customParseFunc (); }; \
extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
NvDsInferNetworkInfo const &networkInfo, \
NvDsInferParseDetectionParams const &detectionParams, \
std::vector<NvDsInferInstanceMaskInfo> &objectList);
/**
 * Type definition for the custom classifier output parsing function.
 *
 * NOTE: the identifier spelling "Classifer" is kept as-is for ABI/API
 * compatibility.
 *
 * @param[in]  outputLayersInfo    A vector containing information on the
 *                                 output layers of the model.
 * @param[in]  networkInfo         Network information.
 * @param[in]  classifierThreshold Classification confidence threshold.
 * @param[out] attrList            A reference to a vector in which the
 *                                 function is to add the parsed attributes.
 * @param[out] descString          A reference to a string object in which the
 *                                 function may place a description string.
 */
typedef bool (* NvDsInferClassiferParseCustomFunc) (
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
float classifierThreshold,
std::vector<NvDsInferAttribute> &attrList,
std::string &descString);
/**
 * Validates at compile time that the classifier custom parser function
 * definition matches NvDsInferClassiferParseCustomFunc. Must be invoked
 * after defining the function.
 */
#define CHECK_CUSTOM_CLASSIFIER_PARSE_FUNC_PROTOTYPE(customParseFunc) \
static void checkFunc_ ## customParseFunc (NvDsInferClassiferParseCustomFunc func = customParseFunc) \
{ checkFunc_ ## customParseFunc (); }; \
extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
NvDsInferNetworkInfo const &networkInfo, \
float classifierThreshold, \
std::vector<NvDsInferAttribute> &attrList, \
std::string &descString);
/** Forward declaration of the context initialization parameters structure. */
typedef struct _NvDsInferContextInitParams NvDsInferContextInitParams;
/**
 * Type definition for functions that build and return a @c CudaEngine for
 * custom models.
 *
 * The implementation of this interface must build the
 * nvinfer1::ICudaEngine instance using the nvinfer1::IBuilder instance
 * @a builder. The builder instance is managed by the caller;
 * the implementation must not destroy it.
 *
 * Properties like @a MaxBatchSize, @a MaxWorkspaceSize, INT8/FP16
 * precision parameters, and DLA parameters (if applicable) are set on the
 * builder and builderConfig before they are passed to the interface. The
 * corresponding Get functions of the nvinfer1::IBuilder and
 * nvinfer1::IBuilderConfig interfaces can be used to get the property values.
 *
 * The implementation must make sure not to reduce the @a MaxBatchSize of the
 * returned @c CudaEngine.
 *
 * @param[in]  builder       An nvinfer1::IBuilder instance.
 * @param[in]  builderConfig An nvinfer1::IBuilderConfig instance.
 * @param[in]  initParams    A pointer to the structure to be used for
 *                           initializing the NvDsInferContext instance.
 * @param[in]  dataType      Data precision.
 * @param[out] cudaEngine    A pointer to a location where the function is to
 *                           store a reference to the nvinfer1::ICudaEngine
 *                           instance it has built.
 * @return True if the engine build was successful, or false otherwise.
 */
typedef bool (* NvDsInferEngineCreateCustomFunc) (
nvinfer1::IBuilder * const builder, nvinfer1::IBuilderConfig * const builderConfig,
const NvDsInferContextInitParams * const initParams,
nvinfer1::DataType dataType,
nvinfer1::ICudaEngine *& cudaEngine);
/**
 * A macro that validates at compile time that a custom engine creator
 * function definition matches NvDsInferEngineCreateCustomFunc.
 * Call this macro after the function is defined.
 */
#define CHECK_CUSTOM_ENGINE_CREATE_FUNC_PROTOTYPE(customEngineCreateFunc) \
static void checkFunc_ ## customEngineCreateFunc (NvDsInferEngineCreateCustomFunc = customEngineCreateFunc) \
{ checkFunc_ ## customEngineCreateFunc(); }; \
extern "C" bool customEngineCreateFunc ( \
nvinfer1::IBuilder * const builder, \
nvinfer1::IBuilderConfig * const builderConfig, \
const NvDsInferContextInitParams * const initParams, \
nvinfer1::DataType dataType, \
nvinfer1::ICudaEngine *& cudaEngine);
/**
 * Specifies the type of the Plugin Factory.
 */
typedef enum
{
/** Specifies nvcaffeparser1::IPluginFactoryV2. Used only for Caffe models. */
PLUGIN_FACTORY_V2 = 2
} NvDsInferPluginFactoryType;
/**
 * Holds a pointer to a heap-allocated Plugin Factory object required during
 * Caffe model parsing.
 */
typedef union
{
/** Valid when the factory type is @ref PLUGIN_FACTORY_V2. */
nvcaffeparser1::IPluginFactoryV2 *pluginFactoryV2;
} NvDsInferPluginFactoryCaffe;
/**
 * Gets a new instance of a Plugin Factory interface to be used
 * during parsing of Caffe models. The function must set the correct @a type
 * and the corresponding member of the @a pluginFactory union (i.e.
 * @a pluginFactoryV2 for @ref PLUGIN_FACTORY_V2).
 *
 * @param[out] pluginFactory A reference to the union that contains
 *                           a pointer to the Plugin Factory object.
 * @param[out] type          Specifies the type of @a pluginFactory, i.e.
 *                           which member of the @a pluginFactory union
 *                           is valid.
 * @return True if the Plugin Factory was created successfully, or false
 *  otherwise.
 */
bool NvDsInferPluginFactoryCaffeGet (NvDsInferPluginFactoryCaffe &pluginFactory,
NvDsInferPluginFactoryType &type);
/**
 * Destroys a Plugin Factory instance created by
 * NvDsInferPluginFactoryCaffeGet().
 *
 * @param[in] pluginFactory A reference to the union that contains a
 *                          pointer to the Plugin Factory instance returned
 *                          by NvDsInferPluginFactoryCaffeGet().
 */
void NvDsInferPluginFactoryCaffeDestroy (NvDsInferPluginFactoryCaffe &pluginFactory);
/**
 * Returns a new instance of a Plugin Factory interface to be used
 * during deserialization of CUDA engines.
 *
 * @param[out] pluginFactory A reference to an nvinfer1::IPluginFactory*
 *                           in which the function is to place a pointer to
 *                           the instance.
 * @return True if the Plugin Factory was created successfully, or false
 *  otherwise.
 */
bool NvDsInferPluginFactoryRuntimeGet (nvinfer1::IPluginFactory *& pluginFactory);
/**
 * Destroys a Plugin Factory instance created by
 * NvDsInferPluginFactoryRuntimeGet().
 *
 * @param[in] pluginFactory A pointer to the Plugin Factory instance
 *                          returned by NvDsInferPluginFactoryRuntimeGet().
 */
void NvDsInferPluginFactoryRuntimeDestroy (nvinfer1::IPluginFactory * pluginFactory);
/**
 * Initializes the input layers for inference. This function is called only
 * once, before the first inference call.
 *
 * @param[in] inputLayersInfo A reference to a vector containing information
 *                            on the input layers of the model. This does not
 *                            contain the NvDsInferLayerInfo structure for
 *                            the layer for video frame input.
 * @param[in] networkInfo     A reference to a network information structure.
 * @param[in] maxBatchSize    The maximum batch size for inference.
 *                            The input layer buffers are allocated
 *                            for this batch size.
 * @return True if input layers are initialized successfully, or false
 *  otherwise.
 */
bool NvDsInferInitializeInputLayers (std::vector<NvDsInferLayerInfo> const &inputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
unsigned int maxBatchSize);
/**
 * The NvDsInferCudaEngineGet interface has been deprecated and has been
 * replaced by the NvDsInferEngineCreateCustomFunc function.
 */
bool NvDsInferCudaEngineGet(nvinfer1::IBuilder *builder,
NvDsInferContextInitParams *initParams,
nvinfer1::DataType dataType,
nvinfer1::ICudaEngine *& cudaEngine)
__attribute__((deprecated("Use 'engine-create-func-name' config parameter instead")));
/**
 * Creates a customized neural network parser for user-defined models.
 *
 * Users need to implement a new IModelParser class with @a initParams
 * referring to any model path and/or customNetworkConfigFilePath.
 *
 * @param[in] initParams Initialization parameters with model paths or
 *                       config files.
 * @return An instance of the IModelParser implementation.
 */
IModelParser* NvDsInferCreateModelParser(
const NvDsInferContextInitParams* initParams);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,117 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file nvdsinfer_dbscan.h
* <b>NVIDIA DeepStream DBScan based Object Clustering API </b>
*
* @b Description: This file defines the API for the DBScan-based object
* clustering algorithm.
*/
/**
* @defgroup ee_dbscan DBScan Based Object Clustering API
*
* Defines the API for DBScan-based object clustering.
*
* @ingroup NvDsInferApi
* @{
*/
#ifndef __NVDSINFER_DBSCAN_H__
#define __NVDSINFER_DBSCAN_H__
#include <stddef.h>
#include <stdint.h>
#include <nvdsinfer.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Holds an opaque structure for the DBScan object clustering context. */
struct NvDsInferDBScan;
/** Holds an opaque DBScan clustering context handle. */
typedef struct NvDsInferDBScan *NvDsInferDBScanHandle;
/** Holds object clustering parameters required by DBSCAN. */
typedef struct
{
/** Holds the epsilon (neighborhood distance) used to control merging of
 overlapping boxes into a cluster. */
float eps;
/** Holds the minimum number of boxes in a cluster for it to be considered
 an object. */
uint32_t minBoxes;
/** Holds a Boolean; true enables the area-to-hit ratio (ATHR) filter.
The ATHR is calculated as: ATHR = sqrt(clusterArea) / nObjectsInCluster. */
int enableATHRFilter;
/** Holds the area-to-hit ratio threshold. */
float thresholdATHR;
/** Holds the sum of neighborhood confidence thresholds. */
float minScore;
} NvDsInferDBScanClusteringParams;
/**
 * Creates a new DBScan object clustering context.
 *
 * @return A handle to the created context. Destroy it with
 *  NvDsInferDBScanDestroy() when no longer needed.
 */
NvDsInferDBScanHandle NvDsInferDBScanCreate();
/**
 * Destroys a DBScan object clustering context.
 *
 * @param[in] handle The handle to the context to be destroyed.
 */
void NvDsInferDBScanDestroy(NvDsInferDBScanHandle handle);
/**
 * Clusters an array of objects in place using specified clustering parameters.
 *
 * @param[in]     handle     A handle to the context to be used for clustering.
 * @param[in]     params     A pointer to a clustering parameter structure.
 * @param[in,out] objects    A pointer to an array of objects to be
 *                           clustered. The function places the clustered
 *                           objects in the same array.
 * @param[in,out] numObjects A pointer to the number of valid objects
 *                           in the @a objects array. The function sets
 *                           this value after clustering.
 */
void NvDsInferDBScanCluster(NvDsInferDBScanHandle handle,
NvDsInferDBScanClusteringParams *params, NvDsInferObjectDetectionInfo *objects,
size_t *numObjects);
/**
 * Clusters an array of objects in place using specified clustering parameters.
 * The outputs are only partially clustered: close neighbors belonging to the
 * same cluster are merged together, but the mean normalization of all the
 * proposals in a cluster is not performed. The outputs from this stage are
 * later fed into another clustering algorithm, such as NMS, to obtain the
 * final results.
 *
 * @param[in]     handle     A handle to the context to be used for clustering.
 * @param[in]     params     A pointer to a clustering parameter structure.
 * @param[in,out] objects    A pointer to an array of objects to be
 *                           clustered. The function places the clustered
 *                           objects in the same array.
 * @param[in,out] numObjects A pointer to the number of valid objects
 *                           in the @a objects array. The function sets
 *                           this value after clustering.
 */
void NvDsInferDBScanClusterHybrid(NvDsInferDBScanHandle handle,
NvDsInferDBScanClusteringParams *params, NvDsInferObjectDetectionInfo *objects,
size_t *numObjects);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA DeepStream API for importing Transfer Learning Toolkit
* encoded models </b>
*
* @b Description: This file specifies the API to decode and create
 * a CUDA engine file from a Transfer Learning Toolkit (TLT) encoded model.
*/
/**
* @defgroup ee_nvdsinfer_tlt Import Transfer Learning Toolkit Encoded Models
*
* Defines an API for importing Transfer Learning Toolkit encoded models.
*
* @ingroup NvDsInferApi
* @{
*/
#ifndef __NVDSINFER_TLT_H__
#define __NVDSINFER_TLT_H__
#include <nvdsinfer_custom_impl.h>
/**
 * \brief Decodes and creates a CUDA engine file from a TLT encoded model.
 *
 * This function implements the @ref NvDsInferCudaEngineGet interface. The
 * correct key and model path must be provided in the @a tltModelKey and
 * @a tltEncodedModelFilePath members of @a initParams. Other parameters
 * applicable to UFF models also apply to TLT encoded models.
 *
 * @param[in]  builder    An nvinfer1::IBuilder instance; managed by the
 *                        caller and must not be destroyed here.
 * @param[in]  initParams A pointer to the context initialization parameters.
 * @param[in]  dataType   Data precision.
 * @param[out] cudaEngine A reference in which the built
 *                        nvinfer1::ICudaEngine instance is stored.
 */
extern "C"
bool NvDsInferCudaEngineGetFromTltModel(nvinfer1::IBuilder * const builder,
const NvDsInferContextInitParams * const initParams,
nvinfer1::DataType dataType,
nvinfer1::ICudaEngine *& cudaEngine);
#endif
/** @} */

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
 * <b>Utility functions required by the DeepStream Inference API </b>
*/
#ifndef __NVDSINFER_UTILS_H__
#define __NVDSINFER_UTILS_H__
#include "nvdsinfer_dbscan.h"
#include "nvdsinfer_tlt.h"
#endif

View File

@ -0,0 +1,349 @@
/**
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
syntax = "proto3";
package nvdsinferserver.config;
/** Media (color) format of the input frames. */
enum MediaFormat {
MEDIA_FORMAT_NONE = 0;
IMAGE_FORMAT_RGB = 1;
IMAGE_FORMAT_BGR = 2;
IMAGE_FORMAT_GRAY = 3;
}
/** Memory layout order of the network input tensor. */
enum TensorOrder {
TENSOR_ORDER_NONE = 0;
TENSOR_ORDER_LINEAR = 1;
TENSOR_ORDER_NHWC = 2;
}
/** Element data type of a tensor. */
enum TensorDataType {
TENSOR_DT_NONE = 0;
TENSOR_DT_FP32 = 1;
TENSOR_DT_FP16 = 2;
TENSOR_DT_INT8 = 3;
TENSOR_DT_INT16 = 4;
TENSOR_DT_INT32 = 5;
TENSOR_DT_UINT8 = 6;
TENSOR_DT_UINT16 = 7;
TENSOR_DT_UINT32 = 8;
}
/** Compute hardware used for scaling frames / objects. */
enum FrameScalingHW {
FRAME_SCALING_HW_DEFAULT = 0;
FRAME_SCALING_HW_GPU = 1;
FRAME_SCALING_HW_VIC = 2;
}
/** Custom library for preload */
message CustomLib {
/** Path to the custom library */
string path = 1;
}
/** Preprocessing settings */
message PreProcessParams {
/** Input data normalization settings */
message ScaleNormalize
{
/** Normalization factor to scale the input pixels with. */
float scale_factor = 1;
/** Per channel offsets for mean subtraction. This is an alternative to
 * the mean image file. The number of offsets in the array should be
 * exactly equal to the number of input channels.
 */
repeated float channel_offsets = 2;
/** Path to the mean image file (PPM format). Resolution of the file
 * should be equal to the network input resolution.
 */
string mean_file = 3;
}
/** Network input format */
MediaFormat network_format = 1;
/** Network input tensor order */
TensorOrder tensor_order = 2;
/** Network tensor name that the preprocessed data is set to */
string tensor_name = 3;
/** Indicates whether the aspect ratio should be maintained when scaling to
 * network resolution. Right/bottom areas will be filled with black areas. */
int32 maintain_aspect_ratio = 4;
/** Compute hardware to use for scaling frames / objects. */
FrameScalingHW frame_scaling_hw = 5;
/** Interpolation filter to use while scaling. Refer to
 * NvBufSurfTransform_Inter for supported filter values. */
uint32 frame_scaling_filter = 6;
/** Preprocessing methods */
oneof preprocess_method {
/** Usual scaling normalization for images */
ScaleNormalize normalize = 7;
}
}
/** Deepstream detection settings */
message DetectionParams {
/** Non-maximum-suppression (NMS) cluster method */
message Nms
{
/** Detections with a score less than this threshold are rejected */
float confidence_threshold = 1;
/** IOU threshold */
float iou_threshold = 2;
/** Top k detection results to keep after NMS; 0 keeps all */
int32 topk = 3;
}
/** DBScan object clustering */
message DbScan {
/** Bounding box detection threshold. */
float pre_threshold = 1;
// float post_threshold = 2;
/** Epsilon to control merging of overlapping boxes */
float eps = 3;
/** Minimum boxes in a cluster to be considered an object */
int32 min_boxes = 4;
/** Minimum score in a cluster for it to be considered as an object */
float min_score = 5;
}
/** Cluster method based on grouping rectangles */
message GroupRectangle {
/** Detections with a score less than this threshold are rejected */
float confidence_threshold = 1;
/** How many bboxes can be clustered together */
int32 group_threshold = 2;
/** Epsilon to control merging of overlapping boxes */
float eps = 3;
}
/** Simple cluster method for confidence filtering */
message SimpleCluster
{
/** Detections with a score less than this threshold are rejected */
float threshold = 1;
}
/** Parameters controlled per class */
message PerClassParams {
/** Pre-threshold used to filter out detections with confidence less than
 the value */
float pre_threshold = 1;
}
/** Number of classes detected by a detector network. */
int32 num_detected_classes = 1;
/** Per class detection parameters. key-value is for
 * <class_id:class_parameter> */
map<int32, PerClassParams> per_class_params = 2;
/** Name of the custom bounding box function in the custom library. */
string custom_parse_bbox_func = 3;
/** Cluster methods for bboxes; choose one only */
oneof clustering_policy {
/** Non-maximum-suppression; reserved, not supported yet */
Nms nms = 4;
/** DbScan clustering parameters */
DbScan dbscan = 5;
/** Grouping rectangles */
GroupRectangle group_rectangle = 6;
/** Simple threshold filter */
SimpleCluster simple_cluster = 7;
}
}
/** Deepstream classification settings */
message ClassificationParams {
/** Classification threshold */
float threshold = 1;
/** Custom function for classification parsing */
string custom_parse_classifier_func = 2;
}
/** Deepstream segmentation settings */
message SegmentationParams {
/** Reserved field */
float threshold = 1;
}
/** Other network settings; the application must do the postprocessing */
message OtherNetworkParams {
/** Reserved field */
string type_name = 1;
}
/** TRTIS classification settings */
message TrtIsClassifyParams
{
/** Top k classification results */
uint32 topk = 1;
/** Classification threshold */
float threshold = 2;
/** [optional] specify which output tensor is used for triton classification.*/
string tensor_name = 3;
}
/** Post-processing settings */
message PostProcessParams {
/** Label file path. It is relative to the config file path if the value is
 * not an absolute path
 */
string labelfile_path = 1;
/** Post-process can only have one of the following types */
oneof process_type
{
/** Deepstream detection parameters */
DetectionParams detection = 2;
/** Deepstream classification parameters */
ClassificationParams classification = 3;
/** Deepstream segmentation parameters */
SegmentationParams segmentation = 4;
/** Deepstream other postprocessing parameters */
OtherNetworkParams other = 5;
/** TRT-IS classification parameters */
TrtIsClassifyParams trtis_classification = 6;
}
}
/** Network input layer information */
message InputLayer {
/** Input tensor name, optional */
string name = 1;
/** Fixed inference shape, only required when backend has wildcard shape */
repeated int32 dims = 2;
/** Tensor data type, optional. Default TENSOR_DT_NONE */
TensorDataType data_type = 3;
}
/** Network output layer information */
message OutputLayer {
/** Output tensor name */
string name = 1;
}
/** TRTIS inference backend parameters */
message TrtISParams {
/** TRTIS model repository settings */
message ModelRepo
{
/** Root directory for all models.
* All models should set the same @a root value */
string root = 1;
/** Log verbose level; the larger the value, the more logs are output
* (0): ERROR;
* (1): WARNING;
* (2): INFO
* (3+): VERBOSE Level
*/
uint32 log_level = 2;
/** Enable strict model config.
* true: config.pbtxt must exist.
* false: trtis tries to deduce the model's config file; this may cause
*   failures for different input/output dims.
*/
bool strict_model_config = 3;
/** Tensorflow GPU memory fraction, default 0.0 */
float tf_gpu_memory_fraction = 4;
/** Tensorflow soft placement, allowed by default */
bool tf_disable_soft_placement = 5;
}
/** trt-is model name */
string model_name = 1;
/** Model version; -1 is for the latest version. Required */
int64 version = 2;
oneof server {
/** trt-is server model repo; all models must have the same @a model_repo */
ModelRepo model_repo = 3;
}
}
/** Network LSTM parameters */
message LstmParams {
/** Initial constant value for LSTM input tensors, usually zero or one */
message InitConst {
/** Constant value */
float value = 1;
}
/** LSTM loop information */
message LstmLoop {
/** Input tensor name */
string input = 1;
/** Output tensor name */
string output = 2;
/** Initialization of the input tensor for the first frame */
oneof init_state {
/** Initial constant value, default is zero */
InitConst init_const = 3;
}
/** Enable if the LSTM output tensor data must be kept for application
 * output parsing; disabled by default */
bool keep_output = 4;
}
repeated LstmLoop loops = 1;
}
/** Network backend settings */
message BackendParams {
/** Input tensor settings, optional */
repeated InputLayer inputs = 1;
/** Output tensor settings, optional */
repeated OutputLayer outputs = 2;
/** Inference framework */
oneof infer_framework
{
/** TRT-IS inference framework */
TrtISParams trt_is = 3;
}
}
/** Extra controls */
message ExtraControl {
/** Enable if input tensor data must be copied for application output
 * parsing; disabled by default */
bool copy_input_to_host_buffers = 1;
/** Defines how many buffers are allocated for output tensors in the pool.
 * Optional, default is 2; the value can be in range [2:6] */
int32 output_buffer_pool_size = 2;
}
/** Inference configuration. */
message InferenceConfig {
  /** Unique id, larger than 0; required for multiple-model inference. */
  uint32 unique_id = 1;
  /** GPU id settings. Optional. Only a single GPU is supported at this
   * time; default value is [0]. */
  repeated int32 gpu_ids = 2;
  /** Max batch size. Required; can be reset by the plugin. */
  uint32 max_batch_size = 3;
  /** Inference backend parameters; required. */
  BackendParams backend = 4;
  /** Preprocessing for tensors; required. */
  PreProcessParams preprocess = 5;
  /** Postprocessing for all tensor data; required. */
  PostProcessParams postprocess = 6;
  /** Custom libs for tensor output parsing or preload; optional. */
  CustomLib custom_lib = 7;
  /** Advanced controls; optional. */
  oneof advanced
  {
    /** Extra controls. */
    ExtraControl extra = 8;
  }
  /** LSTM controller. */
  oneof lstm_control {
    /** LSTM parameters. */
    LstmParams lstm = 9;
  }
}

View File

@ -0,0 +1,126 @@
/**
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
syntax = "proto3";
package nvdsinferserver.config;
import "nvdsinferserver_config.proto";
/** Plugin Control settings for input / inference / output. */
message PluginControl {
  /** Color values for Red/Green/Blue/Alpha; all values are in range [0, 1]. */
  message Color {
    /** Red color value. */
    float r = 1;
    /** Green color value. */
    float g = 2;
    /** Blue color value. */
    float b = 3;
    /** Alpha (opacity) value. */
    float a = 4;
  }
  /** Bounding-box filter. */
  message BBoxFilter {
    /** Bounding-box minimum width. */
    uint32 min_width = 1;
    /** Bounding-box minimum height. */
    uint32 min_height = 2;
    /** Bounding-box maximum width. */
    uint32 max_width = 3;
    /** Bounding-box maximum height. */
    uint32 max_height = 4;
  }
  /** Detection class filter. */
  message DetectClassFilter {
    /** Detection bounding-box filter. */
    BBoxFilter bbox_filter = 1;
    /** Offset of the RoI from the top of the frame. Only objects within the
     * RoI are output. */
    uint32 roi_top_offset = 2;
    /** Offset of the RoI from the bottom of the frame. Only objects within
     * the RoI are output. */
    uint32 roi_bottom_offset = 3;
    /** Border color for detection bounding boxes. */
    Color border_color = 4;
    /** Background color for detection bounding boxes. */
    Color bg_color = 5;
  }
  /** Output detection results control. */
  message OutputDetectionControl {
    /** Default detection classes filter. */
    DetectClassFilter default_filter = 1;
    /** Per-class detection filters, used instead of the default filter;
     * keyed by class id. */
    map<uint32, DetectClassFilter> specific_class_filters = 2;
  }
  /** Input objects control. */
  message InputObjectControl {
    /** Input bounding-box filter for objects. */
    BBoxFilter bbox_filter = 1;
  }
  /** Processing Mode. */
  enum ProcessMode {
    /** Processing Default Mode. */
    PROCESS_MODE_DEFAULT = 0;
    /** Processing Full Frame Mode. */
    PROCESS_MODE_FULL_FRAME = 1;
    /** Processing Object Clipping Mode. */
    PROCESS_MODE_CLIP_OBJECTS = 2;
  }
  /** Plugin input data control policy. */
  message InputControl {
    /** Processing mode setting; optional. */
    ProcessMode process_mode = 1;
    /** Unique ID of the GIE on whose metadata (bounding boxes) this GIE is
     * to operate on. It is used for secondary GIE only. */
    int32 operate_on_gie_id = 2;
    /** Class IDs of the parent GIE on which this GIE is to operate on.
     * It is used for secondary GIE only. */
    repeated int32 operate_on_class_ids = 3;
    /** Number of consecutive batches to be skipped for inference.
     * Default is 0. */
    uint32 interval = 4;
    /** Enables inference on detected objects and asynchronous metadata
     * attachments. Works only when tracker-id is valid. It's used for a
     * classifier with secondary GIE only. */
    bool async_mode = 5;
    /** Input object filter policy. */
    oneof object_filter {
      /** Input object control settings. */
      InputObjectControl object_control = 6;
    }
  }
  /** Plugin output data control policy. */
  message OutputControl {
    /** Enable attaching inference output tensor metadata. */
    bool output_tensor_meta = 1;
    /** Postprocessing control policy. */
    oneof postprocess_control {
      /** Detection results filter. */
      OutputDetectionControl detect_control = 2;
    }
  }
  /** Low-level libnvds_infer_server inference configuration settings. */
  InferenceConfig infer_config = 1;
  /** Control of plugin input buffers and object filtering before inference. */
  InputControl input_control = 2;
  /** Control of plugin output metadata after inference. */
  OutputControl output_control = 3;
}

1019
ModelX/primary/includes/nvdsmeta.h Executable file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,309 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream: Metadata Extension Structures</b>
*
* @b Description: This file defines the NVIDIA DeepStream metadata structures
* used to describe metadata objects.
*/
/**
* @defgroup metadata_extensions Metadata Extension Structures
*
* Defines metadata structures used to describe metadata objects.
*
* @ingroup NvDsMetaApi
* @{
*/
#ifndef NVDSMETA_H_
#define NVDSMETA_H_
#include <glib.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Defines event type flags.
*/
typedef enum NvDsEventType {
NVDS_EVENT_ENTRY,
NVDS_EVENT_EXIT,
NVDS_EVENT_MOVING,
NVDS_EVENT_STOPPED,
NVDS_EVENT_EMPTY,
NVDS_EVENT_PARKED,
NVDS_EVENT_RESET,
/** Reserved for future use. Custom events must be assigned values
greater than this. */
NVDS_EVENT_RESERVED = 0x100,
/** Specifies a custom event. */
NVDS_EVENT_CUSTOM = 0x101,
NVDS_EVENT_FORCE32 = 0x7FFFFFFF
} NvDsEventType;
/**
* Defines object type flags.
*/
typedef enum NvDsObjectType {
NVDS_OBJECT_TYPE_VEHICLE,
NVDS_OBJECT_TYPE_PERSON,
NVDS_OBJECT_TYPE_FACE,
NVDS_OBJECT_TYPE_BAG,
NVDS_OBJECT_TYPE_BICYCLE,
NVDS_OBJECT_TYPE_ROADSIGN,
NVDS_OBJECT_TYPE_VEHICLE_EXT,
NVDS_OBJECT_TYPE_PERSON_EXT,
NVDS_OBJECT_TYPE_FACE_EXT,
/** Reserved for future use. Custom objects must be assigned values
greater than this. */
NVDS_OBJECT_TYPE_RESERVED = 0x100,
/** Specifies a custom object. */
NVDS_OBJECT_TYPE_CUSTOM = 0x101,
/** "object" key will be missing in the schema */
NVDS_OBJECT_TYPE_UNKNOWN = 0x102,
NVDS_OBEJCT_TYPE_FORCE32 = 0x7FFFFFFF
} NvDsObjectType;
/**
* Defines payload type flags.
*/
typedef enum NvDsPayloadType {
NVDS_PAYLOAD_DEEPSTREAM,
NVDS_PAYLOAD_DEEPSTREAM_MINIMAL,
/** Reserved for future use. Custom payloads must be assigned values
greater than this. */
NVDS_PAYLOAD_RESERVED = 0x100,
/** Specifies a custom payload. You must implement the nvds_msg2p_*
interface. */
NVDS_PAYLOAD_CUSTOM = 0x101,
NVDS_PAYLOAD_FORCE32 = 0x7FFFFFFF
} NvDsPayloadType;
/**
 * Holds a rectangle's position and size.
 */
typedef struct NvDsRect {
    float top;    /**< Holds the position of the rectangle's top in pixels. */
    float left;   /**< Holds the position of the rectangle's left side in pixels. */
    float width;  /**< Holds the rectangle's width in pixels. */
    float height; /**< Holds the rectangle's height in pixels. */
} NvDsRect;

/**
 * Holds geolocation parameters.
 */
typedef struct NvDsGeoLocation {
    gdouble lat; /**< Holds the location's latitude. */
    gdouble lon; /**< Holds the location's longitude. */
    gdouble alt; /**< Holds the location's altitude. */
} NvDsGeoLocation;

/**
 * Holds a coordinate's position.
 */
typedef struct NvDsCoordinate {
    gdouble x; /**< Holds the coordinate's X position. */
    gdouble y; /**< Holds the coordinate's Y position. */
    gdouble z; /**< Holds the coordinate's Z position. */
} NvDsCoordinate;

/**
 * Holds an object's signature.
 */
typedef struct NvDsObjectSignature {
    /** Holds a pointer to an array of signature values. */
    gdouble *signature;
    /** Holds the number of signature values in @a signature. */
    guint size;
} NvDsObjectSignature;
/**
 * Holds a vehicle object's parameters.
 */
typedef struct NvDsVehicleObject {
    gchar *type;    /**< Holds a pointer to the type of the vehicle. */
    gchar *make;    /**< Holds a pointer to the make of the vehicle. */
    gchar *model;   /**< Holds a pointer to the model of the vehicle. */
    gchar *color;   /**< Holds a pointer to the color of the vehicle. */
    gchar *region;  /**< Holds a pointer to the region of the vehicle. */
    gchar *license; /**< Holds a pointer to the license number of the vehicle. */
} NvDsVehicleObject;

/**
 * Holds a person object's parameters.
 */
typedef struct NvDsPersonObject {
    gchar *gender;  /**< Holds a pointer to the person's gender. */
    gchar *hair;    /**< Holds a pointer to the person's hair color. */
    gchar *cap;     /**< Holds a pointer to the type of cap the person is
                     wearing, if any. */
    gchar *apparel; /**< Holds a pointer to a description of the person's
                     apparel. */
    guint age;      /**< Holds the person's age. */
} NvDsPersonObject;

/**
 * Holds a face object's parameters.
 */
typedef struct NvDsFaceObject {
    gchar *gender;    /**< Holds a pointer to the person's gender. */
    gchar *hair;      /**< Holds a pointer to the person's hair color. */
    gchar *cap;       /**< Holds a pointer to the type of cap the person
                       is wearing, if any. */
    gchar *glasses;   /**< Holds a pointer to the type of glasses the person
                       is wearing, if any. */
    gchar *facialhair;/**< Holds a pointer to the person's facial hair color. */
    gchar *name;      /**< Holds a pointer to the person's name. */
    gchar *eyecolor;  /**< Holds a pointer to the person's eye color. */
    guint age;        /**< Holds the person's age. */
} NvDsFaceObject;
/**
 * Holds a vehicle object's parameters, extended with mask data.
 */
typedef struct NvDsVehicleObjectExt {
    gchar *type;    /**< Holds a pointer to the type of the vehicle. */
    gchar *make;    /**< Holds a pointer to the make of the vehicle. */
    gchar *model;   /**< Holds a pointer to the model of the vehicle. */
    gchar *color;   /**< Holds a pointer to the color of the vehicle. */
    gchar *region;  /**< Holds a pointer to the region of the vehicle. */
    gchar *license; /**< Holds a pointer to the license number of the vehicle. */
    GList *mask;    /**< Holds a list of polygons for the vehicle mask. */
} NvDsVehicleObjectExt;

/**
 * Holds a person object's parameters, extended with mask data.
 */
typedef struct NvDsPersonObjectExt {
    gchar *gender;  /**< Holds a pointer to the person's gender. */
    gchar *hair;    /**< Holds a pointer to the person's hair color. */
    gchar *cap;     /**< Holds a pointer to the type of cap the person is
                     wearing, if any. */
    gchar *apparel; /**< Holds a pointer to a description of the person's
                     apparel. */
    guint age;      /**< Holds the person's age. */
    GList *mask;    /**< Holds a list of polygons for the person mask. */
} NvDsPersonObjectExt;

/**
 * Holds a face object's parameters, extended with mask data.
 *
 * NOTE(review): the struct tag is NvDsFaceObjectWithExt while the typedef
 * name is NvDsFaceObjectExt; this is inconsistent with the other *Ext
 * structures but is kept as-is for source compatibility.
 */
typedef struct NvDsFaceObjectWithExt {
    gchar *gender;    /**< Holds a pointer to the person's gender. */
    gchar *hair;      /**< Holds a pointer to the person's hair color. */
    gchar *cap;       /**< Holds a pointer to the type of cap the person
                       is wearing, if any. */
    gchar *glasses;   /**< Holds a pointer to the type of glasses the person
                       is wearing, if any. */
    gchar *facialhair;/**< Holds a pointer to the person's facial hair color. */
    gchar *name;      /**< Holds a pointer to the person's name. */
    gchar *eyecolor;  /**< Holds a pointer to the person's eye color. */
    guint age;        /**< Holds the person's age. */
    GList *mask;      /**< Holds a list of polygons for the face mask. */
} NvDsFaceObjectExt;
/**
 * Holds event message meta data.
 *
 * You can attach various types of objects (vehicle, person, face, etc.)
 * to an event by setting a pointer to the object in @a extMsg.
 *
 * Similarly, you can attach a custom object to an event by setting a pointer
 * to the object in @a extMsg. A custom object must be handled by the metadata
 * parsing module accordingly.
 */
typedef struct NvDsEventMsgMeta {
    /** Holds the event's type. */
    NvDsEventType type;
    /** Holds the object's type. */
    NvDsObjectType objType;
    /** Holds the object's bounding box. */
    NvDsRect bbox;
    /** Holds the object's geolocation. */
    NvDsGeoLocation location;
    /** Holds the object's coordinates. */
    NvDsCoordinate coordinate;
    /** Holds the object's signature. */
    NvDsObjectSignature objSignature;
    /** Holds the object's class ID. */
    gint objClassId;
    /** Holds the ID of the sensor that generated the event. */
    gint sensorId;
    /** Holds the ID of the analytics module that generated the event. */
    gint moduleId;
    /** Holds the ID of the place related to the object. */
    gint placeId;
    /** Holds the ID of the component (plugin) that generated this event. */
    gint componentId;
    /** Holds the video frame ID of this event. */
    gint frameId;
    /** Holds the confidence level of the inference. */
    gdouble confidence;
    /** Holds the object's tracking ID. */
    gint trackingId;
    /** Holds a pointer to the generated event's timestamp. */
    gchar *ts;
    /** Holds a pointer to the detected or inferred object's ID. */
    gchar *objectId;
    /** Holds a pointer to a string containing the sensor's identity. */
    gchar *sensorStr;
    /** Holds a pointer to a string containing other attributes associated
     with the object. */
    gchar *otherAttrs;
    /** Holds a pointer to the name of the video file. */
    gchar *videoPath;
    /** Holds a pointer to event message meta data. This can be used to hold
     data that can't be accommodated in the existing fields, or an associated
     object (representing a vehicle, person, face, etc.). */
    gpointer extMsg;
    /** Holds the size of the custom object at @a extMsg. */
    guint extMsgSize;
} NvDsEventMsgMeta;
/**
 * Holds event information.
 */
typedef struct _NvDsEvent {
    /** Holds the type of event. */
    NvDsEventType eventType;
    /** Holds a pointer to event metadata. */
    NvDsEventMsgMeta *metadata;
} NvDsEvent;

/**
 * Holds payload metadata.
 */
typedef struct NvDsPayload {
    /** Holds a pointer to the payload. */
    gpointer payload;
    /** Holds the size of the payload. */
    guint payloadSize;
    /** Holds the ID of the component (plugin) which attached the payload
     (optional). */
    guint componentId;
} NvDsPayload;
#ifdef __cplusplus
}
#endif
#endif /* NVDSMETA_H_ */
/** @} */

View File

@ -0,0 +1,452 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>DeepStream object tracker API </b>
*
* @b Description: This file defines the DeepStream object tracker API.
*/
/**
* @defgroup ee_NvMOTracker Object Tracker API
*
* Defines the DeepStream object tracker API.
*
* @ingroup NvDsTrackerApi
* @{
*/
#ifndef _NVMOTRACKER_H_
#define _NVMOTRACKER_H_
#include <stdint.h>
#include <time.h>
#include "nvbufsurface.h"
#include "nvds_tracker_meta.h"
#ifdef __cplusplus
extern "C"
{
#endif
#define NVMOT_MAX_TRANSFORMS 4
typedef uint64_t NvMOTStreamId;
/**
* @brief Compute target flags.
*
* You can select multiple targets; the tracker will optimize across them.
* Combinations are allowed, e.g. NVTCOMP_GPU or NVTCOMP_PVA
*/
#define NVMOTCOMP_GPU 0x01 /**< Defines the "GPU" compute target flag. */
#define NVMOTCOMP_CPU 0x02 /**< Defines the "CPU" compute target flag. */
#define NVMOTCOMP_PVA 0x04 /**< Defines the "PVA" compute target flag. */
#define NVMOTCOMP_ANY 0xff /**< Defines a compute target flag for
"any target." */
#define NVMOTCOMP_DEFAULT NVMOTCOMP_ANY
/**< Defines the compute target flag for the
default target. */
/** @} */
typedef uint8_t NvMOTCompute;
/**
 * @brief Holds a configuration for batches for an input transform
 *        (a scaling/color conversion).
 */
typedef struct _NvMOTPerTransformBatchConfig
{
    /** Holds the type of buffer. */
    NvBufSurfaceMemType bufferType;
    /** Holds the maximum width of each frame. */
    uint32_t maxWidth;
    /** Holds the maximum height of each frame. */
    uint32_t maxHeight;
    /** Holds the maximum pitch of each buffer. */
    uint32_t maxPitch;
    /** Holds the maximum size of the buffer in bytes. */
    uint32_t maxSize;
    /** Holds the color format: RGB, NV12, etc. */
    uint32_t colorFormat;
} NvMOTPerTransformBatchConfig;

/**
 * @brief Holds miscellaneous configurations.
 */
typedef struct _NvMOTMiscConfig
{
    /** Holds the ID of the GPU to be used. */
    uint32_t gpuId;
    /** Holds the maximum number of objects to track per stream. 0 means
     track an unlimited number of objects. */
    uint32_t maxObjPerStream;
    /** Holds the maximum number of objects to track per batch. 0 means
     track an unlimited number of objects. */
    uint32_t maxObjPerBatch;
    /** Holds a pointer to a callback for logging messages.
     NOTE(review): a typedef declaration inside a struct is non-standard C
     and does not declare a usable data member; this line is reproduced
     byte-for-byte from the shipped header for compatibility. */
    typedef void (*logMsg) (int logLevel, const char * format, ...);
} NvMOTMiscConfig;
/**
 * @brief Holds a tracker configuration.
 *
 * Holds configuration options for the tracker, applied to the whole context.
 *
 * @note This structure must be deep-copied to be passed to a component that
 * is to use it persistently.
 */
typedef struct _NvMOTConfig
{
    /** Holds the compute target. @see NvMOTCompute. */
    NvMOTCompute computeConfig;
    /** Holds the maximum number of streams in a batch. */
    uint32_t maxStreams;
    /** Holds the number of \ref NvMOTPerTransformBatchConfig entries in
     @a perTransformBatchConfig. */
    uint8_t numTransforms;
    /** Holds a pointer to a list of @a numTransforms batch configurations,
     one per transform, including type and resolution. */
    NvMOTPerTransformBatchConfig *perTransformBatchConfig;
    /** Holds miscellaneous configurations. */
    NvMOTMiscConfig miscConfig;
    /** Holds the length of @a customConfigFilePath. */
    uint16_t customConfigFilePathSize;
    /** A pointer to the pathname of the tracker's custom configuration file.
     A null-terminated string. */
    char* customConfigFilePath;
} NvMOTConfig;

/**
 * @brief Defines configuration request return codes.
 */
typedef enum
{
    NvMOTConfigStatus_OK,          /**< Request fully successful. */
    NvMOTConfigStatus_Error,       /**< A general error occurred. */
    NvMOTConfigStatus_Invalid,     /**< The request was invalid. */
    NvMOTConfigStatus_Unsupported  /**< A requested feature is unsupported. */
} NvMOTConfigStatus;

/**
 * @brief Holds a tracker's configuration status.
 *
 * Holds the status of a configuration request; includes both summary and
 * per-configuration status.
 */
typedef struct _NvMOTConfigResponse
{
    /** Holds the summary status of the entire configuration request. */
    NvMOTConfigStatus summaryStatus;
    /** Holds the compute target request status. */
    NvMOTConfigStatus computeStatus;
    /** Holds the transform batch configuration request status:
     summary status for all transforms. */
    NvMOTConfigStatus transformBatchStatus;
    /** Holds the status of the miscellaneous configurations. */
    NvMOTConfigStatus miscConfigStatus;
    /** Holds the status of the custom configurations. */
    NvMOTConfigStatus customConfigStatus;
} NvMOTConfigResponse;
/**
 * @brief Defines generic status codes for tracking operations.
 */
typedef enum
{
    NvMOTStatus_OK,           /**< Operation succeeded. */
    NvMOTStatus_Error,        /**< Operation failed. */
    NvMOTStatus_Invalid_Path  /**< A supplied path was invalid. */
} NvMOTStatus;

/**
 * @brief Holds the definition of a rectangle.
 */
typedef struct _NvMOTRect
{
    /** Holds the left edge position of the object bounding box, in pixels. */
    float x;
    /** Holds the top edge position of the object bounding box, in pixels. */
    float y;
    /** Holds the width of the bounding box, in pixels. */
    float width;
    /** Holds the height of the bounding box, in pixels. */
    float height;
} NvMOTRect;

/**
 * @brief Holds information about an object to be tracked.
 *
 * NvMOT creates an instance of this structure for each tracked object.
 */
typedef struct _NvMOTObjToTrack
{
    /** Holds the class of the object. */
    uint16_t classId;
    /** Holds the bounding box of the object. */
    NvMOTRect bbox;
    /** Holds the detection confidence of the object. */
    float confidence;
    /** Holds a Boolean which is true if NvMOT is to track this object. */
    bool doTracking;
    /** Holds a pointer to client data associated with the object. */
    void *pPreservedData;
} NvMOTObjToTrack;

/**
 * @brief Holds a list of objects.
 */
typedef struct _NvMOTObjToTrackList
{
    /** Holds a Boolean which is true if detection was done on this frame
     even if the list of objects to track is empty. */
    bool detectionDone;
    /** Holds a pointer to a list or array of object information blocks. */
    NvMOTObjToTrack* list;
    /** Holds the number of blocks allocated for the list. */
    uint32_t numAllocated;
    /** Holds the number of populated blocks in the list. */
    uint32_t numFilled;
} NvMOTObjToTrackList;
/**
 * @brief Holds a frame containing the image and objects to be tracked.
 *
 * @note @a numBuffers is supposed to be less than or equal to
 * @a numTransforms in @ref NvMOTConfig.
 * @note The metadata in the NvBufSurfaceParams structures
 * which @a bufferList points to must be checked with the parameters
 * specified in @a perTransformBatchConfig in NvMOTConfig.
 */
typedef struct _NvMOTFrame
{
    /** Holds the stream ID of the stream source for this frame. */
    NvMOTStreamId streamID;
    /** Holds the sequential frame number that identifies the frame
     within the stream. */
    uint32_t frameNum;
    /** Holds the timestamp of the frame at the time of capture. */
    time_t timeStamp;
    /** Holds a Boolean which is true if the timestamp value is properly
     populated. */
    bool timeStampValid;
    /** Holds a Boolean which is true if objects in this frame are to be
     tracked. */
    bool doTracking;
    /** Holds a Boolean which is true to reset tracking for the stream. */
    bool reset;
    /** Holds the number of entries in @a bufferList. */
    uint8_t numBuffers;
    /** Holds a pointer to an array of pointers to buffer parameter
     structures. */
    NvBufSurfaceParams** bufferList;
    /** Holds a list of objects in this frame which are to be tracked.
     Bounding boxes are scaled for the first buffer configuration. */
    NvMOTObjToTrackList objectsIn;
} NvMOTFrame;

/**
 * @brief Holds information about each tracked object.
 */
typedef struct _NvMOTTrackedObj
{
    /** Holds the class ID of the object to be tracked. */
    uint16_t classId;
    /** Holds a unique ID for the object, assigned by the tracker. */
    uint64_t trackingId;
    /** Holds the bounding box. */
    NvMOTRect bbox;
    /** Holds the tracking confidence of the object. */
    float confidence;
    /** Holds the track length in frames. */
    uint32_t age;
    /** Holds a pointer to the associated input object, if there is one. */
    NvMOTObjToTrack *associatedObjectIn;
    /** Reserved; do not use. */
    uint8_t reserved[128];
} NvMOTTrackedObj;

/**
 * @brief Holds a list of tracked objects.
 */
typedef struct _NvMOTTrackedObjList
{
    /** Holds the stream ID of the stream associated with objects in the
     list. */
    NvMOTStreamId streamID;
    /** Holds the frame number for objects in the list. */
    uint32_t frameNum;
    /** Holds a Boolean which is true if this entry in the batch is valid. */
    bool valid;
    /** Holds a pointer to a list or array of object information blocks. */
    NvMOTTrackedObj* list;
    /** Holds the number of blocks allocated for the list. */
    uint32_t numAllocated;
    /** Holds the number of populated blocks in the list. */
    uint32_t numFilled;
} NvMOTTrackedObjList;

/**
 * @brief Holds a batch of lists of tracked objects.
 */
typedef struct _NvMOTTrackedObjBatch
{
    /** Holds a pointer to an array of object lists. */
    NvMOTTrackedObjList *list;
    /** Holds the number of blocks allocated for the list. */
    uint32_t numAllocated;
    /** Holds the number of filled blocks in the list. */
    uint32_t numFilled;
} NvMOTTrackedObjBatch;
/**
 * @brief Holds parameters for processing a batch.
 *
 * @see NvMOT_Process.
 */
typedef struct _NvMOTProcessParams
{
    uint32_t numFrames;    /**< Holds the number of frames in the batch. */
    NvMOTFrame *frameList; /**< Holds a pointer to an array of frame data. */
} NvMOTProcessParams;

/**
 * @brief Holds a query of the tracker library's capabilities and
 *        requirements. @see NvMOT_Query.
 */
typedef struct _NvMOTQuery
{
    /** Holds flags for supported compute targets. @see NvMOTCompute. */
    NvMOTCompute computeConfig;
    /** Holds the number of \ref NvMOTPerTransformBatchConfig entries
     in perTransformBatchConfig. */
    uint8_t numTransforms;
    /** Holds the color formats for input buffers; a required value. */
    NvBufSurfaceColorFormat colorFormats[NVMOT_MAX_TRANSFORMS];
    /** Holds the preferred memory type for input buffers. */
    NvBufSurfaceMemType memType;
    /** Holds a Boolean which is true if batch processing is supported. */
    bool supportBatchProcessing;
    /** Holds a Boolean which is true if outputting past-frame data is
     supported. */
    bool supportPastFrame;
} NvMOTQuery;

/**
 * @brief Holds an opaque context handle.
 */
struct NvMOTContext;
typedef struct NvMOTContext* NvMOTContextHandle;
/**
 * @brief Initializes a tracking context for a batch of one or more image
 * streams.
 *
 * If successful, the context is configured as specified by @a pConfigIn.
 *
 * @param [in]  pConfigIn       A pointer to a structure specifying
 *                              the configuration.
 * @param [out] pContextHandle  A pointer to a handle for the stream context.
 *                              The stream context is created and owned
 *                              by the tracker. The returned context handle
 *                              must be included in
 *                              all subsequent calls for the specified stream.
 * @param [out] pConfigResponse A pointer to a structure that describes the
 *                              operation's status.
 * @return The outcome of the initialization attempt.
 */
NvMOTStatus NvMOT_Init(NvMOTConfig *pConfigIn,
                       NvMOTContextHandle *pContextHandle,
                       NvMOTConfigResponse *pConfigResponse);

/**
 * @brief Deinitializes a stream context.
 *
 * The specified context is retired and may not be used again.
 *
 * @param contextHandle The handle for the stream context to be retired.
 */
void NvMOT_DeInit(NvMOTContextHandle contextHandle);

/**
 * @brief Processes a batch.
 *
 * Given a context and a batch of frames, processes the batch as the current
 * frames in their respective streams. Once processed, each frame becomes part
 * of the history and the previous frame in its stream.
 *
 * @param [in]  contextHandle A context handle obtained from NvMOT_Init().
 * @param [in]  pParams       A pointer to parameters for the batch
 *                            to be processed.
 * @param [out] pTrackedObjectsBatch
 *                            A pointer to a batch of lists of tracked object
 *                            slots to be filled by the tracker. The batch is
 *                            allocated by the client. Bounding boxes are
 *                            scaled to the resolution of the first input
 *                            image transform buffer.
 * @return The status of batch processing.
 */
NvMOTStatus NvMOT_Process(NvMOTContextHandle contextHandle,
                          NvMOTProcessParams *pParams,
                          NvMOTTrackedObjBatch *pTrackedObjectsBatch);
/**
 * @brief Processes past-frame data in the low-level tracker library and
 * retrieves it.
 *
 * Given a context and a batch of frames, processes the past-frame data of
 * each tracked object stored in the low-level tracker library, puts it into
 * the past-frame data structure, and retrieves it.
 *
 * @param [in]  contextHandle     The context handle obtained from
 *                                NvMOT_Init().
 * @param [in]  pParams           A pointer to parameters for the batch of
 *                                frames with the available stream ID.
 * @param [out] pPastFrameObjBatch A batch of lists of tracked objects stored
 *                                by the low-level tracker in past frames.
 *                                Bounding boxes are scaled to the resolution
 *                                of the first input image transform buffer.
 * @return Status of batch processing.
 */
NvMOTStatus NvMOT_ProcessPast(NvMOTContextHandle contextHandle,
                              NvMOTProcessParams *pParams,
                              NvDsPastFrameObjBatch *pPastFrameObjBatch);

/**
 * @brief Queries the tracker library's capabilities and requirements.
 *
 * Answers a query for this tracker library's capabilities and requirements.
 * The tracker's custom config file is provided for optional consultation.
 *
 * @param [in]  customConfigFilePathSize Length of the custom configuration
 *                                       file's pathname.
 * @param [in]  pCustomConfigFilePath    A pointer to the custom configuration
 *                                       file's pathname.
 * @param [out] pQuery                   A pointer to a query structure to be
 *                                       filled by the tracker.
 * @return Status of the query.
 */
NvMOTStatus NvMOT_Query(uint16_t customConfigFilePathSize, char* pCustomConfigFilePath, NvMOTQuery *pQuery);

/**
 * @brief Removes streams from a batch.
 *
 * An optional function used in batch processing mode only. It notifies
 * the tracker library that a stream has been removed,
 * and will not be present in future batches. Any per-stream resources
 * associated with the removed stream may be freed at this time.
 *
 * This function may be called only when all processing is quiesced.
 *
 * @param [in] contextHandle The context handle obtained from NvMOT_Init().
 * @param [in] streamIdMask  A mask for finding streams to remove.
 *                           The function reports removal of all streams
 *                           where (streamId & streamIdMask) == streamIdMask.
 */
void NvMOT_RemoveStreams(NvMOTContextHandle contextHandle, NvMOTStreamId streamIdMask);
/** @} */ // end of API group
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,297 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: On-Screen Display API</b>
*
* This file defines the NvOSD library, used to draw rectangles and text
* over the frame.
*/
/**
* @defgroup NvDsOsdApi On-Screen Display API
*
* Defines the functions of On-screen Display APIs
*
*/
/**
* @defgroup ee_nvosd_api_group On-Screen Display API
* Defines the NvOSD library to be used to draw rectangles and text
* over the frame.
* @ingroup NvDsOsdApi
* @{
*/
#ifndef __NVLL_OSD_API_DEFS__
#define __NVLL_OSD_API_DEFS__
#include "nvll_osd_struct.h"
#include "nvbufsurface.h"
#define NVOSD_PRINT_E(f_, ...) \
printf("libnvosd (%d):(ERROR) : " f_, __LINE__, ##__VA_ARGS__)
#define MAX_IN_BUF 16
#define MAX_BORDER_WIDTH 32
#ifdef __cplusplus
extern "C"
{
#endif
typedef void * NvOSDCtxHandle;
/**
 * Holds information about the text in a frame.
 */
typedef struct _NvOSD_FrameTextParams
{
    /** Holds a pointer to a buffer containing a frame. */
    NvBufSurfaceParams *buf_ptr;
    /** Holds the OSD mode to be used for processing. */
    NvOSD_Mode mode;
    /** Holds the number of strings. */
    int num_strings;
    /** Holds the strings' text parameters. */
    NvOSD_TextParams *text_params_list;
} NvOSD_FrameTextParams;

/** Holds information about the rectangles in a frame. */
typedef struct _NvOSD_FrameRectParams
{
    /** Holds a pointer to the buffer containing the frame. */
    NvBufSurfaceParams *buf_ptr;
    /** Holds the OSD mode to be used for processing. */
    NvOSD_Mode mode;
    /** Holds the number of rectangles. */
    int num_rects;
    /** Holds the rectangles' parameters. */
    NvOSD_RectParams *rect_params_list;
} NvOSD_FrameRectParams;

/** Holds information about the segment masks in a frame. */
typedef struct _NvOSD_FrameSegmentMaskParams
{
    /** Holds a pointer to the buffer containing the frame. */
    NvBufSurfaceParams *buf_ptr;
    /** Holds the OSD mode to be used for processing. */
    NvOSD_Mode mode;
    /** Holds the number of segment masks. */
    int num_segments;
    /** Holds the rectangles' parameters. */
    NvOSD_RectParams *rect_params_list;
    /** Holds mask parameters. */
    NvOSD_MaskParams *mask_params_list;
} NvOSD_FrameSegmentMaskParams;
/** Holds information about the lines in a frame. */
typedef struct _NvOSD_FrameLineParams
{
    /** Holds a pointer to the buffer containing the frame. */
    NvBufSurfaceParams *buf_ptr;
    /** Holds the OSD mode to be used for processing. */
    NvOSD_Mode mode;
    /** Holds the number of lines. */
    int num_lines;
    /** Holds the lines' parameters. */
    NvOSD_LineParams *line_params_list;
} NvOSD_FrameLineParams;

/** Holds information about the arrows in a frame. */
typedef struct _NvOSD_FrameArrowParams
{
    /** Holds a pointer to the buffer containing the frame. */
    NvBufSurfaceParams *buf_ptr;
    /** Holds the OSD mode to be used for processing. */
    NvOSD_Mode mode;
    /** Holds the number of arrows. */
    int num_arrows;
    /** Holds the parameters of the arrows. */
    NvOSD_ArrowParams *arrow_params_list;
} NvOSD_FrameArrowParams;

/** Holds information about the circles in a frame. */
typedef struct _NvOSD_FrameCircleParams
{
    /** Holds a pointer to the buffer containing the frame. */
    NvBufSurfaceParams *buf_ptr;
    /** Holds the OSD mode to be used for processing. */
    NvOSD_Mode mode;
    /** Holds the number of circles. */
    int num_circles;
    /** Holds the parameters of the circles. */
    NvOSD_CircleParams *circle_params_list;
} NvOSD_FrameCircleParams;
/**
* Creates an NvOSD context.
*
* @returns A handle for an NvOSD context if successful, or NULL otherwise.
*/
NvOSDCtxHandle nvll_osd_create_context(void);
/**
* Destroys an NvOSD context.
*
* @param[in] nvosd_ctx A handle for the NvOSD context to be destroyed.
*/
void nvll_osd_destroy_context(NvOSDCtxHandle nvosd_ctx);
/**
* \brief Sets clock parameters for a given context.
*
* The clock is overlayed when nvll_osd_put_text() is called.
* If no other text is to be overlayed, %nvll_osd_put_text() must be called with
* @a num_strings as 0 and @a text_params_list as NULL.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
* @param[in] clk_params A pointer to a structure for the clock
* to be overlayed; NULL to disable the clock.
*/
void nvll_osd_set_clock_params(NvOSDCtxHandle nvosd_ctx, NvOSD_TextParams *clk_params);
/**
 * \brief Overlays clock and given text at a given location in a buffer.
*
* To overlay the clock, you must call nvll_osd_set_clock_params().
* You must also ensure that the length of @a text_params_list is at least
* @ref NvOSD_FrameTextParams::num_strings.
*
* @note Currently only the NvOSD_Mode value @ref MODE_CPU is supported.
* Specifying other modes has no effect.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in]  frame_text_params    A pointer to a structure containing
 *                                  information about text strings to be overlayed.
*
* @returns 0 for success, or -1 for failure.
*/
int nvll_osd_put_text(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameTextParams *frame_text_params);
/**
* \brief Overlays segment masks at a given location in a buffer.
*
* You must ensure that the length of @a mask_params_list is at least
* @a num_segments.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
* @param[in] frame_mask_params A pointer to the FrameSegmentMaskParams struct
* containing mask information to be overlayed.
*
* @returns 0 for success, -1 for failure.
*/
int nvll_osd_draw_segment_masks(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameSegmentMaskParams *frame_mask_params);
/**
* \brief Overlays boxes at a given location in a buffer.
*
* Boxes can be configured with:
* a. Border only:
 *    You must set @a border_width and set
* @a has_bg_color to 0 for the given box.
* b. Border and background color
* To draw boxes with border and background color, you must set @a
* border_width and set @a has_bg_color to 1, and specify background color
* parameters for the given box.
* c. Solid fill acting as mask region
* To draw boxes with solid fill acting as mask region, you must set @a
* border_width to 0 and @a has_bg_color to 1 for the given box.
*
*
* You must ensure that the length of @a rect_params_list is at least
* @a num_rects.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
* @param[in] frame_rect_params A pointer to the FrameRectParams struct
* containing rectangles information to be overlayed.
*
* @returns 0 for success, -1 for failure.
*/
int nvll_osd_draw_rectangles(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameRectParams *frame_rect_params);
/**
* Overlays lines on the buffer.
*
* The length of @a line_params_list must be equal to @a num_lines.
* The client is responsible for allocating this array.
*
* @note Currently only MODE_CPU is supported. Specifying other modes will have
* no effect.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
* @param[in] frame_line_params A pointer to the FrameLineParams struct
* containing line information to be overlayed.
*
* @returns 0 if successful, or -1 otherwise.
*/
int nvll_osd_draw_lines(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameLineParams *frame_line_params);
/**
* Overlays arrows on the buffer.
*
* The length of @a arrow_params_list must equal @a num_arrows.
* The client is responsible for allocating this array.
*
* @note Currently only @ref MODE_CPU is supported. Specifying other modes has no effect.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
* @param[in] frame_arrow_params A pointer to the FrameArrowParams struct
* containing arrow information to be overlayed.
*
* @returns 0 if successful, or -1 otherwise.
*/
int nvll_osd_draw_arrows(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameArrowParams *frame_arrow_params);
/**
* Overlays circles on the buffer.
*
* You must ensure that the length of @a circle_params_list equals @a num_circles.
*
* @note Currently only @ref MODE_CPU is supported. Specifying other modes has no effect.
*
* @param[in] nvosd_ctx A handle for an NvOSD context.
* @param[in] frame_circle_params A pointer to the FrameCircleParams struct
* containing circle information to be overlayed.
*
* @returns 0 if successful, or -1 otherwise.
*/
int nvll_osd_draw_circles(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameCircleParams *frame_circle_params);
/**
* Sets the resolution of the frames on which the NvOSDContext is to operate.
*
* @param[in] nvosd_ctx A handle for the NvOSD context.
* @param[in] width Width of the input frames.
* @param[in] height Height of the input frames.
*
 * @returns 0 if successful, or -1 otherwise.
 *          NOTE(review): the declaration below returns void *, which conflicts
 *          with this description — confirm the actual return contract against
 *          the implementation.
*/
void *nvll_osd_set_params (NvOSDCtxHandle nvosd_ctx, int width, int height);
/**
* Initializes colors for HW based blending.
*
* Applicable ONLY for Jetson.
*
* @param[in] nvosd_ctx A pointer to NvOSD context.
* @param[in] color_info A pointer to the Color_info struct
* containing color information.
* @param[in] num_classes Number of classes.
*
 * @returns A pointer to the internally allocated Host Memory.
 *          NOTE(review): the declaration below returns int, which conflicts
 *          with this description — confirm the actual return contract against
 *          the implementation.
*/
int nvll_osd_init_colors_for_hw_blend(void *nvosd_ctx, NvOSD_Color_info * color_info, int num_classes);
#ifdef __cplusplus
}
#endif
/** @} */
#endif

View File

@ -0,0 +1,251 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: On-Screen Display Manager</b>
*
* This file defines the NvOSD library used to draw rectangles and text
* over the frame.
*/
/**
* @defgroup ee_nvosd_group On-Screen Display Manager
* Defines the NvOSD library to be used to draw rectangles and text
* over the frame.
* @ingroup NvDsOsdApi
* @{
*/
#ifndef __NVLL_OSD_STRUCT_DEFS__
#define __NVLL_OSD_STRUCT_DEFS__
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Defines modes used to overlay boxes and text.
*/
typedef enum {
MODE_CPU, /**< Specifies using the CPU for OSD processing.
Works with RGBA data only */
MODE_GPU, /**< Specifies using the GPU for OSD processing.
Currently not implemented. */
MODE_HW /**< Specifies the NVIDIA hardware engine
for rectangle drawing and masking.
This mode works with both YUV and RGB data.
It does not consider alpha parameter.
Not applicable to drawing text. */
} NvOSD_Mode;
/**
* Specifies arrow head positions.
*/
typedef enum
{
/** Specifies an arrow head only at start = 0. */
START_HEAD,
/** Specifies an arrow head only at end = 1. */
END_HEAD,
/** Specifies arrow heads at both start and end = 2. */
BOTH_HEAD
} NvOSD_Arrow_Head_Direction;
/**
* Holds unclipped bounding box coordinates of the object.
*/
typedef struct _NvBbox_Coords {
float left; /**< Holds the box's left coordinate
in pixels. */
float top; /**< Holds the box's top coordinate
in pixels. */
float width; /**< Holds the box's width in pixels. */
float height; /**< Holds the box's height in pixels. */
} NvBbox_Coords;
/**
* Holds the color parameters of the box or text to be overlayed.
*/
typedef struct _NvOSD_ColorParams {
double red; /**< Holds the red component of color.
Value must be in the range 0.0-1.0. */
double green; /**< Holds the green component of color.
Value must be in the range 0.0-1.0.*/
double blue; /**< Holds the blue component of color.
Value must be in the range 0.0-1.0.*/
double alpha; /**< Holds the alpha component of color.
Value must be in the range 0.0-1.0.*/
} NvOSD_ColorParams;
/**
* Holds the font parameters of the text to be overlayed.
*/
typedef struct _NvOSD_FontParams {
char * font_name; /**< Holds a pointer to the string containing
the font name. To display a list of
supported fonts, run the fc-list command. */
// char font_name[64]; /**< Holds a pointer to a string containing
// the font name. */
unsigned int font_size; /**< Holds the size of the font. */
NvOSD_ColorParams font_color; /**< Holds the font color. */
} NvOSD_FontParams;
/**
* Holds parameters of text to be overlayed.
*/
typedef struct _NvOSD_TextParams {
char * display_text; /**< Holds the text to be overlayed. */
unsigned int x_offset; /**< Holds the text's horizontal offset from
the top left pixel of the frame. */
unsigned int y_offset; /**< Holds the text's vertical offset from the
top left pixel of the frame. */
NvOSD_FontParams font_params; /**< Holds the font parameters of the text
to be overlaid. */
int set_bg_clr; /**< Holds a Boolean; true if the text has a
background color. */
NvOSD_ColorParams text_bg_clr;/**< Holds the text's background color, if
specified. */
} NvOSD_TextParams;
typedef struct _NvOSD_Color_info {
int id;
NvOSD_ColorParams color;
}NvOSD_Color_info;
/**
* Holds the box parameters of the box to be overlayed.
*/
typedef struct _NvOSD_RectParams {
float left; /**< Holds the box's left coordinate
in pixels. */
float top; /**< Holds the box's top coordinate
in pixels. */
float width; /**< Holds the box's width in pixels. */
float height; /**< Holds the box's height in pixels. */
unsigned int border_width; /**< Holds the box's border width in pixels. */
NvOSD_ColorParams border_color;
/**< Holds the box's border color. */
unsigned int has_bg_color; /**< Holds a Boolean; true if the box has a
background color. */
unsigned int reserved; /**< Holds a field reserved for future use. */
NvOSD_ColorParams bg_color; /**< Holds the box's background color. */
int has_color_info;
int color_id;
} NvOSD_RectParams;
/**
 * Holds the mask parameters of the segment to be overlayed.
 */
typedef struct _NvOSD_MaskParams {
float *data; /**< Holds a pointer to the mask data. */
unsigned int size; /**< Holds the size of the mask data buffer. */
float threshold; /**< Holds the threshold used to binarize mask values. */
unsigned int width; /**< Holds the mask width in pixels. */
unsigned int height; /**< Holds the mask height in pixels. */
} NvOSD_MaskParams;
/**
* Holds the box parameters of a line to be overlayed.
*/
typedef struct _NvOSD_LineParams {
unsigned int x1; /**< Holds the box's left coordinate
in pixels. */
unsigned int y1; /**< Holds the box's top coordinate
in pixels. */
unsigned int x2; /**< Holds the box's width in pixels. */
unsigned int y2; /**< Holds the box's height in pixels. */
unsigned int line_width; /**< Holds the box's border width in pixels. */
NvOSD_ColorParams line_color; /**< Holds the box's border color. */
} NvOSD_LineParams;
/**
* Holds arrow parameters to be overlaid.
*/
typedef struct _NvOSD_ArrowParams {
unsigned int x1; /**< Holds the start horizontal coordinate in pixels. */
unsigned int y1; /**< Holds the start vertical coordinate in pixels. */
unsigned int x2; /**< Holds the end horizontal coordinate in pixels. */
unsigned int y2; /**< Holds the end vertical coordinate in pixels. */
unsigned int arrow_width; /**< Holds the arrow shaft width in pixels. */
NvOSD_Arrow_Head_Direction arrow_head;
/**< Holds the arrowhead position. */
NvOSD_ColorParams arrow_color;
/**< Holds color parameters of the arrow box. */
unsigned int reserved; /**< Reserved for future use; currently
for internal use only. */
} NvOSD_ArrowParams;
/**
 * Holds circle parameters to be overlayed.
 */
typedef struct _NvOSD_CircleParams {
unsigned int xc; /**< Holds the center's horizontal coordinate in pixels. */
unsigned int yc; /**< Holds the center's vertical coordinate in pixels. */
unsigned int radius; /**< Holds the radius of the circle in pixels. */
NvOSD_ColorParams circle_color;
/**< Holds the color parameters of the circle. */
unsigned int has_bg_color; /**< Holds a Boolean value indicating whether
the circle has a background color. */
NvOSD_ColorParams bg_color; /**< Holds the circle's background color. */
unsigned int reserved; /**< Reserved for future use; currently
for internal use only. */
} NvOSD_CircleParams;
#ifdef __cplusplus
}
#endif
/** @} */
#endif

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/*
* This file defines the NvMsgBroker interface.
* The interfaces is used by applications to send and receive
* messages from remote entities and services to deliver events, allow
* configuration of settings etc.
*/
#ifndef __NV_MSGBROKER_H__
#define __NV_MSGBROKER_H__
#ifdef __cplusplus
extern "C"
{
#endif
#define NV_MSGBROKER_VERSION "1.0"
#define LOG_CAT "DSLOG:NV_MSGBROKER"
/*
* Defines status for operations in the NvMsgBroker interface
*/
typedef enum {
NV_MSGBROKER_API_OK,
NV_MSGBROKER_API_ERR,
NV_MSGBROKER_API_NOT_SUPPORTED
} NvMsgBrokerErrorType;
/*
* Defines structure of a client message packet
*/
typedef struct {
char *topic;
void *payload;
size_t payload_len;
} NvMsgBrokerClientMsg;
/*
* Handle to be used in NvMsgBroker API interface
*/
typedef void *NvMsgBrokerClientHandle;
/** Connect callback method registered during connect
*
* @param[in] h_ptr Msgbroker Connection handle
* @param[in] status Connection status
*/
typedef void (*nv_msgbroker_connect_cb_t)(NvMsgBrokerClientHandle h_ptr, NvMsgBrokerErrorType status );
/** Send callback method registered during send_async
* @param[in] user_ptr Pointer passed during send_async for context
* @param[in] status Completion status of send operation
*/
typedef void (*nv_msgbroker_send_cb_t)(void *user_ptr, NvMsgBrokerErrorType flag);
/** subscribe callback method registered during subscribe
* @param[in] user_ptr Pointer passed during send_async for context
* @param[in] status Completion status of send operation
*/
typedef void (*nv_msgbroker_subscribe_cb_t)(NvMsgBrokerErrorType flag, void *msg, int msglen, char *topic, void *user_ptr);
/** Connect to a remote entity by calling into msgbroker library
*
* @param[in] broker_conn_str A connection string (optional) with format
* @param[in] broker_proto_lib Full Path to Message protocol adapter library
* @param[in] connect_cb A pointer to a callback function for events associated with the connection.
* @param[in] cfg Pathname of a configuration file passed to be passed to the message protocol adapter
*
* @return A connection handle.
*/
NvMsgBrokerClientHandle nv_msgbroker_connect(char *broker_conn_str, char *broker_proto_lib, nv_msgbroker_connect_cb_t connect_cb, char *cfg);
/* Send a message asynchronously based on non-blocking semantics
* @param[in] h_ptr connection handle to Message Broker library
* @param[in] message Message packet which has details of message, topic, priority ..etc
* @param[in] cb callback to be invoked to notify status of send
* @param[in] user_ctx pointer to pass to callback for context
*
* @return Completion status of send operation */
NvMsgBrokerErrorType nv_msgbroker_send_async (NvMsgBrokerClientHandle h_ptr, NvMsgBrokerClientMsg message, nv_msgbroker_send_cb_t cb, void *user_ctx);
/** Subscribe to a remote entity for receiving messages on particular topic(s)
* @param[in] h_ptr Connection handle
* @param[in] topics pointer to array of topics (cannot be NULL)
* @param[in] num_topics number of topics
* @param[in] cb A pointer to a callback function to forward consumed message
* @param[in] user_ctx user ptr to be passed to callback for context
* @return Completion status of send operation
**/
NvMsgBrokerErrorType nv_msgbroker_subscribe(NvMsgBrokerClientHandle h_ptr, char ** topics, int num_topics, nv_msgbroker_subscribe_cb_t cb, void *user_ctx);
/* Disconnect call to notify msgbroker library for connection termination
* @param[in] h_ptr Connection handle
*
* @return status of disconnect
*/
NvMsgBrokerErrorType nv_msgbroker_disconnect(NvMsgBrokerClientHandle h_ptr);
/* Version of Nvmsgbroker interface
*
* @return [out] version of Nvmsgbroker interface supported by msgbroker library in MAJOR.MINOR format
*/
char *nv_msgbroker_version(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,5 @@
stick
wheelchair
person
NO-stick
stroller

View File

@ -0,0 +1,59 @@
################################################################################
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
# Default CUDA version; override on the command line: make CUDA_VER=11.4
CUDA_VER?=10.2
ifeq ($(CUDA_VER),)
$(error "CUDA_VER is not set")
endif
CC:= g++
NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations
CFLAGS+= -I../includes -I/usr/local/cuda-$(CUDA_VER)/include
LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs
LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group
INCS:= $(wildcard *.h)
SRCFILES:= nvdsinfer_yolo_engine.cpp \
           nvdsparsebbox_Yolo.cpp \
           trt_utils.cpp \
           yolo.cpp \
           yoloPlugins.cpp
TARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so
TARGET_OBJS:= $(SRCFILES:.cpp=.o)
TARGET_OBJS:= $(TARGET_OBJS:.cu=.o)

# Declare phony targets so `make all` / `make clean` work even if files with
# those names ever exist in this directory.
.PHONY: all clean

all: $(TARGET_LIB)

%.o: %.cpp $(INCS) Makefile
	$(CC) -c -o $@ $(CFLAGS) $<

%.o: %.cu $(INCS) Makefile
	$(NVCC) -c -o $@ --compiler-options '-fPIC' $<

$(TARGET_LIB) : $(TARGET_OBJS)
	$(CC) -o $@ $(TARGET_OBJS) $(LFLAGS)

# Remove the shared library AND the intermediate object files; the original
# clean rule deleted only the .so and left stale .o files behind.
clean:
	rm -rf $(TARGET_LIB) $(TARGET_OBJS)

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2019 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream: Smart recording API</b>
*/
/**
* @defgroup custom_gstreamer Custom Gstreamer APIs
*
* This section defines custom Gstreamer APIs
*
*/
#ifndef NVDSSR_H_
#define NVDSSR_H_
#include <gst/gst.h>
/**
*
* @defgroup gstreamer_nvdssr Smart Record
*
* Specifies APIs relating to smart recording.
*
* @ingroup custom_gstreamer
* @{
*/
#ifdef __cplusplus
extern "C"
{
#endif
typedef struct NvDsSRRecordingInfo NvDsSRRecordingInfo;
typedef gpointer (*NvDsSRCallbackFunc) (NvDsSRRecordingInfo *info, gpointer userData);
typedef guint32 NvDsSRSessionId;
/**
* Specifies container types.
*/
typedef enum {
NVDSSR_CONTAINER_MP4,
NVDSSR_CONTAINER_MKV
} NvDsSRContainerType;
/**
* Specifies API return status.
*/
typedef enum {
NVDSSR_STATUS_OK,
NVDSSR_STATUS_INVALID_VAL,
NVDSSR_STATUS_INVALID_OP,
NVDSSR_STATUS_ERROR,
NVDSSR_STATUS_CUSTOM1 = 100,
NVDSSR_STATUS_CUSTOM2 = 101,
NVDSSR_STATUS_CUSTOM3 = 102
} NvDsSRStatus;
/**
 * Holds initialization parameters required to create \ref NvDsSRContext.
*/
typedef struct NvDsSRInitParams
{
/** callback function gets called once recording is complete */
NvDsSRCallbackFunc callback;
/** recording video container, MP4 / MKV */
NvDsSRContainerType containerType;
/** optional, recording video width, 0 means no transcode */
guint width;
/** optional, recording video height, 0 means no transcode */
guint height;
/** recorded file name prefix */
gchar *fileNamePrefix;
/** store recorded file under directory path */
gchar *dirpath;
/** default recording duration in seconds */
guint defaultDuration;
/** size of video cache in seconds. */
guint videoCacheSize;
} NvDsSRInitParams;
/**
* Holds information about smart record instance.
*/
typedef struct NvDsSRContext
{
/** parent bin element. */
GstElement *recordbin;
/** queue element to cache the content. */
GstElement *recordQue;
/** child bin to save the content to file. */
GstElement *encodebin;
/** filesink element */
GstElement *filesink;
/** flag to check the key frame. */
gboolean gotKeyFrame;
/** flag to check if recording is on */
gboolean recordOn;
/** flag to check if encodebin is reset */
gboolean resetDone;
/** flag to check if encodebin is in playing state. */
gboolean isPlaying;
/** initialization parameters */
NvDsSRInitParams initParams;
/** mutex to control the flow */
GMutex flowLock;
/** thread to reset the encodebin */
GThread *resetThread;
/** pointer to user provided data */
gpointer uData;
/** pointer to private data */
gpointer privData;
} NvDsSRContext;
/**
* Hold information about video recorded.
*/
typedef struct NvDsSRRecordingInfo
{
/** SR bin context */
NvDsSRContext *ctx;
/** recording session-id */
NvDsSRSessionId sessionId;
/** recorded file name */
gchar *filename;
/** recorded file dir path */
gchar *dirpath;
/** duration in milliseconds */
guint64 duration;
/** recorded video container, MP4 / MKV */
NvDsSRContainerType containerType;
/** recorded video width*/
guint width;
/** recorded video height*/
guint height;
} NvDsSRRecordingInfo;
/**
* \brief Creates the instance of smart record.
*
* This function creates the instance of smart record and returns the pointer
* to an allocated \ref NvDsSRContext. The \a params structure must be filled
* with initialization parameters required to create the instance.
*
* recordbin of \ref NvDsSRContext is smart record bin which must be added
* to the pipeline. It expects encoded frames which will be muxed and saved to
* the file. Add this bin after parser element in the pipeline.
*
* Call NvDsSRDestroy() to free resources allocated by this function.
*
* @param[out] ctx An indirect pointer to the smart record instance.
* @param[in] params A pointer to a \ref NvDsSRInitParams structure.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRCreate (NvDsSRContext **ctx, NvDsSRInitParams *params);
/**
* \brief Starts the video recording.
*
* This function starts writing the cached video data to a file. It returns
* the session id which later can be used in NvDsSRStop() to stop the
* corresponding recording.
*
* Here startTime specifies the seconds before the current time and duration
* specifies the seconds after the start of recording.
* If current time is t1, content from t1 - startTime to t1 + duration will
* be saved to file. Therefore a total of startTime + duration seconds of data
* will be recorded.
*
* @param[in] ctx A pointer to a \ref NvDsSRContext.
* @param[out] sessionId A pointer to a \ref NvDsSRSessionId.
* @param[in] startTime Seconds before the current time. Should be less than video cache size.
* @param[in] duration Duration value in seconds after the start of recording.
* @param[in] userData A pointer to user specified data.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRStart (NvDsSRContext *ctx, NvDsSRSessionId *sessionId,
guint startTime, guint duration, gpointer userData);
/**
* \brief Stops the previously started recording.
*
* @param[in] ctx A pointer to a \ref NvDsSRContext.
 * @param[in] sessionId ID of the session to stop.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRStop (NvDsSRContext *ctx, NvDsSRSessionId sessionId);
/**
* \brief Destroys the instance of smart record.
*
* This function releases the resources previously allocated by NvDsSRCreate().
*
* @param[in] ctx A pointer to a \ref NvDsSRContext to be freed.
*
* @return NVDSSR_STATUS_OK if successful, or corresponding error otherwise.
*/
NvDsSRStatus NvDsSRDestroy (NvDsSRContext *ctx);
#ifdef __cplusplus
}
#endif
#endif /* NVDSSR_H_ */
/** @} */

View File

@ -0,0 +1,158 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Custom Events</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer custom
* event functions, used to map events to individual sources which
* are batched together by Gst-nvstreammux.
*
*/
/**
* @defgroup gstreamer_nvevent Events: Custom Events API
*
* Specifies GStreamer custom event functions, used to map events
* to individual sources which are batched together by Gst-nvstreammux.
*
* @ingroup gst_mess_evnt_qry
* @{
*/
#ifndef __GST_NVEVENT_H__
#define __GST_NVEVENT_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
#define FLAG(name) GST_EVENT_TYPE_##name
/** Defines supported types of custom events. */
typedef enum {
/** Specifies a custom event to indicate Pad Added. */
GST_NVEVENT_PAD_ADDED
= GST_EVENT_MAKE_TYPE (400, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate Pad Deleted. */
GST_NVEVENT_PAD_DELETED
= GST_EVENT_MAKE_TYPE (401, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate EOS of a particular stream
in a batch. */
GST_NVEVENT_STREAM_EOS
= GST_EVENT_MAKE_TYPE (402, FLAG(DOWNSTREAM) | FLAG(SERIALIZED)),
/** Specifies a custom event to indicate a stream segment. */
GST_NVEVENT_STREAM_SEGMENT
= GST_EVENT_MAKE_TYPE (403, FLAG(DOWNSTREAM) | FLAG(SERIALIZED))
} GstNvEventType;
#undef FLAG
/**
* Creates a "custom pad added" event for the specified source.
*
* @param[in] source_id Source ID of the stream to be added to the pipeline;
* also the pad ID of the sinkpad of the
* Gst-nvstreammux plugin for which the source
* is configured.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_pad_added (guint source_id);
/**
* Creates a "custom pad deleted" event for the specified source.
*
* @param[in] source_id Source ID of the stream to be removed
* from the pipeline; also the pad ID of the sinkpad
* of the Gst-nvstreammux plugin for which
* the source is configured.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_pad_deleted (guint source_id);
/**
* Creates a "custom EOS" event for the specified source.
*
* @param[in] source_id Source ID of the stream for which EOS is to be sent;
* also the pad ID of the sinkpad of the
* Gst-nvstreammux plugin for which
* the source is configured.
 * @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_stream_eos (guint source_id);
/**
* Creates a "custom segment" event for the specified source.
*
* @param[in] source_id Source ID of the stream for which a segment event
* is to be sent; also the pad ID of the sinkpad
* of the Gst-nvstreammux plugin for which the source
* is configured.
* @param[in] segment A pointer to a copy of the segment to be sent
* with the event; corresponds to the original segment
* for the specified source.
* @return A pointer to the event corresponding to the request if successful,
* or NULL otherwise.
*/
GstEvent * gst_nvevent_new_stream_segment (guint source_id, GstSegment *segment);
/**
* Parses a "pad added" event received on the sinkpad.
*
* @param[in] event A pointer to the event received on the sinkpad
* when the pad is added to Gst-nvstreammux.
* @param[out] source_id A pointer to the parsed source ID for the event.
*/
void gst_nvevent_parse_pad_added (GstEvent * event, guint * source_id);
/**
* Parses a "pad deleted" event received on the sinkpad.
*
* @param[in] event A pointer to the event received on the sinkpad
* when the pad is deleted from Gst-nvstreammux.
* @param[out] source_id A pointer to the parsed source ID for the event.
*/
void gst_nvevent_parse_pad_deleted (GstEvent * event, guint * source_id);
/**
* Parses a "stream EOS" event received on the sinkpad.
*
* @param[in] event A pointer to the event received on the sinkpad
* when the source ID sends the EOS event.
* @param[out] source_id A pointer to the parsed source ID for the event.
*/
void gst_nvevent_parse_stream_eos (GstEvent * event, guint * source_id);
/**
* Parses a "stream segment" event received on the sinkpad.
*
* @param[in] event The event received on the sinkpad
* when the source ID sends a segment event.
* @param[out] source_id A pointer to the parsed source ID for which
* the event is sent.
* @param[out] segment A double pointer to the parsed segment
* corresponding to source ID for the event.
*/
void gst_nvevent_parse_stream_segment (GstEvent * event, guint * source_id,
GstSegment **segment);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Custom Message Functions</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer custom
* message functions.
*
*/
/**
* @defgroup gst_mess_evnt_qry Events, Messages and Query based APIs
*
* Defines Events, Messages and Query-based APIs
*
*/
#ifndef __GST_NVMESSAGE_H__
#define __GST_NVMESSAGE_H__
#include <gst/gst.h>
G_BEGIN_DECLS
/**
* @defgroup gst_message_plugin Messages: Custom Message Functions API
* Helper functions for custom GStreamer messages posted by DeepStream GStreamer
* plugins.
*
* DeepStream GStreamer plugins post the following custom messages:
 * - Stream EOS - Posted by the `NvStreamMuxer` element when it receives EOS
* on one of its sink pads.
*
* @ingroup gst_mess_evnt_qry
* @{
*/
/**
* Creates a new Stream EOS message.
*
 * @param[in] obj            The GStreamer object creating the message.
 * @param[in] eos_stream_id  Stream ID of the stream for which EOS
 *                           has been received.
*
* @return A pointer to the new message.
*/
GstMessage * gst_nvmessage_new_stream_eos (GstObject *obj, guint eos_stream_id);
/**
* Determines whether a message is a stream EOS message.
*
 * @param[in] message A pointer to the message to be checked.
*
* @return A Boolean; true if the message is a stream EOS message.
*/
gboolean gst_nvmessage_is_stream_eos (GstMessage * message);
/**
* \brief Parses the stream ID from a stream EOS message.
*
* The stream ID is the index of the stream which sent the EOS event to
* Gst-streammux.
*
 * @param[in]  message        A pointer to a stream EOS message.
 * @param[out] eos_stream_id  A pointer to an unsigned integer in which
 *                            the stream ID is stored.
*
* @return A Boolean; true if the message was successfully parsed.
*/
gboolean gst_nvmessage_parse_stream_eos (GstMessage * message, guint * eos_stream_id);
/** @} */
G_END_DECLS
#endif

View File

@ -0,0 +1,118 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Helper Queries</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer helper
* query functions.
*
*/
#ifndef __GST_NVQUERY_H__
#define __GST_NVQUERY_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup gst_query_plugin Query Functions
* Gets information such as the batch size and the number of streams.
* @ingroup gst_mess_evnt_qry
* @{
*/
/**
 * Creates a new batch-size query, which can be used by elements to query
 * the number of buffers in upstream elements' batched buffers.
 *
 * @return A pointer to the new batch size query.
 */
GstQuery * gst_nvquery_batch_size_new (void);
/**
 * Determines whether a query is a batch size query.
 *
 * @param[in] query A pointer to the query to be checked.
 *
 * @return True if the query is a batch size query.
 */
gboolean gst_nvquery_is_batch_size (GstQuery * query);
/**
 * Sets the batch size, used by the elements responding to the batch size query.
 *
 * This function fails if the query is not a batch size query.
 *
 * @param[in] query A pointer to a batch size query.
 * @param[in] batch_size The batch size to be set.
 */
void gst_nvquery_batch_size_set (GstQuery * query, guint batch_size);
/**
 * Parses batch size from a batch size query.
 *
 * @param[in] query A pointer to a batch size query.
 * @param[out] batch_size A pointer to an unsigned integer in which the
 *  batch size is stored.
 *
 * @return True if the query was successfully parsed.
 */
gboolean gst_nvquery_batch_size_parse (GstQuery * query, guint * batch_size);
/**
 * Creates a number of streams query, used by elements to query
 * upstream the number of input sources.
 *
 * @return A pointer to the new query.
 */
GstQuery * gst_nvquery_numStreams_size_new (void);
/**
 * Determines whether a query is a number-of-streams query.
 *
 * @param[in] query A pointer to the query to be checked.
 *
 * @return A Boolean; true if the query is a number of streams query.
 */
gboolean gst_nvquery_is_numStreams_size (GstQuery * query);
/**
 * \brief Sets the number of input sources.
 *
 * This function is used by elements responding to
 * a number of streams query. It fails if the query is not of the correct type.
 *
 * @param[in] query A pointer to a number-of-streams query.
 * @param[in] numStreams_size The number of input sources.
 */
void gst_nvquery_numStreams_size_set (GstQuery * query, guint numStreams_size);
/**
 * Parses the number of streams from a number of streams query.
 *
 * @param[in] query A pointer to a number-of-streams query.
 * @param[out] numStreams_size A pointer to an unsigned integer in which
 *  the number of streams is stored.
 *
 * @return True if the query was successfully parsed.
 */
gboolean gst_nvquery_numStreams_size_parse (GstQuery * query, guint * numStreams_size);
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GSTNVDSBUFFERPOOL_H_
#define GSTNVDSBUFFERPOOL_H_
#include <gst/gst.h>
G_BEGIN_DECLS
typedef struct _GstNvDsBufferPool GstNvDsBufferPool;
typedef struct _GstNvDsBufferPoolClass GstNvDsBufferPoolClass;
typedef struct _GstNvDsBufferPoolPrivate GstNvDsBufferPoolPrivate;
#define GST_TYPE_NVDS_BUFFER_POOL (gst_nvds_buffer_pool_get_type())
#define GST_IS_NVDS_BUFFER_POOL(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_NVDS_BUFFER_POOL))
#define GST_NVDS_BUFFER_POOL(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_NVDS_BUFFER_POOL, GstNvDsBufferPool))
#define GST_NVDS_BUFFER_POOL_CAST(obj) ((GstNvDsBufferPool*)(obj))
#define GST_NVDS_MEMORY_TYPE "nvds"
#define GST_BUFFER_POOL_OPTION_NVDS_META "GstBufferPoolOptionNvDsMeta"
/** Instance structure of the DeepStream buffer pool. Extends GstBufferPool;
 *  presumably allocates buffers of memory type "nvds"
 *  (see GST_NVDS_MEMORY_TYPE) — confirm against the plugin sources. */
struct _GstNvDsBufferPool
{
  /** Parent GstBufferPool instance structure. */
  GstBufferPool bufferpool;
  /** Opaque private implementation data of the pool. */
  GstNvDsBufferPoolPrivate *priv;
};
/** Class structure for @ref GstNvDsBufferPool. */
struct _GstNvDsBufferPoolClass
{
  /** Parent class structure. */
  GstBufferPoolClass parent_class;
};
/** Returns the GType of the NvDs buffer pool
 *  (also available via GST_TYPE_NVDS_BUFFER_POOL). */
GType gst_nvds_buffer_pool_get_type (void);
/** Creates a new NvDs buffer pool instance. */
GstBufferPool* gst_nvds_buffer_pool_new (void);
G_END_DECLS
#endif /* GSTNVDSBUFFERPOOL_H_ */

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file gstnvdsinfer.h
* <b>NVIDIA DeepStream GStreamer NvInfer API Specification </b>
*
* @b Description: This file specifies the APIs and function definitions for
* the DeepStream GStreamer NvInfer Plugin.
*/
/**
* @defgroup gstreamer_nvinfer_api NvInfer Plugin
* Defines an API for the GStreamer NvInfer plugin.
* @ingroup custom_gstreamer
* @{
*/
G_BEGIN_DECLS
#include "nvdsinfer.h"
/**
* Function definition for the inference raw output generated callback of
* Gst-NvInfer plugin.
*
* The callback function can be registered by setting "raw-output-generated-callback"
* property on an "nvinfer" element instance. Additionally, a pointer to
* user data can be set through the "raw-output-generated-userdata" property.
* This pointer will be passed to the raw output generated callback function
* through the userdata parameter.
*
* Refer to the reference deepstream-app sources for a sample implementation
* of the callback.
*
* @param[in] buf Pointer to the GstBuffer on whose contents inference has been
* executed. The implementation should assume the buffer to be
* read-only and should not modify the buffer in any way.
* @param[in] network_info Network information for the model specified for the
* nvinfer element instance.
* @param[in] layers_info Pointer to the array containing information for all
* bound layers for the inference engine.
* @param[in] num_layers Number of layers bound for the inference engine i.e.
* number of elements in the layers_info array.
* @param[in] batch_size Number of valid input frames in the batch.
* @param[in] user_data Pointer to the user data set through the
* "raw-output-generated-userdata" property.
*/
typedef void (* gst_nvinfer_raw_output_generated_callback) (GstBuffer *buf,
NvDsInferNetworkInfo *network_info, NvDsInferLayerInfo *layers_info,
guint num_layers, guint batch_size, gpointer user_data);
/**
 * Holds the raw tensor output information for one frame / one object.
 *
 * The "nvinfer" plugin adds this meta when the "output-tensor-meta" property
 * of the element instance is set to TRUE.
 *
 * This meta data is added as NvDsUserMeta to the frame_user_meta_list of the
 * corresponding frame_meta or object_user_meta_list of the corresponding object
 * with the meta_type set to NVDSINFER_TENSOR_OUTPUT_META.
 */
typedef struct
{
  /** Unique ID of the gst-nvinfer instance which attached this meta. */
  guint unique_id;
  /** Number of bound output layers. */
  guint num_output_layers;
  /** Pointer to the array containing information for the bound output layers.
   * Size of the array will be equal to num_output_layers. Pointers inside
   * the NvDsInferLayerInfo structure are not valid for this array. */
  NvDsInferLayerInfo *output_layers_info;
  /** Array of pointers to the output host buffers for the frame / object. */
  void **out_buf_ptrs_host;
  /** Array of pointers to the output device buffers for the frame / object. */
  void **out_buf_ptrs_dev;
  /** GPU device ID on which the device buffers have been allocated. */
  gint gpu_id;
  /** Private data used for the meta producer's internal memory management. */
  void *priv_data;
  /** Network information for the model specified for the nvinfer element instance. */
  NvDsInferNetworkInfo network_info;
} NvDsInferTensorMeta;
/**
 * Holds the segmentation model output information for one frame / one object.
 *
 * The "nvinfer" plugin adds this meta for segmentation models.
 *
 * This meta data is added as NvDsUserMeta to the frame_user_meta_list of the
 * corresponding frame_meta or object_user_meta_list of the corresponding object
 * with the meta_type set to NVDSINFER_SEGMENTATION_META.
 */
typedef struct
{
  /** Number of classes in the segmentation output. */
  guint classes;
  /** Width of the segmentation output class map. */
  guint width;
  /** Height of the segmentation output class map. */
  guint height;
  /** Pointer to the array for 2D pixel class map. The output for pixel (x,y)
   * will be at index (y * width + x). */
  gint* class_map;
  /** Pointer to the raw array containing the probabilities. The probability for
   * class c and pixel (x,y) will be at index (c * width * height + y * width + x). */
  gfloat *class_probabilities_map;
  /** Private data used for the meta producer's internal memory management. */
  void *priv_data;
} NvDsInferSegmentationMeta;
G_END_DECLS
/** @} */

View File

@ -0,0 +1,178 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA GStreamer DeepStream: Metadata Extension</b>
*
* @b Description: This file defines the Metadata structure used to
* carry DeepStream metadata or any other metadata in GStreamer pipeline.
*/
/**
* @defgroup gstreamer_metagroup_api DeepStream Metadata Extension
*
* Defines an API for managing GStreamer DeepStream metadata.
* @ingroup custom_gstreamer
* @{
*
* DeepStream Metadata is attached to a buffer with gst_buffer_add_nvds_meta().
* Its metadata type is set to @ref NVDS_BATCH_GST_META.
*
* Multiple groups of metadata may be attached by different elements.
* gst_buffer_get_nvds_meta() gets the last added @ref NvDsMeta.
*
* The NvDsMeta::meta_data structure member must be cast
* to a meaningful structure pointer based on the @a meta_type.
* For example, for @a meta_type = NVDS_BATCH_GST_META,
* @a meta_data must be cast as `(NvDsBatchMeta *)`.
*/
#ifndef GST_NVDS_META_API_H
#define GST_NVDS_META_API_H
#include <gst/gst.h>
#include <gst/video/video.h>
#include <gst/base/gstbasetransform.h>
#include "nvdsmeta.h"
#include "nvds_latency_meta.h"
#ifdef __cplusplus
extern "C"
{
#endif
GType nvds_meta_api_get_type (void);
#define NVDS_META_API_TYPE (nvds_meta_api_get_type())
const GstMetaInfo *nvds_meta_get_info (void);
#define NVDS_META_INFO (nvds_meta_get_info())
#define NVDS_META_STRING "nvdsmeta"
/**
 * Defines the type of metadata. NVIDIA-defined %GstNvDsMetaType values are
 * in the range from @ref NVDS_BATCH_GST_META to @ref NVDS_START_USER_META.
 * (NVDS_START_USER_META is not declared in this header — presumably it comes
 * from nvdsmeta.h; confirm.)
 */
typedef enum {
  /** Invalid or unset metadata type. */
  NVDS_GST_INVALID_META=-1,
  /** Specifies information of a formed batch. */
  NVDS_BATCH_GST_META = NVDS_GST_CUSTOM_META + 1,
  /** Decoder metadata. (name-based; its contents are not defined in this
      header — verify against the decoder plugin) */
  NVDS_DECODER_GST_META,
  /** Specifies information of dewarped surfaces. */
  NVDS_DEWARPER_GST_META,
  /** End of the range reserved for NVIDIA-defined GStreamer meta types;
      user-defined types should use values beyond this one. */
  NVDS_RESERVED_GST_META = NVDS_GST_CUSTOM_META + 4096,
  /** Sentinel that forces the underlying enum type to 32 bits;
      not a valid metadata type. */
  NVDS_GST_META_FORCE32 = 0x7FFFFFFF
} GstNvDsMetaType;
/**
 * Holds DeepStream metadata.
 */
typedef struct _NvDsMeta {
  GstMeta meta;
  /** Holds a pointer to metadata. Must be cast to another structure based
      on @a meta_type. */
  gpointer meta_data;
  /** Holds a pointer to user-specific data. */
  gpointer user_data;
  /** Holds the type of metadata, one of the values of enum @ref GstNvDsMetaType. */
  gint meta_type;
  /** A callback to be called when @a meta_data is to be copied or transformed
      from one buffer to another. @a meta_data and @a user_data are passed
      as arguments. */
  NvDsMetaCopyFunc copyfunc;
  /** A callback to be called when @a meta_data is to be destroyed.
      @a meta_data and @a user_data are passed as arguments. */
  NvDsMetaReleaseFunc freefunc;
  /**
   * A callback to be called when @a meta_data is transformed into
   * @a NvDsUserMeta.
   * This function must be provided by a GStreamer plugin that precedes
   * @a Gst-nvstreammux in the DeepStream pipeline.
   * Gst-nvstreammux copies @a meta_data to
   * user meta data at frame level inside @ref NvDsFrameMeta.
   * @a meta_data and @a user_data are passed as arguments.
   *
   * To retrieve the content of @a meta_data, iterate over
   * @ref NvDsFrameMetaList. Then search for @a meta_type of @ref NvDsUserMeta
   * which the user has attached. (See the deepstream-gst-metadata-test source
   * code for more details.)
   */
  NvDsMetaCopyFunc gst_to_nvds_meta_transform_func;
  /**
   * A callback to be called when @a meta_data transformed into
   * @a NvDsUserMeta is to be destroyed.
   * This function must be provided by a GStreamer plugin that precedes
   * @a Gst-nvstreammux in the DeepStream pipeline.
   */
  NvDsMetaReleaseFunc gst_to_nvds_meta_release_func;
} NvDsMeta;
/**
* Adds %GstMeta of type @ref NvDsMeta to the GstBuffer and sets the @a meta_data
* member of @ref NvDsMeta.
*
* @param[in] buffer A pointer to a %GstBuffer to which the function adds
* metadata.
* @param[in] meta_data A pointer at which the function sets the @a meta_data
* member of @ref NvDsMeta.
* @param[in] user_data A pointer to the user-specific data.
* @param[in] copy_func The callback to be called when
* NvDsMeta is to be copied. The function is called with
* @a meta_data and @a user_data as parameters.
* @param[in] release_func
* The callback to be called when
* NvDsMeta is to be destroyed. The function is called with
* @a meta_data and @a user_data as parameters.
*
* @return A pointer to the attached NvDsMeta structure if successful,
* or NULL otherwise.
*/
NvDsMeta *gst_buffer_add_nvds_meta (GstBuffer *buffer, gpointer meta_data,
gpointer user_data, NvDsMetaCopyFunc copy_func,
NvDsMetaReleaseFunc release_func);
/**
* Gets the @ref NvDsMeta last added to a GstBuffer.
*
* @param[in] buffer A pointer to the GstBuffer.
*
* @return A pointer to the last added NvDsMeta structure, or NULL if no
* %NvDsMeta was attached.
*/
NvDsMeta* gst_buffer_get_nvds_meta (GstBuffer *buffer);
/**
* Gets the @ref NvDsBatchMeta added to a GstBuffer.
*
* @param[in] buffer A pointer to the GstBuffer.
*
* @return A pointer to the NvDsBatchMeta structure, or NULL if no
* NvDsMeta was attached.
*/
NvDsBatchMeta * gst_buffer_get_nvds_batch_meta (GstBuffer *buffer);
/** @} */
#ifdef __cplusplus
}
#endif
#endif

Binary file not shown.

View File

@ -0,0 +1,509 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file nvbufsurface.h
* <b>NvBufSurface Interface </b>
*
* This file specifies the NvBufSurface management API.
*
* The NvBufSurface API provides methods to allocate / deallocate, map / unmap
* and copy batched buffers.
*/
/**
* @defgroup ds_nvbuf_api Buffer Management API module
*
* This section describes types and functions of NvBufSurface application
* programming interface.
*
*/
#ifndef NVBUFSURFACE_H_
#define NVBUFSURFACE_H_
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/** @defgroup ds_aaa NvBufSurface Types and Functions
* Defines types and functions of \ref NvBufSurface application
* programming interface.
* @ingroup ds_nvbuf_api
* @{ */
/** Defines the default padding length for reserved fields of structures. */
#define STRUCTURE_PADDING 4
/** Defines the maximum number of planes. */
#define NVBUF_MAX_PLANES 4
/**
* Specifies mapping types for \ref NvBufSurface.
*/
typedef enum
{
NVBUF_MAP_READ, /**< Specifies \ref NvBufSurface mapping type "read." */
NVBUF_MAP_WRITE, /**< Specifies \ref NvBufSurface mapping type
"write." */
NVBUF_MAP_READ_WRITE, /**< Specifies \ref NvBufSurface mapping type
"read/write." */
} NvBufSurfaceMemMapFlags;
/**
* Specifies color formats for \ref NvBufSurface.
*/
typedef enum
{
/** Specifies an invalid color format. */
NVBUF_COLOR_FORMAT_INVALID,
/** Specifies 8 bit GRAY scale - single plane */
NVBUF_COLOR_FORMAT_GRAY8,
/** Specifies BT.601 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420,
/** Specifies BT.601 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YVU420,
/** Specifies BT.601 colorspace - YUV420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_ER,
/** Specifies BT.601 colorspace - YVU420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YVU420_ER,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_ER,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV21,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV21_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_UYVY,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_UYVY_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_VYUY,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_VYUY_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YUYV,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YUYV_ER,
/** Specifies BT.601 colorspace - YUV 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YVYU,
/** Specifies BT.601 colorspace - YUV ER 4:2:2 planar. */
NVBUF_COLOR_FORMAT_YVYU_ER,
/** Specifies BT.601 colorspace - YUV444 multi-planar. */
NVBUF_COLOR_FORMAT_YUV444,
/** Specifies RGBA-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGBA,
/** Specifies BGRA-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGRA,
/** Specifies ARGB-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_ARGB,
/** Specifies ABGR-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_ABGR,
/** Specifies RGBx-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGBx,
/** Specifies BGRx-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGRx,
/** Specifies xRGB-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_xRGB,
/** Specifies xBGR-8-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_xBGR,
/** Specifies RGB-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_RGB,
/** Specifies BGR-8-8-8 single plane. */
NVBUF_COLOR_FORMAT_BGR,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE,
/** Specifies BT.601 colorspace - Y/CbCr 4:2:0 12-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_12LE,
/** Specifies BT.709 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_709,
/** Specifies BT.709 colorspace - YUV420 ER multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_709_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_709_ER,
/** Specifies BT.2020 colorspace - YUV420 multi-planar. */
NVBUF_COLOR_FORMAT_YUV420_2020,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 multi-planar. */
NVBUF_COLOR_FORMAT_NV12_2020,
/** Specifies BT.601 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_ER,
/** Specifies BT.709 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_709,
/** Specifies BT.709 colorspace - Y/CbCr ER 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_709_ER,
/** Specifies BT.2020 colorspace - Y/CbCr 4:2:0 10-bit multi-planar. */
NVBUF_COLOR_FORMAT_NV12_10LE_2020,
/** Specifies color format for packed 2 signed shorts */
NVBUF_COLOR_FORMAT_SIGNED_R16G16,
NVBUF_COLOR_FORMAT_LAST
} NvBufSurfaceColorFormat;
/**
* Specifies layout formats for \ref NvBufSurface video planes.
*/
typedef enum
{
/** Specifies pitch layout. */
NVBUF_LAYOUT_PITCH,
/** Specifies block linear layout. */
NVBUF_LAYOUT_BLOCK_LINEAR,
} NvBufSurfaceLayout;
/**
* Specifies memory types for \ref NvBufSurface.
*/
typedef enum
{
/** Specifies the default memory type, i.e. \ref NVBUF_MEM_CUDA_DEVICE
for dGPU, \ref NVBUF_MEM_SURFACE_ARRAY for Jetson. Use \ref NVBUF_MEM_DEFAULT
to allocate whichever type of memory is appropriate for the platform. */
NVBUF_MEM_DEFAULT,
/** Specifies CUDA Host memory type. */
NVBUF_MEM_CUDA_PINNED,
/** Specifies CUDA Device memory type. */
NVBUF_MEM_CUDA_DEVICE,
/** Specifies CUDA Unified memory type. */
NVBUF_MEM_CUDA_UNIFIED,
/** Specifies NVRM Surface Array type. Valid only for Jetson. */
NVBUF_MEM_SURFACE_ARRAY,
/** Specifies NVRM Handle type. Valid only for Jetson. */
NVBUF_MEM_HANDLE,
/** Specifies memory allocated by malloc(). */
NVBUF_MEM_SYSTEM,
} NvBufSurfaceMemType;
/**
* Holds the planewise parameters of a buffer.
*/
typedef struct NvBufSurfacePlaneParams
{
/** Holds the number of planes. */
uint32_t num_planes;
/** Holds the widths of planes. */
uint32_t width[NVBUF_MAX_PLANES];
/** Holds the heights of planes. */
uint32_t height[NVBUF_MAX_PLANES];
/** Holds the pitches of planes in bytes. */
uint32_t pitch[NVBUF_MAX_PLANES];
/** Holds the offsets of planes in bytes. */
uint32_t offset[NVBUF_MAX_PLANES];
/** Holds the sizes of planes in bytes. */
uint32_t psize[NVBUF_MAX_PLANES];
/** Holds the number of bytes occupied by a pixel in each plane. */
uint32_t bytesPerPix[NVBUF_MAX_PLANES];
void * _reserved[STRUCTURE_PADDING * NVBUF_MAX_PLANES];
} NvBufSurfacePlaneParams;
/**
* Holds parameters required to allocate an \ref NvBufSurface.
*/
typedef struct NvBufSurfaceCreateParams {
/** Holds the GPU ID. Valid only for a multi-GPU system. */
uint32_t gpuId;
/** Holds the width of the buffer. */
uint32_t width;
/** Holds the height of the buffer. */
uint32_t height;
/** Holds the amount of memory to be allocated. Optional; if set, all other
parameters (width, height, etc.) are ignored. */
uint32_t size;
/** Holds a "contiguous memory" flag. If set, contiguous memory is allocated
for the batch. Valid only for CUDA memory types. */
bool isContiguous;
/** Holds the color format of the buffer. */
NvBufSurfaceColorFormat colorFormat;
/** Holds the surface layout. May be Block Linear (BL) or Pitch Linear (PL).
For a dGPU, only PL is valid. */
NvBufSurfaceLayout layout;
/** Holds the type of memory to be allocated. */
NvBufSurfaceMemType memType;
} NvBufSurfaceCreateParams;
/**
* Holds pointers for a mapped buffer.
*/
typedef struct NvBufSurfaceMappedAddr {
/** Holds planewise pointers to a CPU mapped buffer. */
void * addr[NVBUF_MAX_PLANES];
/** Holds a pointer to a mapped EGLImage. */
void *eglImage;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceMappedAddr;
/**
* Holds information about a single buffer in a batch.
*/
typedef struct NvBufSurfaceParams {
/** Holds the width of the buffer. */
uint32_t width;
/** Holds the height of the buffer. */
uint32_t height;
/** Holds the pitch of the buffer. */
uint32_t pitch;
/** Holds the color format of the buffer. */
NvBufSurfaceColorFormat colorFormat;
/** Holds BL or PL. For dGPU, only PL is valid. */
NvBufSurfaceLayout layout;
/** Holds a DMABUF FD. Valid only for \ref NVBUF_MEM_SURFACE_ARRAY and
\ref NVBUF_MEM_HANDLE type memory. */
uint64_t bufferDesc;
/** Holds the amount of allocated memory. */
uint32_t dataSize;
/** Holds a pointer to allocated memory. Not valid for
\ref NVBUF_MEM_SURFACE_ARRAY or \ref NVBUF_MEM_HANDLE. */
void * dataPtr;
/** Holds planewise information (width, height, pitch, offset, etc.). */
NvBufSurfacePlaneParams planeParams;
/** Holds pointers to mapped buffers. Initialized to NULL
when the structure is created. */
NvBufSurfaceMappedAddr mappedAddr;
void * _reserved[STRUCTURE_PADDING];
} NvBufSurfaceParams;
/**
 * Holds information about batched buffers.
 */
typedef struct NvBufSurface {
  /** Holds a GPU ID. Valid only for a multi-GPU system. */
  uint32_t gpuId;
  /** Holds the batch size. */
  uint32_t batchSize;
  /** Holds the number of valid and filled buffers. Initialized to zero when
      an instance of the structure is created. */
  uint32_t numFilled;
  /** Holds an "is contiguous" flag. If set, memory allocated for the batch
      is contiguous. */
  bool isContiguous;
  /** Holds the type of memory for buffers in the batch. */
  NvBufSurfaceMemType memType;
  /** Holds a pointer to an array of batched buffers. */
  NvBufSurfaceParams *surfaceList;
  /** Reserved for future use. */
  void * _reserved[STRUCTURE_PADDING];
} NvBufSurface;
/**
* \brief Allocates a batch of buffers.
*
* Allocates memory for \a batchSize buffers and returns a pointer to an
* allocated \ref NvBufSurface. The \a params structure must have
* the allocation parameters of a single buffer. If \a params.size
* is set, a buffer of that size is allocated, and all other
* parameters (width, height, color format, etc.) are ignored.
*
* Call NvBufSurfaceDestroy() to free resources allocated by this function.
*
* @param[out] surf An indirect pointer to the allocated batched
* buffers.
* @param[in] batchSize Batch size of buffers.
* @param[in] params A pointer to an \ref NvBufSurfaceCreateParams
* structure.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceCreate (NvBufSurface **surf, uint32_t batchSize,
NvBufSurfaceCreateParams *params);
/**
* \brief Frees batched buffers previously allocated by NvBufSurfaceCreate().
*
* @param[in] surf A pointer to an \ref NvBufSurface to be freed.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceDestroy (NvBufSurface *surf);
/**
* \brief Maps hardware batched buffers to the HOST or CPU address space.
*
* Valid for \ref NVBUF_MEM_CUDA_UNIFIED type memory for dGPU and
* \ref NVBUF_MEM_SURFACE_ARRAY and \ref NVBUF_MEM_HANDLE type memory for
* Jetson.
*
* This function fills an array of pointers at
* \a surf->surfaceList->mappedAddr->addr.
* \a surf is a pointer to an \ref NvBufSurface.
* \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
* \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
* \a addr is declared as an array of pointers to void, and holds pointers
* to the buffers.
*
* The client must call NvBufSurfaceSyncForCpu() with the virtual address
* populated by this function before accessing mapped memory in the CPU.
*
* After memory mapping is complete, mapped memory modification
* must be coordinated between the CPU and the hardware device as
* follows:
* - CPU: If the CPU modifies mapped memory, the client must call
* NvBufSurfaceSyncForDevice() before any hardware device accesses the memory.
* - Hardware device: If a hardware device modifies mapped memory, the client
* must call NvBufSurfaceSyncForCpu() before the CPU accesses the memory.
*
* Use NvBufSurfaceUnMap() to unmap buffer(s) and release any resource.
*
* @param[in,out] surf A pointer to an NvBufSurface structure. The function
* stores pointers to the buffers in a descendant of this
* structure; see the notes above.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in buffer. -1 refers to all planes
* in the buffer.
* @param[in] type A flag for mapping type.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceMap (NvBufSurface *surf, int index, int plane, NvBufSurfaceMemMapFlags type);
/**
* \brief Unmaps previously mapped buffer(s).
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 indicates
* all buffers in the batch.
* @param[in] plane Index of a plane in the buffer. -1 indicates
* all planes in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceUnMap (NvBufSurface *surf, int index, int plane);
/**
* \brief Copies the content of source batched buffer(s) to destination
* batched buffer(s).
*
* You can use this function to copy source buffer(s) of one memory type
* to destination buffer(s) of another memory type,
* e.g. CUDA host to CUDA device, malloc'ed memory to CUDA device, etc.
*
* The source and destination \ref NvBufSurface objects must have same
* buffer and batch size.
*
* @param[in] srcSurf A pointer to the source NvBufSurface structure.
* @param[in] dstSurf A pointer to the destination NvBufSurface structure.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceCopy (NvBufSurface *srcSurf, NvBufSurface *dstSurf);
/**
* \brief Syncs the hardware memory cache for the CPU.
*
* Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
* \ref NVBUF_MEM_HANDLE.
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of the buffer in the batch. -1 refers to
* all buffers in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceSyncForCpu (NvBufSurface *surf, int index, int plane);
/**
* \brief Syncs the hardware memory cache for the device.
*
* Valid only for memory types \ref NVBUF_MEM_SURFACE_ARRAY and
* \ref NVBUF_MEM_HANDLE.
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceSyncForDevice (NvBufSurface *surf, int index, int plane);
/**
* \brief Gets the \ref NvBufSurface from the DMABUF FD.
*
* @param[in] dmabuf_fd DMABUF FD of the buffer.
* @param[out] buffer A pointer to the NvBufSurface.
*
* @return 0 for success, or -1 otherwise.
*/
int NvBufSurfaceFromFd (int dmabuf_fd, void **buffer);
/**
* \brief Fills each byte of the buffer(s) in an \ref NvBufSurface with a
* provided value.
*
* You can also use this function to reset the buffer(s) in the batch.
*
* @param[in] surf A pointer to the NvBufSurface structure.
* @param[in] index Index of a buffer in the batch. -1 refers to all buffers
* in the batch.
* @param[in] plane Index of a plane in the buffer. -1 refers to all planes
* in the buffer.
* @param[in] value The value to be used as fill.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceMemSet (NvBufSurface *surf, int index, int plane, uint8_t value);
/**
* \brief Creates an EGLImage from the memory of one or more
* \ref NvBufSurface buffers.
*
* Only memory type \ref NVBUF_MEM_SURFACE_ARRAY is supported.
*
* This function returns the created EGLImage by storing its address at
* \a surf->surfaceList->mappedAddr->eglImage. (\a surf is a pointer to
* an NvBufSurface. \a surfaceList is a pointer to an \ref NvBufSurfaceParams.
* \a mappedAddr is a pointer to an \ref NvBufSurfaceMappedAddr.
* \a eglImage is declared as a pointer to void, and holds an
* EGLImageKHR.)
*
* You can use this function in scenarios where a CUDA operation on Jetson
* hardware memory (identified by \ref NVBUF_MEM_SURFACE_ARRAY) is required.
* The EGLImageKHR struct provided by this function can then be registered
* with CUDA for further CUDA operations.
*
* @param[in,out] surf A pointer to an NvBufSurface structure. The function
* stores a pointer to the created EGLImage in
* a descendant of this structure; see the notes above.
* @param[in] index Index of a buffer in the batch. -1 specifies all buffers
* in the batch.
*
* @return 0 for success, or -1 otherwise.
*/
int NvBufSurfaceMapEglImage (NvBufSurface *surf, int index);
/**
* \brief Destroys the previously created EGLImage object(s).
*
* @param[in] surf A pointer to an \ref NvBufSurface structure.
* @param[in] index The index of a buffer in the batch. -1 specifies all
* buffers in the batch.
*
* @return 0 if successful, or -1 otherwise.
*/
int NvBufSurfaceUnMapEglImage (NvBufSurface *surf, int index);
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* NVBUFSURFACE_H_ */

View File

@ -0,0 +1,282 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file nvbufsurftransform.h
* <b>NvBufSurfTransform Interface </b>
*
* This file specifies the NvBufSurfTransform image transformation APIs.
*
* The NvBufSurfTransform API provides methods to set and get session parameters
* and to transform and composite APIs.
*/
#ifndef NVBUFSURFTRANSFORM_H_
#define NVBUFSURFTRANSFORM_H_
#include <cuda.h>
#include <cuda_runtime.h>
#include "nvbufsurface.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @defgroup ds_bbb NvBufSurfTransform Types and Functions
* Defines types and functions of the \ref NvBufSurfTransform
* application programming interface.
* @ingroup ds_nvbuf_api
* @{ */
/**
* Specifies compute devices used by \ref NvBufSurfTransform.
*/
typedef enum
{
  /** Specifies VIC as the compute device for Jetson, or dGPU for an x86_64
   system. */
  NvBufSurfTransformCompute_Default,
  /** Specifies that the GPU is the compute device. */
  NvBufSurfTransformCompute_GPU,
  /** Specifies that the VIC is the compute device. Supported only for Jetson. */
  NvBufSurfTransformCompute_VIC
} NvBufSurfTransform_Compute;
/**
* Specifies video flip methods. Supported only for Jetson.
*/
typedef enum
{
  /** Specifies no video flip. */
  NvBufSurfTransform_None,
  /** Specifies rotating 90 degrees clockwise. */
  NvBufSurfTransform_Rotate90,
  /** Specifies rotating 180 degrees clockwise. */
  NvBufSurfTransform_Rotate180,
  /** Specifies rotating 270 degrees clockwise. */
  NvBufSurfTransform_Rotate270,
  /** Specifies a video flip with respect to the X-axis. */
  NvBufSurfTransform_FlipX,
  /** Specifies a video flip with respect to the Y-axis. */
  NvBufSurfTransform_FlipY,
  /** Specifies a video flip transpose. */
  NvBufSurfTransform_Transpose,
  /** Specifies a video flip inverse transpose. */
  NvBufSurfTransform_InvTranspose,
} NvBufSurfTransform_Flip;
/**
* Specifies video interpolation methods.
*/
typedef enum
{
  /** Specifies Nearest Interpolation Method interpolation. */
  NvBufSurfTransformInter_Nearest = 0,
  /** Specifies Bilinear Interpolation Method interpolation. */
  NvBufSurfTransformInter_Bilinear,
  /** Specifies GPU-Cubic, VIC-5 Tap interpolation. */
  NvBufSurfTransformInter_Algo1,
  /** Specifies GPU-Super, VIC-10 Tap interpolation. */
  NvBufSurfTransformInter_Algo2,
  /** Specifies GPU-Lanczos, VIC-Smart interpolation. */
  NvBufSurfTransformInter_Algo3,
  /** Specifies GPU-Ignored, VIC-Nicest interpolation. */
  NvBufSurfTransformInter_Algo4,
  /** Specifies GPU-Nearest, VIC-Nearest interpolation. */
  NvBufSurfTransformInter_Default
} NvBufSurfTransform_Inter;
/**
* Specifies error codes returned by \ref NvBufSurfTransform functions.
*/
typedef enum
{
  /** Specifies an error in the source or destination ROI. */
  NvBufSurfTransformError_ROI_Error = -4,
  /** Specifies invalid input parameters. */
  NvBufSurfTransformError_Invalid_Params = -3,
  /** Specifies a runtime execution error. */
  NvBufSurfTransformError_Execution_Error = -2,
  /** Specifies an unsupported feature or format. */
  NvBufSurfTransformError_Unsupported = -1,
  /** Specifies a successful operation. */
  NvBufSurfTransformError_Success = 0
} NvBufSurfTransform_Error;
/**
* Specifies transform types.
*/
/* These values are single-bit flags; combine them with bitwise OR in
 NvBufSurfTransformParams.transform_flag. */
typedef enum {
  /** Specifies a transform to crop the source rectangle. */
  NVBUFSURF_TRANSFORM_CROP_SRC = 1,
  /** Specifies a transform to crop the destination rectangle. */
  NVBUFSURF_TRANSFORM_CROP_DST = 1 << 1,
  /** Specifies a transform to set the filter type. */
  NVBUFSURF_TRANSFORM_FILTER = 1 << 2,
  /** Specifies a transform to set the flip method. */
  NVBUFSURF_TRANSFORM_FLIP = 1 << 3,
} NvBufSurfTransform_Transform_Flag;
/**
* Specifies types of composition operations.
*/
typedef enum {
  /** Specifies a flag to describe the requested compositing operation;
   set in NvBufSurfTransformCompositeParams.composite_flag. */
  NVBUFSURF_TRANSFORM_COMPOSITE = 1,
} NvBufSurfTransform_Composite_Flag;
/**
* Holds the coordinates of a rectangle.
*/
typedef struct
{
  /** Holds the top edge of the rectangle. */
  uint32_t top;
  /** Holds the left edge of the rectangle. */
  uint32_t left;
  /** Holds the width of the rectangle. */
  uint32_t width;
  /** Holds the height of the rectangle. */
  uint32_t height;
}NvBufSurfTransformRect;
/**
* Holds configuration parameters for a transform/composite session.
*/
typedef struct _NvBufSurfTransformConfigParams
{
  /** Holds the mode of operation: VIC (Jetson) or GPU (iGPU + dGPU).
   If VIC is configured, \a gpu_id is ignored. */
  NvBufSurfTransform_Compute compute_mode;
  /** Holds the GPU ID to be used for processing. */
  int32_t gpu_id;
  /** Holds the user-configured CUDA stream to be used. If NULL, the
   default stream is used. Ignored if VIC is used. */
  cudaStream_t cuda_stream;
} NvBufSurfTransformConfigParams;
/**
* Holds transform parameters for a transform call.
*/
typedef struct _NvBufSurfaceTransformParams
{
  /** Holds a flag that indicates which transform parameters are valid;
   a bitwise OR of \ref NvBufSurfTransform_Transform_Flag values. */
  uint32_t transform_flag;
  /** Holds the flip method. */
  NvBufSurfTransform_Flip transform_flip;
  /** Holds a transform filter. */
  NvBufSurfTransform_Inter transform_filter;
  /** Holds a pointer to a list of source rectangle coordinates for
   a crop operation. */
  NvBufSurfTransformRect *src_rect;
  /** Holds a pointer to a list of destination rectangle coordinates for
   a crop operation. */
  NvBufSurfTransformRect *dst_rect;
}NvBufSurfTransformParams;
/**
* Holds composite parameters for a composite call.
*/
typedef struct _NvBufSurfTransformCompositeParams
{
  /** Holds a flag that indicates which composition parameters are valid;
   see \ref NvBufSurfTransform_Composite_Flag. */
  uint32_t composite_flag;
  /** Holds the number of input buffers to be composited. */
  uint32_t input_buf_count;
  /** Holds source rectangle coordinates of input buffers for compositing. */
  NvBufSurfTransformRect *src_comp_rect;
  /** Holds destination rectangle coordinates of input buffers for
   compositing. */
  NvBufSurfTransformRect *dst_comp_rect;
}NvBufSurfTransformCompositeParams;
/**
* \brief Sets user-defined session parameters.
*
* If user-defined session parameters are set, they override the
* NvBufSurfTransform() function's default session.
*
* @param[in] config_params A pointer to a structure that is populated
* with the session parameters to be used.
*
* @return An \ref NvBufSurfTransform_Error value indicating
* success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransformSetSessionParams
(NvBufSurfTransformConfigParams *config_params);
/**
* \brief Gets the session parameters used by NvBufSurfTransform().
*
* @param[out] config_params A pointer to a caller-allocated structure to be
* populated with the session parameters used.
*
* @return An \ref NvBufSurfTransform_Error value indicating
* success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransformGetSessionParams
(NvBufSurfTransformConfigParams *config_params);
/**
* \brief Performs a transformation on batched input images.
*
* If user-defined session parameters are to be used, call
* NvBufSurfTransformSetSessionParams() before calling this function.
*
* @param[in] src A pointer to input batched buffers to be transformed.
* @param[out] dst A pointer to a caller-allocated location where
* transformed output is to be stored.
* @par When destination cropping is performed, memory outside
* the crop location is not touched, and may contain stale
* information. The caller must perform a memset before
* calling this function if stale information must be
* eliminated.
* @param[in] transform_params
* A pointer to an \ref NvBufSurfTransformParams structure
* which specifies the type of transform to be performed. They
* may include any combination of scaling, format conversion,
* and cropping for both source and destination.
* Flipping and rotation are supported on VIC.
* @return An \ref NvBufSurfTransform_Error value indicating
* success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransform (NvBufSurface *src, NvBufSurface *dst,
NvBufSurfTransformParams *transform_params);
/**
* \brief Composites batched input images.
*
 * The compositor scales and stitches
* batched buffers indicated by \a src into a single destination buffer, \a dst.
*
* If user-defined session parameters are to be used, call
* NvBufSurfTransformSetSessionParams() before calling this function.
*
* @param[in] src A pointer to input batched buffers to be transformed.
* @param[out] dst A pointer a caller-allocated location (a single buffer)
* where composited output is to be stored.
* @param[in] composite_params
* A pointer to an \ref NvBufSurfTransformCompositeParams
* structure which specifies the compositing operation to be
* performed, e.g., the source and destination rectangles
* in \a src and \a dst.
* @return An \ref NvBufSurfTransform_Error value indicating success or failure.
*/
NvBufSurfTransform_Error NvBufSurfTransformComposite (NvBufSurface *src,
NvBufSurface *dst, NvBufSurfTransformCompositeParams *composite_params);
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines analytics metadata </b>
*
* @b Description: This file defines metadata concerning nvdsanalytics plugin.
*/
/**
* @defgroup NvDsMetaApi Metadata APIs
*
* This section describes types and functions of Metadata APIs
* programming interface.
*
*/
#ifndef _NVDS_ANALYTICS_META_H_
#define _NVDS_ANALYTICS_META_H_
#include <gst/gst.h>
#include <vector>
#include <unordered_map>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* @defgroup ee_analytics_group Analytics Metadata
* Defines metadata concerning nvdsanalytics plugin.
* @ingroup NvDsMetaApi
* @{
*/
#define NVDS_USER_FRAME_META_NVDSANALYTICS (nvds_get_user_meta_type((gchar*)"NVIDIA.DSANALYTICSFRAME.USER_META"))
#define NVDS_USER_OBJ_META_NVDSANALYTICS (nvds_get_user_meta_type((gchar*)"NVIDIA.DSANALYTICSOBJ.USER_META"))
/**
* Holds a set of nvdsanalytics object level metadata.
*/
typedef struct
{
  /** Holds the array of ROI labels in which the object is present. */
  std::vector <std::string> roiStatus;
  /** Holds the array of OverCrowding labels in which the object is present. */
  std::vector <std::string> ocStatus;
  /** Holds the array of line-crossing labels which the object has crossed. */
  std::vector <std::string> lcStatus;
  /** Holds the direction string for the tracked object. */
  std::string dirStatus;
  /** Holds a unique identifier for the nvdsanalytics instance. */
  guint unique_id;
} NvDsAnalyticsObjInfo;
/**
* Holds a set of nvdsanalytics framelevel metadata.
*/
typedef struct
{
  /** Holds a map of the boolean overcrowding status for configured ROIs,
   * accessed as a key-value pair, where the key is the ROI label.
   */
  std::unordered_map<std::string, bool> ocStatus;
  /** Holds a map of the total count of valid objects in each configured ROI,
   * accessed as a key-value pair, where the key is the ROI label.
   */
  std::unordered_map<std::string, uint32_t> objInROIcnt;
  /** Holds a map of the total count of line crossings in the current frame
   * for configured lines, accessed as a key-value pair, where the key is
   * the line-crossing label.
   */
  std::unordered_map<std::string, uint64_t> objLCCurrCnt;
  /** Holds a map of the total cumulative count of line crossings for
   * configured lines, accessed as a key-value pair, where the key is
   * the line-crossing label.
   */
  std::unordered_map<std::string, uint64_t> objLCCumCnt;
  /** Holds a unique identifier for the nvdsanalytics instance. */
  guint unique_id;
  /** Holds a map of the total count of objects for each class ID,
   * accessed as a key-value pair, where the key is the class ID.
   */
  std::unordered_map<int, uint32_t> objCnt;
} NvDsAnalyticsFrameMeta;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines dewarping metadata </b>
*
* @b Description: This file defines metadata concerning dewarping.
*/
/**
* @defgroup ee_dewarping_group Dewarping Metadata
* Defines metadata concerning dewarping.
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDS_DEWARPER_META_H_
#define _NVDS_DEWARPER_META_H_
#include <gst/gst.h>
#ifdef __cplusplus
extern "C"
{
#endif
/*
* Defines DeepStream Dewarper metadata.
*/
/**
* Maximum number of dewarped surfaces per frame supported
*/
#define MAX_DEWARPED_VIEWS 4
/**
* Defines metadata surface types.
*/
typedef enum
{
  /** Specifies no surface type. */
  NVDS_META_SURFACE_NONE=0,
  /** Defines the pushbroom surface type. */
  NVDS_META_SURFACE_FISH_PUSHBROOM=1,
  /** Defines the vertical radial cylindrical surface type. */
  NVDS_META_SURFACE_FISH_VERTCYL=2,
  /** Defines the perspective projection with Brown distortion model surface. */
  NVDS_META_SURFACE_PERSPECTIVE_PERSPECTIVE=3,
} NvDsSurfaceType;
/**
* Holds a set of dewarper metadata.
*/
typedef struct _NvDewarperSurfaceMeta {
  /** Holds an array of the types of dewarped surfaces;
   see \ref NvDsSurfaceType. */
  guint type[MAX_DEWARPED_VIEWS];
  /** Holds an array of indices of dewarped surfaces. */
  guint index[MAX_DEWARPED_VIEWS];
  /** Holds the source ID of the frame, e.g. the camera ID. */
  guint source_id;
  /** Holds the number of filled surfaces in the frame. */
  guint num_filled_surfaces;
}NvDewarperSurfaceMeta;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,143 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines Latency API</b>
*
* @b Description: This file defines an API for measuring module latency.
*/
/**
* @defgroup ee_nvlatency_group Latency Measurement API
* Defines an API for measuring latency in modules.
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDSMETA_LATENCY_H_
#define _NVDSMETA_LATENCY_H_
#include "glib.h"
#include "gmodule.h"
#include "nvdsmeta.h"
#define MAX_COMPONENT_LEN 64
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Holds information about latency of the given component
*/
typedef struct _NvDsMetaCompLatency {
  /** Holds the name of the component for which latency is measured. */
  gchar component_name[MAX_COMPONENT_LEN];
  /** Holds the system timestamp of the buffer when it arrives
   at the input of the component. */
  gdouble in_system_timestamp;
  /** Holds the system timestamp of the buffer when it
   is sent to the downstream component. */
  gdouble out_system_timestamp;
  /** Holds the source ID of the component, e.g. the camera ID. */
  guint source_id;
  /** Holds the current frame number for which latency is measured. */
  guint frame_num;
  /** Holds the pad or port index of the stream muxer for the frame
   in the batch. */
  guint pad_index;
}NvDsMetaCompLatency;
/**
* Holds information about the latency of a given frame.
*/
typedef struct
{
  /** Holds the source ID of the component, e.g. the camera ID. */
  guint source_id;
  /** Holds the current frame number for which latency is measured. */
  guint frame_num;
  /** Holds the system timestamp of the buffer when it arrives
   at the input of the first component in the pipeline. By default,
   the decoder is considered to be the first component in the pipeline. */
  gdouble comp_in_timestamp;
  /** Holds the latency of the frame in milliseconds. */
  gdouble latency;
} NvDsFrameLatencyInfo;
/**
* Sets the system timestamp when the Gst Buffer arrives as input at the
* component.
*
* @param[in] buffer A pointer to the arriving Gst Buffer.
* @param[in] element_name A pointer to the name of the component for which
* latency is to be measured.
*
* @returns A pointer to a @ref NvDsUserMeta structure which holds an
* @ref NvDsMetaCompLatency as @a NvDsUserMeta.user_meta_data.
*/
NvDsUserMeta *nvds_set_input_system_timestamp(GstBuffer * buffer,
gchar *element_name);
/**
* \brief Sets the system timestamp when a Gst Buffer that is pushed
* to the downstream component.
*
* This is a corresponding function to nvds_set_input_system_timestamp().
*
* @param[in] buffer A pointer to a Gst Buffer to be pushed
* to the downstream component.
* @param[in] element_name A pointer to the name of the component for which
* latency is to be measured.
*
* @returns TRUE if the timestamp is attached successfully, or FALSE otherwise.
*/
gboolean nvds_set_output_system_timestamp(GstBuffer * buffer, gchar *element_name);
/**
* \brief Measures the latency of all frames present in the current batch.
*
* The latency is computed from decoder input up to the point this API is called.
* You can install the probe on either pad of the component and call
* this function to measure the latency.
*
* @param[in] buf A pointer to a Gst Buffer to which
* @ref NvDsBatchMeta is attached as metadata.
* @param[out] latency_info A pointer to an NvDsFrameLatencyInfo structure
* allocated for a batch of this size. The function
* fills it with information about all of the sources.
*/
guint nvds_measure_buffer_latency(GstBuffer *buf,
NvDsFrameLatencyInfo *latency_info);
/**
* Indicates whether the environment variable
* @c NVDS_ENABLE_LATENCY_MEASUREMENT is exported.
*
* @returns True if the environment variable is exported, or false otherwise.
*/
gboolean nvds_get_enable_latency_measurement(void);
/**
* Defines a pseudo-variable whose value is the return value of
* @ref nvds_get_enable_latency_measurement(). It indicates whether latency
* measurement is enabled.
*/
#define nvds_enable_latency_measurement (nvds_get_enable_latency_measurement())
/** @} */
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2018-2020 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream logging API</b>
*
* @b Description: This file defines an API for logging and debugging
* DeepStream applications.
*/
/**
* @defgroup ee_logging_group Logging API
*
* Defines an API for logging and debugging DeepStream applications.
*
* @ingroup NvDsLoggerApi
* @{
*/
#ifndef NVDS_LOGGER_H
#define NVDS_LOGGER_H
#include <syslog.h>
#define DSLOG_SYSLOG_IDENT "DSLOG"
//define additional categories here
#define DSLOG_CAT_CR "CR"
#define DSLOG_CAT_SG "SG"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Opens a connection to the logger.
*
* This function must be called once per DeepStream application execution,
* prior to use of the logger.
*/
void nvds_log_open();
/**
* Closes a connection to the logger.
*/
void nvds_log_close();
// Category is user (application) defined; priority is based on syslog levels
// data is message to be logged
/**
* Logs a message to a location determined by the setup script.
*
* @param[in] category A pointer to a string which specifies the category of
* this message. Categories are user-defined.
* @param[in] priority Severity of the event to be logged, based on syslog
* levels. For more information, see
* ./src/utils/nvds_logger/README relative to the
* directory that contains the DeepStream SDK.
*
* @param[in] data A pointer to a string containing the message. The
* message may contain the format specifiers recognized
 *                     by %printf() in C/C++. @a data may be followed by an
 *                     arbitrary number of parameters that supply values for
* the format specifiers.
*/
void nvds_log(const char *category, int priority, const char *data, ...);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA DeepStream mask utils API</b>
*
* @b Description: This file specifies the APIs used to transform mask buffers
*/
#ifndef _NVDS_MAKS_UTILS_H_
#define _NVDS_MAKS_UTILS_H_
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* @brief resize FP32 Tensor and apply threshold to create INT32 binary tensor
* Output INT32 tensor pixels are assumed ARGB32
* For resized pixels > threshold, transparency is set to 50%
* and other pixels are set to 100% transparent; RGB = 0 for all pixels
* @param src [IN/OUT] source FP32 tensor
* @param dst [IN/OUT] dst INT32 (ARGB32) tensor
* @param src_width [IN] source FP32 tensor width
* @param src_height [IN] source FP32 tensor height
* @param dst_width [IN] dst INT32 (ARGB32) tensor width
* @param dst_height [IN] dst INT32 (ARGB32) tensor height
* @param channel [IN]
* @param threshold [IN]
 * @param argb32_px [IN] The pixel value in dst when src pixel > threshold
* @param interpolation [IN] The NPP interpolation method to use
* Enumeration copied below:
* NPPI_INTER_NN =1, Nearest neighbor filtering.
* NPPI_INTER_LINEAR Linear interpolation.
* NPPI_INTER_CUBIC Cubic interpolation.
* NPPI_INTER_CUBIC2P_BSPLINE Two-parameter cubic filter (B=1, C=0)
* NPPI_INTER_CUBIC2P_CATMULLROM Two-parameter cubic filter (B=0, C=1/2)
* NPPI_INTER_CUBIC2P_B05C03 Two-parameter cubic filter (B=1/2, C=3/10)
* NPPI_INTER_SUPER Super sampling.
* NPPI_INTER_LANCZOS Lanczos filtering.
* NPPI_INTER_LANCZOS3_ADVANCED Generic Lanczos filtering with order 3.
* NPPI_SMOOTH_EDGE Smooth edge filtering.
* @param stream [IN] The cuda-stream to use for scaling operation on GPU
*/
bool nvds_mask_utils_resize_to_binary_argb32(float *src, uint32_t* dst,
uint32_t src_width, uint32_t src_height,
uint32_t dst_width, uint32_t dst_height,
uint32_t channel, float threshold,
uint32_t argb32_px, uint32_t interpolation,
cudaStream_t stream);
/**
* @brief resize FP32 Tensor and apply threshold to create INT8 binary tensor
* Output INT8 tensor pixels are assumed INT8
* For resized pixels > threshold, pixel = 1
* and other pixels are set to 0
* @param src [IN/OUT] source FP32 tensor
* @param dst [IN/OUT] dst INT8 (binary) tensor
* @param src_width [IN] source FP32 tensor width
* @param src_height [IN] source FP32 tensor height
* @param dst_width [IN] dst INT8 (binary) tensor width
* @param dst_height [IN] dst INT8 (binary) tensor height
* @param channel [IN]
* @param threshold [IN]
* @param interpolation [IN] The NPP interpolation method to use
* Enumeration copied below:
* NPPI_INTER_NN =1, Nearest neighbor filtering.
* NPPI_INTER_LINEAR Linear interpolation.
* NPPI_INTER_CUBIC Cubic interpolation.
* NPPI_INTER_CUBIC2P_BSPLINE Two-parameter cubic filter (B=1, C=0)
* NPPI_INTER_CUBIC2P_CATMULLROM Two-parameter cubic filter (B=0, C=1/2)
* NPPI_INTER_CUBIC2P_B05C03 Two-parameter cubic filter (B=1/2, C=3/10)
* NPPI_INTER_SUPER Super sampling.
* NPPI_INTER_LANCZOS Lanczos filtering.
* NPPI_INTER_LANCZOS3_ADVANCED Generic Lanczos filtering with order 3.
* NPPI_SMOOTH_EDGE Smooth edge filtering.
* @param stream [IN] The cuda-stream to use for scaling operation on GPU
*/
bool nvds_mask_utils_resize_to_binary_uint8(float *src, uint8_t* dst,
uint32_t src_width, uint32_t src_height,
uint32_t dst_width, uint32_t dst_height,
uint32_t channel, float threshold,
uint32_t interpolation,
cudaStream_t stream);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,224 @@
/*
* Copyright (c) 2018-2020 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>DeepStream Messaging API </b>
*
* @b Description: This file defines the DeepStream API for exchanging
* messages with remote entities and services.
*/
/**
* @defgroup ee_nvmessaging_group Messaging API
*
* Defines an API for exchanging messages with remote entities and services.
*
* @ingroup NvDsMsgApi
* @{
*/
#ifndef __NVDS_MSGAPI_H__
#define __NVDS_MSGAPI_H__
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdint.h>
/** Defines the handle used by messaging API functions. */
typedef void *NvDsMsgApiHandle;
/**
* Defines events associated with connections to remote entities.
*/
typedef enum {
  /** Specifies that a connection attempt was successful. */
  NVDS_MSGAPI_EVT_SUCCESS,
  /** Specifies disconnection of a connection handle. */
  NVDS_MSGAPI_EVT_DISCONNECT,
  /** Specifies that the remote service is down. */
  NVDS_MSGAPI_EVT_SERVICE_DOWN
} NvDsMsgApiEventType;
/**
* Defines completion codes for operations in the messaging API.
*/
typedef enum {
  /** Specifies that an operation completed successfully. */
  NVDS_MSGAPI_OK,
  /** Specifies that an operation failed. */
  NVDS_MSGAPI_ERR,
  /** Specifies that the requested topic is not recognized. */
  NVDS_MSGAPI_UNKNOWN_TOPIC
} NvDsMsgApiErrorType;
/**
* Type definition for a "send" callback.
*
* @param[in] user_ptr A context pointer passed by async_send. The
* pointer may refer to any type of information
* that is useful to the callback.
* @param[in] completion_flag The completion code from a send operation.
*/
typedef void (*nvds_msgapi_send_cb_t)(void *user_ptr, NvDsMsgApiErrorType completion_flag);
/**
* @brief Type definition for callback registered during subscribe.
*
* This callback reports any event (success or error)
* during message consume
* If success, this callback reports the consumed message,
* on a subscribed topic
*
* @param[in] flag Message Consume Status
* @param[in] msg Received message/payload
* @param[in] msg_len Length of message/payload
* @param[in] topic Topic name where the message was received
* @param[in] user_ptr pointer passed during subscribe() for context
*/
typedef void (*nvds_msgapi_subscribe_request_cb_t)(NvDsMsgApiErrorType flag, void *msg, int msg_len, char *topic, void *user_ptr);
/**
* @brief Type definition for a "handle" callback.
*
* This callback reports any event (success or error)
* during a call to nvds_msgapi_connect().
*
* @param[in] h_ptr A handle for the event.
* @param[in] ds_evt Type of the event.
*/
typedef void (*nvds_msgapi_connect_cb_t)(NvDsMsgApiHandle h_ptr, NvDsMsgApiEventType ds_evt);
/**
* Connects to a remote agent by calling into a protocol adapter.
*
* @param[in] connection_str A connection string with format
* `"url;port;topic"`.
* @param[in] connect_cb A callback function for events associated with
* the connection.
* @param[in] config_path A pointer to the pathname of a configuration
* file passed to the protocol adapter.
* @return A connection handle.
*/
NvDsMsgApiHandle nvds_msgapi_connect(char *connection_str, nvds_msgapi_connect_cb_t connect_cb, char *config_path);
/**
* Sends a message synchronously over a connection.
*
* @param[in] h_ptr A connection handle.
* @param[in] topic A pointer to a string which specifies the topic
* to which to send the message.
* @param[in] payload A pointer to a byte array containing the message. The
* message may but need not be a NULL-terminated string.
* @param[in] nbuf The number of bytes of data to send, including the
* terminating NULL if the message is a string.
*
* @return A completion code for the send operation.
*/
NvDsMsgApiErrorType nvds_msgapi_send(NvDsMsgApiHandle h_ptr, char *topic, const uint8_t *payload, size_t nbuf);
/**
* Sends message asynchronously over a connection.
*
* @param[in] h_ptr A connection handle.
* @param[in] topic A pointer to a string which specifies the topic
* to which to send the message.
* @param[in] payload A pointer to a byte array containing the message.
* The message may but need not be a NULL-terminated
* string.
* @param[in] nbuf The number of bytes of data to send, including the
* terminating NULL if the message is a string.
* @param[in] send_callback A callback to be invoked when operation completes.
* @param[in] user_ptr A context pointer to pass to callback.
*
* @return A completion code for the send operation.
*/
NvDsMsgApiErrorType nvds_msgapi_send_async(NvDsMsgApiHandle h_ptr, char *topic, const uint8_t *payload, size_t nbuf, nvds_msgapi_send_cb_t send_callback, void *user_ptr);
/**
* Subscribe to a remote entity for receiving messages on a particular topic(s)
*
* @param[in] h_ptr Connection handle
* @param[in] topics Array of topics to subscribe for messages
* @param[in] num_topics num of topics
* @param[in] cb A pointer to a callback function for notifying the DS event handler
* @param[in] user_ctx user ptr to be passed to callback for context
*
* @return Status of the subscribe operation.
*/
NvDsMsgApiErrorType nvds_msgapi_subscribe (NvDsMsgApiHandle h_ptr, char ** topics, int num_topics, nvds_msgapi_subscribe_request_cb_t cb, void *user_ctx);
/**
 * Calls into the adapter to allow for execution of underlying protocol logic.
* In this call the adapter is expected to service pending incoming and
* outgoing messages. It can also perform periodic housekeeping tasks
* such as sending heartbeats.
*
* This design gives the client control over when protocol logic gets executed.
* The client must call it periodically, according to the individual adapter's
* requirements.
*
* @param[in] h_ptr A connection handle.
*/
void nvds_msgapi_do_work(NvDsMsgApiHandle h_ptr);
/**
* Terminates a connection.
*
* @param[in] h_ptr The connection handle.
*
* @return A completion code for the "terminate" operation.
*/
NvDsMsgApiErrorType nvds_msgapi_disconnect(NvDsMsgApiHandle h_ptr);
/**
* Gets the version number of the messaging API interface supported by the
* protocol adapter.
*
* @return A pointer to a string that contains version number in
* `"major.minor"` format.
*/
char *nvds_msgapi_getversion(void);
/**
* Gets the name of the protocol used in the adapter.
*
* @return A pointer to a string
*/
char *nvds_msgapi_get_protocol_name(void);
/**
* Fetch the connection signature by parsing broker_connection string and cfg file
*
* A connection signature is a unique string used to identify a connection.
* It is generated by parsing all the connection params provided in broker_str and cfg file
*
* Connection signature can be retrieved only if the cfg option share-connection = 1
*
* @param[in] broker_str Broker connection string used to create connection
* @param[in] cfg Path to config file
* @param[out] output_str connection signature
* @param[in] max_len max len of output_str
*
* @return Valid connection signature if success
* Empty string("") in case of errors or if share-connection cfg option is not set to 1
*/
NvDsMsgApiErrorType nvds_msgapi_connection_signature(char *broker_str, char *cfg, char *output_str, int max_len);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream: Object Encoder</b>
*/
/**
* @defgroup ee_object_encoder Object Encoder API
*
* Defines APIs for Object Encoder.
*
* @ingroup NvllBatchJpegEncodeinterfaceApi
* @{
*/
#ifndef __NVDS_ENCODE_OBJECT_H__
#define __NVDS_ENCODE_OBJECT_H__
#ifdef __cplusplus
extern "C"
{
#endif
#define FILE_NAME_SIZE (1024)
struct _NvDsObjEncCtx;
/** Opaque Handle to the Object Encoder Context */
typedef struct _NvDsObjEncCtx * NvDsObjEncCtxHandle;
/**
* Holds output parameters. This structure is available to the user.
*/
typedef struct _NvDsObjEncOutParams
{
/** Pointer to the JPEG Encoded Object */
uint8_t *outBuffer;
/** Length of the JPEG Encoded Object */
uint64_t outLen;
} NvDsObjEncOutParams;
/**
* Holds user parameters for a nvds_obj_enc_process call.
*/
typedef struct _NvDsObjEncUsrArgs
{
/** Boolean variable to save image */
bool saveImg;
/** Variable to attach user metadata.
* Metadata Type is "NVDS_CROP_IMAGE_META".
*/
bool attachUsrMeta;
/** If user specifies the filename then it'll be used otherwise the
* following naming convention is used to create filename of the encoded
* objects -
* "frame-number_stream-number_object-number_object-type_widthxheight.jpg".
* For example - 0_2_3_PERSON_126x148.jpg
*/
char fileNameImg[FILE_NAME_SIZE];
/** Object number in the frame */
int objNum;
} NvDsObjEncUsrArgs;
/** Create context and return a handle to NvObjEncCtx */
NvDsObjEncCtxHandle nvds_obj_enc_create_context (void);
/** Enqueue an object crop for JPEG encode.
* This is a non-blocking call and user should call nvds_obj_enc_finish()
* to make sure all enqueued object crops have been processed.
*/
bool nvds_obj_enc_process (NvDsObjEncCtxHandle, NvDsObjEncUsrArgs *,
NvBufSurface *, NvDsObjectMeta *, NvDsFrameMeta *);
/** Wait for all enqueued crops to be encoded */
void nvds_obj_enc_finish (NvDsObjEncCtxHandle);
/** Destroy context */
void nvds_obj_enc_destroy_context (NvDsObjEncCtxHandle);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA Optical Flow Metadata </b>
*
* @b Description: This file defines the optical flow metadata.
*/
/**
* @defgroup ee_opticalflow_meta Optical flow metadata
*
* Defines the optical flow metadata.
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDS_OPTICALFLOW_META_H_
#define _NVDS_OPTICALFLOW_META_H_
#include <gst/gst.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Holds motion vector information about an element.
*/
typedef struct _NvOFFlowVector
{
/** Holds the motion vector X component. */
gshort flowx;
/** Holds the motion vector Y component. */
gshort flowy;
} NvOFFlowVector;
/**
* Holds optical flow metadata about a frame.
*/
typedef struct
{
/** Holds the number of rows in the frame for a given block size,
e.g. if block size is 4 and frame height is 720, then the number of
rows is (720/4) = 180. */
guint rows;
/** Holds the number of columns in the frame for given block size,
e.g. if block size is 4 and frame width is 1280, then the number of
columns is (1280/4) = 320. */
guint cols;
/** Holds the size of the motion vector. @see NvOFFlowVector. */
guint mv_size;
/** Holds the current frame number of the source. */
gulong frame_num;
/** Holds a pointer to the motion vector. */
void *data;
/** Reserved for internal use. */
void *priv;
/** Reserved for internal use. */
void *reserved;
} NvDsOpticalFlowMeta;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>Defines Tracker Metadata</b>
*/
/**
* @defgroup ee_tracker_group Tracker Metadata
*
* Specifies metadata concerning tracking.
*
* @ingroup NvDsMetaApi
* @{
*/
#ifndef _NVDS_TRACKER_META_H_
#define _NVDS_TRACKER_META_H_
#include <stdint.h>
#include "nvll_osd_struct.h"
#include "nvdsmeta.h"
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Holds tracking information about one object in one past frame.
 */
typedef struct _NvDsPastFrameObj
{
/** Frame number at which this entry was recorded. */
uint32_t frameNum;
/** Tracked bounding box of the object in that frame. */
NvOSD_RectParams tBbox;
/** Tracker confidence for this entry. */
float confidence;
/** Tracking age of the object at that frame. */
uint32_t age;
} NvDsPastFrameObj;
/**
 * Holds the past-frame entries of one tracked object across several frames.
 */
typedef struct _NvDsPastFrameObjList
{
/** Pointer to past frame info of this object. */
NvDsPastFrameObj *list;
/** Number of frames this object appeared in the past. */
uint32_t numObj;
/** Object tracking id. */
uint64_t uniqueId;
/** Object class id. */
uint16_t classId;
/** String describing the object class. */
gchar objLabel[MAX_LABEL_SIZE];
} NvDsPastFrameObjList;
/**
* List of objects in each stream
* */
typedef struct _NvDsPastFrameObjStream
{
NvDsPastFrameObjList *list; /**< Pointer to objects inside this stream. */
uint32_t streamID; /**< Stream id the same as frame_meta->pad_index. */
uint64_t surfaceStreamID; /**< Stream id used inside tracker plugin. */
uint32_t numAllocated; /**< Maximum number of objects allocated. */
uint32_t numFilled; /**< Number of objects in this frame. */
} NvDsPastFrameObjStream;
/**
* Batch of lists of buffered objects
*/
typedef struct _NvDsPastFrameObjBatch
{
NvDsPastFrameObjStream *list; /**< Pointer to array of stream lists. */
uint32_t numAllocated; /**< Number of blocks allocated for the list. */
uint32_t numFilled; /**< Number of filled blocks in the list. */
} NvDsPastFrameObjBatch;
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA DeepStream version API</b>
*
* @b Description: This file specifies the APIs used to view the version of
* NVIDIA DEEPSTREAM and its dependencies, such as TensorRT, CUDA and cuDNN.
*/
/**
* @defgroup ee_version Version Number API
*
* Defines the API used to get the current version number of DeepStream and
* its dependencies.
*
* @ingroup NvDsUtilsApi
* @{
*/
#ifndef _NVDS_VERSION_H_
#define _NVDS_VERSION_H_
#define NVDS_VERSION_MAJOR 5
#define NVDS_VERSION_MINOR 0
#define NVDS_VERSION_MICRO 0
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Get the DEEPSTREAM_SDK major and minor version
* numbers and return them in major and minor variable pointers.
*
* @param[in] major holds the major part of DEEPSTREAM_SDK version.
* @param[in] minor holds the minor part of DEEPSTREAM_SDK version.
*/
void nvds_version (unsigned int * major, unsigned int * minor);
/**
* Print the version as major.minor.
* To obtain major and minor, this function calls @ref nvds_version.
*/
void nvds_version_print (void);
/**
* Print the versions of dependencies such as Cuda, cuDNN and TensorRT.
*/
void nvds_dependencies_version_print (void);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>Defines NVIDIA DeepStream GStreamer Utilities</b>
*
* @b Description: This file specifies the NVIDIA DeepStream GStreamer utility
* functions.
*
*/
/**
* @defgroup gstreamer_utils Utilities: Gstreamer utilities API
*
* Specifies GStreamer utilities functions, used to configure the source to generate NTP Sync values.
*
* @ingroup NvDsUtilsApi
* @{
*/
#ifndef __NVDS_GSTUTILS_H__
#define __NVDS_GSTUTILS_H__
#include <gst/gst.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <gst/gst.h>
/**
* Configure the source to generate NTP sync values for RTSP sources.
*
* These values are used by the DeepStream GStreamer element NvStreamMux to
* calculate the NTP time of the frames at the source.
*
* This functionality is dependent on the RTSP source sending RTCP Sender Reports.
*
* This function only works for RTSP sources i.e. GStreamer elements "rtspsrc"
* or "uridecodebin" with an RTSP uri.
*
* @param[in] src_elem GStreamer source element to be configured.
*/
void configure_source_for_ntp_sync (GstElement *src_elem);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,293 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream inference specifications </b>
*
* @b Description: This file defines common elements used in the API
* exposed by the Gst-nvinfer plugin.
*/
/**
* @defgroup ee_nvinf Gst-infer API Common Elements
*
* Defines common elements used in the API exposed by the Gst-inference plugin.
* @ingroup NvDsInferApi
* @{
*/
#ifndef _NVDSINFER_H_
#define _NVDSINFER_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C"
{
#endif
#define NVDSINFER_MAX_DIMS 8
#define _DS_DEPRECATED_(STR) __attribute__ ((deprecated (STR)))
/**
 * Holds the dimensions of a layer.
 */
typedef struct
{
/** Holds the number of dimensions in the layer. */
unsigned int numDims;
/** Holds the size of the layer in each dimension. */
unsigned int d[NVDSINFER_MAX_DIMS];
/** Holds the number of elements in the layer, including all dimensions.*/
unsigned int numElements;
} NvDsInferDims;
/**
* Holds the dimensions of a three-dimensional layer.
*/
typedef struct
{
/** Holds the channel count of the layer.*/
unsigned int c;
/** Holds the height of the layer.*/
unsigned int h;
/** Holds the width of the layer.*/
unsigned int w;
} NvDsInferDimsCHW;
/**
* Specifies the data type of a layer.
*/
typedef enum
{
/** Specifies FP32 format. */
FLOAT = 0,
/** Specifies FP16 format. */
HALF = 1,
/** Specifies INT8 format. */
INT8 = 2,
/** Specifies INT32 format. */
INT32 = 3
} NvDsInferDataType;
/**
* Holds information about one layer in the model.
*/
typedef struct
{
/** Holds the data type of the layer. */
NvDsInferDataType dataType;
/** Holds the dimensions of the layer. */
union {
NvDsInferDims inferDims;
NvDsInferDims dims _DS_DEPRECATED_("dims is deprecated. Use inferDims instead");
};
/** Holds the TensorRT binding index of the layer. */
int bindingIndex;
/** Holds the name of the layer. */
const char* layerName;
/** Holds a pointer to the buffer for the layer data. */
void *buffer;
/** Holds a Boolean; true if the layer is an input layer,
or false if an output layer. */
int isInput;
} NvDsInferLayerInfo;
/**
* Holds information about the model network.
*/
typedef struct
{
/** Holds the input width for the model. */
unsigned int width;
/** Holds the input height for the model. */
unsigned int height;
/** Holds the number of input channels for the model. */
unsigned int channels;
} NvDsInferNetworkInfo;
/**
* Sets values on a @ref NvDsInferDimsCHW structure from a @ref NvDsInferDims
* structure.
*/
#define getDimsCHWFromDims(dimsCHW,dims) \
do { \
(dimsCHW).c = (dims).d[0]; \
(dimsCHW).h = (dims).d[1]; \
(dimsCHW).w = (dims).d[2]; \
} while (0)
/**
 * Holds information about one parsed object from a detector's output.
 */
typedef struct
{
/** Holds the ID of the class to which the object belongs. */
unsigned int classId;
/** Holds the horizontal offset of the bounding box shape for the object. */
float left;
/** Holds the vertical offset of the object's bounding box. */
float top;
/** Holds the width of the object's bounding box. */
float width;
/** Holds the height of the object's bounding box. */
float height;
/** Holds the object detection confidence level; must be in the range
[0.0,1.0]. */
float detectionConfidence;
} NvDsInferObjectDetectionInfo;
/**
* A typedef defined to maintain backward compatibility.
*/
typedef NvDsInferObjectDetectionInfo NvDsInferParseObjectInfo;
/**
 * Holds information about one parsed object and instance mask from a detector's output.
 */
typedef struct
{
/** Holds the ID of the class to which the object belongs. */
unsigned int classId;
/** Holds the horizontal offset of the bounding box shape for the object. */
float left;
/** Holds the vertical offset of the object's bounding box. */
float top;
/** Holds the width of the object's bounding box. */
float width;
/** Holds the height of the object's bounding box. */
float height;
/** Holds the object detection confidence level; must be in the range
[0.0,1.0]. */
float detectionConfidence;
/** Holds a pointer to the object's segment mask. */
float *mask;
/** Holds the width of the mask. */
unsigned int mask_width;
/** Holds the height of the mask. */
unsigned int mask_height;
/** Holds the size of the mask in bytes. */
unsigned int mask_size;
} NvDsInferInstanceMaskInfo;
/**
 * Holds information about one classified attribute.
 */
typedef struct
{
/** Holds the index of the attribute's label. This index corresponds to
the order of output layers specified in the @a outputCoverageLayerNames
vector during initialization. */
unsigned int attributeIndex;
/** Holds the attribute's output value. */
unsigned int attributeValue;
/** Holds the attribute's confidence level. */
float attributeConfidence;
/** Holds a pointer to a string containing the attribute's label.
Memory for the string must not be freed. */
const char *attributeLabel;
} NvDsInferAttribute;
/**
* Enum for the status codes returned by NvDsInferContext.
*/
typedef enum {
/** NvDsInferContext operation succeeded. */
NVDSINFER_SUCCESS = 0,
/** Failed to configure the NvDsInferContext instance possibly due to an
* erroneous initialization property. */
NVDSINFER_CONFIG_FAILED,
/** Custom Library interface implementation failed. */
NVDSINFER_CUSTOM_LIB_FAILED,
/** Invalid parameters were supplied. */
NVDSINFER_INVALID_PARAMS,
/** Output parsing failed. */
NVDSINFER_OUTPUT_PARSING_FAILED,
/** CUDA error was encountered. */
NVDSINFER_CUDA_ERROR,
/** TensorRT interface failed. */
NVDSINFER_TENSORRT_ERROR,
/** Resource error was encountered. */
NVDSINFER_RESOURCE_ERROR,
/** TRT-IS error was encountered. */
NVDSINFER_TRTIS_ERROR,
/** Unknown error was encountered. */
NVDSINFER_UNKNOWN_ERROR
} NvDsInferStatus;
/**
* Enum for the log levels of NvDsInferContext.
*/
typedef enum {
NVDSINFER_LOG_ERROR = 0,
NVDSINFER_LOG_WARNING,
NVDSINFER_LOG_INFO,
NVDSINFER_LOG_DEBUG,
} NvDsInferLogLevel;
/**
* Get the string name for the status.
*
* @param[in] status An NvDsInferStatus value.
* @return String name for the status. Memory is owned by the function. Callers
* should not free the pointer.
*/
const char* NvDsInferStatus2Str(NvDsInferStatus status);
#ifdef __cplusplus
}
#endif
/* C++ data types */
#ifdef __cplusplus
/**
* Enum for selecting between minimum/optimal/maximum dimensions of a layer
* in case of dynamic shape network.
*/
typedef enum
{
kSELECTOR_MIN = 0,
kSELECTOR_OPT,
kSELECTOR_MAX,
kSELECTOR_SIZE
} NvDsInferProfileSelector;
/**
* Holds full dimensions (including batch size) for a layer.
*/
typedef struct
{
int batchSize = 0;
NvDsInferDims dims = {0};
} NvDsInferBatchDims;
/**
* Extended structure for bound layer information which additionally includes
* min/optimal/max full dimensions of a layer in case of dynamic shape.
*/
struct NvDsInferBatchDimsLayerInfo : NvDsInferLayerInfo
{
NvDsInferBatchDims profileDims[kSELECTOR_SIZE];
};
#endif
#endif
/** @} */

View File

@ -0,0 +1,852 @@
/**
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file nvdsinfer_context.h
* <b>NVIDIA DeepStream Inference Interface </b>
*
* @b Description: This file specifies the DeepStream inference interface API.
*/
/**
* @defgroup gstreamer_nvinfer_context Inference Interface API
*
* Defines the DeepStream inference interface API. In C++, defines the
* NvDsInferContext class.
*
* The DeepStream inference API "NvDsInfer" provides methods to initialize and
* deinitialize the inference engine, pre-process the input frames as required
* by the network, and parse the output from the raw tensor buffers.
*
* Both C and C++ interfaces are available, with the C interface being a simple
* wrapper over the C++ interface.
*
* You can create an opaque handle to an instance of the context required by
* the API by calling the factory function createNvDsInferContext() or
* NvDsInferContext_Create(). Both functions accept an instance of
* @ref NvDsInferContextInitParams to initialize the context.
* Both let you specify a logging
* callback to get detailed information about failures and warnings.
*
* Initialization parameters allow you to configure the network data type,
* network type (Detector, Classifier, or Other), preprocessing parameters
* (mean subtraction and normalization), model-related parameters like
* Caffe/Uff/Onnx model file paths, output layer names, etc.
*
* Batches of frames can be queued for inferencing, using
* NvDsInferContext::queueInputBatch() or NvDsInferContext_QueueInputBatch().
* The input frame memories must be accessible
* to the GPU device configured during initialization. You can provide
* an asynchronous callback function to return the input buffers to the caller
* as soon as the input is consumed.
*
* Inference output can be dequeued using NvDsInferContext::dequeueOutputBatch()
* or NvDsInferContext_DequeueOutputBatch(). The order of dequeued outputs
* corresponds
* to the input queueing order. In case of failure, the output of the batch is
* lost. The dequeued output must be released back to the context using
* NvDsInferContext::releaseBatchOutput() or
* NvDsInferContext_ReleaseBatchOutput()
* to free the associated memory and return the output layer buffers for reuse
* by the context.
*
* Detectors output an array of detected objects for each frame in the batch.
* Classifiers classify entire frames and output an array of attributes for
* each frame. Segmentation classifies each pixel in the frame. A special
* network type (Other) has been provided whose output layers are not
* parsed. The caller can parse the device and host output layer buffers.
* You can also use this network type
* with the Gst-infer plugin to flow the output buffers as metadata.
*
* Other methods and functions get parsed labels from a label's
* file and properties of all layers bound by the inference engine.
*
* You can extend the Gst-nvinfer API using the custom method implementations.
* Refer to the Custom Method Implementations section for more details.
*
* @ingroup NvDsInferApi
* @{
*/
/**
* @name NvDsInferContext DeepStream Inference Interface API
*/
#ifndef __NVDSINFER_CONTEXT_H__
#define __NVDSINFER_CONTEXT_H__
#include "nvdsinfer.h"
/** @name NvDsInferContext API common types and functions.
* This section describes the common types and functions for both the C and C++
* interfaces for the NvDsInferContext class.
*/
/** @{ */
/** Maximum length of a file path parameter. */
#define _PATH_MAX 4096
/** Defines the maximum number of channels supported by the API
for image input layers. */
#define _MAX_CHANNELS 4
/** Defines the maximum length of string parameters. */
#define _MAX_STR_LENGTH 1024
/** Defines the maximum batch size supported by nvdsinfer. */
#define NVDSINFER_MAX_BATCH_SIZE 1024
/** Defines the minimum number of sets of output buffers that must be
allocated. */
#define NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE 2
/**
* Defines internal data formats used by the inference engine.
*/
typedef enum
{
NvDsInferNetworkMode_FP32,
NvDsInferNetworkMode_INT8,
NvDsInferNetworkMode_FP16
} NvDsInferNetworkMode;
/**
 * Defines network types.
 */
typedef enum
{
/** Specifies a detector. Detectors find objects and their coordinates,
and their classes in an input frame. */
NvDsInferNetworkType_Detector,
/** Specifies a classifier. Classifiers classify an entire frame into
one of several classes. */
NvDsInferNetworkType_Classifier,
/** Specifies a segmentation network. A segmentation network classifies
each pixel into one of several classes. */
NvDsInferNetworkType_Segmentation,
/** Specifies an instance segmentation network. An instance segmentation
network detects objects, their bounding boxes and masks, and
their classes in an input frame. */
NvDsInferNetworkType_InstanceSegmentation,
/** Specifies other. Output layers of an "other" network are not parsed by
NvDsInferContext. This is useful for networks that produce custom output.
Output can be parsed by the NvDsInferContext client or can be combined
with the Gst-nvinfer feature to flow output tensors as metadata. */
NvDsInferNetworkType_Other = 100
} NvDsInferNetworkType;
/**
* Defines color formats.
*/
typedef enum
{
/** Specifies 24-bit interleaved R-G-B format. */
NvDsInferFormat_RGB,
/** Specifies 24-bit interleaved B-G-R format. */
NvDsInferFormat_BGR,
/** Specifies 8-bit Luma format. */
NvDsInferFormat_GRAY,
/** Specifies 32-bit interleaved R-G-B-A format. */
NvDsInferFormat_RGBA,
/** Specifies 32-bit interleaved B-G-R-x format. */
NvDsInferFormat_BGRx,
/** NCHW planar */
NvDsInferFormat_Tensor,
NvDsInferFormat_Unknown = 0xFFFFFFFF,
} NvDsInferFormat;
/**
 * Defines UFF input layer orders.
 *
 * The NvDsInferUffOrder_* enumerators are deprecated aliases kept for
 * backward compatibility; use the NvDsInferTensorOrder_* names instead.
 */
typedef enum {
    NvDsInferTensorOrder_kNCHW,
    NvDsInferTensorOrder_kNHWC,
    NvDsInferTensorOrder_kNC,
    NvDsInferUffOrder_kNCHW _DS_DEPRECATED_("Use NvDsInferTensorOrder_kNCHW instead") = NvDsInferTensorOrder_kNCHW,
    /* Deprecation message previously pointed at nonexistent
     * "NvDsInferTensorOrder_kNCWH"; corrected to kNHWC. */
    NvDsInferUffOrder_kNHWC _DS_DEPRECATED_("Use NvDsInferTensorOrder_kNHWC instead") = NvDsInferTensorOrder_kNHWC,
    NvDsInferUffOrder_kNC _DS_DEPRECATED_("Use NvDsInferTensorOrder_kNC instead") = NvDsInferTensorOrder_kNC
} NvDsInferTensorOrder;
#define NvDsInferUffOrder _Pragma \
("GCC warning \"'NvDsInferUffOrder' macro is deprecated. Use NvDsInferTensorOrder instead.\"") \
NvDsInferTensorOrder
/**
 * Holds detection and bounding box grouping parameters.
 */
typedef struct
{
    /** Holds the bounding box detection threshold to be applied prior
     * to the clustering operation. */
    union {
        /* Deprecation hint previously said "preclusterThreshold" (wrong
         * case); the actual member is preClusterThreshold. */
        float threshold _DS_DEPRECATED_("Use preClusterThreshold instead.");
        float preClusterThreshold;
    };
    /** Holds the bounding box detection threshold to be applied after
     * the clustering operation. */
    float postClusterThreshold;
    /** Holds the epsilon to control merging of overlapping boxes. Refer to OpenCV
     * groupRectangles and DBSCAN documentation for more information on epsilon. */
    float eps;
    /** Holds the minimum number of boxes in a cluster to be considered
        an object during grouping using DBSCAN. */
    int minBoxes;
    /** Holds the minimum number of boxes in a cluster to be considered
        an object during grouping using OpenCV groupRectangles. */
    int groupThreshold;
    /** Minimum score in a cluster for the cluster to be considered an object
        during grouping. Different clustering may cause the algorithm
        to use different scores. */
    float minScore;
    /** IOU threshold to be used with the NMS mode of clustering. */
    float nmsIOUThreshold;
    /** Maximum number of objects to keep, filtered in the descending order
     * of probability. */
    int topK;
} NvDsInferDetectionParams;
/**
* Enum for clustering mode for detectors
*/
typedef enum
{
NVDSINFER_CLUSTER_GROUP_RECTANGLES = 0,
NVDSINFER_CLUSTER_DBSCAN,
NVDSINFER_CLUSTER_NMS,
NVDSINFER_CLUSTER_DBSCAN_NMS_HYBRID,
NVDSINFER_CLUSTER_NONE
} NvDsInferClusterMode;
/**
 * Holds the initialization parameters required for the NvDsInferContext interface.
 */
typedef struct _NvDsInferContextInitParams
{
/** Holds a unique identifier for the instance. This can be used
to identify the instance that is generating log and error messages. */
unsigned int uniqueID;
/** Holds an internal data format specifier used by the inference engine. */
NvDsInferNetworkMode networkMode;
/** Holds the pathname of the prototxt file. */
char protoFilePath[_PATH_MAX];
/** Holds the pathname of the caffemodel file. */
char modelFilePath[_PATH_MAX];
/** Holds the pathname of the UFF model file. */
char uffFilePath[_PATH_MAX];
/** Holds the pathname of the ONNX model file. */
char onnxFilePath[_PATH_MAX];
/** Holds the pathname of the TLT encoded model file. */
char tltEncodedModelFilePath[_PATH_MAX];
/** Holds the pathname of the INT8 calibration file.
Required only when using INT8 mode. */
char int8CalibrationFilePath[_PATH_MAX];
union {
/** Holds the input dimensions for the model. */
NvDsInferDimsCHW inputDims;
/** Holds the input dimensions for the UFF model. */
NvDsInferDimsCHW uffDimsCHW;
} _DS_DEPRECATED_("Use inferInputDims instead.");
/** Holds the original input order for the UFF model. */
NvDsInferTensorOrder uffInputOrder;
/** Holds the name of the input layer for the UFF model. */
char uffInputBlobName[_MAX_STR_LENGTH];
/** Holds the string key for decoding the TLT encoded model. */
char tltModelKey[_MAX_STR_LENGTH];
/** Holds the pathname of the serialized model engine file.
When using the model engine file, other parameters required for creating
the model engine are ignored. */
char modelEngineFilePath[_PATH_MAX];
/** Holds the maximum number of frames to be inferred together in a batch.
The number of input frames in a batch must be
less than or equal to this. */
unsigned int maxBatchSize;
/** Holds the pathname of the labels file containing strings for the class
labels. The labels file is optional. The file format is described in the
custom models section of the DeepStream SDK documentation. */
char labelsFilePath[_PATH_MAX];
/** Holds the pathname of the mean image file (PPM format). File resolution
must be equal to the network input resolution. */
char meanImageFilePath[_PATH_MAX];
/** Holds the normalization factor with which to scale the input pixels. */
float networkScaleFactor;
/** Holds the network input format. */
NvDsInferFormat networkInputFormat;
/** Holds the per-channel offsets for mean subtraction. This is
an alternative to the mean image file. The number of offsets in the array
must be equal to the number of input channels. */
float offsets[_MAX_CHANNELS];
/** Holds the number of valid entries in @a offsets; must equal the
number of input channels. */
unsigned int numOffsets;
/** Holds the network type. */
NvDsInferNetworkType networkType;
/** Holds a Boolean; true if DBScan is to be used for object clustering,
or false if OpenCV groupRectangles is to be used. */
_DS_DEPRECATED_("Use NvDsInferClusterMode instead")
int useDBScan;
/** Holds the number of classes detected by a detector network. */
unsigned int numDetectedClasses;
/** Holds per-class detection parameters. The array's size must be equal
to @a numDetectedClasses. */
NvDsInferDetectionParams *perClassDetectionParams;
/** Holds the minimum confidence threshold for the classifier to consider
a label valid. */
float classifierThreshold;
/** Confidence threshold for the segmentation network output.
NOTE(review): presumed per-pixel threshold analogous to
classifierThreshold — confirm against plugin documentation. */
float segmentationThreshold;
/** Holds a pointer to an array of pointers to output layer names. */
char ** outputLayerNames;
/** Holds the number of output layer names. */
unsigned int numOutputLayers;
/** Holds the pathname of the library containing custom methods
required to support the network. */
char customLibPath[_PATH_MAX];
/** Holds the name of the custom bounding box function
in the custom library. */
char customBBoxParseFuncName[_MAX_STR_LENGTH];
/** Name of the custom classifier attribute parsing function in the custom
* library. */
char customClassifierParseFuncName[_MAX_STR_LENGTH];
/** Holds a Boolean; true if the input layer contents are to be copied to
host memory for access by the application. */
int copyInputToHostBuffers;
/** Holds the ID of the GPU which is to run the inference. */
unsigned int gpuID;
/** Holds a Boolean; true if DLA is to be used. */
int useDLA;
/** Holds the ID of the DLA core to use. */
int dlaCore;
/** Holds the number of sets of output buffers (host and device)
to be allocated. */
unsigned int outputBufferPoolSize;
/** Holds the pathname of the configuration file
for custom network creation. This can be used to store custom properties
required by the custom network creation function. */
char customNetworkConfigFilePath[_PATH_MAX];
/** Name of the custom engine creation function in the custom library. */
char customEngineCreateFuncName[_MAX_STR_LENGTH];
/** For model parsers supporting both implicit batch dim and full dims,
* prefer to use implicit batch dim. By default, full dims network mode is
* used. */
int forceImplicitBatchDimension;
/** Max workspace size (unit MB) that will be used as tensorrt build
* settings for cuda engine.
*/
unsigned int workspaceSize;
/** Inference input dimensions for runtime engine */
NvDsInferDimsCHW inferInputDims;
/** Holds the type of clustering mode */
NvDsInferClusterMode clusterMode;
/** Holds the name of the bounding box and instance mask parse function
in the custom library. */
char customBBoxInstanceMaskParseFuncName[_MAX_STR_LENGTH];
/** Can be used to specify the format and datatype for bound output layers.
* For each layer specified the format is
* "<layer-name>:<data-type>:<format>" */
char ** outputIOFormats;
/** Holds number of output IO formats specified. */
unsigned int numOutputIOFormats;
/**Can be used to specify the device type and inference precision of layers.
* For each layer specified the format is
* "<layer-name>:<device-type>:<precision>" */
char ** layerDevicePrecisions;
/** Holds number of layer device precisions specified */
unsigned int numLayerDevicePrecisions;
} NvDsInferContextInitParams;
/**
 * Defines a callback function type for asynchronously returning
 * the input client buffers to the NvDsInferContext client.
 *
 * @param[in] data  An opaque pointer provided to the input queueing function
 *                  through NvDsInferContextBatchInput::returnFuncData.
 */
typedef void (* NvDsInferContextReturnInputAsyncFunc) (void *data);
/**
 * Holds information about one batch to be inferred.
 */
typedef struct
{
    /** Holds a pointer to an array of pointers to input frame buffers.
        The size of the array must be at least @a numInputFrames. */
    void** inputFrames;
    /** Holds the number of input frames, i.e. the size of the batch. */
    unsigned int numInputFrames;
    /** Holds the format of the frame contents. */
    NvDsInferFormat inputFormat;
    /** Holds the pitch of the input frames, in bytes. */
    unsigned int inputPitch;
    /** Holds a callback for returning the input buffers to the client. */
    NvDsInferContextReturnInputAsyncFunc returnInputFunc;
    /** Holds a pointer to the opaque data to be supplied with the callback
        in @a returnInputFunc. */
    void *returnFuncData;
} NvDsInferContextBatchInput;
/**
 * Holds information about one detected object.
 */
typedef struct
{
    /** Holds the object's offset from the left boundary of the frame. */
    float left;
    /** Holds the object's offset from the top boundary of the frame. */
    float top;
    /** Holds the object's width. */
    float width;
    /** Holds the object's height. */
    float height;
    /** Holds the index for the object's class. */
    int classIndex;
    /** Holds a pointer to a string containing a label for the object. */
    char *label;
    /** Holds the confidence score of the detected object. */
    float confidence;
    /** Holds a pointer to the instance mask data for the object. */
    float *mask;
    /** Holds the width of the mask. */
    unsigned int mask_width;
    /** Holds the height of the mask. */
    unsigned int mask_height;
    /** Holds the size of the mask, in bytes. */
    unsigned int mask_size;
} NvDsInferObject;
/**
 * Holds information on all objects detected by a detector network in one
 * frame.
 */
typedef struct
{
    /** Holds a pointer to an array of detected objects; the array contains
        @a numObjects elements. */
    NvDsInferObject *objects;
    /** Holds the number of objects in @a objects. */
    unsigned int numObjects;
} NvDsInferDetectionOutput;
/**
 * Holds information on all attributes classified by a classifier network for
 * one frame.
 */
typedef struct
{
    /** Holds a pointer to an array of attributes. There may be more than
        one attribute, depending on the number of output coverage layers
        (multi-label classifiers). */
    NvDsInferAttribute *attributes;
    /** Holds the size of the @a attributes array. */
    unsigned int numAttributes;
    /** Holds a pointer to a string containing a label for the
        classified output. */
    char *label;
} NvDsInferClassificationOutput;
/**
 * Holds information parsed from segmentation network output for one frame.
 */
typedef struct
{
    /** Holds the width of the output. Same as network width. */
    unsigned int width;
    /** Holds the height of the output. Same as network height. */
    unsigned int height;
    /** Holds the number of classes supported by the network. */
    unsigned int classes;
    /** Holds a pointer to an array for the 2D pixel class map,
        containing width * height elements.
        The output for pixel (x,y) is at index (y*width+x). */
    int *class_map;
    /** Holds a pointer to an array containing raw probabilities,
        containing classes * width * height elements.
        The probability for class @a c and pixel (x,y) is at index
        (c*width*height + y*width+x). */
    float *class_probability_map;
} NvDsInferSegmentationOutput;
/**
 * Holds the information inferred by the network on one frame.
 */
typedef struct
{
    /** Holds an output type indicating the valid member in the union
        of @a detectionOutput, @a classificationOutput, and @a segmentationOutput.
        This is basically the network type. */
    NvDsInferNetworkType outputType;
    /** Holds a union of supported outputs. The valid member is determined by
        @a outputType. */
    union
    {
        /** Holds detector output. Valid when @a outputType is
            @ref NvDsInferNetworkType_Detector. */
        NvDsInferDetectionOutput detectionOutput;
        /** Holds classifier output. Valid when @a outputType is
            @ref NvDsInferNetworkType_Classifier. */
        NvDsInferClassificationOutput classificationOutput;
        /** Holds segmentation output. Valid when @a outputType is
            @ref NvDsInferNetworkType_Segmentation.
            (Was previously documented as classifier output — copy-paste
            error corrected.) */
        NvDsInferSegmentationOutput segmentationOutput;
    };
} NvDsInferFrameOutput;
/**
 * Holds the output for all of the frames in a batch (an array of frame
 * outputs), and related buffer information.
 */
typedef struct
{
    /** Holds a pointer to an array of outputs for each frame in the batch. */
    NvDsInferFrameOutput *frames;
    /** Holds the number of elements in @a frames. */
    unsigned int numFrames;
    /** Holds a pointer to an array of pointers to output device buffers
        for this batch. The array elements are set by the context when the
        batch output is dequeued. */
    void **outputDeviceBuffers;
    /** Holds the number of elements in @a *outputDeviceBuffers. */
    unsigned int numOutputDeviceBuffers;
    /** Holds a pointer to an array of pointers to host buffers for this batch.
        The array elements are set by the context when the batch output is
        dequeued. */
    void **hostBuffers;
    /** Holds the number of elements in @a hostBuffers. */
    unsigned int numHostBuffers;
    /** Holds a private context pointer for the set of output buffers. */
    void* priv;
} NvDsInferContextBatchOutput;
/** An opaque pointer type to be used as a handle for a context instance. */
typedef struct INvDsInferContext * NvDsInferContextHandle;
/**
 * @brief Type declaration for a logging callback.
 *
 * The callback logs NvDsInferContext messages.
 *
 * @param[in] handle      The handle of the NvDsInferContext instance that
 *                        generated the log.
 * @param[in] uniqueID    Unique ID of the NvDsInferContext instance that
 *                        generated the log.
 * @param[in] logLevel    Level of the log.
 * @param[in] logMessage  A pointer to the log message string.
 * @param[in] userCtx     An opaque pointer to the user context, supplied
 *                        when creating the NvDsInferContext instance.
 */
typedef void (*NvDsInferContextLoggingFunc)(NvDsInferContextHandle handle,
    unsigned int uniqueID, NvDsInferLogLevel logLevel, const char* logMessage,
    void* userCtx);
#ifdef __cplusplus
extern "C" {
#endif
/**
* Resets a context parameter structure to default values.
*
* @param[in] initParams A pointer to a context parameter structure.
*/
void NvDsInferContext_ResetInitParams (NvDsInferContextInitParams *initParams);
/**
* Gets the string name of the status.
*
* @param[in] status An inference status code.
* @return A pointer to a string containing the status's name, or NULL if
* the status is unrecognized. Memory is owned by the function; the caller
* may not free it.
*/
_DS_DEPRECATED_("NvDsInferContext_GetStatusName is deprecated. Use NvDsInferStatus2Str instead")
const char * NvDsInferContext_GetStatusName (NvDsInferStatus status);
#ifdef __cplusplus
}
#endif
/** @} */
/**
* @name NvDsInferContext API C++-interface
* This section describes the C++ interface for the NvDsInferContext class.
* @{
*/
#ifdef __cplusplus
#include <string>
#include <vector>
/**
 * Holds the DeepStream inference interface class.
 */
struct INvDsInferContext
{
public:
    /**
     * Queues a batch of input frames for preprocessing and inferencing.
     * The input frames must be in packed RGB/RGBA/GRAY UINT8 format with
     * the same resolution as the network input, or preprocessed inputs, so
     * that they can be fed directly to the inference engine. The frame
     * memories should be in CUDA device memory allocated on the same device
     * that the NvDsInferContext interface is configured with.
     *
     * The batch size must not exceed the maximum batch size requested during
     * initialization.
     *
     * @param[in] batchInput Reference to a batch input structure.
     * @return NVDSINFER_SUCCESS if preprocessing and queueing succeeded, or
     *  an error status otherwise.
     */
    virtual NvDsInferStatus queueInputBatch(NvDsInferContextBatchInput &batchInput) = 0;

    /**
     * Dequeues output for a batch of frames. The batch dequeuing order is the
     * same as the input queuing order. The associated memory must be freed
     * and output buffers must be released back to the context using
     * releaseBatchOutput() so that the buffers can be reused.
     *
     * @param[out] batchOutput Reference to the batch output structure
     *  to which the output is to be appended.
     * @return NVDSINFER_SUCCESS if dequeueing succeeded, or an error status
     *  otherwise.
     */
    virtual NvDsInferStatus dequeueOutputBatch(NvDsInferContextBatchOutput &batchOutput) = 0;

    /**
     * Frees the memory associated with the batch output and releases the set
     * of output buffers back to the context for reuse.
     *
     * @param[in] batchOutput Reference to a batch output structure
     *  which was filled by dequeueOutputBatch().
     */
    virtual void releaseBatchOutput(NvDsInferContextBatchOutput &batchOutput) = 0;

    /**
     * Fills the input vector with information on all bound layers of the
     * inference engine.
     *
     * @param[in,out] layersInfo Reference to a vector of layer info
     *  structures to be filled by the function.
     */
    virtual void fillLayersInfo(std::vector<NvDsInferLayerInfo> &layersInfo) = 0;

    /**
     * Gets network input information.
     *
     * @param[in,out] networkInfo Reference to a network info structure.
     */
    virtual void getNetworkInfo(NvDsInferNetworkInfo &networkInfo) = 0;

    /**
     * \brief Gets the label strings parsed from the labels file.
     *
     * See the DeepStream NvInfer documentation for the format of the
     * labels file for detectors and classifiers.
     *
     * @return Reference to a vector of vector of string labels.
     */
    virtual const std::vector< std::vector<std::string> >& getLabels() = 0;

    /**
     * Deinitializes the inference engine and frees the resources it used.
     */
    virtual void destroy() = 0;

    /** Destructor for a C++ object. */
    virtual ~INvDsInferContext() {}
};
/**
* Creates a new instance of NvDsInferContext initialized using supplied
* parameters.
*
* @param[out] handle A pointer to a NvDsInferContext handle.
* @param[in] initParams A reference to parameters to be used to initialize
* the context.
* @param[in] userCtx A pointer to an opaque user context with callbacks
* generated by the NvDsInferContext instance.
* @param[in] logFunc A log callback function for the instance.
* @return NVDSINFER_SUCCESS if the instance was created successfully,
* or an error status otherwise.
*/
NvDsInferStatus createNvDsInferContext(NvDsInferContextHandle *handle,
NvDsInferContextInitParams &initParams,
void *userCtx = nullptr,
NvDsInferContextLoggingFunc logFunc = nullptr);
#endif
/** @} */
/**
* @name NvDsInferContext API C-interface
* This section describes the C interface for the NvDsInferContext class.
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* Creates a new NvDsInferContext object with specified
* initialization parameters.
*
* @param[out] handle A pointer to an NvDsInferContext handle.
* @param[in] initParams A pointer to a parameter structure to be used to
* initialize the context.
* @param[in] userCtx A pointer to an opaque user context, with callbacks,
* generated by the NvDsInferContext instance.
* @param[in] logFunc A log callback for the instance.
* @return NVDSINFER_SUCCESS if creation was successful, or an error status
* otherwise.
*/
NvDsInferStatus NvDsInferContext_Create(NvDsInferContextHandle *handle,
NvDsInferContextInitParams *initParams, void *userCtx,
NvDsInferContextLoggingFunc logFunc);
/**
* Destroys an NvDsInferContext instance and releases its resources.
*
* @param[in] handle The handle to the NvDsInferContext instance to be
* destroyed.
*/
void NvDsInferContext_Destroy (NvDsInferContextHandle handle);
/**
* \brief Queues a batch of input frames for preprocessing and inferencing.
*
* @see NvDsInferContext::queueInputBatch() for details.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in] batchInput A reference to a batch input structure.
* @return NVDSINFER_SUCCESS if preprocessing and queueing were successful, or
* an error status otherwise.
*/
NvDsInferStatus NvDsInferContext_QueueInputBatch(NvDsInferContextHandle handle,
NvDsInferContextBatchInput *batchInput);
/**
* Dequeues output for a batch of frames.
*
* @see NvDsInferContext::dequeueOutputBatch() for details.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in,out] batchOutput A reference to the batch output structure
* to which output is to be appended.
* @return NVDSINFER_SUCCESS if dequeueing was successful, or an error status
* otherwise.
*/
NvDsInferStatus NvDsInferContext_DequeueOutputBatch(NvDsInferContextHandle handle,
NvDsInferContextBatchOutput *batchOutput);
/**
* Frees the memory associated with the batch output and releases the set of
* host buffers back to the context for reuse.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in] batchOutput A pointer to an NvDsInferContextBatchOutput
* structure filled by
* NvDsInferContext_DequeueOutputBatch().
*/
void NvDsInferContext_ReleaseBatchOutput(NvDsInferContextHandle handle,
NvDsInferContextBatchOutput *batchOutput);
/**
* Gets network input information.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in,out] networkInfo A pointer to an NvDsInferNetworkInfo structure.
*/
void NvDsInferContext_GetNetworkInfo(NvDsInferContextHandle handle,
NvDsInferNetworkInfo *networkInfo);
/**
* Gets the number of the bound layers of the inference engine in an
* NvDsInferContext instance.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @return The number of bound layers of the inference engine.
*/
unsigned int NvDsInferContext_GetNumLayersInfo(NvDsInferContextHandle handle);
/**
* Fills an input vector with information about all of the bound layers of the
* inference engine in an NvDsInferContext instance.
* The size of the array must be at least the value returned by
* NvDsInferContext_GetNumLayersInfo().
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in,out] layersInfo A pointer to an array of NvDsInferLayerInfo
structures to be filled by the function.
*/
void NvDsInferContext_FillLayersInfo(NvDsInferContextHandle handle,
NvDsInferLayerInfo *layersInfo);
/**
* Gets the string label associated with the class ID for detectors and the
* attribute ID and attribute value for classifiers. The string is owned
* by the context; the caller may not modify or free it.
*
* @param[in] handle A handle to an NvDsInferContext instance.
* @param[in] id Class ID for detectors, or attribute ID for classifiers.
* @param[in] value Attribute value for classifiers; set to 0 for detectors.
* @return A pointer to a string label. The memory is owned by the context.
*/
const char* NvDsInferContext_GetLabel(NvDsInferContextHandle handle,
unsigned int id, unsigned int value);
#ifdef __cplusplus
}
#endif
/** @} */
#endif
/** @} */

View File

@ -0,0 +1,510 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file nvdsinfer_custom_impl.h
* <b>Defines specification for Custom Method Implementations for custom models </b>
*
* @b Description: This file defines the API that
* implements custom methods required by the GStreamer Gst-nvinfer plugin to
* infer using custom models.
*
* All custom functionality must be implemented in an independent shared
* library. The library is dynamically loaded (using %dlopen()) by the plugin.
* It implements custom methods which are called as required. The custom library
* can be specified in the Gst-nvinfer configuration file by the
* `custom-lib-name` property.
*
* @section customparsingfunc Custom Detector Output Parsing Function
*
* This section describes the custom bounding box parsing function for custom
* detector models.
*
* The custom parsing function should be of the type `NvDsInferParseCustomFunc`.
* The custom parsing function can be specified in the Gst-nvinfer
* configuration file by the properties `parse-bbox-func-name`
 * (name of the parsing function) and `custom-lib-name`. `parse-func` must be
 * set to 0.
*
* The Gst-nvinfer plugin loads the library and looks for the custom parsing
* function symbol. The function is called after each inference call is
* executed.
*
* You can call the macro CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE() after
* defining the function to validate the function definition.
*
*
* @section iplugininterface TensorRT Plugin Factory interface for DeepStream
*
* Based on the type of the model (Caffe or UFF), the library
* must implement one of the functions NvDsInferPluginFactoryCaffeGet() or
* NvDsInferPluginFactoryUffGet().
* During model parsing, "nvinfer" looks for either of these functions'
* symbols in the custom library based on the model framework. If either
* symbol is found, the plugin calls that function to get a pointer to
* the PluginFactory instance required for parsing.
*
* If the IPluginFactory is needed during deserialization of CUDA engines,
* the library must implement %NvDsInferPluginFactoryRuntimeGet().
*
* Each Get function has a corresponding Destroy function which is
* called, if defined, when the returned PluginFactory is to be destroyed.
*
* A library that implements this interface must use the same function names
* as the header file. Gst-nvinfer dynamically loads the library and
* looks for the same symbol names.
*
* See the FasterRCNN sample provided with the SDK for a sample implementation
* of the interface.
*
*
* @section inputlayerinitialization Input layer initialization
*
* By default, Gst-nvinfer works with networks having only one input layer
* for video frames. If a network has more than one input layer, the custom
* library can implement the @ref NvDsInferInitializeInputLayers interface
* for initializing the other input layers. Gst-nvinfer assumes that the other
* input layers have static input information, and hence this method is called
* only once before the first inference.
*
* See the FasterRCNN sample provided with the SDK for a sample implementation
* of the interface.
*
*
* @section customnetworkbuild Interface for building Custom Networks
*
 * The "nvinfer" plugin supports two interfaces to create and build
* custom networks not directly supported by nvinfer.
* - IModelParser / NvDsInferCreateModelParser interface
* - NvDsInferEngineCreateCustomFunc interface
*
* In case of IModelParser / NvDsInferCreateModelParser interface, the custom
* library must derive and implement IModelParser, an interface to parse
* the custom networks and build the TensorRT network (nvinfer1::INetworkDefinition).
* The "nvinfer" plugin will then use this TensorRT network to build the
* inference engine. The plugin will look for the symbol "NvDsInferCreateModelParser"
* in the library and call the function to get an instance of the model parser
* implementation from the library.
*
* Alternatively, you can use the custom engine creation function to build
* networks that are not natively supported by nvinfer. The function must be
* of the type @ref NvDsInferEngineCreateCustomFunc. You can specify it
* in the nvinfer element configuration file using
* the property `engine-create-func-name` (name of the engine creation function)
* in addition to `custom-lib-name`.
*
* The nvinfer plugin loads the custom library dynamically and looks for the
* engine creation symbol. The function is called only once during
* initialization of the nvinfer plugin. The function must build and return
* the `CudaEngine` interface using the supplied nvinfer1::IBuilder instance.
* The builder instance is already configured with properties like
* MaxBatchSize, MaxWorkspaceSize, INT8/FP16 precision parameters, etc.
* The builder instance is managed by nvinfer, and the function may not destroy
* it.
*
* You can call the macro CHECK_CUSTOM_ENGINE_CREATE_FUNC_PROTOTYPE() after
* the function definition to validate the function definition.
*
* Refer to the Yolo sample provided with the SDK for sample implementation of
* both the interfaces.
*/
#ifndef _NVDSINFER_CUSTOM_IMPL_H_
#define _NVDSINFER_CUSTOM_IMPL_H_
#include <string>
#include <vector>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include "NvCaffeParser.h"
#include "NvUffParser.h"
#pragma GCC diagnostic pop
#include "nvdsinfer.h"
/*
* C++ interfaces
*/
#ifdef __cplusplus
/**
 * A model parser interface to translate a user-defined model to a TensorRT
 * network.
 *
 * Users can parse any custom model derived from this interface. An instance
 * is created by a call to @fn NvDsInferCreateModelParser.
 *
 * Implementations should make sure that all member functions are overridden.
 * This parser will be deleted after the engine (nvinfer1::ICudaEngine) is built.
 */
class IModelParser
{
public:
    IModelParser() = default;
    /**
     * Destructor; make sure all external resources are released here. */
    virtual ~IModelParser() = default;

    /**
     * Function interface for parsing the custom model and building the
     * TensorRT network.
     *
     * @param[in, out] network NvDsInfer will create the @a network and
     *  the implementation can set up this network layer by layer.
     * @return NvDsInferStatus indicating if model parsing was successful.
     */
    virtual NvDsInferStatus parseModel(
        nvinfer1::INetworkDefinition& network) = 0;

    /**
     * Function interface to check if the parser supports full-dimensions
     * networks.
     */
    virtual bool hasFullDimsSupported() const = 0;

    /**
     * Function interface to get the new model name which is to be used for
     * constructing the serialized engine file path.
     */
    virtual const char* getModelName() const = 0;
};
#endif
/*
* C interfaces
*/
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Holds the detection parameters required for parsing objects.
 */
typedef struct
{
    /** Holds the number of classes requested to be parsed, starting with
        class ID 0. Parsing functions may only output objects with
        class ID less than this value. */
    unsigned int numClassesConfigured;
    /** Holds a per-class vector of detection confidence thresholds
        to be applied prior to the clustering operation.
        Parsing functions may only output an object with detection confidence
        greater than or equal to the vector element indexed by the object's
        class ID. */
    std::vector<float> perClassPreclusterThreshold;
    /** Holds a per-class vector of detection confidence thresholds
        to be applied after the clustering operation. */
    std::vector<float> perClassPostclusterThreshold;
    /** Deprecated. Use @a perClassPreclusterThreshold instead. Reference kept
        to maintain backward compatibility. */
    std::vector<float> &perClassThreshold = perClassPreclusterThreshold;
} NvDsInferParseDetectionParams;
/**
 * Type definition for the custom bounding box parsing function.
 *
 * @param[in] outputLayersInfo  A vector containing information on the output
 *                              layers of the model.
 * @param[in] networkInfo       Network information.
 * @param[in] detectionParams   Detection parameters required for parsing
 *                              objects.
 * @param[out] objectList       A reference to a vector in which the function
 *                              is to add parsed objects.
 */
typedef bool (* NvDsInferParseCustomFunc) (
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferObjectDetectionInfo> &objectList);

/**
 * Validates a custom parser function definition. Must be called
 * after defining the function.
 */
#define CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(customParseFunc) \
    static void checkFunc_ ## customParseFunc (NvDsInferParseCustomFunc func = customParseFunc) \
        { checkFunc_ ## customParseFunc (); }; \
    extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
        NvDsInferNetworkInfo const &networkInfo, \
        NvDsInferParseDetectionParams const &detectionParams, \
        std::vector<NvDsInferObjectDetectionInfo> &objectList);
/**
 * Type definition for the custom bounding box and instance mask parsing
 * function.
 *
 * @param[in] outputLayersInfo  A vector containing information on the output
 *                              layers of the model.
 * @param[in] networkInfo       Network information.
 * @param[in] detectionParams   Detection parameters required for parsing
 *                              objects.
 * @param[out] objectList       A reference to a vector in which the function
 *                              is to add parsed objects and instance masks.
 */
typedef bool (* NvDsInferInstanceMaskParseCustomFunc) (
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferInstanceMaskInfo> &objectList);

/**
 * Validates a custom instance-mask parser function definition. Must be called
 * after defining the function.
 */
#define CHECK_CUSTOM_INSTANCE_MASK_PARSE_FUNC_PROTOTYPE(customParseFunc) \
    static void checkFunc_ ## customParseFunc (NvDsInferInstanceMaskParseCustomFunc func = customParseFunc) \
        { checkFunc_ ## customParseFunc (); }; \
    extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
        NvDsInferNetworkInfo const &networkInfo, \
        NvDsInferParseDetectionParams const &detectionParams, \
        std::vector<NvDsInferInstanceMaskInfo> &objectList);
/**
 * Type definition for the custom classifier output parsing function.
 *
 * NOTE(review): the typedef name misspells "Classifier"; it is kept as-is
 * because it is part of the public API.
 *
 * @param[in] outputLayersInfo     A vector containing information on the
 *                                 output layers of the model.
 * @param[in] networkInfo          Network information.
 * @param[in] classifierThreshold  Classification confidence threshold.
 * @param[out] attrList            A reference to a vector in which the
 *                                 function is to add the parsed attributes.
 * @param[out] descString          A reference to a string object in which the
 *                                 function may place a description string.
 */
typedef bool (* NvDsInferClassiferParseCustomFunc) (
        std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo const &networkInfo,
        float classifierThreshold,
        std::vector<NvDsInferAttribute> &attrList,
        std::string &descString);

/**
 * Validates the classifier custom parser function definition. Must be called
 * after defining the function.
 */
#define CHECK_CUSTOM_CLASSIFIER_PARSE_FUNC_PROTOTYPE(customParseFunc) \
    static void checkFunc_ ## customParseFunc (NvDsInferClassiferParseCustomFunc func = customParseFunc) \
        { checkFunc_ ## customParseFunc (); }; \
    extern "C" bool customParseFunc (std::vector<NvDsInferLayerInfo> const &outputLayersInfo, \
        NvDsInferNetworkInfo const &networkInfo, \
        float classifierThreshold, \
        std::vector<NvDsInferAttribute> &attrList, \
        std::string &descString);
typedef struct _NvDsInferContextInitParams NvDsInferContextInitParams;
/**
 * Type definition for functions that build and return a @c CudaEngine for
 * custom models.
 *
 * @deprecated The NvDsInferCudaEngineGet interface is replaced by
 * NvDsInferEngineCreateCustomFunc().
 *
 * The implementation of this interface must build the
 * nvinfer1::ICudaEngine instance using the nvinfer1::IBuilder instance
 * @a builder. The builder instance is managed by the caller;
 * the implementation must not destroy it.
 *
 * Properties like @a MaxBatchSize, @a MaxWorkspaceSize, INT8/FP16
 * precision parameters, and DLA parameters (if applicable) are set on the
 * builder before it is passed to the interface. The corresponding Get
 * functions of the nvinfer1::IBuilder interface can be used to get
 * the property values.
 *
 * The implementation must make sure not to reduce the @a MaxBatchSize of the
 * returned @c CudaEngine.
 *
 * @param[in] builder      An nvinfer1::IBuilder instance.
 * @param[in] initParams   A pointer to the structure to be used for
 *                         initializing the NvDsInferContext instance.
 * @param[in] dataType     Data precision.
 * @param[out] cudaEngine  A pointer to a location where the function is to
 *                         store a reference to the nvinfer1::ICudaEngine
 *                         instance it has built.
 * @return True if the engine build was successful, or false otherwise.
 */
typedef bool (* NvDsInferEngineCreateCustomFunc) (
        nvinfer1::IBuilder * const builder,
        const NvDsInferContextInitParams * const initParams,
        nvinfer1::DataType dataType,
        nvinfer1::ICudaEngine *& cudaEngine);
/**
 * A macro that validates a custom engine creator function definition.
 * Call this macro after the function is defined.
 *
 * The declared prototype uses "const NvDsInferContextInitParams * const"
 * so that it matches NvDsInferEngineCreateCustomFunc exactly. (A previous
 * revision wrote "const NvDsInferContextInitParams const *initParams",
 * which duplicates the cv-qualifier — ill-formed C++ — and does not match
 * the typedef.)
 */
#define CHECK_CUSTOM_ENGINE_CREATE_FUNC_PROTOTYPE(customEngineCreateFunc) \
    static void checkFunc_ ## customEngineCreateFunc (NvDsInferEngineCreateCustomFunc = customEngineCreateFunc) \
        { checkFunc_ ## customEngineCreateFunc(); }; \
    extern "C" bool customEngineCreateFunc ( \
        nvinfer1::IBuilder * const builder, \
        const NvDsInferContextInitParams * const initParams, \
        nvinfer1::DataType dataType, \
        nvinfer1::ICudaEngine *& cudaEngine);
/**
* Specifies the type of the Plugin Factory.
*/
typedef enum
{
/** Specifies nvcaffeparser1::IPluginFactory or
nvuffparser::IPluginFactory. */
PLUGIN_FACTORY,
/** Specifies nvcaffeparser1::IPluginFactoryExt or
nvuffparser::IPluginFactoryExt. */
PLUGIN_FACTORY_EXT,
/** Specifies nvcaffeparser1::IPluginFactoryV2. Used only for Caffe models. */
PLUGIN_FACTORY_V2
} NvDsInferPluginFactoryType;
/**
 * Holds a pointer to a heap-allocated Plugin Factory object required during
 * Caffe model parsing.
 *
 * The IPluginFactory / IPluginFactoryExt members are commented out in this
 * revision; only the V2 factory member is available for Caffe models.
 */
typedef union
{
    // nvcaffeparser1::IPluginFactory *pluginFactory;
    // nvcaffeparser1::IPluginFactoryExt *pluginFactoryExt;
    nvcaffeparser1::IPluginFactoryV2 *pluginFactoryV2;
} NvDsInferPluginFactoryCaffe;
/**
 * Holds a pointer to a heap-allocated Plugin Factory object required during
 * UFF model parsing.
 *
 * NOTE(review): all members of this union are commented out, leaving the
 * union empty — the UFF plugin-factory types appear unusable in this
 * revision. Confirm against the upstream header for the matching TensorRT
 * version.
 */
typedef union
{
    // nvuffparser::IPluginFactory *pluginFactory;
    // nvuffparser::IPluginFactoryExt *pluginFactoryExt;
} NvDsInferPluginFactoryUff;
/**
* Gets a new instance of a Plugin Factory interface to be used
* during parsing of Caffe models. The function must set the correct @a type and
* the correct field in the @a pluginFactory union, based on the type of the
* Plugin Factory, (i.e. one of @a pluginFactory, @a pluginFactoryExt, or
* @a pluginFactoryV2).
*
* @param[out] pluginFactory A reference to the union that contains
* a pointer to the Plugin Factory object.
* @param[out] type Specifies the type of @a pluginFactory, i.e.
* which member the @a pluginFactory union
* is valid.
* @return True if the Plugin Factory was created successfully, or false
* otherwise.
*/
bool NvDsInferPluginFactoryCaffeGet (NvDsInferPluginFactoryCaffe &pluginFactory,
NvDsInferPluginFactoryType &type);
/**
* Destroys a Plugin Factory instance created by
* NvDsInferPluginFactoryCaffeGet().
*
* @param[in] pluginFactory A reference to the union that contains a
* pointer to the Plugin Factory instance returned
* by NvDsInferPluginFactoryCaffeGet().
*/
void NvDsInferPluginFactoryCaffeDestroy (NvDsInferPluginFactoryCaffe &pluginFactory);
/**
* Returns a new instance of a Plugin Factory interface to be used
* during parsing of UFF models. The function must set the correct @a type and
* the correct field in the @a pluginFactory union, based on the type of the
* Plugin Factory (i.e. @a pluginFactory or @a pluginFactoryExt).
*
* @param[out] pluginFactory A reference to a union that contains a pointer
* to the Plugin Factory object.
* @param[out] type Specifies the type of @a pluginFactory, i.e.
* which member of the @a pluginFactory union
* is valid.
* @return True if the Plugin Factory was created successfully, or false
* otherwise.
*/
bool NvDsInferPluginFactoryUffGet (NvDsInferPluginFactoryUff &pluginFactory,
NvDsInferPluginFactoryType &type);
/**
* Destroys a Plugin Factory instance created by NvDsInferPluginFactoryUffGet().
*
* @param[in] pluginFactory A reference to the union that contains a
* pointer to the Plugin Factory instance returned
* by NvDsInferPluginFactoryUffGet().
*/
void NvDsInferPluginFactoryUffDestroy (NvDsInferPluginFactoryUff &pluginFactory);
/**
* Returns a new instance of a Plugin Factory interface to be used
* during parsing deserialization of CUDA engines.
*
* @param[out] pluginFactory A reference to nvinfer1::IPluginFactory*
* in which the function is to place a pointer to
* the instance.
* @return True if the Plugin Factory was created successfully, or false
* otherwise.
*/
bool NvDsInferPluginFactoryRuntimeGet (nvinfer1::IPluginFactory *& pluginFactory);
/**
* Destroys a Plugin Factory instance created by
* NvDsInferPluginFactoryRuntimeGet().
*
* @param[in] pluginFactory A pointer to the Plugin Factory instance
* returned by NvDsInferPluginFactoryRuntimeGet().
*/
void NvDsInferPluginFactoryRuntimeDestroy (nvinfer1::IPluginFactory * pluginFactory);
/**
* Initializes the input layers for inference. This function is called only once
* before the first inference call.
*
* @param[in] inputLayersInfo A reference to a vector containing information
* on the input layers of the model. This does not
* contain the NvDsInferLayerInfo structure for
* the layer for video frame input.
* @param[in] networkInfo A reference to a network information structure.
* @param[in] maxBatchSize The maximum batch size for inference.
* The input layer buffers are allocated
* for this batch size.
* @return True if input layers are initialized successfully, or false
* otherwise.
*/
bool NvDsInferInitializeInputLayers (std::vector<NvDsInferLayerInfo> const &inputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
unsigned int maxBatchSize);
/**
* The NvDsInferCudaEngineGet interface has been deprecated and has been
* replaced by NvDsInferEngineCreateCustomFunc function.
*/
bool NvDsInferCudaEngineGet(nvinfer1::IBuilder *builder,
NvDsInferContextInitParams *initParams,
nvinfer1::DataType dataType,
nvinfer1::ICudaEngine *& cudaEngine)
__attribute__((deprecated("Use 'engine-create-func-name' config parameter instead")));
/**
* Create a customized neural network parser for user-defined models.
*
* User need to implement a new IModelParser class with @a initParams
* referring to any model path and/or customNetworkConfigFilePath.
*
* @param[in] initParams with model paths or config files.
* @return Instance of IModelParser implementation.
*/
IModelParser* NvDsInferCreateModelParser(
const NvDsInferContextInitParams* initParams);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,117 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file nvdsinfer_dbscan.h
* <b>NVIDIA DeepStream DBScan based Object Clustering API </b>
*
* @b Description: This file defines the API for the DBScan-based object
* clustering algorithm.
*/
/**
* @defgroup ee_dbscan DBScan Based Object Clustering API
*
* Defines the API for DBScan-based object clustering.
*
* @ingroup NvDsInferApi
* @{
*/
#ifndef __NVDSINFER_DBSCAN_H__
#define __NVDSINFER_DBSCAN_H__
#include <stddef.h>
#include <stdint.h>
#include <nvdsinfer.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Holds an opaque structure for the DBScan object clustering context. */
struct NvDsInferDBScan;
/** Holds an opaque DBScan clustering context handle. */
typedef struct NvDsInferDBScan *NvDsInferDBScanHandle;
/** Holds object clustering parameters required by DBSCAN. */
typedef struct
{
/** Holds the neighborhood radius (DBSCAN epsilon) used to decide whether
 two detections belong to the same cluster. */
float eps;
/** Holds the minimum number of boxes required to form a valid cluster
 (DBSCAN minimum-points parameter). */
uint32_t minBoxes;
/** Holds a Boolean; true enables the area-to-hit ratio (ATHR) filter.
The ATHR is calculated as: ATHR = sqrt(clusterArea) / nObjectsInCluster. */
int enableATHRFilter;
/** Holds the area-to-hit ratio threshold. */
float thresholdATHR;
/** Holds the sum of neighborhood confidence thresholds. */
float minScore;
} NvDsInferDBScanClusteringParams;
/**
* Creates a new DBScan object clustering context.
*
* @return A handle to the created context.
*/
NvDsInferDBScanHandle NvDsInferDBScanCreate();
/**
* Destroys a DBScan object clustering context.
*
* @param[in] handle The handle to the context to be destroyed.
*/
void NvDsInferDBScanDestroy(NvDsInferDBScanHandle handle);
/**
* Clusters an array of objects in place using specified clustering parameters.
*
* @param[in] handle A handle to the context to be used for clustering.
* @param[in] params A pointer to a clustering parameter structure.
* @param[in,out] objects A pointer to an array of objects to be
* clustered. The function places the clustered
* objects in the same array.
* @param[in,out] numObjects A pointer to the number of valid objects
* in the @a objects array. The function sets
* this value after clustering.
*/
void NvDsInferDBScanCluster(NvDsInferDBScanHandle handle,
NvDsInferDBScanClusteringParams *params, NvDsInferObjectDetectionInfo *objects,
size_t *numObjects);
/**
* Clusters an array of objects in place using specified clustering parameters.
* The outputs are only partially clustered: close neighbors belonging to
* the same cluster are merged, but the mean normalization of all the
* proposals in a cluster is not performed. The outputs from this stage are
* later fed into another clustering algorithm, such as NMS, to obtain the
* final results.
*
* @param[in] handle A handle to the context to be used for clustering.
* @param[in] params A pointer to a clustering parameter structure.
* @param[in,out] objects A pointer to an array of objects to be
* clustered. The function places the clustered
* objects in the same array.
* @param[in,out] numObjects A pointer to the number of valid objects
* in the @a objects array. The function sets
* this value after clustering.
*/
void NvDsInferDBScanClusterHybrid(NvDsInferDBScanHandle handle,
NvDsInferDBScanClusteringParams *params, NvDsInferObjectDetectionInfo *objects,
size_t *numObjects);
#ifdef __cplusplus
}
#endif
#endif
/** @} */

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>NVIDIA DeepStream API for importing Transfer Learning Toolkit
* encoded models </b>
*
* @b Description: This file specifies the API to decode and create
* a CUDA engine file from a Transfer Learning Toolkit (TLT) encoded model.
*/
/**
* @defgroup ee_nvdsinfer_tlt Import Transfer Learning Toolkit Encoded Models
*
* Defines an API for importing Transfer Learning Toolkit encoded models.
*
* @ingroup NvDsInferApi
* @{
*/
#ifndef __NVDSINFER_TLT_H__
#define __NVDSINFER_TLT_H__
#include <nvdsinfer_custom_impl.h>
/**
* \brief Decodes and creates a CUDA engine file from a TLT encoded model.
*
* This function implements the @ref NvDsInferCudaEngineGet interface. The
* correct key and model path must be provided in the @a tltModelKey and
* @a tltEncodedModelFilePath members of @a initParams. Other parameters
* applicable to UFF models also apply to TLT encoded models.
*/
extern "C"
bool NvDsInferCudaEngineGetFromTltModel(nvinfer1::IBuilder * const builder,
const NvDsInferContextInitParams * const initParams,
nvinfer1::DataType dataType,
nvinfer1::ICudaEngine *& cudaEngine);
#endif
/** @} */

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
/**
* @file
* <b>Utility functions required by DeepStream Inference API </b>
*/
#ifndef __NVDSINFER_UTILS_H__
#define __NVDSINFER_UTILS_H__
#include "nvdsinfer_dbscan.h"
#include "nvdsinfer_tlt.h"
#endif

View File

@ -0,0 +1,123 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvdsinfer_custom_impl.h"
#include "nvdsinfer_context.h"
//#include "yoloPlugins.h"
#include "yolo.h"
#include <algorithm>
#define USE_CUDA_ENGINE_GET_API 1
/**
 * Derives the Yolo network description from the DeepStream init parameters.
 *
 * The network type (yolov2 / yolov2-tiny / yolov3 / yolov3-tiny) is inferred
 * from the custom network config file name (case-insensitively). The cfg and
 * weights paths are validated for presence and existence.
 *
 * @param[out] networkInfo Filled with type, cfg/weights paths, device type
 *                         and input blob name on success.
 * @param[in]  initParams  DeepStream context init params carrying the paths.
 * @return True on success; false (with a message on stderr) otherwise.
 */
static bool getYoloNetworkInfo (NetworkInfo &networkInfo, const NvDsInferContextInitParams* initParams)
{
    std::string yoloCfg = initParams->customNetworkConfigFilePath;
    std::string yoloType;

    // Lower-case the cfg path so the type detection is case-insensitive.
    std::transform (yoloCfg.begin(), yoloCfg.end(), yoloCfg.begin(), [] (uint8_t c) {
        return std::tolower (c);});

    if (yoloCfg.find("yolov2") != std::string::npos) {
        if (yoloCfg.find("yolov2-tiny") != std::string::npos)
            yoloType = "yolov2-tiny";
        else
            yoloType = "yolov2";
    } else if (yoloCfg.find("yolov3") != std::string::npos) {
        if (yoloCfg.find("yolov3-tiny") != std::string::npos)
            yoloType = "yolov3-tiny";
        else
            yoloType = "yolov3";
    } else {
        std::cerr << "Yolo type is not defined from config file name:"
                  << yoloCfg << std::endl;
        return false;
    }

    networkInfo.networkType   = yoloType;
    networkInfo.configFilePath = initParams->customNetworkConfigFilePath;
    networkInfo.wtsFilePath   = initParams->modelFilePath;
    networkInfo.deviceType    = (initParams->useDLA ? "kDLA" : "kGPU");
    networkInfo.inputBlobName = "data";

    if (networkInfo.configFilePath.empty() ||
        networkInfo.wtsFilePath.empty()) {
        std::cerr << "Yolo config file or weights file is NOT specified."
                  << std::endl;
        return false;
    }

    if (!fileExists(networkInfo.configFilePath) ||
        !fileExists(networkInfo.wtsFilePath)) {
        // Fixed grammar of the original message ("is NOT exist").
        std::cerr << "Yolo config file or weights file does NOT exist."
                  << std::endl;
        return false;
    }
    return true;
}
#if !USE_CUDA_ENGINE_GET_API
/**
 * Creates a custom Yolo model parser (used only when
 * USE_CUDA_ENGINE_GET_API is 0).
 *
 * @param[in] initParams Init params carrying the cfg/weights file paths.
 * @return A new Yolo parser instance, or nullptr if the network info
 *         could not be derived from @a initParams.
 */
IModelParser* NvDsInferCreateModelParser(
    const NvDsInferContextInitParams* initParams) {
    NetworkInfo networkInfo;
    if (!getYoloNetworkInfo(networkInfo, initParams)) {
        return nullptr;
    }
    // Fixed typo in the original log message ("hase successed").
    std::cout << "Build cuda engine has succeeded on USE_CUDA_ENGINE_GET_API=0" << std::endl;
    return new Yolo(networkInfo);
}
#else
extern "C"
bool NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder * const builder,
const NvDsInferContextInitParams * const initParams,
nvinfer1::DataType dataType,
nvinfer1::ICudaEngine *& cudaEngine);
/**
 * Builds a TensorRT CUDA engine for a Yolo model from the cfg/weights
 * referenced by @a initParams.
 *
 * @param[in]  builder    TensorRT builder used to construct the engine.
 * @param[in]  initParams DeepStream init params with model paths.
 * @param[in]  dataType   Requested precision (unused here; precision is
 *                        handled inside Yolo::createEngine).
 * @param[out] cudaEngine Receives the built engine on success.
 * @return True on success; false (with a message on stderr) otherwise.
 */
extern "C"
bool NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder * const builder,
        const NvDsInferContextInitParams * const initParams,
        nvinfer1::DataType dataType,
        nvinfer1::ICudaEngine *& cudaEngine)
{
    std::cout << "Begin cuda engine build..." << std::endl;
    NetworkInfo networkInfo;
    if (!getYoloNetworkInfo(networkInfo, initParams)) {
        return false;
    }

    Yolo yolo(networkInfo);
    cudaEngine = yolo.createEngine (builder);
    if (cudaEngine == nullptr)
    {
        std::cerr << "Failed to build cuda engine on "
                  << networkInfo.configFilePath << std::endl;
        return false;
    }
    // Fixed typo in the original log message ("hase successed").
    std::cout << "Build cuda engine has succeeded on " << networkInfo.configFilePath << std::endl;
    return true;
}
#endif

View File

@ -0,0 +1,349 @@
/**
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
syntax = "proto3";
package nvdsinferserver.config;
enum MediaFormat {
MEDIA_FORMAT_NONE = 0;
IMAGE_FORMAT_RGB = 1;
IMAGE_FORMAT_BGR = 2;
IMAGE_FORMAT_GRAY = 3;
}
enum TensorOrder {
TENSOR_ORDER_NONE = 0;
TENSOR_ORDER_LINEAR = 1;
TENSOR_ORDER_NHWC = 2;
}
enum TensorDataType {
TENSOR_DT_NONE = 0;
TENSOR_DT_FP32 = 1;
TENSOR_DT_FP16 = 2;
TENSOR_DT_INT8 = 3;
TENSOR_DT_INT16 = 4;
TENSOR_DT_INT32 = 5;
TENSOR_DT_UINT8 = 6;
TENSOR_DT_UINT16 = 7;
TENSOR_DT_UINT32 = 8;
}
enum FrameScalingHW {
FRAME_SCALING_HW_DEFAULT = 0;
FRAME_SCALING_HW_GPU = 1;
FRAME_SCALING_HW_VIC = 2;
}
/** Custom lib for preload */
message CustomLib {
/** Path point to the custom library */
string path = 1;
}
/** Preprocessing settings */
message PreProcessParams {
/** Input data normalization settings */
message ScaleNormalize
{
/** Normalization factor to scale the input pixels with. */
float scale_factor = 1;
/** Per channel offsets for mean subtraction. This is an alternative to
* the mean image file. The number of offsets in the array should be
* exactly equal to the number of input channels.
*/
repeated float channel_offsets = 2;
/** Path to the mean image file (PPM format). Resolution of the file
* should be equal to the network input resolution.
*/
string mean_file = 3;
}
/** Network input format */
MediaFormat network_format = 1;
/** Network input tensor order */
TensorOrder tensor_order = 2;
/** Name of the network input tensor the preprocessed data is set to */
string tensor_name = 3;
/** Indicating if aspect ratio should be maintained when scaling to
* network resolution. Right/bottom areas will be filled with black areas. */
int32 maintain_aspect_ratio = 4;
/** Compute hardware to use for scaling frames / objects. */
FrameScalingHW frame_scaling_hw = 5;
/** Interpolation filter to use while scaling. Refer to
* NvBufSurfTransform_Inter for supported filter values. */
uint32 frame_scaling_filter = 6;
/** Preprocessing methods */
oneof preprocess_method {
/** Usual scaling normalization for images */
ScaleNormalize normalize = 7;
}
}
/** Deepstream Detection settings */
message DetectionParams {
/** Non-maximum-suppression cluster method */
message Nms
{
/** Detections with a score below this threshold are rejected */
float confidence_threshold = 1;
/** IOU threshold */
float iou_threshold = 2;
/** Top-k detection results to keep after nms. 0: keep all */
int32 topk = 3;
}
/** DBScan object clustering */
message DbScan {
/** Bounding box detection threshold. */
float pre_threshold = 1;
// float post_threshold = 2;
/** Epsilon to control merging of overlapping boxes */
float eps = 3;
/** Minimum boxes in a cluster to be considered an object */
int32 min_boxes = 4;
/** Minimum score in a cluster for it to be considered as an object */
float min_score = 5;
}
/** Cluster method based on grouping rectangles */
message GroupRectangle {
/** Detections with a score below this threshold are rejected */
float confidence_threshold = 1;
/** How many bboxes can be clustered together */
int32 group_threshold = 2;
/** Epsilon to control merging of overlapping boxes */
float eps = 3;
}
/** Simple cluster method for confidence filtering */
message SimpleCluster
{
/** Detections with a score below this threshold are rejected */
float threshold = 1;
}
/** Specific parameters controlled per class */
message PerClassParams {
/** Pre-threshold used for filtering out detections whose confidence is
* less than this value */
float pre_threshold = 1;
}
/** Number of classes detected by a detector network. */
int32 num_detected_classes = 1;
/** Per class detection parameters. key-value is for
* <class_id:class_parameter> */
map<int32, PerClassParams> per_class_params = 2;
/** Name of the custom bounding box function in the custom library. */
string custom_parse_bbox_func = 3;
/** Cluster methods for bbox, choose one only */
oneof clustering_policy {
/** Non-maximum-suppression, reserved, not supported yet */
Nms nms = 4;
/** DbScan clustering parameters */
DbScan dbscan = 5;
/** Grouping rectangles */
GroupRectangle group_rectangle = 6;
/** Simple threshold filter */
SimpleCluster simple_cluster = 7;
}
}
/** Deepstream Classification settings */
message ClassificationParams {
/** Classification threshold */
float threshold = 1;
/** Custom function for classification parsing */
string custom_parse_classifier_func = 2;
}
/** Deepstream segmentation settings */
message SegmentationParams {
/** reserved field */
float threshold = 1;
}
/** Other Network settings, need application to do postprocessing */
message OtherNetworkParams {
/** reserved field */
string type_name = 1;
}
/** TRTIS classification settings */
message TrtIsClassifyParams
{
/** Top k classification results */
uint32 topk = 1;
/** Classification threshold */
float threshold = 2;
/** [optional] specify which output tensor is used for triton classification.*/
string tensor_name = 3;
}
/** Post-processing settings */
message PostProcessParams {
/** Label file path. It is relative to the config file path if the value
* is not an absolute path
*/
string labelfile_path = 1;
/** Post-processing can only have one of the following types */
oneof process_type
{
/** Deepstream detection parameters */
DetectionParams detection = 2;
/** Deepstream classification parameters */
ClassificationParams classification = 3;
/** Deepstream segmentation parameters */
SegmentationParams segmentation = 4;
/** Deepstream other postprocessing parameters */
OtherNetworkParams other = 5;
/* TRT-IS classification parameters */
TrtIsClassifyParams trtis_classification = 6;
}
}
/** Network Input layer information */
message InputLayer {
/** input tensor name, optional*/
string name = 1;
/** fixed inference shape, only required when backend has wildcard shape */
repeated int32 dims = 2;
/** tensor data type, optional. default TENSOR_DT_NONE */
TensorDataType data_type = 3;
}
/** Network Output layer information */
message OutputLayer {
/** Output tensor name */
string name = 1;
}
/** TRTIS inference backend parameters */
message TrtISParams {
/** TRTIS models repo settings */
message ModelRepo
{
/** Root directory for all models.
* All models should set same @a root value */
string root = 1;
/** Log verbose level, the larger the more logs output
* (0): ERROR;
* (1): WARNING;
* (2): INFO
* (3+): VERBOSE Level
*/
uint32 log_level = 2;
/** Enable strict model config.
* true: config.pbtxt must exist.
* false: trtis tries to figure out the model's config file; it may cause
* failure on different input/output dims.
*/
bool strict_model_config = 3;
/** TensorFlow GPU memory fraction, default 0.0 */
float tf_gpu_memory_fraction = 4;
/** TensorFlow soft placement, allowed by default */
bool tf_disable_soft_placement = 5;
}
/** trt-is model name */
string model_name = 1;
/** Model version, -1 is for latest version, required */
int64 version = 2;
oneof server {
/** trt-is server model repo, all models must have same @a model_repo */
ModelRepo model_repo = 3;
}
}
/** Network LSTM Parameters */
message LstmParams {
/** init constant value for lstm input tensors, usually zero or one */
message InitConst {
/** const value */
float value = 1;
}
/** LSTM loop information */
message LstmLoop {
/** input tensor name */
string input = 1;
/** output tensor name */
string output = 2;
/** initialize input tensor for first frame */
oneof init_state {
/** init const value, default is zero */
InitConst init_const = 3;
}
/** enable if need keep lstm output tensor data for application output
* parsing, it's disabled by default */
bool keep_output = 4;
}
repeated LstmLoop loops = 1;
}
/** Network backend Settings */
message BackendParams {
/** input tensors settings, optional */
repeated InputLayer inputs = 1;
/** outputs tensor settings, optional */
repeated OutputLayer outputs = 2;
/** inference framework */
oneof infer_framework
{
/** TRT-IS inference framework */
TrtISParams trt_is = 3;
}
}
/** Extra controls */
message ExtraControl {
/** Enable if input tensor data needs to be copied for application output
* parsing; it's disabled by default */
bool copy_input_to_host_buffers = 1;
/** Defines how many buffers are allocated for output tensors in the pool.
* Optional, default is 2, the value can be in range [2:6] */
int32 output_buffer_pool_size = 2;
}
/** Inference configuration */
message InferenceConfig {
/** Unique id, larger than 0, required for multiple models inference */
uint32 unique_id = 1;
/** GPU id settings. Optional. Supports single GPU only at this time.
* Default values [0] */
repeated int32 gpu_ids = 2;
/** Max batch size. Required, can be reset by plugin */
uint32 max_batch_size = 3;
/** Inference backend parameters. Required */
BackendParams backend = 4;
/** Preprocessing for tensors, required */
PreProcessParams preprocess = 5;
/** Postprocessing for all tensor data, required */
PostProcessParams postprocess = 6;
/** Custom libs for tensor output parsing or preload, optional */
CustomLib custom_lib = 7;
/** Advanced controls as optional */
oneof advanced
{
/** Extra controls */
ExtraControl extra = 8;
}
/** LSTM controller */
oneof lstm_control {
/** LSTM parameters */
LstmParams lstm = 9;
}
}

View File

@ -0,0 +1,126 @@
/**
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
syntax = "proto3";
package nvdsinferserver.config;
import "nvdsinferserver_config.proto";
/** Plugin Control settings for input / inference / output */
message PluginControl {
/** Color values for Red/Green/Blue/Alpha, all values are in range [0, 1] */
message Color {
/** Red color value */
float r = 1;
/** Green color value */
float g = 2;
/** Blue color value */
float b = 3;
/** Alpha color value */
float a = 4;
}
/** Bounding box filter */
message BBoxFilter {
/** Bounding box minimum width */
uint32 min_width = 1;
/** Bounding box minimum height */
uint32 min_height = 2;
/** Bounding box maximum width */
uint32 max_width = 3;
/** Bounding box maximum height */
uint32 max_height = 4;
}
/** Detection of classes filter */
message DetectClassFilter {
/** Detection Bounding box filter */
BBoxFilter bbox_filter = 1;
/** Offset of the RoI from the top of the frame. Only objects within the
* RoI are output */
uint32 roi_top_offset = 2;
/** Offset of the RoI from the bottom of the frame. Only objects within the
* RoI are output */
uint32 roi_bottom_offset = 3;
/** Specify border color for detection bounding boxes */
Color border_color = 4;
/** Specify background color for detection bounding boxes */
Color bg_color = 5;
}
/** Output detection results control */
message OutputDetectionControl {
/** Default detection classes filter */
DetectClassFilter default_filter = 1;
/** Specifies detection filters per class instead of default filter */
map<uint32, DetectClassFilter> specific_class_filters = 2;
}
/** Input objects control */
message InputObjectControl {
/** Input bounding box of objects filter */
BBoxFilter bbox_filter = 1;
}
/** Processing Mode */
enum ProcessMode {
/** Processing Default Mode */
PROCESS_MODE_DEFAULT = 0;
/** Processing Full Frame Mode */
PROCESS_MODE_FULL_FRAME = 1;
/** Processing Object Clipping Mode */
PROCESS_MODE_CLIP_OBJECTS = 2;
}
/** Plugin input data control policy */
message InputControl {
/** Processing mode setting, optional */
ProcessMode process_mode = 1;
/** Unique ID of the GIE on whose metadata (bounding boxes) this GIE is to
* operate on. It is used for secondary GIE only. */
int32 operate_on_gie_id = 2;
/** Class IDs of the parent GIE on which this GIE is to operate on.
* It is used for secondary GIE only. */
repeated int32 operate_on_class_ids = 3;
/** Specifies the number of consecutive batches to be skipped for
* inference. Default is 0. */
uint32 interval = 4;
/** Enables inference on detected objects and asynchronous metadata
* attachments. Works only when tracker-id is valid. It's used for
* classifier with secondary GIE only. */
bool async_mode = 5;
/** Input object filter policy */
oneof object_filter {
/** Input object control settings */
InputObjectControl object_control = 6;
}
}
/** Plugin output data control policy */
message OutputControl {
/* Enable attaching inference output tensor metadata */
bool output_tensor_meta = 1;
/* Postprocessing control policy */
oneof postprocess_control {
/* Detection results filter */
OutputDetectionControl detect_control = 2;
}
}
/** Low-level libnvds_infer_server inference configuration settings */
InferenceConfig infer_config =1;
/** Control plugin input buffers, object filter before inference */
InputControl input_control = 2;
/** Control plugin output meta data after inference */
OutputControl output_control = 3;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,309 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA DeepStream: Metadata Extension Structures</b>
*
* @b Description: This file defines the NVIDIA DeepStream metadata structures
* used to describe metadata objects.
*/
/**
* @defgroup metadata_extensions Metadata Extension Structures
*
* Defines metadata structures used to describe metadata objects.
*
* @ingroup NvDsMetaApi
* @{
*/
#ifndef NVDSMETA_H_
#define NVDSMETA_H_
#include <glib.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
* Defines event type flags.
*/
typedef enum NvDsEventType {
NVDS_EVENT_ENTRY,
NVDS_EVENT_EXIT,
NVDS_EVENT_MOVING,
NVDS_EVENT_STOPPED,
NVDS_EVENT_EMPTY,
NVDS_EVENT_PARKED,
NVDS_EVENT_RESET,
/** Reserved for future use. Custom events must be assigned values
greater than this. */
NVDS_EVENT_RESERVED = 0x100,
/** Specifies a custom event. */
NVDS_EVENT_CUSTOM = 0x101,
NVDS_EVENT_FORCE32 = 0x7FFFFFFF
} NvDsEventType;
/**
* Defines object type flags.
*/
typedef enum NvDsObjectType {
NVDS_OBJECT_TYPE_VEHICLE,
NVDS_OBJECT_TYPE_PERSON,
NVDS_OBJECT_TYPE_FACE,
NVDS_OBJECT_TYPE_BAG,
NVDS_OBJECT_TYPE_BICYCLE,
NVDS_OBJECT_TYPE_ROADSIGN,
NVDS_OBJECT_TYPE_VEHICLE_EXT,
NVDS_OBJECT_TYPE_PERSON_EXT,
NVDS_OBJECT_TYPE_FACE_EXT,
/** Reserved for future use. Custom objects must be assigned values
greater than this. */
NVDS_OBJECT_TYPE_RESERVED = 0x100,
/** Specifies a custom object. */
NVDS_OBJECT_TYPE_CUSTOM = 0x101,
/** "object" key will be missing in the schema */
NVDS_OBJECT_TYPE_UNKNOWN = 0x102,
NVDS_OBEJCT_TYPE_FORCE32 = 0x7FFFFFFF
} NvDsObjectType;
/**
* Defines payload type flags.
*/
typedef enum NvDsPayloadType {
NVDS_PAYLOAD_DEEPSTREAM,
NVDS_PAYLOAD_DEEPSTREAM_MINIMAL,
/** Reserved for future use. Custom payloads must be assigned values
greater than this. */
NVDS_PAYLOAD_RESERVED = 0x100,
/** Specifies a custom payload. You must implement the nvds_msg2p_*
interface. */
NVDS_PAYLOAD_CUSTOM = 0x101,
NVDS_PAYLOAD_FORCE32 = 0x7FFFFFFF
} NvDsPayloadType;
/**
* Holds a rectangle's position and size.
*/
typedef struct NvDsRect {
float top; /**< Holds the position of rectangle's top in pixels. */
float left; /**< Holds the position of rectangle's left side in pixels. */
float width; /**< Holds the rectangle's width in pixels. */
float height; /**< Holds the rectangle's height in pixels. */
} NvDsRect;
/**
* Holds geolocation parameters.
*/
typedef struct NvDsGeoLocation {
gdouble lat; /**< Holds the location's latitude. */
gdouble lon; /**< Holds the location's longitude. */
gdouble alt; /**< Holds the location's altitude. */
} NvDsGeoLocation;
/**
* Hold a coordinate's position.
*/
typedef struct NvDsCoordinate {
gdouble x; /**< Holds the coordinate's X position. */
gdouble y; /**< Holds the coordinate's Y position. */
gdouble z; /**< Holds the coordinate's Z position. */
} NvDsCoordinate;
/**
* Holds an object's signature.
*/
typedef struct NvDsObjectSignature {
/** Holds a pointer to an array of signature values. */
gdouble *signature;
/** Holds the number of signature values in @a signature. */
guint size;
} NvDsObjectSignature;
/**
* Holds a vehicle object's parameters.
*/
typedef struct NvDsVehicleObject {
gchar *type; /**< Holds a pointer to the type of the vehicle. */
gchar *make; /**< Holds a pointer to the make of the vehicle. */
gchar *model; /**< Holds a pointer to the model of the vehicle. */
gchar *color; /**< Holds a pointer to the color of the vehicle. */
gchar *region; /**< Holds a pointer to the region of the vehicle. */
gchar *license; /**< Holds a pointer to the license number of the vehicle.*/
} NvDsVehicleObject;
/**
* Holds a person object's parameters.
*/
typedef struct NvDsPersonObject {
gchar *gender; /**< Holds a pointer to the person's gender. */
gchar *hair; /**< Holds a pointer to the person's hair color. */
gchar *cap; /**< Holds a pointer to the type of cap the person is
wearing, if any. */
gchar *apparel; /**< Holds a pointer to a description of the person's
apparel. */
guint age; /**< Holds the person's age. */
} NvDsPersonObject;
/**
* Holds a face object's parameters.
*/
typedef struct NvDsFaceObject {
gchar *gender; /**< Holds a pointer to the person's gender. */
gchar *hair; /**< Holds a pointer to the person's hair color. */
gchar *cap; /**< Holds a pointer to the type of cap the person
is wearing, if any. */
gchar *glasses; /**< Holds a pointer to the type of glasses the person
is wearing, if any. */
gchar *facialhair;/**< Holds a pointer to the person's facial hair color. */
gchar *name; /**< Holds a pointer to the person's name. */
gchar *eyecolor; /**< Holds a pointer to the person's eye color. */
guint age; /**< Holds the person's age. */
} NvDsFaceObject;
/**
* Holds a vehicle object's parameters.
*/
typedef struct NvDsVehicleObjectExt {
gchar *type; /**< Holds a pointer to the type of the vehicle. */
gchar *make; /**< Holds a pointer to the make of the vehicle. */
gchar *model; /**< Holds a pointer to the model of the vehicle. */
gchar *color; /**< Holds a pointer to the color of the vehicle. */
gchar *region; /**< Holds a pointer to the region of the vehicle. */
gchar *license; /**< Holds a pointer to the license number of the vehicle.*/
GList *mask; /**< Holds a list of polygons for vehicle mask. */
} NvDsVehicleObjectExt;
/**
* Holds a person object's parameters.
*/
typedef struct NvDsPersonObjectExt {
gchar *gender; /**< Holds a pointer to the person's gender. */
gchar *hair; /**< Holds a pointer to the person's hair color. */
gchar *cap; /**< Holds a pointer to the type of cap the person is
wearing, if any. */
gchar *apparel; /**< Holds a pointer to a description of the person's
apparel. */
guint age; /**< Holds the person's age. */
GList *mask; /**< Holds a list of polygons for person mask. */
} NvDsPersonObjectExt;
/**
* Holds a face object's parameters.
*/
typedef struct NvDsFaceObjectWithExt {
gchar *gender; /**< Holds a pointer to the person's gender. */
gchar *hair; /**< Holds a pointer to the person's hair color. */
gchar *cap; /**< Holds a pointer to the type of cap the person
is wearing, if any. */
gchar *glasses; /**< Holds a pointer to the type of glasses the person
is wearing, if any. */
gchar *facialhair;/**< Holds a pointer to the person's facial hair color. */
gchar *name; /**< Holds a pointer to the person's name. */
gchar *eyecolor; /**< Holds a pointer to the person's eye color. */
guint age; /**< Holds the person's age. */
GList *mask; /**< Holds a list of polygons for face mask. */
} NvDsFaceObjectExt;
/**
* Holds event message meta data.
*
* You can attach various types of objects (vehicle, person, face, etc.)
* to an event by setting a pointer to the object in @a extMsg.
*
* Similarly, you can attach a custom object to an event by setting a pointer to the object in @a extMsg.
* A custom object must be handled by the metadata parsing module accordingly.
*/
typedef struct NvDsEventMsgMeta {
/** Holds the event's type. */
NvDsEventType type;
/** Holds the object's type. */
NvDsObjectType objType;
/** Holds the object's bounding box. */
NvDsRect bbox;
/** Holds the object's geolocation. */
NvDsGeoLocation location;
/** Holds the object's coordinates. */
NvDsCoordinate coordinate;
/** Holds the object's signature. */
NvDsObjectSignature objSignature;
/** Holds the object's class ID. */
gint objClassId;
/** Holds the ID of the sensor that generated the event. */
gint sensorId;
/** Holds the ID of the analytics module that generated the event. */
gint moduleId;
/** Holds the ID of the place related to the object. */
gint placeId;
/** Holds the ID of the component (plugin) that generated this event. */
gint componentId;
/** Holds the video frame ID of this event. */
gint frameId;
/** Holds the confidence level of the inference. */
gdouble confidence;
/** Holds the object's tracking ID. */
gint trackingId;
/** Holds a pointer to the generated event's timestamp. */
gchar *ts;
/** Holds a pointer to the detected or inferred object's ID. */
gchar *objectId;
/** Holds a pointer to a string containing the sensor's identity. */
gchar *sensorStr;
/** Holds a pointer to a string containing other attributes associated with
the object. */
gchar *otherAttrs;
/** Holds a pointer to the name of the video file. */
gchar *videoPath;
/** Holds a pointer to event message meta data. This can be used to hold
data that can't be accommodated in the existing fields, or an associated
object (representing a vehicle, person, face, etc.). */
gpointer extMsg;
/** Holds the size of the custom object at @a extMsg. */
guint extMsgSize;
} NvDsEventMsgMeta;
/**
* Holds event information.
*/
typedef struct _NvDsEvent {
/** Holds the type of event. */
NvDsEventType eventType;
/** Holds a pointer to event metadata. */
NvDsEventMsgMeta *metadata;
} NvDsEvent;
/**
* Holds payload metadata.
*/
typedef struct NvDsPayload {
/** Holds a pointer to the payload. */
gpointer payload;
/** Holds the size of the payload. */
guint payloadSize;
/** Holds the ID of the component (plugin) which attached the payload
(optional). */
guint componentId;
} NvDsPayload;
#ifdef __cplusplus
}
#endif
#endif /* NVDSMETA_H_ */
/** @} */

View File

@ -0,0 +1,665 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <unordered_map>
#include <vector>
#include "nvdsinfer_custom_impl.h"
#include "trt_utils.h"
// Number of classes the detector was trained on; must agree with
// num-detected-classes in the nvinfer config.
static const int NUM_CLASSES_YOLO = 5;
// Source-frame resolution used to rescale boxes in NvDsInferParseYoloV5.
const int IMAGE_W = 1920;
const int IMAGE_H = 1080;
// Network input resolution.
const int INPUT_W = 640;
const int INPUT_H = 640;
// Post-processing thresholds and batch size.
#define NMS_THRESH 0.1
#define CONF_THRESH 0.1
#define BATCH_SIZE 1
extern "C" bool NvDsInferParseCustomYoloV5(
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferParseObjectInfo> &objectList);
extern "C" bool NvDsInferParseCustomYoloV4(
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferParseObjectInfo> &objectList);
extern "C" bool NvDsInferParseCustomYoloV3(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList);
extern "C" bool NvDsInferParseCustomYoloV3Tiny(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList);
extern "C" bool NvDsInferParseCustomYoloV2(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList);
extern "C" bool NvDsInferParseCustomYoloV2Tiny(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList);
extern "C" bool NvDsInferParseCustomYoloTLT(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Number of bbox coordinates carried per detection (cx, cy, w, h).
static constexpr int LOCATIONS = 4;
// One decoded detection, laid out to be memcpy-compatible with the raw float
// output tensor: 4 bbox floats + confidence + class id (6 floats total).
struct alignas(float) Detection{
//center_x center_y w h
float bbox[LOCATIONS];
float conf; // bbox_conf * cls_conf
float class_id; // float because it is read straight from the tensor
};
/* Intersection-over-union of two boxes given as (cx, cy, w, h).
 * Returns 0 when the boxes do not overlap. */
float iou(float lbox[4], float rbox[4]) {
    // Overlap window of the two boxes in corner coordinates.
    const float left   = std::max(lbox[0] - lbox[2] / 2.f, rbox[0] - rbox[2] / 2.f);
    const float right  = std::min(lbox[0] + lbox[2] / 2.f, rbox[0] + rbox[2] / 2.f);
    const float top    = std::max(lbox[1] - lbox[3] / 2.f, rbox[1] - rbox[3] / 2.f);
    const float bottom = std::min(lbox[1] + lbox[3] / 2.f, rbox[1] + rbox[3] / 2.f);
    if (top > bottom || left > right)
        return 0.0f; // disjoint
    const float inter = (right - left) * (bottom - top);
    // union = areaA + areaB - intersection
    return inter / (lbox[2] * lbox[3] + rbox[2] * rbox[3] - inter);
}
// Strict-weak ordering for std::sort: higher-confidence detections first.
bool cmp(Detection& a, Detection& b) {
return a.conf > b.conf;
}
void nms(std::vector<Detection>& res, float *output, float conf_thresh, float nms_thresh = 0.5) {
int det_size = sizeof(Detection) / sizeof(float);
std::map<float, std::vector<Detection>> m;
for (int i = 0; i < output[0] && i < 1000; i++) {
if (output[1 + det_size * i + 4] <= conf_thresh) continue;
Detection det;
memcpy(&det, &output[1 + det_size * i], det_size * sizeof(float));
if (m.count(det.class_id) == 0) m.emplace(det.class_id, std::vector<Detection>());
m[det.class_id].push_back(det);
}
for (auto it = m.begin(); it != m.end(); it++) {
//std::cout << it->second[0].class_id << " --- " << std::endl;
auto& dets = it->second;
std::sort(dets.begin(), dets.end(), cmp);
for (size_t m = 0; m < dets.size(); ++m) {
auto& item = dets[m];
res.push_back(item);
for (size_t n = m + 1; n < dets.size(); ++n) {
if (iou(item.bbox, dets[n].bbox) > nms_thresh) {
dets.erase(dets.begin()+n);
--n;
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* This is a sample bounding box parsing function for the sample YoloV5m detector model */
/* Decodes the YoloV5 engine output (layer 0), runs per-class NMS, then
 * applies class-specific box padding in source-image space before reporting
 * objects back in network-input coordinates. Interface unchanged. */
static bool NvDsInferParseYoloV5(
    std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
    NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams,
    std::vector<NvDsInferParseObjectInfo>& objectList)
{
    if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)
    {
        std::cerr << "WARNING: Num classes mismatch. Configured:"
                  << detectionParams.numClassesConfigured
                  << ", detected by network: " << NUM_CLASSES_YOLO << std::endl;
    }
    std::vector<Detection> res;
    // Letterbox factor that mapped the source frame (IMAGE_W x IMAGE_H) onto
    // the network input (INPUT_W x INPUT_H); used to map boxes both ways.
    float scale = std::min(INPUT_W / (IMAGE_W*1.0), INPUT_H / (IMAGE_H*1.0));
    // Padding ratios, applied in source-image space below.
    float scale_bbox = 0.1f;                    // class 0: pad 10% on all sides
    float scale_height_up_wheelchair = 0.75f;   // class 1: extend 75% upward
    float scale_height_down_wheelchair = 0.15f; // class 1: extend 15% downward
    float scale_width_wheelchair = 0.2f;        // class 1: extend 20% sideways
    nms(res, (float*)(outputLayersInfo[0].buffer), CONF_THRESH, NMS_THRESH);
    for (auto& r : res) {
        NvDsInferParseObjectInfo oinfo;
        oinfo.classId = r.class_id;
        // (cx, cy, w, h) -> top-left anchored box, network-input space.
        oinfo.left = static_cast<unsigned int>(r.bbox[0] - r.bbox[2] * 0.5f);
        oinfo.top = static_cast<unsigned int>(r.bbox[1] - r.bbox[3] * 0.5f);
        oinfo.width = static_cast<unsigned int>(r.bbox[2]);
        oinfo.height = static_cast<unsigned int>(r.bbox[3]);
        oinfo.detectionConfidence = r.conf;
        // Undo the letterbox scale to work in source-image coordinates.
        float x1 = oinfo.left / scale;
        float y1 = oinfo.top / scale;
        float width = oinfo.width / scale;
        float height = oinfo.height / scale;
        float x2 = x1 + width;
        float y2 = y1 + height;
        if (r.class_id == 1) {
            // Wheelchair ("xe lan"): first square the box up toward its
            // bottom edge when it is taller than wide...
            if (height > width) {
                float diff = height - width;
                y1 = y1 + diff;
            }
            // ...then pad asymmetrically (more headroom above), clamped to
            // the frame bounds.
            x1 = ((x1 - scale_width_wheelchair * width) >= 0) ? (x1 - scale_width_wheelchair * width) : 0;
            y1 = ((y1 - scale_height_up_wheelchair * height) >= 0) ? (y1 - scale_height_up_wheelchair * height) : 0;
            x2 = ((x2 + scale_width_wheelchair * width) <= IMAGE_W) ? (x2 + scale_width_wheelchair * width) : IMAGE_W;
            y2 = ((y2 + scale_height_down_wheelchair * height) <= IMAGE_H) ? (y2 + scale_height_down_wheelchair * height) : IMAGE_H;
            // BUGFIX(naming): this vertical extent was previously called
            // new_width. Cap the padded height at 80% of the frame by
            // trimming from the top.
            float new_height = y2 - y1;
            if (new_height > 0.8 * IMAGE_H) {
                float diff = new_height - (0.8 * IMAGE_H);
                y1 = y1 + diff;
            }
            // Convert back to network-input coordinates.
            oinfo.left = (float) x1 * scale;
            oinfo.top = (float) y1 * scale;
            oinfo.width = (float) (x2 - x1) * scale;
            oinfo.height = (float) (y2 - y1) * scale;
        } else if (r.class_id == 0) {
            // Cane ("gay"): symmetric 10% padding, clamped to the frame.
            x1 = ((x1 - scale_bbox * width) >= 0) ? (x1 - scale_bbox * width) : 0;
            y1 = ((y1 - scale_bbox * height) >= 0) ? (y1 - scale_bbox * height) : 0;
            x2 = ((x2 + scale_bbox * width) <= IMAGE_W) ? (x2 + scale_bbox * width) : IMAGE_W;
            y2 = ((y2 + scale_bbox * height) <= IMAGE_H) ? (y2 + scale_bbox * height) : IMAGE_H;
            oinfo.left = (float) x1 * scale;
            oinfo.top = (float) y1 * scale;
            oinfo.width = (float) (x2 - x1) * scale;
            oinfo.height = (float) (y2 - y1) * scale;
        }
        // Other classes keep the raw NMS box untouched.
        objectList.push_back(oinfo);
    }
    return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* This is a sample bounding box parsing function for the sample YoloV4 detector model */
static bool NvDsInferParseYoloV4(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)
{
std::cerr << "WARNING: Num classes mismatch. Configured:"
<< detectionParams.numClassesConfigured
<< ", detected by network: " << NUM_CLASSES_YOLO << std::endl;
}
std::vector<Detection> res;
nms(res, (float*)(outputLayersInfo[0].buffer), CONF_THRESH, NMS_THRESH);
//std::cout<<"Nms done sucessfully----"<<std::endl;
for(auto& r : res) {
NvDsInferParseObjectInfo oinfo;
oinfo.classId = r.class_id;
oinfo.left = static_cast<unsigned int>(r.bbox[0]-r.bbox[2]*0.5f);
oinfo.top = static_cast<unsigned int>(r.bbox[1]-r.bbox[3]*0.5f);
oinfo.width = static_cast<unsigned int>(r.bbox[2]);
oinfo.height = static_cast<unsigned int>(r.bbox[3]);
oinfo.detectionConfidence = r.conf;
//std::cout << static_cast<unsigned int>(r.bbox[0]) << "," << static_cast<unsigned int>(r.bbox[1]) << "," << static_cast<unsigned int>(r.bbox[2]) << ","
// << static_cast<unsigned int>(r.bbox[3]) << "," << static_cast<unsigned int>(r.class_id) << "," << static_cast<unsigned int>(r.conf) << std::endl;
objectList.push_back(oinfo);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* This is a sample bounding box parsing function for the sample YoloV3 detector model */
/* Converts a cell-relative YOLO center box to a top-left box at network-input
 * resolution, clamped to the input canvas. clamp() comes from trt_utils.h. */
static NvDsInferParseObjectInfo convertBBox(const float& bx, const float& by, const float& bw,
                                            const float& bh, const int& stride, const uint& netW,
                                            const uint& netH)
{
    NvDsInferParseObjectInfo out;
    // Scale the grid-cell center up to input-space pixels.
    const float cx = bx * stride;
    const float cy = by * stride;
    // Corner form, each edge clamped to the canvas.
    const float left   = clamp(cx - bw / 2, 0, netW);
    const float top    = clamp(cy - bh / 2, 0, netH);
    const float right  = clamp(cx - bw / 2 + bw, 0, netW);
    const float bottom = clamp(cy - bh / 2 + bh, 0, netH);
    out.left = left;
    out.top = top;
    out.width = clamp(right - left, 0, netW);
    out.height = clamp(bottom - top, 0, netH);
    return out;
}
static void addBBoxProposal(const float bx, const float by, const float bw, const float bh,
const uint stride, const uint& netW, const uint& netH, const int maxIndex,
const float maxProb, std::vector<NvDsInferParseObjectInfo>& binfo)
{
NvDsInferParseObjectInfo bbi = convertBBox(bx, by, bw, bh, stride, netW, netH);
if (bbi.width < 1 || bbi.height < 1) return;
bbi.detectionConfidence = maxProb;
bbi.classId = maxIndex;
binfo.push_back(bbi);
}
/* Decodes one YoloV2 region-layer tensor into box proposals.
 * Layout is channel-major: for each of numBBoxes anchors there are
 * (5 + numOutputClasses) channels (tx, ty, tw, th, objectness, class scores),
 * each channel being a gridSizeH x gridSizeW plane; hence the
 * bbindex + numGridCells * channel indexing below. */
static std::vector<NvDsInferParseObjectInfo>
decodeYoloV2Tensor(
const float* detections, const std::vector<float> &anchors,
const uint gridSizeW, const uint gridSizeH, const uint stride, const uint numBBoxes,
const uint numOutputClasses, const uint& netW,
const uint& netH)
{
std::vector<NvDsInferParseObjectInfo> binfo;
for (uint y = 0; y < gridSizeH; ++y) {
for (uint x = 0; x < gridSizeW; ++x) {
for (uint b = 0; b < numBBoxes; ++b)
{
// Anchor priors for this box slot (caller pre-scales them by stride).
const float pw = anchors[b * 2];
const float ph = anchors[b * 2 + 1];
const int numGridCells = gridSizeH * gridSizeW;
const int bbindex = y * gridSizeW + x;
// Center = cell coordinate + predicted offset.
const float bx
= x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)];
const float by
= y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)];
// Size = anchor * exp(prediction), per the YoloV2 formulation.
const float bw
= pw * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]);
const float bh
= ph * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]);
const float objectness
= detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)];
// Pick the best-scoring class for this box.
float maxProb = 0.0f;
int maxIndex = -1;
for (uint i = 0; i < numOutputClasses; ++i)
{
float prob
= (detections[bbindex
+ numGridCells * (b * (5 + numOutputClasses) + (5 + i))]);
if (prob > maxProb)
{
maxProb = prob;
maxIndex = i;
}
}
// Final confidence is objectness-weighted class probability.
maxProb = objectness * maxProb;
addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo);
}
}
}
return binfo;
}
/* Decodes one YoloV3 output tensor into box proposals. Same channel-major
 * layout as decodeYoloV2Tensor; anchors are selected through the per-scale
 * mask. NOTE(review): unlike the V2 decoder, bw/bh use the raw predictions
 * without exp() — presumably the activation is folded into the engine;
 * confirm against the model export. */
static std::vector<NvDsInferParseObjectInfo>
decodeYoloV3Tensor(
const float* detections, const std::vector<int> &mask, const std::vector<float> &anchors,
const uint gridSizeW, const uint gridSizeH, const uint stride, const uint numBBoxes,
const uint numOutputClasses, const uint& netW,
const uint& netH)
{
std::vector<NvDsInferParseObjectInfo> binfo;
for (uint y = 0; y < gridSizeH; ++y) {
for (uint x = 0; x < gridSizeW; ++x) {
for (uint b = 0; b < numBBoxes; ++b)
{
// Anchor priors for this scale, chosen via the mask indices.
const float pw = anchors[mask[b] * 2];
const float ph = anchors[mask[b] * 2 + 1];
const int numGridCells = gridSizeH * gridSizeW;
const int bbindex = y * gridSizeW + x;
// Center = cell coordinate + predicted offset.
const float bx
= x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)];
const float by
= y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)];
const float bw
= pw * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)];
const float bh
= ph * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)];
const float objectness
= detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)];
// Pick the best-scoring class for this box.
float maxProb = 0.0f;
int maxIndex = -1;
for (uint i = 0; i < numOutputClasses; ++i)
{
float prob
= (detections[bbindex
+ numGridCells * (b * (5 + numOutputClasses) + (5 + i))]);
if (prob > maxProb)
{
maxProb = prob;
maxIndex = i;
}
}
// Final confidence is objectness-weighted class probability.
maxProb = objectness * maxProb;
addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo);
}
}
}
return binfo;
}
static inline std::vector<const NvDsInferLayerInfo*>
SortLayers(const std::vector<NvDsInferLayerInfo> & outputLayersInfo)
{
std::vector<const NvDsInferLayerInfo*> outLayers;
for (auto const &layer : outputLayersInfo) {
outLayers.push_back (&layer);
}
std::sort(outLayers.begin(), outLayers.end(),
[](const NvDsInferLayerInfo* a, const NvDsInferLayerInfo* b) {
return a->inferDims.d[1] < b->inferDims.d[1];
});
return outLayers;
}
/* Shared YoloV3-family parser: pairs each output layer (sorted by grid size)
 * with an anchor mask, decodes every scale, and concatenates the proposals. */
static bool NvDsInferParseYoloV3(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList,
const std::vector<float> &anchors,
const std::vector<std::vector<int>> &masks)
{
// Boxes predicted per grid cell at each scale.
const uint kNUM_BBOXES = 3;
const std::vector<const NvDsInferLayerInfo*> sortedLayers =
SortLayers (outputLayersInfo);
// One mask per output layer is required.
if (sortedLayers.size() != masks.size()) {
std::cerr << "ERROR: yoloV3 output layer.size: " << sortedLayers.size()
<< " does not match mask.size: " << masks.size() << std::endl;
return false;
}
if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)
{
std::cerr << "WARNING: Num classes mismatch. Configured:"
<< detectionParams.numClassesConfigured
<< ", detected by network: " << NUM_CLASSES_YOLO << std::endl;
}
std::vector<NvDsInferParseObjectInfo> objects;
for (uint idx = 0; idx < masks.size(); ++idx) {
const NvDsInferLayerInfo &layer = *sortedLayers[idx]; // 255 x Grid x Grid
assert(layer.inferDims.numDims == 3);
// Grid size comes from the layer dims; stride maps cells back to pixels.
const uint gridSizeH = layer.inferDims.d[1];
const uint gridSizeW = layer.inferDims.d[2];
const uint stride = DIVUP(networkInfo.width, gridSizeW);
assert(stride == DIVUP(networkInfo.height, gridSizeH));
std::vector<NvDsInferParseObjectInfo> outObjs =
decodeYoloV3Tensor((const float*)(layer.buffer), masks[idx], anchors, gridSizeW, gridSizeH, stride, kNUM_BBOXES,
NUM_CLASSES_YOLO, networkInfo.width, networkInfo.height);
objects.insert(objects.end(), outObjs.begin(), outObjs.end());
}
objectList = objects;
return true;
}
/* Parser for the single-scale YoloV2 region layer: scales the configured
 * anchors by the grid stride and decodes the first output layer. */
static bool NvDsInferParseYoloV2(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
// copy anchor data from yolov2.cfg file
std::vector<float> anchors = {0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
5.47434, 7.88282, 3.52778, 9.77052, 9.16828};
const uint kNUM_BBOXES = 5;
if (outputLayersInfo.empty()) {
std::cerr << "Could not find output layer in bbox parsing" << std::endl;;
return false;
}
const NvDsInferLayerInfo &layer = outputLayersInfo[0];
if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)
{
std::cerr << "WARNING: Num classes mismatch. Configured:"
<< detectionParams.numClassesConfigured
<< ", detected by network: " << NUM_CLASSES_YOLO << std::endl;
}
assert(layer.inferDims.numDims == 3);
const uint gridSizeH = layer.inferDims.d[1];
const uint gridSizeW = layer.inferDims.d[2];
const uint stride = DIVUP(networkInfo.width, gridSizeW);
assert(stride == DIVUP(networkInfo.height, gridSizeH));
// Anchors in the cfg are in grid units; convert to input pixels.
for (auto& anchor : anchors) {
anchor *= stride;
}
std::vector<NvDsInferParseObjectInfo> objects =
decodeYoloV2Tensor((const float*)(layer.buffer), anchors, gridSizeW, gridSizeH, stride, kNUM_BBOXES,
NUM_CLASSES_YOLO, networkInfo.width, networkInfo.height);
objectList = objects;
return true;
}
/* C-linkage to prevent name-mangling */
/* Entry point looked up via parse-bbox-func-name=NvDsInferParseCustomYoloV5
 * in the nvinfer config; forwards to the static implementation above. */
extern "C" bool NvDsInferParseCustomYoloV5(
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferParseObjectInfo> &objectList)
{
return NvDsInferParseYoloV5(
outputLayersInfo, networkInfo, detectionParams, objectList);
}
/* C-linkage wrapper; forwards to the static YoloV4 implementation. */
extern "C" bool NvDsInferParseCustomYoloV4(
std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferParseObjectInfo> &objectList)
{
return NvDsInferParseYoloV4 (
outputLayersInfo, networkInfo, detectionParams, objectList);
}
/* C-linkage wrapper supplying the standard YoloV3 anchor set; each mask
 * selects three anchors for one output scale (largest stride first). */
extern "C" bool NvDsInferParseCustomYoloV3(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
// (width, height) anchor pairs.
static const std::vector<float> kANCHORS = {
10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0,
45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0};
static const std::vector<std::vector<int>> kMASKS = {
{6, 7, 8},
{3, 4, 5},
{0, 1, 2}};
return NvDsInferParseYoloV3 (
outputLayersInfo, networkInfo, detectionParams, objectList,
kANCHORS, kMASKS);
}
/* C-linkage wrapper for YoloV3-tiny (two output scales). */
extern "C" bool NvDsInferParseCustomYoloV3Tiny(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
// (width, height) anchor pairs.
static const std::vector<float> kANCHORS = {
10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319};
static const std::vector<std::vector<int>> kMASKS = {
{3, 4, 5},
//{0, 1, 2}}; // as per output result, select {1,2,3}
{1, 2, 3}};
return NvDsInferParseYoloV3 (
outputLayersInfo, networkInfo, detectionParams, objectList,
kANCHORS, kMASKS);
}
/* C-linkage wrapper; forwards to the static YoloV2 implementation. */
extern "C" bool NvDsInferParseCustomYoloV2(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
return NvDsInferParseYoloV2 (
outputLayersInfo, networkInfo, detectionParams, objectList);
}
/* C-linkage wrapper; YoloV2-tiny shares the YoloV2 decode path. */
extern "C" bool NvDsInferParseCustomYoloV2Tiny(
std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams,
std::vector<NvDsInferParseObjectInfo>& objectList)
{
return NvDsInferParseYoloV2 (
outputLayersInfo, networkInfo, detectionParams, objectList);
}
/* Parser for the TLT-style NMS-plugin output: four buffers in order
 * [keepCount, boxes(x1,y1,x2,y2), scores, classes]. Filters out-of-range
 * boxes and emits at most topK objects. */
extern "C" bool NvDsInferParseCustomYoloTLT(
    std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
    NvDsInferNetworkInfo const& networkInfo,
    NvDsInferParseDetectionParams const& detectionParams,
    std::vector<NvDsInferParseObjectInfo>& objectList)
{
    if(outputLayersInfo.size() != 4)
    {
        std::cerr << "Mismatch in the number of output buffers."
                  << "Expected 4 output buffers, detected in the network :"
                  << outputLayersInfo.size() << std::endl;
        return false;
    }
    // Hard cap on reported objects.
    const int topK = 200;
    const int* keepCount = static_cast <const int*>(outputLayersInfo.at(0).buffer);
    const float* boxes = static_cast <const float*>(outputLayersInfo.at(1).buffer);
    const float* scores = static_cast <const float*>(outputLayersInfo.at(2).buffer);
    const float* cls = static_cast <const float*>(outputLayersInfo.at(3).buffer);
    // BUGFIX: loop previously used `size() <= topK`, emitting up to topK + 1.
    for (int i = 0; (i < keepCount[0]) && (objectList.size() < (size_t)topK); ++i)
    {
        const float* loc = &boxes[0] + (i * 4);   // x1, y1, x2, y2
        const float* conf = &scores[0] + i;
        const float* cls_id = &cls[0] + i;
        // Confidence is expected in [0, 1]; anything larger is garbage.
        if(conf[0] > 1.001)
            continue;
        if((loc[0] < 0) || (loc[1] < 0) || (loc[2] < 0) || (loc[3] < 0))
            continue;
        // BUGFIX: loc[3] is a y coordinate; it was compared against width.
        if((loc[0] > networkInfo.width) || (loc[2] > networkInfo.width) ||
           (loc[1] > networkInfo.height) || (loc[3] > networkInfo.height))
            continue;
        // Reject inverted or over-sized boxes.
        if((loc[2] < loc[0]) || (loc[3] < loc[1]))
            continue;
        if(((loc[3] - loc[1]) > networkInfo.height) || ((loc[2]-loc[0]) > networkInfo.width))
            continue;
        NvDsInferParseObjectInfo curObj{static_cast<unsigned int>(cls_id[0]),
                                        loc[0],loc[1],(loc[2]-loc[0]),
                                        (loc[3]-loc[1]), conf[0]};
        objectList.push_back(curObj);
    }
    return true;
}
/* Check that the custom function has been defined correctly */
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV5);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV4);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3Tiny);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2Tiny);
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloTLT);

View File

@ -0,0 +1,452 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>DeepStream object tracker API </b>
*
* @b Description: This file defines the DeepStream object tracker API.
*/
/**
* @defgroup ee_NvMOTracker Object Tracker API
*
* Defines the DeepStream object tracker API.
*
* @ingroup NvDsTrackerApi
* @{
*/
#ifndef _NVMOTRACKER_H_
#define _NVMOTRACKER_H_
#include <stdint.h>
#include <time.h>
#include "nvbufsurface.h"
#include "nvds_tracker_meta.h"
#ifdef __cplusplus
extern "C"
{
#endif
#define NVMOT_MAX_TRANSFORMS 4
typedef uint64_t NvMOTStreamId;
/**
* @brief Compute target flags.
*
* You can select multiple targets; the tracker will optimize across them.
* Combinations are allowed, e.g. NVTCOMP_GPU or NVTCOMP_PVA
*/
#define NVMOTCOMP_GPU 0x01 /**< Defines the "GPU" compute target flag. */
#define NVMOTCOMP_CPU 0x02 /**< Defines the "CPU" compute target flag. */
#define NVMOTCOMP_PVA 0x04 /**< Defines the "PVA" compute target flag. */
#define NVMOTCOMP_ANY 0xff /**< Defines a compute target flag for
"any target." */
#define NVMOTCOMP_DEFAULT NVMOTCOMP_ANY
/**< Defines the compute target flag for the
default target. */
/** @} */
typedef uint8_t NvMOTCompute;
/**
* @brief Holds a configuration for batches for an input transform
* (a scaling/color conversion).
*/
/* Referenced by NvMOTConfig::perTransformBatchConfig, one entry per input
   transform. */
typedef struct _NvMOTPerTransformBatchConfig
{
/** Holds the type of buffer. */
NvBufSurfaceMemType bufferType;
/** Holds the maximum width of each frame. */
uint32_t maxWidth;
/** Holds the maximum height of each frame. */
uint32_t maxHeight;
/** Holds the maximum pitch of each buffer. */
uint32_t maxPitch;
/** Holds the maximum size of the buffer in bytes. */
uint32_t maxSize;
/** Holds the color format: RGB, NV12 etc. */
uint32_t colorFormat;
} NvMOTPerTransformBatchConfig;
/**
* @brief Holds miscellaneous configurations.
*/
typedef struct _NvMOTMiscConfig
{
/** Holds the ID of the GPU to be used. */
uint32_t gpuId;
/** Holds the maximum number of objects to track per stream. 0 means
track an unlimited number of objects. */
uint32_t maxObjPerStream;
/** Holds the maximum number of objects to track per batch. 0 means
track an unlimited number of objects. */
uint32_t maxObjPerBatch;
/** Declares the logging-callback signature.
NOTE(review): this `typedef` declares a nested function-pointer TYPE,
not a struct member — the struct carries no logMsg field, despite the
original comment. Legal in C++, invalid in C. Left as-is to stay
layout/ABI-compatible with the vendor header; confirm against the
DeepStream SDK before changing. */
typedef void (*logMsg) (int logLevel, const char * format, ...);
} NvMOTMiscConfig;
/**
* @brief Holds a tracker configuration.
*
* Holds configuration options for the tracker, applied to the whole context.
*
* @note This structure must be deep-copied to be passed to a component that
* is to use it persistently.
*/
/* Passed to NvMOT_Init(); see also NvMOTConfigResponse. */
typedef struct _NvMOTConfig
{
/** Holds the compute target. @see NvMOTCompute. */
NvMOTCompute computeConfig;
/** Holds the maximum number of streams in a batch. */
uint32_t maxStreams;
/** Holds the number of \ref NvMOTPerTransformBatchConfig entries in
@a perTransformBatchConfig. */
uint8_t numTransforms;
/** Holds a pointer to a list of @a numTransform batch configurations,
one per transform, including type and resolution. */
NvMOTPerTransformBatchConfig *perTransformBatchConfig;
/** Holds miscellaneous configurations. */
NvMOTMiscConfig miscConfig;
/** Holds the length of the string at @a customConfigFilePath. */
uint16_t customConfigFilePathSize;
/** A pointer to the pathname of the tracker's custom configuration file.
A null-terminated string. */
char* customConfigFilePath;
} NvMOTConfig;
/**
* @brief Defines configuration request return codes.
*/
typedef enum
{
NvMOTConfigStatus_OK,          /**< Configuration request accepted. */
NvMOTConfigStatus_Error,       /**< Processing failed with an error. */
NvMOTConfigStatus_Invalid,     /**< Configuration item is invalid. */
NvMOTConfigStatus_Unsupported  /**< Configuration item is not supported. */
} NvMOTConfigStatus;
/**
* @brief Holds a tracker's configuration status.
*
* Holds the status of a configuration request; includes both summary and
* per-configuration status.
*/
typedef struct _NvMOTConfigResponse
{
/** Holds the summary status of the entire configuration request. */
NvMOTConfigStatus summaryStatus;
/** Holds the compute target request status
(see NvMOTConfig::computeConfig). */
NvMOTConfigStatus computeStatus;
/** Holds the transform batch configuration request status:
summary status for all transforms. */
NvMOTConfigStatus transformBatchStatus;
/** Holds the status of the miscellaneous configurations. */
NvMOTConfigStatus miscConfigStatus;
/** Holds the status of the custom configurations. */
NvMOTConfigStatus customConfigStatus;
} NvMOTConfigResponse;
/**
* @brief Defines generic status codes for tracking operations.
*/
typedef enum
{
NvMOTStatus_OK,           /**< Operation completed successfully. */
NvMOTStatus_Error,        /**< Operation failed. */
NvMOTStatus_Invalid_Path  /**< A supplied path (e.g. config file) is invalid. */
} NvMOTStatus;
/**
* @brief Holds the definition of a rectangle.
*/
/* Coordinates are in pixels; for tracker output they are scaled to the
   resolution of the first input transform buffer (see NvMOT_Process). */
typedef struct _NvMOTRect
{
/** Holds the left edge position of the object bounding box, in pixels. */
float x;
/** Holds the top edge position of the object bounding box, in pixels. */
float y;
/** Holds the width of the bounding box, in pixels. */
float width;
/** Holds the height of the bounding box, in pixels. */
float height;
} NvMOTRect;
/**
* @brief Holds information about an object to be tracked.
*
* NvMOT creates an instance of this structure for each tracked object.
*/
typedef struct _NvMOTObjToTrack
{
/** Holds the class of the object. */
uint16_t classId;
/** Holds the bounding box of the object. */
NvMOTRect bbox;
/** Holds the detection confidence of the object. */
float confidence;
/** Holds a Boolean which is true if NvMOT is to track this object. */
bool doTracking;
/** Holds a pointer to client data associated with the object; opaque to
the tracker (see NvMOTTrackedObj::associatedObjectIn). */
void *pPreservedData;
} NvMOTObjToTrack;
/**
* @brief Holds a list of objects.
*/
typedef struct _NvMOTObjToTrackList
{
/** Holds a Boolean which is true if detection was done on this frame
even if the list of objects to track is empty. */
bool detectionDone;
/** Holds a pointer to a list or array of object information blocks. */
NvMOTObjToTrack* list;
/** Holds the number of blocks allocated for the list (capacity). */
uint32_t numAllocated;
/** Holds the number of populated blocks in the list. */
uint32_t numFilled;
} NvMOTObjToTrackList;
/**
* @brief Holds a frame containing the image and objects to be tracked.
*
* @note @a numBuffers is supposed to be less than or equal to
* @a numTransforms in @ref NvMOTConfig.
* @note The metadata in the NvBufSurfaceParams structures
* which @a bufferList points to must be checked with the parameters
* specified in @a perTransformBatchConfig in NvMOTConfig.
*/
typedef struct _NvMOTFrame
{
/** Holds the stream ID of the stream source for this frame. */
NvMOTStreamId streamID;
/** Holds the sequential frame number that identifies the frame
within the stream. */
uint32_t frameNum;
/** Holds the timestamp of the frame at the time of capture. */
time_t timeStamp;
/** Holds a Boolean which is true if the timestamp value is properly
populated. */
bool timeStampValid;
/** Holds a Boolean which is true if objects in this frame are to be
tracked. */
bool doTracking;
/** Holds a Boolean which is true to reset tracking for the stream. */
bool reset;
/** Holds the number of entries in @a bufferList. */
uint8_t numBuffers;
/** Holds a pointer to an array of pointers to buffer parameter
structures (one per configured transform; see
NvMOTConfig::perTransformBatchConfig). */
NvBufSurfaceParams** bufferList;
/** Holds a list of objects in this frame which are to be tracked.
Boundary boxes are scaled for the first buffer configuration. */
NvMOTObjToTrackList objectsIn;
} NvMOTFrame;
/**
* @brief Holds information about each tracked object.
*/
typedef struct _NvMOTTrackedObj
{
/** Holds the class ID of the object to be tracked. */
uint16_t classId;
/** Holds a unique ID for the object, assigned by the tracker. */
uint64_t trackingId;
/** Holds the bounding box. */
NvMOTRect bbox;
/** Holds the tracking confidence of the object. */
float confidence;
/** Holds the track length in frames. */
uint32_t age;
/** Holds a pointer to the associated input object, if there is one. */
NvMOTObjToTrack *associatedObjectIn;
/** Reserved space (purpose not documented here — treat as opaque). */
uint8_t reserved[128];
} NvMOTTrackedObj;
/**
* @brief Holds a list of tracked objects.
*/
typedef struct _NvMOTTrackedObjList
{
/** Holds the stream ID of the stream associated with objects in the list.*/
NvMOTStreamId streamID;
/** Holds the frame number for objects in the list. */
uint32_t frameNum;
/** Holds a Boolean which is true if this entry in the batch is valid
(see NvMOTTrackedObjBatch). */
bool valid;
/** Holds a pointer to a list or array of object information blocks. */
NvMOTTrackedObj* list;
/** Holds the number of blocks allocated for the list (capacity). */
uint32_t numAllocated;
/** Holds the number of populated blocks in the list. */
uint32_t numFilled;
} NvMOTTrackedObjList;
/**
* @brief Holds a batch of lists of tracked objects.
*/
typedef struct _NvMOTTrackedObjBatch
{
/** Holds a pointer to an array of object lists (NvMOTTrackedObjList). */
NvMOTTrackedObjList *list;
/** Holds the number of blocks allocated for the list (capacity). */
uint32_t numAllocated;
/** Holds the number of filled blocks in the list. */
uint32_t numFilled;
} NvMOTTrackedObjBatch;
/**
* @brief Holds parameters for processing a batch.
*
* @see NvMOTProcessFrame.
*/
/* Batch descriptor passed to NvMOT_Process(). */
typedef struct _NvMOTProcessParams
{
uint32_t numFrames; /**< Holds the number of frames in the batch. */
NvMOTFrame *frameList; /**< Holds a pointer to an array of frame data. */
} NvMOTProcessParams;
/**
 * @brief Holds the tracker library's capability/requirement answers.
 * NOTE(review): purpose inferred from field names; confirm against the
 * DeepStream NvMOT query API documentation.
 */
typedef struct _NvMOTQuery
{
/** Holds flags for supported compute targets. @see NvMOTCompute. */
NvMOTCompute computeConfig;
/** Holds the number of \ref NvMOTPerTransformBatchConfig entries
in perTransformBatchConfig. */
uint8_t numTransforms;
/** Holds the color formats for input buffers; a required value. */
NvBufSurfaceColorFormat colorFormats[NVMOT_MAX_TRANSFORMS];
/** Holds the preferred memory type for input buffers. */
NvBufSurfaceMemType memType;
/** Holds a Boolean which is true if batch processing is supported. */
bool supportBatchProcessing;
/** Holds a Boolean which is true if outputing past frame is supported. */
bool supportPastFrame;
} NvMOTQuery;
/**
 * @brief Holds an opaque context handle.
 */
struct NvMOTContext;
typedef struct NvMOTContext* NvMOTContextHandle;
/**
 * @brief Initializes a tracking context for a batch of one or more image
 * streams.
 *
 * If successful, the context is configured as specified by @a pConfigIn.
 *
 * @param [in] pConfigIn A pointer to a structure specifying
 * the configuration.
 * @param [out] pContextHandle A pointer to a handle for the stream context.
 * The stream context is created and owned
 * by the tracker. The returned context handle
 * must be included in
 * all subsequent calls for the specified stream.
 * @param [out] pConfigResponse A pointer to a structure that describes the
 * operation's status.
 * @return The outcome of the initialization attempt.
 */
NvMOTStatus NvMOT_Init(NvMOTConfig *pConfigIn,
NvMOTContextHandle *pContextHandle,
NvMOTConfigResponse *pConfigResponse);
/**
 * @brief Deinitializes a stream context.
 *
 * The specified context is retired and may not be used again.
 *
 * @param contextHandle The handle for the stream context to be retired.
 */
void NvMOT_DeInit(NvMOTContextHandle contextHandle);
/**
 * @brief Processes a batch.
 *
 * Given a context and a batch of frames, processes the batch as the current
 * frames in their respective streams. Once processed, each frame becomes part
 * of the history and the previous frame in its stream.
 *
 * @param [in] contextHandle A context handle obtained from NvMOT_Init().
 * @param [in] pParams A pointer to parameters for the batch
 * to be processed.
 * @param [out] pTrackedObjectsBatch
 * A pointer to a batch of lists of tracked object
 * slots to be filled by the tracker. The batch is
 * allocated by the client. Bounding boxes are
 * scaled to the resolution of the first input
 * image transform buffer.
 * @return The status of batch processing.
 */
NvMOTStatus NvMOT_Process(NvMOTContextHandle contextHandle,
NvMOTProcessParams *pParams,
NvMOTTrackedObjBatch *pTrackedObjectsBatch);
/**
 * @brief Processes the past-frame data in the low-level tracker library and
 * retrieves it.
 *
 * Given a context and a batch of frame(s), processes the past-frame data of
 * each tracked object stored in the low-level tracker library, puts it into
 * the past-frame data structure, and retrieves it.
 *
 * @param [in] contextHandle The context handle obtained from NvMOT_Init().
 * @param [in] pParams Pointer to parameters for the batch of frames with the available stream ID.
 * @param [out] pPastFrameObjBatch Batch of lists of tracked objects that are stored by the low-level tracker in the past frames.
 * Bounding boxes are scaled to the resolution of the first input image transform buffer.
 * @return Status of batch processing.
 */
NvMOTStatus NvMOT_ProcessPast(NvMOTContextHandle contextHandle,
NvMOTProcessParams *pParams,
NvDsPastFrameObjBatch *pPastFrameObjBatch);
/**
 * @brief Queries the tracker library's capabilities and requirements.
 *
 * Answers a query for this tracker library's capabilities and requirements.
 * The tracker's custom config file is provided for optional consultation.
 *
 * @param [in] customConfigFilePathSize Length of the custom configuration
 * file's pathname.
 * @param [in] pCustomConfigFilePath A pointer to the custom configuration
 * file's pathname.
 * @param [out] pQuery A pointer to a query structure to be
 * filled by the tracker.
 * @return Status of the query.
 */
NvMOTStatus NvMOT_Query(uint16_t customConfigFilePathSize, char* pCustomConfigFilePath, NvMOTQuery *pQuery);
/**
 * @brief Removes streams from a batch.
 *
 * An optional function used in batch processing mode only. It notifies
 * the tracker library that a stream has been removed,
 * and will not be present in future batches. Any per-stream resources
 * associated with the removed stream may be freed at this time.
 *
 * This function may be called only when all processing is quiesced.
 *
 * @param [in] contextHandle The context handle obtained from NvMOT_Init().
 * @param [in] streamIdMask A mask for finding streams to remove.
 * The function reports removal of all streams
 * where (streamId & streamIdMask) == streamIdMask.
 */
void NvMOT_RemoveStreams(NvMOTContextHandle contextHandle, NvMOTStreamId streamIdMask);
/** @} */ // end of API group
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,297 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: On-Screen Display API</b>
*
* This file defines the NvOSD library, used to draw rectangles and text
* over the frame.
*/
/**
* @defgroup NvDsOsdApi On-Screen Display API
*
* Defines the functions of On-screen Display APIs
*
*/
/**
* @defgroup ee_nvosd_api_group On-Screen Display API
* Defines the NvOSD library to be used to draw rectangles and text
* over the frame.
* @ingroup NvDsOsdApi
* @{
*/
#ifndef __NVLL_OSD_API_DEFS__
#define __NVLL_OSD_API_DEFS__
#include "nvll_osd_struct.h"
#include "nvbufsurface.h"
#define NVOSD_PRINT_E(f_, ...) \
printf("libnvosd (%d):(ERROR) : " f_, __LINE__, ##__VA_ARGS__)
#define MAX_IN_BUF 16
#define MAX_BORDER_WIDTH 32
#ifdef __cplusplus
extern "C"
{
#endif
/** Holds an opaque handle to an NvOSD context, created by
 nvll_osd_create_context(). */
typedef void * NvOSDCtxHandle;
/**
 * Holds information about the text in a frame.
 */
typedef struct _NvOSD_FrameTextParams
{
/** Holds a pointer to a buffer containing a frame. */
NvBufSurfaceParams *buf_ptr;
/** Holds the OSD mode to be used for processing. */
NvOSD_Mode mode;
/** Holds the number of strings. */
int num_strings;
/** Holds the strings' text parameters. */
NvOSD_TextParams *text_params_list;
} NvOSD_FrameTextParams;
/** Holds information about the rectangles in a frame. */
typedef struct _NvOSD_FrameRectParams
{
/** Holds a pointer to the buffer containing the frame. */
NvBufSurfaceParams *buf_ptr;
/** Holds the OSD mode to be used for processing. */
NvOSD_Mode mode;
/** Holds the number of rectangles. */
int num_rects;
/** Holds the rectangles' parameters. */
NvOSD_RectParams *rect_params_list;
} NvOSD_FrameRectParams;
/** Holds information about the segment masks in a frame. */
typedef struct _NvOSD_FrameSegmentMaskParams
{
/** Holds a pointer to the buffer containing the frame. */
NvBufSurfaceParams *buf_ptr;
/** Holds the OSD mode to be used for processing. */
NvOSD_Mode mode;
/** Holds the number of segment masks. */
int num_segments;
/** Holds the bounding-box parameters associated with each segment. */
NvOSD_RectParams *rect_params_list;
/** Holds the mask parameters for each segment. */
NvOSD_MaskParams *mask_params_list;
} NvOSD_FrameSegmentMaskParams;
/** Holds information about the lines in a frame. */
typedef struct _NvOSD_FrameLineParams
{
/** Holds a pointer to the buffer containing the frame. */
NvBufSurfaceParams *buf_ptr;
/** Holds the OSD mode to be used for processing. */
NvOSD_Mode mode;
/** Holds the number of lines. */
int num_lines;
/** Holds the lines' parameters. */
NvOSD_LineParams *line_params_list;
} NvOSD_FrameLineParams;
/** Holds information about the arrows in a frame. */
typedef struct _NvOSD_FrameArrowParams
{
/** Holds a pointer to the buffer containing the frame. */
NvBufSurfaceParams *buf_ptr;
/** Holds the OSD mode to be used for processing. */
NvOSD_Mode mode;
/** Holds the number of arrows. */
int num_arrows;
/** Holds the parameters of the arrows. */
NvOSD_ArrowParams *arrow_params_list;
} NvOSD_FrameArrowParams;
/** Holds information about the circles in a frame. */
typedef struct _NvOSD_FrameCircleParams
{
/** Holds a pointer to the buffer containing the frame. */
NvBufSurfaceParams *buf_ptr;
/** Holds the OSD mode to be used for processing. */
NvOSD_Mode mode;
/** Holds the number of circles. */
int num_circles;
/** Holds the parameters of the circles. */
NvOSD_CircleParams *circle_params_list;
} NvOSD_FrameCircleParams;
/**
 * Creates an NvOSD context.
 *
 * @returns A handle for an NvOSD context if successful, or NULL otherwise.
 */
NvOSDCtxHandle nvll_osd_create_context(void);
/**
 * Destroys an NvOSD context.
 *
 * @param[in] nvosd_ctx A handle for the NvOSD context to be destroyed.
 */
void nvll_osd_destroy_context(NvOSDCtxHandle nvosd_ctx);
/**
 * \brief Sets clock parameters for a given context.
 *
 * The clock is overlayed when nvll_osd_put_text() is called.
 * If no other text is to be overlayed, %nvll_osd_put_text() must be called with
 * @a num_strings as 0 and @a text_params_list as NULL.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] clk_params A pointer to a structure for the clock
 * to be overlayed; NULL to disable the clock.
 */
void nvll_osd_set_clock_params(NvOSDCtxHandle nvosd_ctx, NvOSD_TextParams *clk_params);
/**
 * \brief Overlays clock and given text at a given location in a buffer.
 *
 * To overlay the clock, you must call nvll_osd_set_clock_params().
 * You must also ensure that the length of @a text_params_list is at least
 * @ref NvOSD_FrameTextParams::num_strings.
 *
 * @note Currently only the NvOSD_Mode value @ref MODE_CPU is supported.
 * Specifying other modes has no effect.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] frame_text_params A pointer to a structure containing
 * information about the text strings to be overlayed.
 *
 * @returns 0 for success, or -1 for failure.
 */
int nvll_osd_put_text(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameTextParams *frame_text_params);
/**
 * \brief Overlays segment masks at a given location in a buffer.
 *
 * You must ensure that the length of @a mask_params_list is at least
 * @a num_segments.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] frame_mask_params A pointer to the FrameSegmentMaskParams struct
 * containing mask information to be overlayed.
 *
 * @returns 0 for success, -1 for failure.
 */
int nvll_osd_draw_segment_masks(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameSegmentMaskParams *frame_mask_params);
/**
 * \brief Overlays boxes at a given location in a buffer.
 *
 * Boxes can be configured with:
 * a. Border only:
 * You must set @a border_width and set
 * @a has_bg_color to 0 for the given box.
 * b. Border and background color
 * To draw boxes with border and background color, you must set @a
 * border_width and set @a has_bg_color to 1, and specify background color
 * parameters for the given box.
 * c. Solid fill acting as mask region
 * To draw boxes with solid fill acting as mask region, you must set @a
 * border_width to 0 and @a has_bg_color to 1 for the given box.
 *
 *
 * You must ensure that the length of @a rect_params_list is at least
 * @a num_rects.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] frame_rect_params A pointer to the FrameRectParams struct
 * containing rectangles information to be overlayed.
 *
 * @returns 0 for success, -1 for failure.
 */
int nvll_osd_draw_rectangles(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameRectParams *frame_rect_params);
/**
 * Overlays lines on the buffer.
 *
 * The length of @a line_params_list must be equal to @a num_lines.
 * The client is responsible for allocating this array.
 *
 * @note Currently only MODE_CPU is supported. Specifying other modes will have
 * no effect.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] frame_line_params A pointer to the FrameLineParams struct
 * containing line information to be overlayed.
 *
 * @returns 0 if successful, or -1 otherwise.
 */
int nvll_osd_draw_lines(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameLineParams *frame_line_params);
/**
 * Overlays arrows on the buffer.
 *
 * The length of @a arrow_params_list must equal @a num_arrows.
 * The client is responsible for allocating this array.
 *
 * @note Currently only @ref MODE_CPU is supported. Specifying other modes has no effect.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] frame_arrow_params A pointer to the FrameArrowParams struct
 * containing arrow information to be overlayed.
 *
 * @returns 0 if successful, or -1 otherwise.
 */
int nvll_osd_draw_arrows(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameArrowParams *frame_arrow_params);
/**
 * Overlays circles on the buffer.
 *
 * You must ensure that the length of @a circle_params_list equals @a num_circles.
 *
 * @note Currently only @ref MODE_CPU is supported. Specifying other modes has no effect.
 *
 * @param[in] nvosd_ctx A handle for an NvOSD context.
 * @param[in] frame_circle_params A pointer to the FrameCircleParams struct
 * containing circle information to be overlayed.
 *
 * @returns 0 if successful, or -1 otherwise.
 */
int nvll_osd_draw_circles(NvOSDCtxHandle nvosd_ctx, NvOSD_FrameCircleParams *frame_circle_params);
/**
 * Sets the resolution of the frames on which the NvOSDContext is to operate.
 *
 * @param[in] nvosd_ctx A handle for the NvOSD context.
 * @param[in] width Width of the input frames.
 * @param[in] height Height of the input frames.
 *
 * @returns NOTE(review): declared to return void* although previously
 * documented as returning 0/-1 — confirm the actual return contract
 * against the library implementation.
 */
void *nvll_osd_set_params (NvOSDCtxHandle nvosd_ctx, int width, int height);
/**
 * Initializes colors for HW based blending.
 *
 * Applicable ONLY for Jetson.
 *
 * @param[in] nvosd_ctx A pointer to NvOSD context.
 * @param[in] color_info A pointer to the Color_info struct
 * containing color information.
 * @param[in] num_classes Number of classes.
 *
 * @returns NOTE(review): declared to return int although previously
 * documented as returning "a pointer to internally allocated Host
 * Memory" — confirm the actual return contract.
 */
int nvll_osd_init_colors_for_hw_blend(void *nvosd_ctx, NvOSD_Color_info * color_info, int num_classes);
#ifdef __cplusplus
}
#endif
/** @} */
#endif

View File

@ -0,0 +1,251 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/**
* @file
* <b>NVIDIA Multimedia Utilities: On-Screen Display Manager</b>
*
* This file defines the NvOSD library used to draw rectangles and text
* over the frame.
*/
/**
* @defgroup ee_nvosd_group On-Screen Display Manager
* Defines the NvOSD library to be used to draw rectangles and text
* over the frame.
* @ingroup NvDsOsdApi
* @{
*/
#ifndef __NVLL_OSD_STRUCT_DEFS__
#define __NVLL_OSD_STRUCT_DEFS__
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
{
#endif
/**
 * Defines modes used to overlay boxes and text.
 */
typedef enum {
MODE_CPU, /**< Specifies using the CPU for OSD processing.
Works with RGBA data only */
MODE_GPU, /**< Specifies using the GPU for OSD processing.
Currently not implemented. */
MODE_HW /**< Specifies the NVIDIA hardware engine
for rectangle drawing and masking.
This mode works with both YUV and RGB data.
It does not consider alpha parameter.
Not applicable to drawing text. */
} NvOSD_Mode;
/**
 * Specifies arrow head positions.
 */
typedef enum
{
/** Specifies an arrow head only at start = 0. */
START_HEAD,
/** Specifies an arrow head only at end = 1. */
END_HEAD,
/** Specifies arrow heads at both start and end = 2. */
BOTH_HEAD
} NvOSD_Arrow_Head_Direction;
/**
 * Holds unclipped bounding box coordinates of the object.
 */
typedef struct _NvBbox_Coords {
float left; /**< Holds the box's left coordinate
in pixels. */
float top; /**< Holds the box's top coordinate
in pixels. */
float width; /**< Holds the box's width in pixels. */
float height; /**< Holds the box's height in pixels. */
} NvBbox_Coords;
/**
 * Holds the color parameters of the box or text to be overlayed.
 */
typedef struct _NvOSD_ColorParams {
double red; /**< Holds the red component of color.
Value must be in the range 0.0-1.0. */
double green; /**< Holds the green component of color.
Value must be in the range 0.0-1.0.*/
double blue; /**< Holds the blue component of color.
Value must be in the range 0.0-1.0.*/
double alpha; /**< Holds the alpha component of color.
Value must be in the range 0.0-1.0.*/
} NvOSD_ColorParams;
/**
 * Holds the font parameters of the text to be overlayed.
 */
typedef struct _NvOSD_FontParams {
char * font_name; /**< Holds a pointer to the string containing
the font name. To display a list of
supported fonts, run the fc-list command. */
// char font_name[64]; /**< Holds a pointer to a string containing
// the font name. */
unsigned int font_size; /**< Holds the size of the font. */
NvOSD_ColorParams font_color; /**< Holds the font color. */
} NvOSD_FontParams;
/**
 * Holds parameters of text to be overlayed.
 */
typedef struct _NvOSD_TextParams {
char * display_text; /**< Holds the text to be overlayed. */
unsigned int x_offset; /**< Holds the text's horizontal offset from
the top left pixel of the frame. */
unsigned int y_offset; /**< Holds the text's vertical offset from the
top left pixel of the frame. */
NvOSD_FontParams font_params; /**< Holds the font parameters of the text
to be overlaid. */
int set_bg_clr; /**< Holds a Boolean; true if the text has a
background color. */
NvOSD_ColorParams text_bg_clr;/**< Holds the text's background color, if
specified. */
} NvOSD_TextParams;
/**
 * Associates a color with a numeric class/color identifier.
 */
typedef struct _NvOSD_Color_info {
int id; /**< Holds the color identifier. */
NvOSD_ColorParams color; /**< Holds the color value for this identifier. */
}NvOSD_Color_info;
/**
 * Holds the box parameters of the box to be overlayed.
 */
typedef struct _NvOSD_RectParams {
float left; /**< Holds the box's left coordinate
in pixels. */
float top; /**< Holds the box's top coordinate
in pixels. */
float width; /**< Holds the box's width in pixels. */
float height; /**< Holds the box's height in pixels. */
unsigned int border_width; /**< Holds the box's border width in pixels. */
NvOSD_ColorParams border_color;
/**< Holds the box's border color. */
unsigned int has_bg_color; /**< Holds a Boolean; true if the box has a
background color. */
unsigned int reserved; /**< Holds a field reserved for future use. */
NvOSD_ColorParams bg_color; /**< Holds the box's background color. */
int has_color_info; /**< Holds a Boolean; true if color_id is valid. */
int color_id; /**< Holds the color identifier; see NvOSD_Color_info. */
} NvOSD_RectParams;
/**
 * Holds the mask parameters of the segment to be overlayed
 */
typedef struct _NvOSD_MaskParams {
float *data; /**< Holds the mask data. */
unsigned int size; /**< Holds the mask size. */
float threshold; /**< Holds the threshold for binarization. */
unsigned int width; /**< Holds the mask width. */
unsigned int height; /**< Holds the mask height. */
} NvOSD_MaskParams;
/**
 * Holds the parameters of a line to be overlayed.
 */
typedef struct _NvOSD_LineParams {
unsigned int x1; /**< Holds the line's start point x coordinate
in pixels. */
unsigned int y1; /**< Holds the line's start point y coordinate
in pixels. */
unsigned int x2; /**< Holds the line's end point x coordinate in pixels. */
unsigned int y2; /**< Holds the line's end point y coordinate in pixels. */
unsigned int line_width; /**< Holds the line's width in pixels. */
NvOSD_ColorParams line_color; /**< Holds the line's color. */
} NvOSD_LineParams;
/**
 * Holds arrow parameters to be overlaid.
 */
typedef struct _NvOSD_ArrowParams {
unsigned int x1; /**< Holds the start horizontal coordinate in pixels. */
unsigned int y1; /**< Holds the start vertical coordinate in pixels. */
unsigned int x2; /**< Holds the end horizontal coordinate in pixels. */
unsigned int y2; /**< Holds the end vertical coordinate in pixels. */
unsigned int arrow_width; /**< Holds the arrow shaft width in pixels. */
NvOSD_Arrow_Head_Direction arrow_head;
/**< Holds the arrowhead position. */
NvOSD_ColorParams arrow_color;
/**< Holds color parameters of the arrow box. */
unsigned int reserved; /**< Reserved for future use; currently
for internal use only. */
} NvOSD_ArrowParams;
/**
 * Holds circle parameters to be overlayed.
 */
typedef struct _NvOSD_CircleParams {
unsigned int xc; /**< Holds the center's horizontal coordinate in pixels. */
unsigned int yc; /**< Holds the center's vertical coordinate in pixels. */
unsigned int radius; /**< Holds the radius of circle in pixels. */
NvOSD_ColorParams circle_color;
/**< Holds the color parameters of the circle. */
unsigned int has_bg_color; /**< Holds a Boolean value indicating whether
the circle has a background color. */
NvOSD_ColorParams bg_color; /**< Holds the circle's background color. */
unsigned int reserved; /**< Reserved for future use; currently
for internal use only. */
} NvOSD_CircleParams;
#ifdef __cplusplus
}
#endif
/** @} */
#endif

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/*
* This file defines the NvMsgBroker interface.
* The interfaces is used by applications to send and receive
* messages from remote entities and services to deliver events, allow
* configuration of settings etc.
*/
#ifndef __NV_MSGBROKER_H__
#define __NV_MSGBROKER_H__
#ifdef __cplusplus
extern "C"
{
#endif
#define NV_MSGBROKER_VERSION "1.0"
#define LOG_CAT "DSLOG:NV_MSGBROKER"
/*
 * Defines status codes for operations in the NvMsgBroker interface.
 */
typedef enum {
NV_MSGBROKER_API_OK,
NV_MSGBROKER_API_ERR,
NV_MSGBROKER_API_NOT_SUPPORTED
} NvMsgBrokerErrorType;
/*
 * Defines the structure of a client message packet.
 */
typedef struct {
/* Topic the message is published to. */
char *topic;
/* Raw message payload. */
void *payload;
/* Length of payload in bytes. */
size_t payload_len;
} NvMsgBrokerClientMsg;
/*
 * Opaque handle used in the NvMsgBroker API interface.
 */
typedef void *NvMsgBrokerClientHandle;
/** Connect callback method registered during connect
 *
 * @param[in] h_ptr Msgbroker Connection handle
 * @param[in] status Connection status
 */
typedef void (*nv_msgbroker_connect_cb_t)(NvMsgBrokerClientHandle h_ptr, NvMsgBrokerErrorType status );
/** Send callback method registered during send_async
 * @param[in] user_ptr Pointer passed during send_async for context
 * @param[in] flag Completion status of the send operation
 */
typedef void (*nv_msgbroker_send_cb_t)(void *user_ptr, NvMsgBrokerErrorType flag);
/** Subscribe callback method registered during subscribe;
 * invoked when a message is consumed on a subscribed topic.
 * @param[in] flag Status of the received message
 * @param[in] msg Pointer to the consumed message payload
 * @param[in] msglen Length of the consumed message in bytes
 * @param[in] topic Topic the message was received on
 * @param[in] user_ptr Pointer passed during subscribe for context
 */
typedef void (*nv_msgbroker_subscribe_cb_t)(NvMsgBrokerErrorType flag, void *msg, int msglen, char *topic, void *user_ptr);
/** Connect to a remote entity by calling into msgbroker library
 *
 * @param[in] broker_conn_str A connection string (optional) with format
 * @param[in] broker_proto_lib Full Path to Message protocol adapter library
 * @param[in] connect_cb A pointer to a callback function for events associated with the connection.
 * @param[in] cfg Pathname of a configuration file to be passed to the message protocol adapter
 *
 * @return A connection handle.
 */
NvMsgBrokerClientHandle nv_msgbroker_connect(char *broker_conn_str, char *broker_proto_lib, nv_msgbroker_connect_cb_t connect_cb, char *cfg);
/* Send a message asynchronously based on non-blocking semantics
 * @param[in] h_ptr connection handle to Message Broker library
 * @param[in] message Message packet which has details of message, topic, priority ..etc
 * @param[in] cb callback to be invoked to notify status of send
 * @param[in] user_ctx pointer to pass to callback for context
 *
 * @return Completion status of send operation */
NvMsgBrokerErrorType nv_msgbroker_send_async (NvMsgBrokerClientHandle h_ptr, NvMsgBrokerClientMsg message, nv_msgbroker_send_cb_t cb, void *user_ctx);
/** Subscribe to a remote entity for receiving messages on particular topic(s)
 * @param[in] h_ptr Connection handle
 * @param[in] topics pointer to array of topics (cannot be NULL)
 * @param[in] num_topics number of topics
 * @param[in] cb A pointer to a callback function to forward consumed message
 * @param[in] user_ctx user ptr to be passed to callback for context
 * @return Completion status of the subscribe operation
 **/
NvMsgBrokerErrorType nv_msgbroker_subscribe(NvMsgBrokerClientHandle h_ptr, char ** topics, int num_topics, nv_msgbroker_subscribe_cb_t cb, void *user_ctx);
/* Disconnect call to notify msgbroker library for connection termination
 * @param[in] h_ptr Connection handle
 *
 * @return status of disconnect
 */
NvMsgBrokerErrorType nv_msgbroker_disconnect(NvMsgBrokerClientHandle h_ptr);
/* Version of Nvmsgbroker interface
 *
 * @return [out] version of Nvmsgbroker interface supported by msgbroker library in MAJOR.MINOR format
 */
char *nv_msgbroker_version(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,465 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "trt_utils.h"
#include <experimental/filesystem>
#include <fstream>
#include <iomanip>
#include <functional>
#include <algorithm>
#include <math.h>
#include "NvInferPlugin.h"
/** Removes leading whitespace characters from @p s in place. */
static void leftTrim(std::string& s)
{
    std::string::size_type firstKeep = 0;
    // Advance past the leading run of whitespace, then drop it in one erase.
    while (firstKeep < s.size() && isspace(s[firstKeep]))
        ++firstKeep;
    s.erase(0, firstKeep);
}
/** Removes trailing whitespace characters from @p s in place. */
static void rightTrim(std::string& s)
{
    std::string::size_type lastKeep = s.size();
    // Walk back over the trailing run of whitespace, then truncate.
    while (lastKeep > 0 && isspace(s[lastKeep - 1]))
        --lastKeep;
    s.erase(lastKeep);
}
/** Returns @p s with leading and trailing whitespace removed. */
std::string trim(std::string s)
{
    const auto notSpace = [](int ch) { return !isspace(ch); };
    // Strip the front, then the back, of the by-value copy and return it.
    s.erase(s.begin(), std::find_if(s.begin(), s.end(), notSpace));
    s.erase(std::find_if(s.rbegin(), s.rend(), notSpace).base(), s.end());
    return s;
}
/** Clamps @p val to the inclusive range [@p minVal, @p maxVal]. */
float clamp(const float val, const float minVal, const float maxVal)
{
    assert(minVal <= maxVal);
    // Conditionals mirror std::min(maxVal, std::max(minVal, val)) exactly,
    // including its behavior for unordered (NaN) comparisons.
    const float lowerBounded = (minVal < val) ? val : minVal;
    return (lowerBounded < maxVal) ? lowerBounded : maxVal;
}
bool fileExists(const std::string fileName, bool verbose)
{
if (!std::experimental::filesystem::exists(std::experimental::filesystem::path(fileName)))
{
if (verbose) std::cout << "File does not exist : " << fileName << std::endl;
return false;
}
return true;
}
/**
 * Loads raw float weights from a darknet-format .weights file.
 *
 * The darknet header (4 int32 values for yolov2; 5 for yolov3/yolov3-tiny/
 * yolov2-tiny) is skipped, then the remainder of the file is read as packed
 * little-endian 32-bit floats.
 *
 * @param weightsFilePath Path to the .weights file; must exist (asserted).
 * @param networkType One of "yolov2", "yolov3", "yolov3-tiny",
 * "yolov2-tiny"; anything else asserts.
 * @return All weight values in file order.
 */
std::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType)
{
assert(fileExists(weightsFilePath));
std::cout << "Loading pre-trained weights..." << std::endl;
std::ifstream file(weightsFilePath, std::ios_base::binary);
assert(file.good());
// NOTE(review): `line` is never used in this function.
std::string line;
if (networkType == "yolov2")
{
// Remove 4 int32 bytes of data from the stream belonging to the header
file.ignore(4 * 4);
}
else if ((networkType == "yolov3") || (networkType == "yolov3-tiny")
|| (networkType == "yolov2-tiny"))
{
// Remove 5 int32 bytes of data from the stream belonging to the header
file.ignore(4 * 5);
}
else
{
std::cout << "Invalid network type" << std::endl;
assert(0);
}
std::vector<float> weights;
char floatWeight[4];
// Read 4 bytes at a time and reinterpret each group as a float; the
// peek() check breaks out cleanly at end-of-file so the gcount assert
// only fires on a truncated (non-multiple-of-4) file.
while (!file.eof())
{
file.read(floatWeight, 4);
assert(file.gcount() == 4);
weights.push_back(*reinterpret_cast<float*>(floatWeight));
if (file.peek() == std::istream::traits_type::eof()) break;
}
std::cout << "Loading weights of " << networkType << " complete!"
<< std::endl;
std::cout << "Total Number of weights read : " << weights.size() << std::endl;
return weights;
}
/** Formats dims as " AAA x BBB x CCC" with each value right-aligned to width 4. */
std::string dimsToString(const nvinfer1::Dims d)
{
    assert(d.nbDims >= 1);
    std::stringstream out;
    // Emit the " x" separator before every element except the first.
    for (int i = 0; i < d.nbDims; ++i)
    {
        if (i != 0) out << " x";
        out << std::setw(4) << d.d[i];
    }
    return out.str();
}
/**
 * Prints the number of dimensions and the kind (spatial/channel/index/
 * sequence) of each dimension of @p d to stdout, ending with a newline.
 *
 * NOTE(review): relies on nvinfer1::Dims::type and DimensionType, which were
 * deprecated/removed in newer TensorRT releases — confirm the TensorRT
 * version this builds against.
 */
void displayDimType(const nvinfer1::Dims d)
{
std::cout << "(" << d.nbDims << ") ";
for (int i = 0; i < d.nbDims; ++i)
{
switch (d.type[i])
{
case nvinfer1::DimensionType::kSPATIAL: std::cout << "kSPATIAL "; break;
case nvinfer1::DimensionType::kCHANNEL: std::cout << "kCHANNEL "; break;
case nvinfer1::DimensionType::kINDEX: std::cout << "kINDEX "; break;
case nvinfer1::DimensionType::kSEQUENCE: std::cout << "kSEQUENCE "; break;
}
}
std::cout << std::endl;
}
/** Returns the channel count (dimension 0) of a 3-dimensional (CHW) tensor. */
int getNumChannels(nvinfer1::ITensor* t)
{
    const nvinfer1::Dims dims = t->getDimensions();
    // Callers are expected to pass CHW tensors only.
    assert(dims.nbDims == 3);
    return dims.d[0];
}
/**
 * Returns the element count of a 3-dimensional tensor (product of its dims).
 *
 * @param inputDims Dimensions of the tensor; must have exactly 3 dims
 * (asserted).
 * @return d[0] * d[1] * d[2] as an unsigned 64-bit value.
 */
uint64_t get3DTensorVolume(nvinfer1::Dims inputDims)
{
    assert(inputDims.nbDims == 3);
    // Widen before multiplying: Dims stores int dimensions, so the product
    // of three large dimensions could overflow int before the implicit
    // conversion to uint64_t in the original expression.
    return static_cast<uint64_t>(inputDims.d[0])
        * static_cast<uint64_t>(inputDims.d[1])
        * static_cast<uint64_t>(inputDims.d[2]);
}
/**
 * Adds a max-pooling layer described by a darknet cfg "maxpool" block to the
 * TensorRT network.
 *
 * @param layerIdx Index used to name the layer ("maxpool_<idx>").
 * @param block Parsed cfg block; must contain "size" and "stride"
 * (asserted).
 * @param input Input tensor for the pooling layer.
 * @param network Network definition the layer is added to.
 * @return The created pooling layer (asserted non-null).
 */
nvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,
nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network)
{
assert(block.at("type") == "maxpool");
assert(block.find("size") != block.end());
assert(block.find("stride") != block.end());
int size = std::stoi(block.at("size"));
int stride = std::stoi(block.at("stride"));
nvinfer1::IPoolingLayer* pool
= network->addPooling(*input, nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{size, size});
assert(pool);
std::string maxpoolLayerName = "maxpool_" + std::to_string(layerIdx);
pool->setStride(nvinfer1::DimsHW{stride, stride});
// SAME_UPPER padding matches darknet's maxpool output-size behavior.
pool->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
pool->setName(maxpoolLayerName.c_str());
return pool;
}
/**
 * Adds a convolution layer with linear activation (no batch-norm) described
 * by a darknet cfg "convolutional" block to the TensorRT network.
 *
 * Consumes `filters` bias values followed by
 * `filters * inputChannels * size * size` kernel values from @p weights,
 * advancing @p weightPtr past them.
 *
 * @param layerIdx Index used to name the layer ("conv_<idx>").
 * @param block Parsed cfg block; must have type "convolutional",
 * activation "linear", no "batch_normalize", and the
 * keys "filters"/"pad"/"size"/"stride" (all asserted).
 * @param weights Flat weight array loaded from the .weights file.
 * @param trtWeights Receives the allocated Weights structs so their
 * buffers can be freed after engine build.
 * @param weightPtr In/out cursor into @p weights.
 * @param inputChannels Channel count of @p input.
 * @param input Input tensor for the convolution.
 * @param network Network definition the layer is added to.
 * @return The created convolution layer (asserted non-null).
 */
nvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
int& inputChannels, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
assert(block.at("type") == "convolutional");
assert(block.find("batch_normalize") == block.end());
assert(block.at("activation") == "linear");
assert(block.find("filters") != block.end());
assert(block.find("pad") != block.end());
assert(block.find("size") != block.end());
assert(block.find("stride") != block.end());
int filters = std::stoi(block.at("filters"));
int padding = std::stoi(block.at("pad"));
int kernelSize = std::stoi(block.at("size"));
int stride = std::stoi(block.at("stride"));
// darknet "pad=1" means same-style padding of (k-1)/2, not 1 pixel.
int pad;
if (padding)
pad = (kernelSize - 1) / 2;
else
pad = 0;
// load the convolution layer bias
// NOTE: the raw arrays allocated below are intentionally not deleted here;
// ownership is recorded in trtWeights for cleanup after the engine is built.
nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, filters};
float* val = new float[filters];
for (int i = 0; i < filters; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
}
convBias.values = val;
trtWeights.push_back(convBias);
// load the convolutional layer weights
int size = filters * inputChannels * kernelSize * kernelSize;
nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};
val = new float[size];
for (int i = 0; i < size; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
}
convWt.values = val;
trtWeights.push_back(convWt);
nvinfer1::IConvolutionLayer* conv = network->addConvolution(
*input, filters, nvinfer1::DimsHW{kernelSize, kernelSize}, convWt, convBias);
assert(conv != nullptr);
std::string convLayerName = "conv_" + std::to_string(layerIdx);
conv->setName(convLayerName.c_str());
conv->setStride(nvinfer1::DimsHW{stride, stride});
conv->setPadding(nvinfer1::DimsHW{pad, pad});
return conv;
}
nvinfer1::ILayer* netAddConvBNLeaky(int layerIdx, std::map<std::string, std::string>& block,
                                    std::vector<float>& weights,
                                    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
                                    int& inputChannels, nvinfer1::ITensor* input,
                                    nvinfer1::INetworkDefinition* network)
{
    // Adds a darknet "convolutional" block with batch_normalize=1 and
    // activation=leaky as: convolution (bias-free) -> batch norm folded into
    // an IScaleLayer -> leaky ReLU (negative slope 0.1).
    //
    // Weights are consumed from `weights` starting at `weightPtr` in darknet
    // order: bn_bias[f], bn_weight[f], bn_running_mean[f], bn_running_var[f],
    // conv_kernel[f * inputChannels * k * k]; weightPtr is advanced past all
    // of them. Buffers pushed into trtWeights are new[]-allocated and owned
    // by the caller.
    assert(block.at("type") == "convolutional");
    assert(block.find("batch_normalize") != block.end());
    assert(block.at("batch_normalize") == "1");
    assert(block.at("activation") == "leaky");
    assert(block.find("filters") != block.end());
    assert(block.find("pad") != block.end());
    assert(block.find("size") != block.end());
    assert(block.find("stride") != block.end());
    // (The previous dead re-check of "batch_normalize" — always true after
    // the asserts above — has been removed.)

    int filters = std::stoi(block.at("filters"));
    int padding = std::stoi(block.at("pad"));
    int kernelSize = std::stoi(block.at("size"));
    int stride = std::stoi(block.at("stride"));
    // darknet pad=1 means "same"-style padding for odd kernel sizes
    int pad = padding ? (kernelSize - 1) / 2 : 0;

    /***** CONVOLUTION LAYER *****/
    /*****************************/
    // batch norm weights precede the conv kernel in the weights file
    // load BN biases (bn_biases)
    std::vector<float> bnBiases;
    for (int i = 0; i < filters; ++i)
    {
        bnBiases.push_back(weights[weightPtr]);
        weightPtr++;
    }
    // load BN weights (gamma)
    std::vector<float> bnWeights;
    for (int i = 0; i < filters; ++i)
    {
        bnWeights.push_back(weights[weightPtr]);
        weightPtr++;
    }
    // load BN running_mean
    std::vector<float> bnRunningMean;
    for (int i = 0; i < filters; ++i)
    {
        bnRunningMean.push_back(weights[weightPtr]);
        weightPtr++;
    }
    // load BN running_var — stored as sqrt(var + eps) so the scale/shift
    // math below can divide by the standard deviation directly
    // (1e-05 for numerical stability)
    std::vector<float> bnRunningVar;
    for (int i = 0; i < filters; ++i)
    {
        bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
        weightPtr++;
    }
    // load Conv layer weights (GKCRS)
    int size = filters * inputChannels * kernelSize * kernelSize;
    nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};
    float* val = new float[size];
    for (int i = 0; i < size; ++i)
    {
        val[i] = weights[weightPtr];
        weightPtr++;
    }
    convWt.values = val;
    trtWeights.push_back(convWt);
    // batch-normalized conv layers carry no bias of their own
    nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, 0};
    trtWeights.push_back(convBias);
    nvinfer1::IConvolutionLayer* conv = network->addConvolution(
        *input, filters, nvinfer1::DimsHW{kernelSize, kernelSize}, convWt, convBias);
    assert(conv != nullptr);
    std::string convLayerName = "conv_" + std::to_string(layerIdx);
    conv->setName(convLayerName.c_str());
    conv->setStride(nvinfer1::DimsHW{stride, stride});
    conv->setPadding(nvinfer1::DimsHW{pad, pad});

    /***** BATCHNORM LAYER *****/
    /***************************/
    // Fold BN into a per-channel affine transform: y = (x * scale + shift)^power
    //   scale = gamma / sqrt(var + eps)
    //   shift = beta - mean * gamma / sqrt(var + eps)
    //   power = 1
    size = filters;
    nvinfer1::Weights shift{nvinfer1::DataType::kFLOAT, nullptr, size};
    nvinfer1::Weights scale{nvinfer1::DataType::kFLOAT, nullptr, size};
    nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, size};
    float* shiftWt = new float[size];
    for (int i = 0; i < size; ++i)
    {
        shiftWt[i]
            = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
    }
    shift.values = shiftWt;
    float* scaleWt = new float[size];
    for (int i = 0; i < size; ++i)
    {
        scaleWt[i] = bnWeights.at(i) / bnRunningVar[i];
    }
    scale.values = scaleWt;
    float* powerWt = new float[size];
    for (int i = 0; i < size; ++i)
    {
        powerWt[i] = 1.0;
    }
    power.values = powerWt;
    trtWeights.push_back(shift);
    trtWeights.push_back(scale);
    trtWeights.push_back(power);
    // Add the batch norm as a channel-wise scale layer
    nvinfer1::IScaleLayer* bn = network->addScale(
        *conv->getOutput(0), nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
    assert(bn != nullptr);
    std::string bnLayerName = "batch_norm_" + std::to_string(layerIdx);
    bn->setName(bnLayerName.c_str());

    /***** ACTIVATION LAYER *****/
    /****************************/
    nvinfer1::ITensor* bnOutput = bn->getOutput(0);
    nvinfer1::IActivationLayer* leaky = network->addActivation(
        *bnOutput, nvinfer1::ActivationType::kLEAKY_RELU);
    // BUG FIX: the null check must happen before the layer is dereferenced
    // (the old code called setAlpha() first and asserted afterwards).
    assert(leaky != nullptr);
    leaky->setAlpha(0.1);
    std::string leakyLayerName = "leaky_" + std::to_string(layerIdx);
    leaky->setName(leakyLayerName.c_str());
    return leaky;
}
nvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,
                                 std::vector<float>& weights,
                                 std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,
                                 nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network)
{
    // Implements darknet "upsample" (nearest-neighbour, factor = stride) as a
    // pair of matrix multiplications with constant 0/1 expansion matrices:
    //   out = P * x * Q
    // where P (scale*h x w, w == h) duplicates each row `scale` times and
    // Q (h x scale*w) duplicates each column `scale` times. The feature map
    // must be square. `weights` is unused: upsampling has no learned params.
    assert(block.at("type") == "upsample");
    nvinfer1::Dims inDims = input->getDimensions();
    assert(inDims.nbDims == 3);
    assert(inDims.d[1] == inDims.d[2]);
    int h = inDims.d[1];
    int w = inDims.d[2];
    int scale = std::stoi(block.at("stride"));

    // --- row-expansion constant P, shape (1, scale*h, w) ---
    nvinfer1::Dims preDims{3,
        {1, scale * h, w},
        {nvinfer1::DimensionType::kCHANNEL, nvinfer1::DimensionType::kSPATIAL,
            nvinfer1::DimensionType::kSPATIAL}};
    int elems = scale * h * w;
    nvinfer1::Weights preMul{nvinfer1::DataType::kFLOAT, nullptr, elems};
    float* rowExpand = new float[elems];
    for (int idx = 0; idx < elems; ++idx)
    {
        int outRow = idx / w;  // row within the (scale*h x w) matrix
        int col = idx % w;     // column == index of the input row it selects
        rowExpand[idx] = (outRow / scale == col) ? 1.0f : 0.0f;
    }
    preMul.values = rowExpand;
    trtWeights.push_back(preMul);
    nvinfer1::IConstantLayer* preM = network->addConstant(preDims, preMul);
    assert(preM != nullptr);
    std::string preLayerName = "preMul_" + std::to_string(layerIdx);
    preM->setName(preLayerName.c_str());

    // --- column-expansion constant Q, shape (1, h, scale*w) ---
    nvinfer1::Dims postDims{3,
        {1, h, scale * w},
        {nvinfer1::DimensionType::kCHANNEL, nvinfer1::DimensionType::kSPATIAL,
            nvinfer1::DimensionType::kSPATIAL}};
    nvinfer1::Weights postMul{nvinfer1::DataType::kFLOAT, nullptr, elems};
    float* colExpand = new float[elems];
    for (int idx = 0; idx < elems; ++idx)
    {
        int row = idx / (scale * w);     // input row
        int outCol = idx % (scale * w);  // column in the expanded output
        colExpand[idx] = (outCol / scale == row) ? 1.0f : 0.0f;
    }
    postMul.values = colExpand;
    trtWeights.push_back(postMul);
    nvinfer1::IConstantLayer* post_m = network->addConstant(postDims, postMul);
    assert(post_m != nullptr);
    std::string postLayerName = "postMul_" + std::to_string(layerIdx);
    post_m->setName(postLayerName.c_str());

    // out = P * x, then (P * x) * Q
    nvinfer1::IMatrixMultiplyLayer* mm1
        = network->addMatrixMultiply(*preM->getOutput(0), nvinfer1::MatrixOperation::kNONE, *input,
            nvinfer1::MatrixOperation::kNONE);
    assert(mm1 != nullptr);
    std::string mm1LayerName = "mm1_" + std::to_string(layerIdx);
    mm1->setName(mm1LayerName.c_str());
    nvinfer1::IMatrixMultiplyLayer* mm2
        = network->addMatrixMultiply(*mm1->getOutput(0), nvinfer1::MatrixOperation::kNONE,
            *post_m->getOutput(0), nvinfer1::MatrixOperation::kNONE);
    assert(mm2 != nullptr);
    std::string mm2LayerName = "mm2_" + std::to_string(layerIdx);
    mm2->setName(mm2LayerName.c_str());
    return mm2;
}
void printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,
                    std::string layerOutput, std::string weightPtr)
{
    // Emits one row of the network-construction summary table.
    // Column widths: index(6) | layer(15) | input dims(20) | output dims(20)
    // | weight cursor(6); all columns left-aligned.
    std::ostream& os = std::cout;
    os << std::left;
    os << std::setw(6) << layerIndex;
    os << std::setw(15) << layerName;
    os << std::setw(20) << layerInput;
    os << std::setw(20) << layerOutput;
    os << std::setw(6) << weightPtr;
    os << std::endl;
}

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __TRT_UTILS_H__
#define __TRT_UTILS_H__
#include <set>
#include <map>
#include <string>
#include <vector>
#include <cassert>
#include <iostream>
#include <fstream>
#include "NvInfer.h"
#define UNUSED(expr) (void)(expr)
#define DIVUP(n, d) ((n) + (d)-1) / (d)
std::string trim(std::string s);
float clamp(const float val, const float minVal, const float maxVal);
bool fileExists(const std::string fileName, bool verbose = true);
std::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType);
std::string dimsToString(const nvinfer1::Dims d);
void displayDimType(const nvinfer1::Dims d);
int getNumChannels(nvinfer1::ITensor* t);
uint64_t get3DTensorVolume(nvinfer1::Dims inputDims);
// Helper functions to create yolo engine
nvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,
nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
nvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
int& inputChannels, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
nvinfer1::ILayer* netAddConvBNLeaky(int layerIdx, std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
int& inputChannels, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
nvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,
nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
void printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,
std::string layerOutput, std::string weightPtr);
#endif

Binary file not shown.

View File

@ -0,0 +1,458 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "yolo.h"
#include "yoloPlugins.h"
#include <fstream>
#include <iomanip>
#include <iterator>
// Stores the network description carried by `networkInfo`. Input dimensions
// start at zero here; parseConfigBlocks() fills them in later from the cfg
// file's [net] section.
Yolo::Yolo(const NetworkInfo& networkInfo)
    : m_NetworkType(networkInfo.networkType), // yolov3
      m_ConfigFilePath(networkInfo.configFilePath), // yolov3.cfg
      m_WtsFilePath(networkInfo.wtsFilePath), // yolov3.weights
      m_DeviceType(networkInfo.deviceType), // kDLA, kGPU
      m_InputBlobName(networkInfo.inputBlobName), // data
      m_InputH(0),
      m_InputW(0),
      m_InputC(0),
      m_InputSize(0)
{
    std::cout << "Build Yolo engine on " << networkInfo.configFilePath << std::endl;
}
// Releases the weight buffers accumulated in m_TrtWeights while building.
Yolo::~Yolo()
{
    destroyNetworkUtils();
}
// Builds a TensorRT engine from the darknet cfg/weights files.
// Returns nullptr on failure; the caller owns the returned engine.
nvinfer1::ICudaEngine *Yolo::createEngine (nvinfer1::IBuilder* builder)
{
    assert (builder);

    nvinfer1::INetworkDefinition *network = builder->createNetwork();

    // parseModel() loads the weights file itself, so there is no need to
    // pre-load it here (the previous version read the entire weights file
    // an extra time into unused locals and discarded the result).
    if (parseModel(*network) != NVDSINFER_SUCCESS) {
        std::cout << "Parse Model Failed ..." << std::endl;
        network->destroy();
        return nullptr;
    }

    // Build the engine
    std::cout << "Building the TensorRT Engine..." << std::endl;
    nvinfer1::ICudaEngine * engine = builder->buildCudaEngine(*network);
    if (engine) {
        std::cout << "Building complete!" << std::endl;
    } else {
        std::cerr << "Building engine failed!" << std::endl;
    }

    // The network definition is no longer needed once the engine is built.
    network->destroy();
    return engine;
}
NvDsInferStatus Yolo::parseModel(nvinfer1::INetworkDefinition& network) {
    // Resets previously allocated weight buffers, re-parses the darknet cfg,
    // loads the weights file, and populates `network` layer by layer.
    destroyNetworkUtils();

    std::cout << "ParseConfigFile:" << m_ConfigFilePath << std::endl;
    m_ConfigBlocks = parseConfigFile(m_ConfigFilePath);
    parseConfigBlocks();

    std::vector<float> darknetWeights = loadWeights(m_WtsFilePath, m_NetworkType);

    // build yolo network
    std::cout << "Building Yolo network..." << std::endl;
    const NvDsInferStatus result = buildYoloNetwork(darknetWeights, network);
    if (result == NVDSINFER_SUCCESS) {
        std::cout << "Building yolo network complete!" << std::endl;
    } else {
        std::cerr << "Building yolo network failed!" << std::endl;
    }
    return result;
}
// Translates the parsed darknet config blocks (m_ConfigBlocks) into TensorRT
// layers on `network`, consuming `weights` sequentially via weightPtr.
// Supported block types: net, convolutional, shortcut, yolo, region, reorg,
// route, upsample, maxpool. Every weight in the file must be consumed, and
// each detection head ("yolo"/"region") is marked as a network output.
NvDsInferStatus Yolo::buildYoloNetwork(
    std::vector<float>& weights, nvinfer1::INetworkDefinition& network) {
    int weightPtr = 0;
    int channels = m_InputC;
    // single FP32 CHW input tensor, dims from the cfg [net] section
    nvinfer1::ITensor* data =
        network.addInput(m_InputBlobName.c_str(), nvinfer1::DataType::kFLOAT,
            nvinfer1::DimsCHW{static_cast<int>(m_InputC),
                static_cast<int>(m_InputH), static_cast<int>(m_InputW)});
    assert(data != nullptr && data->getDimensions().nbDims > 0);
    nvinfer1::ITensor* previous = data;
    // tensorOutputs[k] is the output of the k-th emitted layer; shortcut and
    // route blocks index into it to wire skip connections.
    std::vector<nvinfer1::ITensor*> tensorOutputs;
    uint outputTensorCount = 0;
    // build the network using the network API
    for (uint i = 0; i < m_ConfigBlocks.size(); ++i) {
        // check if num. of channels is correct
        assert(getNumChannels(previous) == channels);
        std::string layerIndex = "(" + std::to_string(tensorOutputs.size()) + ")";
        if (m_ConfigBlocks.at(i).at("type") == "net") {
            // [net] produces no layer; just print the table header
            printLayerInfo("", "layer", " inp_size", " out_size", "weightPtr");
        } else if (m_ConfigBlocks.at(i).at("type") == "convolutional") {
            std::string inputVol = dimsToString(previous->getDimensions());
            nvinfer1::ILayer* out;
            std::string layerType;
            // check if batch_norm enabled
            if (m_ConfigBlocks.at(i).find("batch_normalize") !=
                m_ConfigBlocks.at(i).end()) {
                out = netAddConvBNLeaky(i, m_ConfigBlocks.at(i), weights,
                    m_TrtWeights, weightPtr, channels, previous, &network);
                layerType = "conv-bn-leaky";
            }
            else
            {
                out = netAddConvLinear(i, m_ConfigBlocks.at(i), weights,
                    m_TrtWeights, weightPtr, channels, previous, &network);
                layerType = "conv-linear";
            }
            previous = out->getOutput(0);
            assert(previous != nullptr);
            channels = getNumChannels(previous);
            std::string outputVol = dimsToString(previous->getDimensions());
            tensorOutputs.push_back(out->getOutput(0));
            printLayerInfo(layerIndex, layerType, inputVol, outputVol, std::to_string(weightPtr));
        } else if (m_ConfigBlocks.at(i).at("type") == "shortcut") {
            // residual add: previous layer + layer at relative offset "from"
            assert(m_ConfigBlocks.at(i).at("activation") == "linear");
            assert(m_ConfigBlocks.at(i).find("from") !=
                m_ConfigBlocks.at(i).end());
            int from = stoi(m_ConfigBlocks.at(i).at("from"));
            std::string inputVol = dimsToString(previous->getDimensions());
            // check if indexes are correct
            assert((i - 2 >= 0) && (i - 2 < tensorOutputs.size()));
            assert((i + from - 1 >= 0) && (i + from - 1 < tensorOutputs.size()));
            assert(i + from - 1 < i - 2);
            nvinfer1::IElementWiseLayer* ew = network.addElementWise(
                *tensorOutputs[i - 2], *tensorOutputs[i + from - 1],
                nvinfer1::ElementWiseOperation::kSUM);
            assert(ew != nullptr);
            std::string ewLayerName = "shortcut_" + std::to_string(i);
            ew->setName(ewLayerName.c_str());
            previous = ew->getOutput(0);
            assert(previous != nullptr);
            std::string outputVol = dimsToString(previous->getDimensions());
            tensorOutputs.push_back(ew->getOutput(0));
            printLayerInfo(layerIndex, "skip", inputVol, outputVol, " -");
        } else if (m_ConfigBlocks.at(i).at("type") == "yolo") {
            // detection head: record grid metadata and attach the custom
            // YoloLayerV3 plugin; its output becomes a network output blob
            nvinfer1::Dims prevTensorDims = previous->getDimensions();
            assert(prevTensorDims.d[1] == prevTensorDims.d[2]);
            TensorInfo& curYoloTensor = m_OutputTensors.at(outputTensorCount);
            curYoloTensor.gridSize = prevTensorDims.d[1];
            curYoloTensor.stride = m_InputW / curYoloTensor.gridSize;
            m_OutputTensors.at(outputTensorCount).volume = curYoloTensor.gridSize
                * curYoloTensor.gridSize
                * (curYoloTensor.numBBoxes * (5 + curYoloTensor.numClasses));
            std::string layerName = "yolo_" + std::to_string(i);
            curYoloTensor.blobName = layerName;
            nvinfer1::IPluginV2* yoloPlugin
                = new YoloLayerV3(m_OutputTensors.at(outputTensorCount).numBBoxes,
                    m_OutputTensors.at(outputTensorCount).numClasses,
                    m_OutputTensors.at(outputTensorCount).gridSize);
            assert(yoloPlugin != nullptr);
            nvinfer1::IPluginV2Layer* yolo =
                network.addPluginV2(&previous, 1, *yoloPlugin);
            assert(yolo != nullptr);
            yolo->setName(layerName.c_str());
            std::string inputVol = dimsToString(previous->getDimensions());
            previous = yolo->getOutput(0);
            assert(previous != nullptr);
            previous->setName(layerName.c_str());
            std::string outputVol = dimsToString(previous->getDimensions());
            network.markOutput(*previous);
            channels = getNumChannels(previous);
            tensorOutputs.push_back(yolo->getOutput(0));
            printLayerInfo(layerIndex, "yolo", inputVol, outputVol, std::to_string(weightPtr));
            ++outputTensorCount;
        } else if (m_ConfigBlocks.at(i).at("type") == "region") {
            // yolov2-style head via TensorRT's built-in Region plugin
            nvinfer1::Dims prevTensorDims = previous->getDimensions();
            assert(prevTensorDims.d[1] == prevTensorDims.d[2]);
            TensorInfo& curRegionTensor = m_OutputTensors.at(outputTensorCount);
            curRegionTensor.gridSize = prevTensorDims.d[1];
            curRegionTensor.stride = m_InputW / curRegionTensor.gridSize;
            m_OutputTensors.at(outputTensorCount).volume = curRegionTensor.gridSize
                * curRegionTensor.gridSize
                * (curRegionTensor.numBBoxes * (5 + curRegionTensor.numClasses));
            std::string layerName = "region_" + std::to_string(i);
            curRegionTensor.blobName = layerName;
            nvinfer1::plugin::RegionParameters RegionParameters{
                static_cast<int>(curRegionTensor.numBBoxes), 4,
                static_cast<int>(curRegionTensor.numClasses), nullptr};
            std::string inputVol = dimsToString(previous->getDimensions());
            nvinfer1::IPluginV2* regionPlugin
                = createRegionPlugin(RegionParameters);
            assert(regionPlugin != nullptr);
            nvinfer1::IPluginV2Layer* region =
                network.addPluginV2(&previous, 1, *regionPlugin);
            assert(region != nullptr);
            region->setName(layerName.c_str());
            previous = region->getOutput(0);
            assert(previous != nullptr);
            previous->setName(layerName.c_str());
            std::string outputVol = dimsToString(previous->getDimensions());
            network.markOutput(*previous);
            channels = getNumChannels(previous);
            tensorOutputs.push_back(region->getOutput(0));
            printLayerInfo(layerIndex, "region", inputVol, outputVol, std::to_string(weightPtr));
            std::cout << "Anchors are being converted to network input resolution i.e. Anchors x "
                << curRegionTensor.stride << " (stride)" << std::endl;
            for (auto& anchor : curRegionTensor.anchors) anchor *= curRegionTensor.stride;
            ++outputTensorCount;
        } else if (m_ConfigBlocks.at(i).at("type") == "reorg") {
            // space-to-depth with fixed factor 2 (yolov2 passthrough)
            std::string inputVol = dimsToString(previous->getDimensions());
            nvinfer1::IPluginV2* reorgPlugin = createReorgPlugin(2);
            assert(reorgPlugin != nullptr);
            nvinfer1::IPluginV2Layer* reorg =
                network.addPluginV2(&previous, 1, *reorgPlugin);
            assert(reorg != nullptr);
            std::string layerName = "reorg_" + std::to_string(i);
            reorg->setName(layerName.c_str());
            previous = reorg->getOutput(0);
            assert(previous != nullptr);
            std::string outputVol = dimsToString(previous->getDimensions());
            channels = getNumChannels(previous);
            tensorOutputs.push_back(reorg->getOutput(0));
            printLayerInfo(layerIndex, "reorg", inputVol, outputVol, std::to_string(weightPtr));
        }
        // route layers (single or concat)
        else if (m_ConfigBlocks.at(i).at("type") == "route") {
            // "layers" is a comma-separated list; negative entries are
            // offsets relative to the current layer count
            std::string strLayers = m_ConfigBlocks.at(i).at("layers");
            std::vector<int> idxLayers;
            size_t lastPos = 0, pos = 0;
            while ((pos = strLayers.find(',', lastPos)) != std::string::npos) {
                int vL = std::stoi(trim(strLayers.substr(lastPos, pos - lastPos)));
                idxLayers.push_back (vL);
                lastPos = pos + 1;
            }
            if (lastPos < strLayers.length()) {
                std::string lastV = trim(strLayers.substr(lastPos));
                if (!lastV.empty()) {
                    idxLayers.push_back (std::stoi(lastV));
                }
            }
            assert (!idxLayers.empty());
            std::vector<nvinfer1::ITensor*> concatInputs;
            for (int idxLayer : idxLayers) {
                if (idxLayer < 0) {
                    idxLayer = tensorOutputs.size() + idxLayer;
                }
                assert (idxLayer >= 0 && idxLayer < (int)tensorOutputs.size());
                concatInputs.push_back (tensorOutputs[idxLayer]);
            }
            nvinfer1::IConcatenationLayer* concat =
                network.addConcatenation(concatInputs.data(), concatInputs.size());
            assert(concat != nullptr);
            std::string concatLayerName = "route_" + std::to_string(i - 1);
            concat->setName(concatLayerName.c_str());
            // concatenate along the channel dimension
            concat->setAxis(0);
            previous = concat->getOutput(0);
            assert(previous != nullptr);
            std::string outputVol = dimsToString(previous->getDimensions());
            // set the output volume depth
            channels
                = getNumChannels(previous);
            tensorOutputs.push_back(concat->getOutput(0));
            printLayerInfo(layerIndex, "route", " -", outputVol,
                std::to_string(weightPtr));
        } else if (m_ConfigBlocks.at(i).at("type") == "upsample") {
            std::string inputVol = dimsToString(previous->getDimensions());
            nvinfer1::ILayer* out = netAddUpsample(i - 1, m_ConfigBlocks[i],
                weights, m_TrtWeights, channels, previous, &network);
            previous = out->getOutput(0);
            std::string outputVol = dimsToString(previous->getDimensions());
            tensorOutputs.push_back(out->getOutput(0));
            printLayerInfo(layerIndex, "upsample", inputVol, outputVol, " -");
        } else if (m_ConfigBlocks.at(i).at("type") == "maxpool") {
            std::string inputVol = dimsToString(previous->getDimensions());
            nvinfer1::ILayer* out =
                netAddMaxpool(i, m_ConfigBlocks.at(i), previous, &network);
            previous = out->getOutput(0);
            assert(previous != nullptr);
            std::string outputVol = dimsToString(previous->getDimensions());
            tensorOutputs.push_back(out->getOutput(0));
            printLayerInfo(layerIndex, "maxpool", inputVol, outputVol, std::to_string(weightPtr));
        }
        else
        {
            std::cout << "Unsupported layer type --> \""
                << m_ConfigBlocks.at(i).at("type") << "\"" << std::endl;
            assert(0);
        }
    }
    // every weight in the file must have been consumed, otherwise the cfg
    // and weights files do not match
    if ((int)weights.size() != weightPtr)
    {
        std::cout << "Number of unused weights left : " << weights.size() - weightPtr << std::endl;
        assert(0);
    }
    std::cout << "Output yolo blob names :" << std::endl;
    for (auto& tensor : m_OutputTensors) {
        std::cout << tensor.blobName << std::endl;
    }
    int nbLayers = network.getNbLayers();
    std::cout << "Total number of yolo layers: " << nbLayers << std::endl;
    return NVDSINFER_SUCCESS;
}
// Parses a darknet .cfg file into a list of blocks. Each "[section]" header
// starts a new block whose "type" entry holds the section name; subsequent
// "key=value" lines populate that block until the next header.
std::vector<std::map<std::string, std::string>>
Yolo::parseConfigFile (const std::string cfgFilePath)
{
    assert(fileExists(cfgFilePath));
    std::ifstream file(cfgFilePath);
    assert(file.good());

    std::string line;
    std::vector<std::map<std::string, std::string>> blocks;
    std::map<std::string, std::string> block;

    while (getline(file, line))
    {
        // Trim first: the old code checked front() before trimming, which
        // missed indented comments and invoked undefined behaviour
        // (front() on an empty string) for whitespace-only lines.
        line = trim(line);
        if (line.empty()) continue;
        if (line.front() == '#') continue;

        if (line.front() == '[')
        {
            // New section header: flush the previous block first.
            if (!block.empty())
            {
                blocks.push_back(block);
                block.clear();
            }
            std::string key = "type";
            std::string value = trim(line.substr(1, line.size() - 2));
            block.insert(std::pair<std::string, std::string>(key, value));
        }
        else
        {
            std::string::size_type cpos = line.find('=');
            // Skip malformed lines with no '=' instead of inserting garbage
            // (npos previously flowed into substr via an int).
            if (cpos == std::string::npos) continue;
            std::string key = trim(line.substr(0, cpos));
            std::string value = trim(line.substr(cpos + 1));
            block.insert(std::pair<std::string, std::string>(key, value));
        }
    }
    // Flush the final block; the guard keeps an empty/comment-only file from
    // yielding an empty block (whose missing "type" would throw later).
    if (!block.empty()) blocks.push_back(block);
    return blocks;
}
// Extracts network-level and detection-head metadata from the parsed cfg:
//   [net]           -> m_InputH/W/C and m_InputSize (square inputs only)
//   [yolo]/[region] -> anchors, masks (yolov3 variants only), box and class
//                      counts, appended to m_OutputTensors in file order.
void Yolo::parseConfigBlocks()
{
    for (auto block : m_ConfigBlocks) {
        if (block.at("type") == "net")
        {
            assert((block.find("height") != block.end())
                && "Missing 'height' param in network cfg");
            assert((block.find("width") != block.end()) && "Missing 'width' param in network cfg");
            assert((block.find("channels") != block.end())
                && "Missing 'channels' param in network cfg");
            m_InputH = std::stoul(block.at("height"));
            m_InputW = std::stoul(block.at("width"));
            m_InputC = std::stoul(block.at("channels"));
            // only square network inputs are supported
            assert(m_InputW == m_InputH);
            m_InputSize = m_InputC * m_InputH * m_InputW;
        }
        else if ((block.at("type") == "region") || (block.at("type") == "yolo"))
        {
            assert((block.find("num") != block.end())
                && std::string("Missing 'num' param in " + block.at("type") + " layer").c_str());
            assert((block.find("classes") != block.end())
                && std::string("Missing 'classes' param in " + block.at("type") + " layer")
                    .c_str());
            assert((block.find("anchors") != block.end())
                && std::string("Missing 'anchors' param in " + block.at("type") + " layer")
                    .c_str());
            TensorInfo outputTensor;
            // parse the comma-separated anchor list, consuming the string
            // one value at a time
            std::string anchorString = block.at("anchors");
            while (!anchorString.empty())
            {
                int npos = anchorString.find_first_of(',');
                if (npos != -1)
                {
                    float anchor = std::stof(trim(anchorString.substr(0, npos)));
                    outputTensor.anchors.push_back(anchor);
                    anchorString.erase(0, npos + 1);
                }
                else
                {
                    float anchor = std::stof(trim(anchorString));
                    outputTensor.anchors.push_back(anchor);
                    break;
                }
            }
            if ((m_NetworkType == "yolov3") || (m_NetworkType == "yolov3-tiny"))
            {
                assert((block.find("mask") != block.end())
                    && std::string("Missing 'mask' param in " + block.at("type") + " layer")
                        .c_str());
                // parse the comma-separated mask list (indices into the
                // anchor list used by this particular head)
                std::string maskString = block.at("mask");
                while (!maskString.empty())
                {
                    int npos = maskString.find_first_of(',');
                    if (npos != -1)
                    {
                        uint mask = std::stoul(trim(maskString.substr(0, npos)));
                        outputTensor.masks.push_back(mask);
                        maskString.erase(0, npos + 1);
                    }
                    else
                    {
                        uint mask = std::stoul(trim(maskString));
                        outputTensor.masks.push_back(mask);
                        break;
                    }
                }
            }
            // when masks are present each head predicts one box per mask
            // entry; otherwise the cfg's 'num' value applies
            outputTensor.numBBoxes = outputTensor.masks.size() > 0
                ? outputTensor.masks.size()
                : std::stoul(trim(block.at("num")));
            outputTensor.numClasses = std::stoul(block.at("classes"));
            m_OutputTensors.push_back(outputTensor);
        }
    }
}
void Yolo::destroyNetworkUtils() {
    // Release the weight buffers accumulated while building the network.
    // They are allocated with `new float[]` in the netAdd* helpers
    // (trt_utils), so they must be released with delete[]; the previous
    // free() on new[]-allocated memory was undefined behaviour.
    for (uint i = 0; i < m_TrtWeights.size(); ++i) {
        if (m_TrtWeights[i].count > 0)
            delete[] static_cast<const float*>(m_TrtWeights[i].values);
    }
    m_TrtWeights.clear();
}

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _YOLO_H_
#define _YOLO_H_
#include <stdint.h>
#include <string>
#include <vector>
#include <memory>
#include "NvInfer.h"
#include "trt_utils.h"
#include "nvdsinfer_custom_impl.h"
/**
 * Holds all the file paths required to build a network.
 */
struct NetworkInfo
{
    std::string networkType;     // e.g. "yolov3", "yolov3-tiny"
    std::string configFilePath;  // darknet .cfg file path
    std::string wtsFilePath;     // darknet .weights file path
    std::string deviceType;      // "kDLA" or "kGPU"
    std::string inputBlobName;   // name of the network input tensor (e.g. "data")
};
/**
 * Holds information about an output tensor of the yolo network.
 */
struct TensorInfo
{
    std::string blobName;        // TRT layer/blob name, e.g. "yolo_82"
    uint stride{0};              // network input width / gridSize
    uint gridSize{0};            // spatial size of the (square) feature map
    uint numClasses{0};          // classes predicted per box
    uint numBBoxes{0};           // boxes predicted per grid cell
    uint64_t volume{0};          // gridSize^2 * numBBoxes * (5 + numClasses)
    std::vector<uint> masks;     // anchor indices used by this head (yolov3 cfgs)
    std::vector<float> anchors;  // anchor sizes from the cfg
    // NOTE(review): the two fields below are not written anywhere in this
    // file — presumably filled by the inference/runtime side; confirm.
    int bindingIndex{-1};        // engine binding index
    float* hostBuffer{nullptr};  // host-side output buffer
};
/**
 * IModelParser implementation that builds a TensorRT network from darknet
 * cfg/weights files.
 */
class Yolo : public IModelParser {
public:
    Yolo(const NetworkInfo& networkInfo);
    ~Yolo() override;
    // This parser does not support full-dims (dynamic shape) networks.
    bool hasFullDimsSupported() const override { return false; }
    // Model name reported to the framework: the cfg path when available,
    // otherwise the network type string.
    const char* getModelName() const override {
        return m_ConfigFilePath.empty() ? m_NetworkType.c_str()
                                        : m_ConfigFilePath.c_str();
    }
    // Parses cfg + weights and populates `network` with the yolo layers.
    NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
    // Builds a CUDA engine from the parsed network; nullptr on failure.
    nvinfer1::ICudaEngine *createEngine (nvinfer1::IBuilder* builder);

protected:
    const std::string m_NetworkType;     // e.g. "yolov3"
    const std::string m_ConfigFilePath;  // darknet .cfg path
    const std::string m_WtsFilePath;     // darknet .weights path
    const std::string m_DeviceType;      // "kDLA" or "kGPU"
    const std::string m_InputBlobName;   // input tensor name
    // one entry per detection head ("yolo"/"region" block), in file order
    std::vector<TensorInfo> m_OutputTensors;
    // parsed cfg: one map per "[section]" block
    std::vector<std::map<std::string, std::string>> m_ConfigBlocks;
    uint m_InputH;         // from [net] height
    uint m_InputW;         // from [net] width
    uint m_InputC;         // from [net] channels
    uint64_t m_InputSize;  // m_InputC * m_InputH * m_InputW
    // TRT specific members
    // new[]-allocated weight buffers owned by this object (see
    // destroyNetworkUtils()).
    std::vector<nvinfer1::Weights> m_TrtWeights;

private:
    NvDsInferStatus buildYoloNetwork(
        std::vector<float>& weights, nvinfer1::INetworkDefinition& network);
    std::vector<std::map<std::string, std::string>> parseConfigFile(
        const std::string cfgFilePath);
    void parseConfigBlocks();
    void destroyNetworkUtils();
};
#endif // _YOLO_H_

Binary file not shown.

View File

@ -0,0 +1,127 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "yoloPlugins.h"
#include "NvInferPlugin.h"
#include <cassert>
#include <iostream>
#include <memory>
namespace {
    // Serialize a trivially-copyable value into `buffer` and advance the
    // cursor. std::memcpy replaces the previous reinterpret_cast store,
    // which could perform a misaligned access (the serialization buffer is
    // a raw char stream with no alignment guarantee) and skirted
    // strict-aliasing rules.
    template <typename T>
    void write(char*& buffer, const T& val)
    {
        std::memcpy(buffer, &val, sizeof(T));
        buffer += sizeof(T);
    }

    // Deserialize a value from `buffer` and advance the cursor.
    template <typename T>
    void read(const char*& buffer, T& val)
    {
        std::memcpy(&val, buffer, sizeof(T));
        buffer += sizeof(T);
    }
} //namespace
// Forward declaration of cuda kernels
cudaError_t cudaYoloLayerV3 (
const void* input, void* output, const uint& batchSize,
const uint& gridSize, const uint& numOutputClasses,
const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream);
// Deserialization constructor: rebuilds the plugin from the byte stream
// produced by serialize(). Read order must match the write order there
// exactly: numBoxes, numClasses, gridSize, outputSize.
// `length` is not validated against the bytes actually consumed.
YoloLayerV3::YoloLayerV3 (const void* data, size_t length)
{
    const char *d = static_cast<const char*>(data);
    read(d, m_NumBoxes);
    read(d, m_NumClasses);
    read(d, m_GridSize);
    read(d, m_OutputSize);
};
// Primary constructor: configures the plugin for one detection head.
YoloLayerV3::YoloLayerV3 (
    const uint& numBoxes, const uint& numClasses, const uint& gridSize) :
    m_NumBoxes(numBoxes),
    m_NumClasses(numClasses),
    m_GridSize(gridSize)
{
    assert(m_NumBoxes > 0);
    assert(m_NumClasses > 0);
    assert(m_GridSize > 0);
    // Per grid cell: numBoxes predictions of (x, y, w, h, objectness) plus
    // one confidence per class.
    m_OutputSize = m_GridSize * m_GridSize * (m_NumBoxes * (4 + 1 + m_NumClasses));
};
// The YOLO layer is shape-preserving: its single output has exactly the
// dimensions of its single input.
nvinfer1::Dims
YoloLayerV3::getOutputDimensions(
    int index, const nvinfer1::Dims* inputs, int nbInputDims)
{
    assert(index == 0);
    assert(nbInputDims == 1);
    return inputs[0];
}
// Only FP32 tensors in linear NCHW layout are accepted by this plugin.
bool YoloLayerV3::supportsFormat (
    nvinfer1::DataType type, nvinfer1::PluginFormat format) const {
    return (type == nvinfer1::DataType::kFLOAT &&
        format == nvinfer1::PluginFormat::kNCHW);
}
// Called by TensorRT before engine build. This plugin keeps no configuration
// state beyond what its constructor captured, so only the arguments are
// sanity-checked; outputDims, nbOutputs, type and maxBatchSize are ignored.
void
YoloLayerV3::configureWithFormat (
    const nvinfer1::Dims* inputDims, int nbInputs,
    const nvinfer1::Dims* outputDims, int nbOutputs,
    nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize)
{
    assert(nbInputs == 1);
    assert (format == nvinfer1::PluginFormat::kNCHW);
    assert(inputDims != nullptr);
}
// Launches the YOLO decode CUDA kernel on `stream` for `batchSize` items;
// inputs[0]/outputs[0] are the device buffers, m_OutputSize is the per-item
// element count. Always returns 0.
// NOTE(review): CHECK is a macro defined outside this block (yoloPlugins.h)
// — presumably it reports/aborts on a non-success cudaError_t; confirm.
int YoloLayerV3::enqueue(
    int batchSize, const void* const* inputs, void** outputs, void* workspace,
    cudaStream_t stream)
{
    CHECK(cudaYoloLayerV3(
        inputs[0], outputs[0], batchSize, m_GridSize, m_NumClasses, m_NumBoxes,
        m_OutputSize, stream));
    return 0;
}
// Number of bytes serialize() will write; must stay in sync with both
// serialize() and the deserializing constructor.
size_t YoloLayerV3::getSerializationSize() const
{
    return sizeof(m_NumBoxes) + sizeof(m_NumClasses)
        + sizeof(m_GridSize) + sizeof(m_OutputSize);
}
// Write the layer parameters into `buffer`. Field order must mirror the
// deserializing constructor exactly.
void YoloLayerV3::serialize(void* buffer) const
{
    char* cursor = static_cast<char*>(buffer);
    write(cursor, m_NumBoxes);
    write(cursor, m_NumClasses);
    write(cursor, m_GridSize);
    write(cursor, m_OutputSize);
}
// Create a functionally identical copy of this plugin.
// Fix: the build-time constructor does not carry the plugin namespace, so it
// is propagated explicitly — previously the clone silently lost m_Namespace.
nvinfer1::IPluginV2* YoloLayerV3::clone() const
{
    YoloLayerV3* copy = new YoloLayerV3 (m_NumBoxes, m_NumClasses, m_GridSize);
    copy->setPluginNamespace(m_Namespace.c_str());
    return copy;
}
REGISTER_TENSORRT_PLUGIN(YoloLayerV3PluginCreator);

View File

@ -0,0 +1,136 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __YOLO_PLUGINS__
#define __YOLO_PLUGINS__
#include <cassert>
#include <cstring>
#include <cuda_runtime_api.h>
#include <iostream>
#include <memory>
#include "NvInferPlugin.h"
// Abort the process on any CUDA error, logging the failing file and line.
// Fixes: do-while(0) makes the macro expand to a single statement (safe in
// unbraced if/else), and `status` is evaluated exactly once.
#define CHECK(status)                                                       \
    do                                                                      \
    {                                                                       \
        cudaError_t error_ = (status);                                      \
        if (error_ != cudaSuccess)                                          \
        {                                                                   \
            std::cout << "Cuda failure: " << cudaGetErrorString(error_)     \
                      << " in file " << __FILE__                            \
                      << " at line " << __LINE__ << std::endl;              \
            abort();                                                        \
        }                                                                   \
    } while (0)
namespace
{
// Name/version pair under which the plugin is registered with TensorRT.
// The name must match the custom-layer name used in the network definition.
const char* YOLOV3LAYER_PLUGIN_VERSION {"1"};
const char* YOLOV3LAYER_PLUGIN_NAME {"YoloLayerV3_TRT"};
} // namespace
// TensorRT IPluginV2 implementation of the YOLOv3 detection ("yolo") layer.
// Instances are created either at build time (box/class/grid counts) or by
// deserializing an engine; method definitions live in yoloPlugins.cpp.
class YoloLayerV3 : public nvinfer1::IPluginV2
{
public:
    // Rebuild the layer from the byte stream written by serialize().
    YoloLayerV3 (const void* data, size_t length);
    // Build-time constructor; derives the flat output size from the arguments.
    YoloLayerV3 (const uint& numBoxes, const uint& numClasses, const uint& gridSize);
    const char* getPluginType () const override { return YOLOV3LAYER_PLUGIN_NAME; }
    const char* getPluginVersion () const override { return YOLOV3LAYER_PLUGIN_VERSION; }
    // Single output whose dimensions mirror the single input.
    int getNbOutputs () const override { return 1; }
    nvinfer1::Dims getOutputDimensions (
        int index, const nvinfer1::Dims* inputs,
        int nbInputDims) override;
    // Accepts only FP32 NCHW buffers (see definition).
    bool supportsFormat (
        nvinfer1::DataType type, nvinfer1::PluginFormat format) const override;
    void configureWithFormat (
        const nvinfer1::Dims* inputDims, int nbInputs,
        const nvinfer1::Dims* outputDims, int nbOutputs,
        nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override;
    // No resources to acquire/release and no scratch workspace needed.
    int initialize () override { return 0; }
    void terminate () override {}
    size_t getWorkspaceSize (int maxBatchSize) const override { return 0; }
    // Launches the CUDA decode kernel for this head.
    int enqueue (
        int batchSize, const void* const* inputs, void** outputs,
        void* workspace, cudaStream_t stream) override;
    size_t getSerializationSize() const override;
    void serialize (void* buffer) const override;
    void destroy () override { delete this; }
    nvinfer1::IPluginV2* clone() const override;
    void setPluginNamespace (const char* pluginNamespace)override {
        m_Namespace = pluginNamespace;
    }
    virtual const char* getPluginNamespace () const override {
        return m_Namespace.c_str();
    }

private:
    // Anchor boxes predicted per grid cell.
    uint m_NumBoxes {0};
    // Object classes scored per box.
    uint m_NumClasses {0};
    // Side length of the (square) detection grid.
    uint m_GridSize {0};
    // Elements per batch item: grid^2 * boxes * (4 + 1 + classes).
    uint64_t m_OutputSize {0};
    // Namespace assigned by TensorRT via setPluginNamespace().
    std::string m_Namespace {""};
};
// IPluginCreator registered with TensorRT so that serialized engines can
// recreate YoloLayerV3 instances by name/version at deserialization time.
// Field-based creation (createPlugin/getFieldNames) is intentionally
// unsupported: plugins are built directly by the network parser.
class YoloLayerV3PluginCreator : public nvinfer1::IPluginCreator
{
public:
    YoloLayerV3PluginCreator () {}
    ~YoloLayerV3PluginCreator () {}

    const char* getPluginName () const override { return YOLOV3LAYER_PLUGIN_NAME; }
    const char* getPluginVersion () const override { return YOLOV3LAYER_PLUGIN_VERSION; }

    const nvinfer1::PluginFieldCollection* getFieldNames() override {
        std::cerr<< "YoloLayerV3PluginCreator::getFieldNames is not implemented" << std::endl;
        return nullptr;
    }

    nvinfer1::IPluginV2* createPlugin (
        const char* name, const nvinfer1::PluginFieldCollection* fc) override
    {
        // Fix: the message previously named getFieldNames instead of this method.
        std::cerr<< "YoloLayerV3PluginCreator::createPlugin is not implemented.\n";
        return nullptr;
    }

    // Recreate a plugin from the byte stream produced by YoloLayerV3::serialize().
    nvinfer1::IPluginV2* deserializePlugin (
        const char* name, const void* serialData, size_t serialLength) override
    {
        std::cout << "Deserialize yoloLayerV3 plugin: " << name << std::endl;
        return new YoloLayerV3(serialData, serialLength);
    }

    void setPluginNamespace(const char* libNamespace) override {
        m_Namespace = libNamespace;
    }
    const char* getPluginNamespace() const override {
        return m_Namespace.c_str();
    }

private:
    // Namespace assigned by TensorRT via setPluginNamespace().
    std::string m_Namespace {""};
};
#endif // __YOLO_PLUGINS__

View File

@ -0,0 +1,18 @@
[property]
#infer-dims=3;224;224
network-input-order=0
model-engine-file=20220611-weights-2class-stick.trt
labelfile-path=labels.txt
gpu-id=0
# NOTE(review): net-scale-factor=0 multiplies every input pixel by zero
# (the DeepStream default is 1) — confirm this is intentional.
net-scale-factor=0
model-color-format=0
force-implicit-batch-dim=1
batch-size=1
# 0=FP32 and 1=INT8 mode
network-mode=0
network-type=1
process-mode=2
classifier-async-mode=0
classifier-threshold=0.8
secondary-reinfer-interval=1

View File

@ -0,0 +1 @@
white-sitck;NO-white-sitck

View File

@ -0,0 +1,18 @@
[property]
#infer-dims=3;224;224
network-input-order=0
model-engine-file=04-0.9678-weights-3class-wheelchair-20220616.trt
labelfile-path=labels.txt
gpu-id=0
# NOTE(review): net-scale-factor=0 multiplies every input pixel by zero
# (the DeepStream default is 1) — confirm this is intentional.
net-scale-factor=0
model-color-format=0
force-implicit-batch-dim=1
batch-size=1
# 0=FP32 and 1=INT8 mode
network-mode=0
network-type=1
process-mode=2
classifier-async-mode=0
classifier-threshold=0.8
secondary-reinfer-interval=1

View File

@ -0,0 +1 @@
chair;wheelchair-electric;wheelchair

Binary file not shown.

230
app_config.txt Executable file
View File

@ -0,0 +1,230 @@
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=1
[tiled-display]
enable=1
rows=2
columns=2
gpu-id=0
nvbuf-memory-type=0
TIME_ZONE=9
FPS_VIDEO_EXPORT=5
PATH_SAVE=./1
BACKUP_SAVE=./1_bk
IS_BACKUP=0
width=1920
height=1080
service_id=SERVICE_00123
tenant_id=TENANT_00123
organization_id=ORG_00123
data_type=API_CDM_0001
#info mode verify
config_file_verify=Model_Classify/verify/Detect_Nguoi_Tren_XeLan_20210610.engine
config_chair_verify=Model_Classify/verify/20220615_weight_80class.engine
max_count_track=15
push_count_track=0
push_count_verify=5
ignore_image=0
process_name=deepstream-app -c
max_ram_usage=4
height_min=75
height_low=100
height_high=150
threshold_min=0.5
threshold_low=0.6
threshold_middle=0.65
threshold_high=0.8
[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=4
rtsp-reconnect-interval-sec=60
device_id=AXIS-4206-V
camera_id=cam1
#uri=rtsp://18.179.9.206:55544/test
#uri=rtsp://192.168.2.101/axis-media/media.amp
# NOTE(review): this section had two active uri keys; only the last one takes
# effect, so the shadowed file source is commented out to make that explicit.
#uri=file:///home/bi/Video_Test_Nhat/Chair-Positive-2M-15fps-KF17.mp4
#uri=rtsp://192.168.2.101/axis-media/media.amp
uri=rtsp://192.168.1.121:8556/test
#uri=rtsp://192.168.0.11/2
patlite=http://192.168.1.22
light-api=/api/control?alert=129999
turn-on-light=1
alert=ON
movie=ON
resolution=FHD
duration=15
confidence1=0.55
confidence2=0.55
num-sources=1
gpu-id=0
cudadec-memtype=0
drop-frame-interval=2
[source1]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=4
rtsp-reconnect-interval-sec=60
device_id=AXIS-4206-V
camera_id=cam2
#uri=rtsp://admin:Admin123@192.168.0.254
#uri=file:///home/bi/xelan/video_35.mp4
#url=file:///home/bi/xelan/video_10.mp4
# NOTE(review): this section had two active uri keys; only the last one takes
# effect, so the shadowed source is commented out to make that explicit.
#uri=rtsp://192.168.2.101/axis-media/media.amp
#uri=rtsp://admin:Admin123@192.168.1.112:554/Streaming/Channels/1/
uri=rtsp://192.168.1.121:8554/test
patlite=http://192.168.1.22
light-api=/api/control?alert=129999
turn-on-light=1
alert=ON
movie=ON
resolution=FHD
duration=15
confidence1=0.55
confidence2=0.55
num-sources=1
gpu-id=0
cudadec-memtype=0
drop-frame-interval=2
[source2]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=4
rtsp-reconnect-interval-sec=60
device_id=AXIS-4206-V
camera_id=cam3
# NOTE(review): this section had two active uri keys; only the last one takes
# effect, so the shadowed source is commented out to make that explicit.
#uri=rtsp://192.168.2.101/axis-media/media.amp
uri=rtsp://192.168.1.121:8554/test
#uri=file:///home/bi/xelan/2022-06-05_Chair-1500K-15fps-KF17(1).mp4
patlite=http://192.168.1.22
light-api=/api/control?alert=129999
turn-on-light=1
alert=ON
movie=ON
resolution=FHD
duration=15
confidence1=0.55
confidence2=0.55
num-sources=1
gpu-id=0
cudadec-memtype=0
drop-frame-interval=2
[source3]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=4
rtsp-reconnect-interval-sec=60
device_id=AXIS-4206-V
camera_id=cam4
# NOTE(review): this section had two active uri keys; only the last one takes
# effect, so the shadowed source is commented out to make that explicit.
#uri=rtsp://192.168.2.101/axis-media/media.amp
#uri=file:///home/bi/xelan/2022-06-05_Chair-1500K-15fps-KF17(1).mp4
uri=rtsp://192.168.1.121:8554/test
patlite=http://192.168.1.22
light-api=/api/control?alert=129999
turn-on-light=1
alert=ON
movie=ON
resolution=FHD
duration=15
confidence1=0.55
confidence2=0.55
num-sources=1
gpu-id=0
cudadec-memtype=0
drop-frame-interval=2
[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0
[osd]
enable=1
gpu-id=0
border-width=5
text-size=14
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Serif
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
[streammux]
gpu-id=0
live-source=1
batch-size=1
batched-push-timeout=40000
width=1920
height=1080
enable-padding=0
nvbuf-memory-type=0
[primary-gie]
enable=1
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=-2;-2;-2;-2
bbox-border-color3=0;1;0;1
interval=0
gie-unique-id=1
nvbuf-memory-type=0
config-file=ModelX/primary/config_infer_primary.txt
[secondary-gie0]
enable=1
gpu-id=0
gie-unique-id=2
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=Model_Classify/secondary-stick-classify/config_infer_secondary.txt
[secondary-gie1]
enable=1
gpu-id=0
gie-unique-id=3
operate-on-gie-id=1
operate-on-class-ids=1;
config-file=Model_Classify/secondary-wheelchair-classify/config_infer_secondary.txt
[tracker]
enable=1
display-tracking-id=1
tracker-width=640
tracker-height=384
ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so
[nvds-analytics]
enable=1
config-file=config_analytics.txt
[tests]
file-loop=0
[ds-example]
enable=1
full-frame=1
processing-width=1280
processing-height=720
gpu-id=0
unique-id=15

71
config_analytics.txt Executable file
View File

@ -0,0 +1,71 @@
[property]
enable=1
#Width height used for configuration to which below configs are configured
config-width=1920
config-height=1080
#osd-mode 0: Don't display any lines, ROIs and text
# 1: Display only lines, rois and static text i.e. labels
# 2: Display all info from 1 plus information about counts
osd-mode=2
#Set OSD font size that has to be displayed
display-font-size=12
[roi-filtering-stream-0]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=0;0;1920;0;1920;1080;0;1080
#remove objects in the ROI
inverse-roi=0
class-id=-1
[roi-filtering-stream-1]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
#roi-RF#=500;200;700;0;1500;0;1800;200;1800;700;1500;1080;700;1080;500;700
roi-RF=0;0;1920;0;1920;1080;0;1080
#remove objects in the ROI
inverse-roi=0
class-id=-1
[roi-filtering-stream-2]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=0;0;1920;0;1920;1080;0;1080
#remove objects in the ROI
inverse-roi=0
class-id=-1
[roi-filtering-stream-3]
#enable or disable following feature
enable=1
#ROI to filter select objects, and remove from meta data
roi-RF=0;0;1920;0;1920;1080;0;1080
#remove objects in the ROI
inverse-roi=0
class-id=-1
[line-crossing-stream-0]
enable=0
#Label;direction;lc
#line-crossing-Entry=1072;911;1143;1058;944;1020;1297;1020;
line-crossing-Exit=789;672;1084;900;851;773;1203;732
class-id=0
#extended when 0- only counts crossing on the configured Line
# 1- assumes extended Line crossing counts all the crossing
extended=0
#LC modes supported:
#loose : counts all crossing without strong adherence to direction
#balanced: Strict direction adherence expected compared to mode=loose
#strict : Strict direction adherence expected compared to mode=balanced
mode=loose
[direction-detection-stream-0]
enable=0
#Label;direction;
direction-South=284;840;360;662;
direction-North=1106;622;1312;701;
class-id=0

Some files were not shown because too many files have changed in this diff Show More