/*
 * GStreamer gstreamer-onnxclient
 * Copyright (C) 2021-2023 Collabora Ltd
 *
 * gstonnxclient.cpp
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "gstonnxclient.h"
#include <cpu_provider_factory.h>
#include <sstream>

#define GST_CAT_DEFAULT onnx_inference_debug

namespace GstOnnxNamespace
{

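/* Rough call order expected of this client (a sketch inferred from this file
 * alone, not a guaranteed API contract): createSession () once, then
 * parseDimensions () when the video caps are known, then run () per frame
 * and copy_tensors_to_meta () to attach the results to the output buffer. */
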
template < typename T >
    std::ostream & operator<< (std::ostream & os, const std::vector < T > &v)
{
  os << "[";
  for (size_t i = 0; i < v.size (); ++i)
  {
    os << v[i];
    if (i != v.size () - 1)
    {
      os << ", ";
    }
  }
  os << "]";

  return os;
}

GstOnnxClient::GstOnnxClient (GstElement *debug_parent):debug_parent(debug_parent),
    session (nullptr),
    width (0),
    height (0),
    channels (0),
    dest (nullptr),
    m_provider (GST_ONNX_EXECUTION_PROVIDER_CPU),
    inputImageFormat (GST_ML_INPUT_IMAGE_FORMAT_HWC),
    inputDatatype (GST_TENSOR_TYPE_UINT8),
    inputDatatypeSize (sizeof (uint8_t)),
    fixedInputImageSize (false),
    inputTensorOffset (0.0),
    inputTensorScale (1.0)
{
}

GstOnnxClient::~GstOnnxClient () {
  delete session;
  delete[]dest;
}

int32_t GstOnnxClient::getWidth (void)
{
  return width;
}

int32_t GstOnnxClient::getHeight (void)
{
  return height;
}

int32_t GstOnnxClient::getChannels (void)
{
  return channels;
}

bool GstOnnxClient::isFixedInputImageSize (void)
{
  return fixedInputImageSize;
}

void GstOnnxClient::setInputImageFormat (GstMlInputImageFormat format)
{
  inputImageFormat = format;
}

GstMlInputImageFormat GstOnnxClient::getInputImageFormat (void)
{
  return inputImageFormat;
}

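/* Record the input tensor element type together with the per-element size in
 * bytes; the size is used later when sizing the conversion buffer in
 * parseDimensions (). FLOAT16 has no native C type, so its size is
 * hard-coded to 2 bytes. */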
void GstOnnxClient::setInputImageDatatype(GstTensorDataType datatype)
{
  inputDatatype = datatype;
  switch (inputDatatype) {
    case GST_TENSOR_TYPE_UINT8:
      inputDatatypeSize = sizeof (uint8_t);
      break;
    case GST_TENSOR_TYPE_UINT16:
      inputDatatypeSize = sizeof (uint16_t);
      break;
    case GST_TENSOR_TYPE_UINT32:
      inputDatatypeSize = sizeof (uint32_t);
      break;
    case GST_TENSOR_TYPE_INT32:
      inputDatatypeSize = sizeof (int32_t);
      break;
    case GST_TENSOR_TYPE_FLOAT16:
      inputDatatypeSize = 2;
      break;
    case GST_TENSOR_TYPE_FLOAT32:
      inputDatatypeSize = sizeof (float);
      break;
    default:
      g_error ("Data type %d not handled", inputDatatype);
      break;
  };
}

void GstOnnxClient::setInputImageOffset (float offset)
{
  inputTensorOffset = offset;
}

float GstOnnxClient::getInputImageOffset ()
{
  return inputTensorOffset;
}

void GstOnnxClient::setInputImageScale (float scale)
{
  inputTensorScale = scale;
}

float GstOnnxClient::getInputImageScale ()
{
  return inputTensorScale;
}

GstTensorDataType GstOnnxClient::getInputImageDatatype(void)
{
  return inputDatatype;
}

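/* Refresh the cached raw C-string pointers for the output names; ONNX Runtime
 * calls such as session->Run () take const char * arrays rather than the
 * allocated strings held in outputNames. */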
std::vector < const char *>GstOnnxClient::genOutputNamesRaw (void)
{
  if (!outputNames.empty () && outputNamesRaw.size () != outputNames.size ()) {
    outputNamesRaw.resize (outputNames.size ());
    for (size_t i = 0; i < outputNamesRaw.size (); i++)
      outputNamesRaw[i] = outputNames[i].get ();
  }

  return outputNamesRaw;
}

bool GstOnnxClient::hasSession (void)
{
  return session != nullptr;
}

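/* Create the ONNX Runtime session: map the requested optimization level,
 * append the requested execution provider (falling back to CPU if CUDA setup
 * fails), then probe the model for its input dimensions, input element type,
 * output names and the tensor ids stored in the model metadata. */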
bool GstOnnxClient::createSession (std::string modelFile,
    GstOnnxOptimizationLevel optim, GstOnnxExecutionProvider provider)
{
  if (session)
    return true;

  GraphOptimizationLevel onnx_optim;
  switch (optim) {
    case GST_ONNX_OPTIMIZATION_LEVEL_DISABLE_ALL:
      onnx_optim = GraphOptimizationLevel::ORT_DISABLE_ALL;
      break;
    case GST_ONNX_OPTIMIZATION_LEVEL_ENABLE_BASIC:
      onnx_optim = GraphOptimizationLevel::ORT_ENABLE_BASIC;
      break;
    case GST_ONNX_OPTIMIZATION_LEVEL_ENABLE_EXTENDED:
      onnx_optim = GraphOptimizationLevel::ORT_ENABLE_EXTENDED;
      break;
    case GST_ONNX_OPTIMIZATION_LEVEL_ENABLE_ALL:
      onnx_optim = GraphOptimizationLevel::ORT_ENABLE_ALL;
      break;
    default:
      onnx_optim = GraphOptimizationLevel::ORT_ENABLE_EXTENDED;
      break;
  };

  try {
    Ort::SessionOptions sessionOptions;
    const auto & api = Ort::GetApi ();
    // for debugging
    //sessionOptions.SetIntraOpNumThreads (1);
    sessionOptions.SetGraphOptimizationLevel (onnx_optim);
    m_provider = provider;
    switch (m_provider) {
      case GST_ONNX_EXECUTION_PROVIDER_CUDA:
        try {
          OrtCUDAProviderOptionsV2 *cuda_options = nullptr;
          Ort::ThrowOnError (api.CreateCUDAProviderOptions (&cuda_options));
          std::unique_ptr < OrtCUDAProviderOptionsV2,
              decltype (api.ReleaseCUDAProviderOptions) >
              rel_cuda_options (cuda_options, api.ReleaseCUDAProviderOptions);
          Ort::ThrowOnError (api.SessionOptionsAppendExecutionProvider_CUDA_V2
              (static_cast < OrtSessionOptions * >(sessionOptions),
                  rel_cuda_options.get ()));
        }
        catch (Ort::Exception & ortex) {
          GST_WARNING
              ("Failed to create CUDA provider - dropping back to CPU");
          Ort::ThrowOnError (OrtSessionOptionsAppendExecutionProvider_CPU
              (sessionOptions, 1));
        }
        break;
      default:
        Ort::ThrowOnError (OrtSessionOptionsAppendExecutionProvider_CPU
            (sessionOptions, 1));
        break;
    };
    env =
        Ort::Env (OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
        "GstOnnxNamespace");
    session = new Ort::Session (env, modelFile.c_str (), sessionOptions);
    auto inputTypeInfo = session->GetInputTypeInfo (0);
    std::vector < int64_t > inputDims =
        inputTypeInfo.GetTensorTypeAndShapeInfo ().GetShape ();
    if (inputImageFormat == GST_ML_INPUT_IMAGE_FORMAT_HWC) {
      height = inputDims[1];
      width = inputDims[2];
      channels = inputDims[3];
    } else {
      channels = inputDims[1];
      height = inputDims[2];
      width = inputDims[3];
    }

    fixedInputImageSize = width > 0 && height > 0;
    GST_DEBUG_OBJECT (debug_parent, "Number of Output Nodes: %d",
        (gint) session->GetOutputCount ());

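    /* Derive the client-side input data type from the element type the model
     * declares for its input tensor; other element types are rejected. */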
    ONNXTensorElementDataType elementType =
        inputTypeInfo.GetTensorTypeAndShapeInfo ().GetElementType ();

    switch (elementType) {
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
        setInputImageDatatype(GST_TENSOR_TYPE_UINT8);
        break;
      case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
        setInputImageDatatype(GST_TENSOR_TYPE_FLOAT32);
        break;
      default:
        GST_ERROR_OBJECT (debug_parent,
            "Only input tensors of type uint8 and float are supported");
        return false;
    }

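    /* Log the model's input name and collect all output names; the raw
     * C-string pointers to these names are what session->Run () uses later. */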
    Ort::AllocatorWithDefaultOptions allocator;
    auto input_name = session->GetInputNameAllocated (0, allocator);
    GST_DEBUG_OBJECT (debug_parent, "Input name: %s", input_name.get ());

    for (size_t i = 0; i < session->GetOutputCount (); ++i) {
      auto output_name = session->GetOutputNameAllocated (i, allocator);
      GST_DEBUG_OBJECT (debug_parent, "Output name %lu:%s", i, output_name.get ());
      outputNames.push_back (std::move (output_name));
    }
    genOutputNamesRaw ();

    // look up tensor ids
    auto metaData = session->GetModelMetadata ();
    OrtAllocator *ortAllocator;
    auto status =
        Ort::GetApi ().GetAllocatorWithDefaultOptions (&ortAllocator);
    if (status) {
      // Handle the error case
      const char *errorString = Ort::GetApi ().GetErrorMessage (status);
      GST_WARNING_OBJECT (debug_parent, "Failed to get allocator: %s", errorString);

      // Clean up the error status
      Ort::GetApi ().ReleaseStatus (status);

      return false;
    }
    for (auto & name:outputNamesRaw) {
      Ort::AllocatedStringPtr res =
          metaData.LookupCustomMetadataMapAllocated (name, ortAllocator);
      if (res)
      {
        GQuark quark = g_quark_from_string (res.get ());
        outputIds.push_back (quark);
      } else {
        GST_ERROR_OBJECT (debug_parent, "Failed to look up id for key %s", name);

        return false;
      }
    }
  }
  catch (Ort::Exception & ortex) {
    GST_ERROR_OBJECT (debug_parent, "%s", ortex.what ());
    return false;
  }

  return true;
}

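/* Decide the conversion buffer dimensions (model-fixed if known, otherwise
 * taken from the video info) and grow "dest" when the new frame needs more
 * space than was previously allocated. */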
void GstOnnxClient::parseDimensions (GstVideoInfo vinfo)
{
  int32_t newWidth = fixedInputImageSize ? width : vinfo.width;
  int32_t newHeight = fixedInputImageSize ? height : vinfo.height;

  if (!fixedInputImageSize) {
    GST_WARNING_OBJECT (debug_parent, "Allocating before knowing model input size");
  }

  if (!dest || width * height < newWidth * newHeight) {
    delete[]dest;
    dest = new uint8_t[newWidth * newHeight * channels * inputDatatypeSize];
  }
  width = newWidth;
  height = newHeight;
}

// copy tensor data to a GstTensorMeta
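/* Only FLOAT32 and INT32 output tensors are handled; each tensor's data is
 * copied into a freshly allocated GstBuffer referenced by the meta. Any other
 * output element type removes the meta again and returns NULL. */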
GstTensorMeta *
GstOnnxClient::copy_tensors_to_meta (std::vector < Ort::Value >
    &outputs, GstBuffer * buffer)
{
  size_t num_tensors = outputNamesRaw.size ();
  GstTensorMeta *tmeta = (GstTensorMeta *) gst_buffer_add_meta (buffer,
      gst_tensor_meta_get_info (),
      NULL);
  tmeta->num_tensors = num_tensors;
  tmeta->tensors = g_new (GstTensor *, num_tensors);
  bool hasIds = outputIds.size () == num_tensors;
  for (size_t i = 0; i < num_tensors; i++) {
    Ort::Value outputTensor = std::move (outputs[i]);

    ONNXTensorElementDataType tensorType =
        outputTensor.GetTensorTypeAndShapeInfo ().GetElementType ();

    auto tensorShape = outputTensor.GetTensorTypeAndShapeInfo ().GetShape ();
    GstTensor *tensor = gst_tensor_alloc (tensorShape.size ());
    tmeta->tensors[i] = tensor;

    if (hasIds)
      tensor->id = outputIds[i];
    else
      tensor->id = 0;
    tensor->num_dims = tensorShape.size ();
    tensor->batch_size = 1;

    for (size_t j = 0; j < tensorShape.size (); ++j)
      tensor->dims[j].size = tensorShape[j];

    size_t numElements =
        outputTensor.GetTensorTypeAndShapeInfo ().GetElementCount ();

    if (tensorType == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT) {
      size_t buffer_size = 0;

      buffer_size = numElements * sizeof (float);
      tensor->data = gst_buffer_new_allocate (NULL, buffer_size, NULL);
      gst_buffer_fill (tensor->data, 0, outputTensor.GetTensorData < float >(),
          buffer_size);
      tensor->data_type = GST_TENSOR_TYPE_FLOAT32;
    } else if (tensorType == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32) {
      size_t buffer_size = 0;

      buffer_size = numElements * sizeof (int);
      tensor->data = gst_buffer_new_allocate (NULL, buffer_size, NULL);
      gst_buffer_fill (tensor->data, 0,
          outputTensor.GetTensorData < int32_t >(), buffer_size);
      tensor->data_type = GST_TENSOR_TYPE_INT32;
    } else {
      GST_ERROR_OBJECT (debug_parent,
          "Output tensor is not FLOAT32 or INT32, not supported");
      gst_buffer_remove_meta (buffer, (GstMeta *) tmeta);
      return NULL;
    }
  }

  return tmeta;
}

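/* Convenience wrapper around doRun () that returns the output tensors by
 * value. */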
std::vector < Ort::Value > GstOnnxClient::run (uint8_t * img_data,
    GstVideoInfo vinfo)
{
  std::vector < Ort::Value > modelOutput;
  doRun (img_data, vinfo, modelOutput);

  return modelOutput;
}

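/* Build the input tensor for one video frame: fix up the dynamic input
 * dimensions, pick the source channel order for the frame's pixel format,
 * convert or alias the frame data, then execute the session. */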
bool GstOnnxClient::doRun (uint8_t * img_data, GstVideoInfo vinfo,
    std::vector < Ort::Value > &modelOutput)
{
  if (!img_data)
    return false;

  Ort::AllocatorWithDefaultOptions allocator;
  auto inputName = session->GetInputNameAllocated (0, allocator);
  auto inputTypeInfo = session->GetInputTypeInfo (0);
  std::vector < int64_t > inputDims =
      inputTypeInfo.GetTensorTypeAndShapeInfo ().GetShape ();
  inputDims[0] = 1;
  if (inputImageFormat == GST_ML_INPUT_IMAGE_FORMAT_HWC) {
    inputDims[1] = height;
    inputDims[2] = width;
  } else {
    inputDims[2] = height;
    inputDims[3] = width;
  }

  std::ostringstream buffer;
  buffer << inputDims;
  GST_DEBUG_OBJECT (debug_parent, "Input dimensions: %s", buffer.str ().c_str ());

  // copy video frame
  uint8_t *srcPtr[3] = { img_data, img_data + 1, img_data + 2 };
  uint32_t srcSamplesPerPixel = 3;
  switch (vinfo.finfo->format) {
    case GST_VIDEO_FORMAT_RGBA:
      srcSamplesPerPixel = 4;
      break;
    case GST_VIDEO_FORMAT_BGRA:
      srcSamplesPerPixel = 4;
      srcPtr[0] = img_data + 2;
      srcPtr[1] = img_data + 1;
      srcPtr[2] = img_data + 0;
      break;
    case GST_VIDEO_FORMAT_ARGB:
      srcSamplesPerPixel = 4;
      srcPtr[0] = img_data + 1;
      srcPtr[1] = img_data + 2;
      srcPtr[2] = img_data + 3;
      break;
    case GST_VIDEO_FORMAT_ABGR:
      srcSamplesPerPixel = 4;
      srcPtr[0] = img_data + 3;
      srcPtr[1] = img_data + 2;
      srcPtr[2] = img_data + 1;
      break;
    case GST_VIDEO_FORMAT_BGR:
      srcPtr[0] = img_data + 2;
      srcPtr[1] = img_data + 1;
      srcPtr[2] = img_data + 0;
      break;
    default:
      break;
  }
  uint32_t stride = vinfo.stride[0];
  const size_t inputTensorSize = width * height * channels * inputDatatypeSize;
  auto memoryInfo =
      Ort::MemoryInfo::CreateCpu (OrtAllocatorType::OrtArenaAllocator,
      OrtMemType::OrtMemTypeDefault);

  std::vector < Ort::Value > inputTensors;

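  /* For uint8 input with no offset and a scale of 1.0 the frame can be handed
   * to ONNX Runtime as-is; otherwise it is converted into "dest" first. */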
  switch (inputDatatype) {
    case GST_TENSOR_TYPE_UINT8:
      uint8_t *src_data;
      if (inputTensorOffset == 0.0 && inputTensorScale == 1.0) {
        src_data = img_data;
      } else {
        convert_image_remove_alpha (
            dest, inputImageFormat, srcPtr, srcSamplesPerPixel, stride,
            (uint8_t) inputTensorOffset, (uint8_t) inputTensorScale);
        src_data = dest;
      }

      inputTensors.push_back (Ort::Value::CreateTensor < uint8_t > (
          memoryInfo, src_data, inputTensorSize, inputDims.data (),
          inputDims.size ()));
      break;
    case GST_TENSOR_TYPE_FLOAT32: {
      convert_image_remove_alpha ((float *) dest, inputImageFormat, srcPtr,
          srcSamplesPerPixel, stride, (float) inputTensorOffset,
          (float) inputTensorScale);
      inputTensors.push_back (Ort::Value::CreateTensor < float > (
          memoryInfo, (float *) dest, inputTensorSize, inputDims.data (),
          inputDims.size ()));
    }
      break;
    default:
      break;
  }

  std::vector < const char *>inputNames { inputName.get () };
  modelOutput = session->Run (Ort::RunOptions {nullptr},
      inputNames.data (),
      inputTensors.data (), 1, outputNamesRaw.data (),
      outputNamesRaw.size ());

  return true;
}

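/* Copy the RGB samples referenced by srcPtr into dst, either interleaved (HWC)
 * or planar (CHW), dropping any alpha channel and applying
 * (sample + offset) / div per sample. Note the layout actually consulted is
 * the member inputImageFormat; the "hwc" parameter is currently unused. */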
template < typename T>
    void GstOnnxClient::convert_image_remove_alpha (T *dst,
    GstMlInputImageFormat hwc, uint8_t **srcPtr, uint32_t srcSamplesPerPixel,
    uint32_t stride, T offset, T div) {
  size_t destIndex = 0;
  T tmp;

  if (inputImageFormat == GST_ML_INPUT_IMAGE_FORMAT_HWC) {
    for (int32_t j = 0; j < height; ++j) {
      for (int32_t i = 0; i < width; ++i) {
        for (int32_t k = 0; k < channels; ++k) {
          tmp = *srcPtr[k];
          tmp += offset;
          dst[destIndex++] = (T)(tmp / div);
          srcPtr[k] += srcSamplesPerPixel;
        }
      }
      // correct for stride
      for (uint32_t k = 0; k < 3; ++k)
        srcPtr[k] += stride - srcSamplesPerPixel * width;
    }
  } else {
    size_t frameSize = width * height;
    T *destPtr[3] = { dst, dst + frameSize, dst + 2 * frameSize };
    for (int32_t j = 0; j < height; ++j) {
      for (int32_t i = 0; i < width; ++i) {
        for (int32_t k = 0; k < channels; ++k) {
          tmp = *srcPtr[k];
          tmp += offset;
          destPtr[k][destIndex] = (T)(tmp / div);
          srcPtr[k] += srcSamplesPerPixel;
        }
        destIndex++;
      }
      // correct for stride
      for (uint32_t k = 0; k < 3; ++k)
        srcPtr[k] += stride - srcSamplesPerPixel * width;
    }
  }
}
}