cuda: Port to C++

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/3884>

parent a37b3ec710
commit f212bd901b

10 changed files with 337 additions and 348 deletions
@@ -131,7 +131,7 @@ if build_gir
 {'name': 'audio', 'gir': audio_gir, 'lib': gstbadaudio_dep, 'prefix': 'bad-'},
 {'name': 'transcoder', 'gir': transcoder_gir, 'lib': gst_transcoder_dep},
 {'name': 'codecs', 'gir': codecs_gir, 'lib': gstcodecs_dep},
-{'name': 'cuda', 'gir': gst_cuda_gir, 'lib': gstcuda_dep},
+{'name': 'cuda', 'gir': gst_cuda_gir, 'lib': gstcuda_dep, 'c_source_patterns': ['*.h', '*.cpp']},
 ]

 if gstopencv_dep.found()
@@ -52,3 +52,14 @@ gboolean gst_cuda_buffer_copy (GstBuffer * dst,
 G_END_DECLS

+#ifdef __cplusplus
+#include <mutex>
+
+#define GST_CUDA_CALL_ONCE_BEGIN \
+    static std::once_flag __once_flag; \
+    std::call_once (__once_flag, [&]()
+
+#define GST_CUDA_CALL_ONCE_END )
+
+#endif /* __cplusplus */
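For readers who have not used std::call_once before, here is a minimal sketch of how the new GST_CUDA_CALL_ONCE_BEGIN / GST_CUDA_CALL_ONCE_END pair is meant to be used; the same pattern shows up below in init_cuda_ctx (), gst_cuda_load_library () and ensure_debug_category (). The function and variable names in the sketch are hypothetical and not part of the patch.

#include <mutex>

/* Same definition as in the new private header: a static std::once_flag
 * plus a std::call_once over a lambda that captures by reference. */
#define GST_CUDA_CALL_ONCE_BEGIN \
    static std::once_flag __once_flag; \
    std::call_once (__once_flag, [&]()

#define GST_CUDA_CALL_ONCE_END )

static int global_state = 0;    /* hypothetical shared state */

static void
do_one_time_init (void)         /* hypothetical */
{
  /* The body runs exactly once even if several threads race to get here;
   * the by-reference capture lets it touch locals of the caller as well. */
  GST_CUDA_CALL_ONCE_BEGIN {
    global_state = 42;
  } GST_CUDA_CALL_ONCE_END;
}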
@@ -42,7 +42,7 @@ G_DEFINE_TYPE_WITH_PRIVATE (GstCudaBufferPool, gst_cuda_buffer_pool,
 static const gchar **
 gst_cuda_buffer_pool_get_options (GstBufferPool * pool)
 {
-  static const gchar *options[] = { GST_BUFFER_POOL_OPTION_VIDEO_META, NULL
+  static const gchar *options[] = { GST_BUFFER_POOL_OPTION_VIDEO_META, nullptr
   };

   return options;
@@ -53,7 +53,7 @@ gst_cuda_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
 {
   GstCudaBufferPool *self = GST_CUDA_BUFFER_POOL (pool);
   GstCudaBufferPoolPrivate *priv = self->priv;
-  GstCaps *caps = NULL;
+  GstCaps *caps = nullptr;
   guint size, min_buffers, max_buffers;
   GstVideoInfo info;
   GstMemory *mem;
@@ -89,7 +89,7 @@ gst_cuda_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
     return FALSE;
   }

-  mem = gst_cuda_allocator_alloc (NULL, self->context, NULL, &info);
+  mem = gst_cuda_allocator_alloc (nullptr, self->context, nullptr, &info);
   if (!mem) {
     GST_WARNING_OBJECT (self, "Failed to allocate memory");
     return FALSE;
@@ -180,12 +180,13 @@ gst_cuda_buffer_pool_new (GstCudaContext * context)
 {
   GstCudaBufferPool *self;

-  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), NULL);
+  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), nullptr);

-  self = g_object_new (GST_TYPE_CUDA_BUFFER_POOL, NULL);
+  self = (GstCudaBufferPool *)
+      g_object_new (GST_TYPE_CUDA_BUFFER_POOL, nullptr);
   gst_object_ref_sink (self);

-  self->context = gst_object_ref (context);
+  self->context = (GstCudaContext *) gst_object_ref (context);

   return GST_BUFFER_POOL_CAST (self);
 }
@@ -202,12 +203,12 @@ gst_cuda_buffer_pool_new (GstCudaContext * context)
 GstCudaStream *
 gst_buffer_pool_config_get_cuda_stream (GstStructure * config)
 {
-  GstCudaStream *stream = NULL;
+  GstCudaStream *stream = nullptr;

-  g_return_val_if_fail (config, NULL);
+  g_return_val_if_fail (config, nullptr);

   gst_structure_get (config, "cuda-stream", GST_TYPE_CUDA_STREAM, &stream,
-      NULL);
+      nullptr);

   return stream;
 }
@@ -228,7 +229,8 @@ gst_buffer_pool_config_set_cuda_stream (GstStructure * config,
   g_return_if_fail (config);
   g_return_if_fail (GST_IS_CUDA_STREAM (stream));

-  gst_structure_set (config, "cuda-stream", GST_TYPE_CUDA_STREAM, stream, NULL);
+  gst_structure_set (config,
+      "cuda-stream", GST_TYPE_CUDA_STREAM, stream, nullptr);
 }

 static void
@@ -269,5 +271,6 @@ gst_cuda_buffer_pool_class_init (GstCudaBufferPoolClass * klass)
 static void
 gst_cuda_buffer_pool_init (GstCudaBufferPool * pool)
 {
-  pool->priv = gst_cuda_buffer_pool_get_instance_private (pool);
+  pool->priv = (GstCudaBufferPoolPrivate *)
+      gst_cuda_buffer_pool_get_instance_private (pool);
 }
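Most of the mechanical churn in this file (and in the ones that follow) comes from C++ being stricter than C: void * no longer converts implicitly to other pointer types, and OR-ing enum values yields an int that will not convert back to the enum on its own. A minimal illustration of both rules, independent of the patch (the helper function is hypothetical):

#include <glib-object.h>

static void
cast_examples (void)
{
  /* g_object_new () and the *_get_instance_private () helpers return
   * gpointer (void *); C accepts the implicit conversion, C++ does not,
   * hence the (GstCudaBufferPool *), (GstCudaContext *), ... casts above. */
  GObject *obj = (GObject *) g_object_new (G_TYPE_OBJECT, nullptr);

  /* OR-ing flags produces an int, so the combined value has to be cast
   * back to the enum type (GParamFlags, GstMapFlags, GstMemoryFlags). */
  GParamFlags flags = (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

  (void) flags;
  g_object_unref (obj);
}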
@@ -25,17 +25,26 @@
 #include "gstcudacontext.h"
 #include "gstcudautils.h"
 #include "gstcudamemory.h"
+#include "gstcuda-private.h"

 #ifdef GST_CUDA_HAS_D3D
 #include <gst/d3d11/gstd3d11.h>
+#include <wrl.h>
+
+/* *INDENT-OFF* */
+using namespace Microsoft::WRL;
+/* *INDENT-ON* */
 #endif

 GST_DEBUG_CATEGORY_STATIC (gst_cuda_context_debug);
 #define GST_CAT_DEFAULT gst_cuda_context_debug

 /* store all context object with weak ref */
-static GList *context_list = NULL;
-G_LOCK_DEFINE_STATIC (list_lock);
+static GList *context_list = nullptr;
+
+/* *INDENT-OFF* */
+static std::mutex list_lock;
+/* *INDENT-ON* */

 enum
 {
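The G_LOCK () / G_UNLOCK () pairs that used to guard context_list are replaced in the hunks below by a std::lock_guard over this std::mutex. A short sketch of the idiom for readers coming from the GLib macros (names hypothetical):

#include <mutex>

static std::mutex list_lock;        /* mirrors the declaration above */
static int shared_counter = 0;      /* hypothetical shared state */

static void
touch_shared_state (void)
{
  /* The constructor takes the lock and the destructor releases it when the
   * scope exits -- including on early return, which is why several of the
   * "goto out; + unlock" patterns later in this series collapse into
   * plain returns. */
  std::lock_guard < std::mutex > lk (list_lock);
  shared_counter++;
}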
@@ -90,15 +99,16 @@ gst_cuda_context_class_init (GstCudaContextClass * klass)
       g_param_spec_uint ("cuda-device-id", "Cuda Device ID",
           "Set the GPU device to use for operations",
           0, G_MAXUINT, 0,
-          G_PARAM_CONSTRUCT_ONLY | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+          (GParamFlags) (G_PARAM_CONSTRUCT_ONLY | G_PARAM_READWRITE |
+              G_PARAM_STATIC_STRINGS)));

 #ifdef GST_CUDA_HAS_D3D
   g_object_class_install_property (gobject_class, PROP_DXGI_ADAPTER_LUID,
       g_param_spec_int64 ("dxgi-adapter-luid", "DXGI Adapter LUID",
           "Associated DXGI Adapter LUID (Locally Unique Identifier) ",
           G_MININT64, G_MAXINT64, 0,
-          GST_PARAM_CONDITIONALLY_AVAILABLE | G_PARAM_READABLE |
-          G_PARAM_STATIC_STRINGS));
+          (GParamFlags) (GST_PARAM_CONDITIONALLY_AVAILABLE | G_PARAM_READABLE |
+              G_PARAM_STATIC_STRINGS)));
 #endif

   gst_cuda_memory_init_once ();
@@ -107,7 +117,8 @@ gst_cuda_context_class_init (GstCudaContextClass * klass)
 static void
 gst_cuda_context_init (GstCudaContext * context)
 {
-  GstCudaContextPrivate *priv = gst_cuda_context_get_instance_private (context);
+  GstCudaContextPrivate *priv = (GstCudaContextPrivate *)
+      gst_cuda_context_get_instance_private (context);

   priv->accessible_peer = g_hash_table_new (g_direct_hash, g_direct_equal);

@@ -157,68 +168,59 @@ gst_cuda_context_find_dxgi_adapter_luid (CUdevice cuda_device)
 {
   gint64 ret = 0;
   HRESULT hr;
-  IDXGIFactory1 *factory = NULL;
+  ComPtr < IDXGIFactory1 > factory;
   guint i;

-  hr = CreateDXGIFactory1 (&IID_IDXGIFactory1, (void **) &factory);
+  hr = CreateDXGIFactory1 (IID_PPV_ARGS (&factory));
   if (FAILED (hr))
     return 0;

   for (i = 0;; i++) {
-    IDXGIAdapter1 *adapter;
+    ComPtr < IDXGIAdapter1 > adapter;
     DXGI_ADAPTER_DESC desc;
     CUdevice other_dev = 0;
     CUresult cuda_ret;

-    hr = IDXGIFactory1_EnumAdapters1 (factory, i, &adapter);
+    hr = factory->EnumAdapters1 (i, &adapter);
     if (FAILED (hr))
       break;

-    hr = IDXGIAdapter1_GetDesc (adapter, &desc);
-    if (FAILED (hr)) {
-      IDXGIAdapter1_Release (adapter);
+    hr = adapter->GetDesc (&desc);
+    if (FAILED (hr))
       continue;
-    }

-    if (desc.VendorId != 0x10de) {
-      IDXGIAdapter1_Release (adapter);
+    if (desc.VendorId != 0x10de)
       continue;
-    }

-    cuda_ret = CuD3D11GetDevice (&other_dev, (IDXGIAdapter *) adapter);
-    IDXGIAdapter1_Release (adapter);
-
+    cuda_ret = CuD3D11GetDevice (&other_dev, adapter.Get ());
     if (cuda_ret == CUDA_SUCCESS && other_dev == cuda_device) {
       ret = gst_d3d11_luid_to_int64 (&desc.AdapterLuid);
       break;
     }
   }

-  IDXGIFactory1_Release (factory);
-
   return ret;
 }
 #endif
 static gboolean
 init_cuda_ctx (void)
 {
-  gboolean ret = TRUE;
+  static gboolean ret = TRUE;

-  static gsize once = 0;
-
-  if (g_once_init_enter (&once)) {
+  GST_CUDA_CALL_ONCE_BEGIN {
     if (CuInit (0) != CUDA_SUCCESS) {
       GST_ERROR ("Failed to cuInit");
       ret = FALSE;
     }
     GST_DEBUG_CATEGORY_INIT (gst_cuda_context_debug,
         "cudacontext", 0, "CUDA Context");
-    g_once_init_leave (&once, ret);
   }
+  GST_CUDA_CALL_ONCE_END;

   return ret;
 }

+/* *INDENT-OFF* */
 static gboolean
 gst_create_cucontext (guint * device_id, CUcontext * context)
 {
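ComPtr (from <wrl.h>) is a reference-counting smart pointer for COM interfaces: Release () happens automatically when the object goes out of scope, which is what lets the explicit IDXGIAdapter1_Release () / IDXGIFactory1_Release () calls above disappear. A standalone sketch of the same pattern, Windows-only and using the real DXGI API (the helper function name is made up):

#include <wrl.h>
#include <dxgi.h>

using Microsoft::WRL::ComPtr;

static void
list_adapters (void)
{
  ComPtr < IDXGIFactory1 > factory;

  /* IID_PPV_ARGS expands to the interface IID plus a void ** aimed at the
   * ComPtr, replacing the manual (&IID_IDXGIFactory1, (void **) &p) pair. */
  if (FAILED (CreateDXGIFactory1 (IID_PPV_ARGS (&factory))))
    return;

  for (UINT i = 0;; i++) {
    ComPtr < IDXGIAdapter1 > adapter;
    DXGI_ADAPTER_DESC desc;

    if (FAILED (factory->EnumAdapters1 (i, &adapter)))
      break;
    if (FAILED (adapter->GetDesc (&desc)))
      continue;

    /* adapter is released automatically at the end of every iteration,
     * factory when the function returns -- no explicit Release () calls. */
  }
}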
@@ -229,7 +231,6 @@ gst_create_cucontext (guint * device_id, CUcontext * context)
   gint min = 0, maj = 0;
   gint i;

-
   if (!init_cuda_ctx ())
     return FALSE;

@@ -247,7 +248,7 @@ gst_create_cucontext (guint * device_id, CUcontext * context)
           CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cdev))) {
     GST_INFO ("GPU #%d supports NVENC: %s (%s) (Compute SM %d.%d)", i,
         (((maj << 4) + min) >= 0x30) ? "yes" : "no", name, maj, min);
-    if (*device_id == -1 || *device_id == cdev) {
+    if (*device_id == (guint) -1 || *device_id == (guint) cdev) {
       *device_id = cuda_dev = cdev;
       break;
     }
@@ -275,6 +276,7 @@ gst_create_cucontext (guint * device_id, CUcontext * context)

   return TRUE;
 }
+/* *INDENT-ON* */

 /* must be called with list_lock taken */
 static void
@@ -302,7 +304,7 @@ gst_cuda_context_enable_peer_access (GstCudaContext * context,
     g_hash_table_add (priv->accessible_peer, peer);
   }

-  gst_cuda_context_pop (NULL);
+  gst_cuda_context_pop (nullptr);
 }

 static void
@@ -310,7 +312,7 @@ gst_cuda_context_weak_ref_notify (gpointer data, GstCudaContext * context)
 {
   GList *iter;

-  G_LOCK (list_lock);
+  std::lock_guard < std::mutex > lk (list_lock);
   context_list = g_list_remove (context_list, context);

   /* disable self -> peer access */
@@ -319,18 +321,18 @@ gst_cuda_context_weak_ref_notify (gpointer data, GstCudaContext * context)
     gpointer key;
     g_hash_table_iter_init (&iter, context->priv->accessible_peer);
     if (gst_cuda_context_push (context)) {
-      while (g_hash_table_iter_next (&iter, &key, NULL)) {
+      while (g_hash_table_iter_next (&iter, &key, nullptr)) {
         GstCudaContext *peer = GST_CUDA_CONTEXT (key);
         CUcontext peer_handle = gst_cuda_context_get_handle (peer);
         GST_DEBUG_OBJECT (context,
             "Disable peer access to %" GST_PTR_FORMAT, peer);
         gst_cuda_result (CuCtxDisablePeerAccess (peer_handle));
       }
-      gst_cuda_context_pop (NULL);
+      gst_cuda_context_pop (nullptr);
     }

     g_hash_table_destroy (context->priv->accessible_peer);
-    context->priv->accessible_peer = NULL;
+    context->priv->accessible_peer = nullptr;
   }

   /* disable peer -> self access */
@@ -348,13 +350,12 @@ gst_cuda_context_weak_ref_notify (gpointer data, GstCudaContext * context)
       GST_DEBUG_OBJECT (other,
           "Disable peer access to %" GST_PTR_FORMAT, context);
       gst_cuda_result (CuCtxDisablePeerAccess (self_handle));
-      gst_cuda_context_pop (NULL);
+      gst_cuda_context_pop (nullptr);
     }

     g_hash_table_remove (other_priv->accessible_peer, context);
     }
   }
-  G_UNLOCK (list_lock);
 }

 static void
@@ -390,13 +391,13 @@ gst_cuda_context_new (guint device_id)
   GstCudaContext *self;

   if (!gst_create_cucontext (&device_id, &ctx)) {
-    return NULL;
+    return nullptr;
   }

   self = gst_cuda_context_new_wrapped (ctx, device_id);
   if (!self) {
     CuCtxDestroy (ctx);
-    return NULL;
+    return nullptr;
   }

   self->priv->owns_context = TRUE;
@@ -405,7 +406,7 @@ gst_cuda_context_new (guint device_id)
     GST_ERROR ("Could not pop current context");
     g_object_unref (self);

-    return NULL;
+    return nullptr;
   }

   return self;
@@ -461,8 +462,8 @@ gst_cuda_context_pop (CUcontext * cuda_ctx)
 gpointer
 gst_cuda_context_get_handle (GstCudaContext * ctx)
 {
-  g_return_val_if_fail (ctx, NULL);
-  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (ctx), NULL);
+  g_return_val_if_fail (ctx, nullptr);
+  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (ctx), nullptr);

   return ctx->priv->context;
 }
@@ -505,12 +506,11 @@ gst_cuda_context_can_access_peer (GstCudaContext * ctx, GstCudaContext * peer)
   g_return_val_if_fail (GST_IS_CUDA_CONTEXT (ctx), FALSE);
   g_return_val_if_fail (GST_IS_CUDA_CONTEXT (peer), FALSE);

-  G_LOCK (list_lock);
+  std::lock_guard < std::mutex > lk (list_lock);
   if (ctx->priv->accessible_peer &&
       g_hash_table_lookup (ctx->priv->accessible_peer, peer)) {
     ret = TRUE;
   }
-  G_UNLOCK (list_lock);

   return ret;
 }
@@ -541,20 +541,21 @@ gst_cuda_context_new_wrapped (CUcontext handler, CUdevice device)

   GstCudaContext *self;

-  g_return_val_if_fail (handler, NULL);
-  g_return_val_if_fail (device >= 0, NULL);
+  g_return_val_if_fail (handler, nullptr);
+  g_return_val_if_fail (device >= 0, nullptr);

   if (!init_cuda_ctx ())
-    return NULL;
+    return nullptr;

   if (!gst_cuda_result (CuDeviceGetAttribute (&tex_align,
           CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, device))) {
     GST_ERROR ("Could not get texture alignment for %d", device);

-    return NULL;
+    return nullptr;
   }

-  self = g_object_new (GST_TYPE_CUDA_CONTEXT, "cuda-device-id", device, NULL);
+  self = (GstCudaContext *)
+      g_object_new (GST_TYPE_CUDA_CONTEXT, "cuda-device-id", device, nullptr);
   self->priv->context = handler;
   self->priv->device = device;
   self->priv->tex_align = tex_align;
@@ -566,9 +567,9 @@ gst_cuda_context_new_wrapped (CUcontext handler, CUdevice device)
 #endif


-  G_LOCK (list_lock);
+  std::lock_guard < std::mutex > lk (list_lock);
   g_object_weak_ref (G_OBJECT (self),
-      (GWeakNotify) gst_cuda_context_weak_ref_notify, NULL);
+      (GWeakNotify) gst_cuda_context_weak_ref_notify, nullptr);
   for (iter = context_list; iter; iter = g_list_next (iter)) {
     GstCudaContext *peer = (GstCudaContext *) iter->data;

@@ -578,7 +579,6 @@ gst_cuda_context_new_wrapped (CUcontext handler, CUdevice device)
   }

   context_list = g_list_append (context_list, self);
-  G_UNLOCK (list_lock);

   return self;
 }
@@ -24,6 +24,7 @@
 #include "cuda-gst.h"
 #include "gstcudaloader.h"
 #include <gmodule.h>
+#include "gstcuda-private.h"

 GST_DEBUG_CATEGORY (gst_cudaloader_debug);
 #define GST_CAT_DEFAULT gst_cudaloader_debug
@@ -37,7 +38,8 @@ GST_DEBUG_CATEGORY (gst_cudaloader_debug);
 #define LOAD_SYMBOL(name,func) G_STMT_START { \
   if (!g_module_symbol (module, G_STRINGIFY (name), (gpointer *) &vtable->func)) { \
     GST_ERROR ("Failed to load '%s' from %s, %s", G_STRINGIFY (name), filename, g_module_error()); \
-    goto error; \
+    g_module_close (module); \
+    return; \
   } \
 } G_STMT_END;

@@ -137,37 +139,17 @@ typedef struct _GstNvCodecCudaVTable

 static GstNvCodecCudaVTable gst_cuda_vtable = { 0, };

-/**
- * gst_cuda_load_library:
- *
- * Loads the cuda library
- *
- * Returns: %TRUE if the libcuda could be loaded %FALSE otherwise
- *
- * Since: 1.22
- */
-gboolean
-gst_cuda_load_library (void)
+static void
+gst_cuda_load_library_once_func (void)
 {
   GModule *module;
   const gchar *filename = CUDA_LIBNAME;
   GstNvCodecCudaVTable *vtable;
-  static gsize debug_initialized = FALSE;
-
-  if (g_once_init_enter (&debug_initialized)) {
-    GST_DEBUG_CATEGORY_INIT (gst_cudaloader_debug, "cudaloader", 0,
-        "cudaloader");
-
-    g_once_init_leave (&debug_initialized, TRUE);
-  }
-
-  if (gst_cuda_vtable.loaded)
-    return TRUE;

   module = g_module_open (filename, G_MODULE_BIND_LAZY);
-  if (module == NULL) {
+  if (module == nullptr) {
     GST_WARNING ("Could not open library %s, %s", filename, g_module_error ());
-    return FALSE;
+    return;
   }

   vtable = &gst_cuda_vtable;
@@ -234,19 +216,31 @@ gst_cuda_load_library (void)
 #endif

   vtable->loaded = TRUE;
+}

-  return TRUE;
+/**
+ * gst_cuda_load_library:
+ *
+ * Loads the cuda library
+ *
+ * Returns: %TRUE if the libcuda could be loaded %FALSE otherwise
+ *
+ * Since: 1.22
+ */
+gboolean
+gst_cuda_load_library (void)
+{
+  GST_CUDA_CALL_ONCE_BEGIN {
+    gst_cuda_load_library_once_func ();
+  } GST_CUDA_CALL_ONCE_END;

-error:
-  g_module_close (module);
-
-  return FALSE;
+  return gst_cuda_vtable.loaded;
 }

 CUresult CUDAAPI
 CuInit (unsigned int Flags)
 {
-  g_assert (gst_cuda_vtable.CuInit != NULL);
+  g_assert (gst_cuda_vtable.CuInit != nullptr);

   return gst_cuda_vtable.CuInit (Flags);
 }
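As context for the long run of one-line hunks that follows: the loader keeps every driver entry point in a function-pointer table (GstNvCodecCudaVTable) that is filled exactly once via g_module_open () / g_module_symbol (), and each exported Cu* wrapper simply asserts that its pointer was resolved and forwards the call. A heavily reduced sketch of that shape, with a made-up two-entry vtable but the real GModule API:

#include <gmodule.h>

typedef int (*example_init_fn) (unsigned int flags);

typedef struct
{
  gboolean loaded;
  example_init_fn init;
} ExampleVTable;                 /* hypothetical stand-in for GstNvCodecCudaVTable */

static ExampleVTable example_vtable = { 0, };

static void
example_load_once_func (void)
{
  GModule *module = g_module_open ("libexample.so.1", G_MODULE_BIND_LAZY);

  if (module == nullptr)
    return;

  if (!g_module_symbol (module, "example_init", (gpointer *) &example_vtable.init)) {
    g_module_close (module);
    return;
  }

  example_vtable.loaded = TRUE;
}

static int
ExampleInit (unsigned int flags)  /* same shape as the Cu* wrappers below */
{
  g_assert (example_vtable.init != nullptr);

  return example_vtable.init (flags);
}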
@@ -254,7 +248,7 @@ CuInit (unsigned int Flags)
 CUresult CUDAAPI
 CuGetErrorName (CUresult error, const char **pStr)
 {
-  g_assert (gst_cuda_vtable.CuGetErrorName != NULL);
+  g_assert (gst_cuda_vtable.CuGetErrorName != nullptr);

   return gst_cuda_vtable.CuGetErrorName (error, pStr);
 }
@@ -262,7 +256,7 @@ CuGetErrorName (CUresult error, const char **pStr)
 CUresult CUDAAPI
 CuGetErrorString (CUresult error, const char **pStr)
 {
-  g_assert (gst_cuda_vtable.CuGetErrorString != NULL);
+  g_assert (gst_cuda_vtable.CuGetErrorString != nullptr);

   return gst_cuda_vtable.CuGetErrorString (error, pStr);
 }
@@ -270,7 +264,7 @@ CuGetErrorString (CUresult error, const char **pStr)
 CUresult CUDAAPI
 CuCtxCreate (CUcontext * pctx, unsigned int flags, CUdevice dev)
 {
-  g_assert (gst_cuda_vtable.CuCtxCreate != NULL);
+  g_assert (gst_cuda_vtable.CuCtxCreate != nullptr);

   return gst_cuda_vtable.CuCtxCreate (pctx, flags, dev);
 }
@@ -278,7 +272,7 @@ CuCtxCreate (CUcontext * pctx, unsigned int flags, CUdevice dev)
 CUresult CUDAAPI
 CuCtxDestroy (CUcontext ctx)
 {
-  g_assert (gst_cuda_vtable.CuCtxDestroy != NULL);
+  g_assert (gst_cuda_vtable.CuCtxDestroy != nullptr);

   return gst_cuda_vtable.CuCtxDestroy (ctx);
 }
@@ -286,7 +280,7 @@ CuCtxDestroy (CUcontext ctx)
 CUresult CUDAAPI
 CuCtxPopCurrent (CUcontext * pctx)
 {
-  g_assert (gst_cuda_vtable.CuCtxPopCurrent != NULL);
+  g_assert (gst_cuda_vtable.CuCtxPopCurrent != nullptr);

   return gst_cuda_vtable.CuCtxPopCurrent (pctx);
 }
@@ -294,7 +288,7 @@ CuCtxPopCurrent (CUcontext * pctx)
 CUresult CUDAAPI
 CuCtxPushCurrent (CUcontext ctx)
 {
-  g_assert (gst_cuda_vtable.CuCtxPushCurrent != NULL);
+  g_assert (gst_cuda_vtable.CuCtxPushCurrent != nullptr);

   return gst_cuda_vtable.CuCtxPushCurrent (ctx);
 }
@@ -302,7 +296,7 @@ CuCtxPushCurrent (CUcontext ctx)
 CUresult CUDAAPI
 CuCtxEnablePeerAccess (CUcontext peerContext, unsigned int Flags)
 {
-  g_assert (gst_cuda_vtable.CuCtxEnablePeerAccess != NULL);
+  g_assert (gst_cuda_vtable.CuCtxEnablePeerAccess != nullptr);

   return gst_cuda_vtable.CuCtxEnablePeerAccess (peerContext, Flags);
 }
@@ -310,7 +304,7 @@ CuCtxEnablePeerAccess (CUcontext peerContext, unsigned int Flags)
 CUresult CUDAAPI
 CuCtxDisablePeerAccess (CUcontext peerContext)
 {
-  g_assert (gst_cuda_vtable.CuCtxDisablePeerAccess != NULL);
+  g_assert (gst_cuda_vtable.CuCtxDisablePeerAccess != nullptr);

   return gst_cuda_vtable.CuCtxDisablePeerAccess (peerContext);
 }
@@ -319,7 +313,7 @@ CUresult CUDAAPI
 CuGraphicsMapResources (unsigned int count, CUgraphicsResource * resources,
     CUstream hStream)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsMapResources != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsMapResources != nullptr);

   return gst_cuda_vtable.CuGraphicsMapResources (count, resources, hStream);
 }
@@ -328,7 +322,7 @@ CUresult CUDAAPI
 CuGraphicsUnmapResources (unsigned int count, CUgraphicsResource * resources,
     CUstream hStream)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsUnmapResources != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsUnmapResources != nullptr);

   return gst_cuda_vtable.CuGraphicsUnmapResources (count, resources, hStream);
 }
@@ -336,7 +330,7 @@ CuGraphicsUnmapResources (unsigned int count, CUgraphicsResource * resources,
 CUresult CUDAAPI
 CuGraphicsResourceSetMapFlags (CUgraphicsResource resource, unsigned int flags)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsResourceSetMapFlags != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsResourceSetMapFlags != nullptr);

   return gst_cuda_vtable.CuGraphicsResourceSetMapFlags (resource, flags);
 }
@@ -345,7 +339,7 @@ CUresult CUDAAPI
 CuGraphicsSubResourceGetMappedArray (CUarray * pArray,
     CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsSubResourceGetMappedArray != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsSubResourceGetMappedArray != nullptr);

   return gst_cuda_vtable.CuGraphicsSubResourceGetMappedArray (pArray, resource,
       arrayIndex, mipLevel);
@@ -356,7 +350,7 @@ CUresult CUDAAPI
 CuGraphicsResourceGetMappedPointer (CUdeviceptr * pDevPtr, size_t * pSize,
     CUgraphicsResource resource)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsResourceGetMappedPointer != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsResourceGetMappedPointer != nullptr);

   return gst_cuda_vtable.CuGraphicsResourceGetMappedPointer (pDevPtr, pSize,
       resource);
@@ -366,7 +360,7 @@ CuGraphicsResourceGetMappedPointer (CUdeviceptr * pDevPtr, size_t * pSize,
 CUresult CUDAAPI
 CuGraphicsUnregisterResource (CUgraphicsResource resource)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsUnregisterResource != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsUnregisterResource != nullptr);

   return gst_cuda_vtable.CuGraphicsUnregisterResource (resource);
 }
@@ -374,7 +368,7 @@ CuGraphicsUnregisterResource (CUgraphicsResource resource)
 CUresult CUDAAPI
 CuMemAlloc (CUdeviceptr * dptr, unsigned int bytesize)
 {
-  g_assert (gst_cuda_vtable.CuMemAlloc != NULL);
+  g_assert (gst_cuda_vtable.CuMemAlloc != nullptr);

   return gst_cuda_vtable.CuMemAlloc (dptr, bytesize);
 }
@@ -384,7 +378,7 @@ CUresult CUDAAPI
 CuMemAllocPitch (CUdeviceptr * dptr, size_t * pPitch, size_t WidthInBytes,
     size_t Height, unsigned int ElementSizeBytes)
 {
-  g_assert (gst_cuda_vtable.CuMemAllocPitch != NULL);
+  g_assert (gst_cuda_vtable.CuMemAllocPitch != nullptr);

   return gst_cuda_vtable.CuMemAllocPitch (dptr, pPitch, WidthInBytes, Height,
       ElementSizeBytes);
@@ -394,7 +388,7 @@ CuMemAllocPitch (CUdeviceptr * dptr, size_t * pPitch, size_t WidthInBytes,
 CUresult CUDAAPI
 CuMemAllocHost (void **pp, unsigned int bytesize)
 {
-  g_assert (gst_cuda_vtable.CuMemAllocHost != NULL);
+  g_assert (gst_cuda_vtable.CuMemAllocHost != nullptr);

   return gst_cuda_vtable.CuMemAllocHost (pp, bytesize);
 }
@@ -402,7 +396,7 @@ CuMemAllocHost (void **pp, unsigned int bytesize)
 CUresult CUDAAPI
 CuMemcpy2D (const CUDA_MEMCPY2D * pCopy)
 {
-  g_assert (gst_cuda_vtable.CuMemcpy2D != NULL);
+  g_assert (gst_cuda_vtable.CuMemcpy2D != nullptr);

   return gst_cuda_vtable.CuMemcpy2D (pCopy);
 }
@@ -410,7 +404,7 @@ CuMemcpy2D (const CUDA_MEMCPY2D * pCopy)
 CUresult CUDAAPI
 CuMemcpy2DAsync (const CUDA_MEMCPY2D * pCopy, CUstream hStream)
 {
-  g_assert (gst_cuda_vtable.CuMemcpy2DAsync != NULL);
+  g_assert (gst_cuda_vtable.CuMemcpy2DAsync != nullptr);

   return gst_cuda_vtable.CuMemcpy2DAsync (pCopy, hStream);
 }
@@ -418,7 +412,7 @@ CuMemcpy2DAsync (const CUDA_MEMCPY2D * pCopy, CUstream hStream)
 CUresult CUDAAPI
 CuMemFree (CUdeviceptr dptr)
 {
-  g_assert (gst_cuda_vtable.CuMemFree != NULL);
+  g_assert (gst_cuda_vtable.CuMemFree != nullptr);

   return gst_cuda_vtable.CuMemFree (dptr);
 }
@@ -426,7 +420,7 @@ CuMemFree (CUdeviceptr dptr)
 CUresult CUDAAPI
 CuMemFreeHost (void *p)
 {
-  g_assert (gst_cuda_vtable.CuMemFreeHost != NULL);
+  g_assert (gst_cuda_vtable.CuMemFreeHost != nullptr);

   return gst_cuda_vtable.CuMemFreeHost (p);
 }
@@ -434,7 +428,7 @@ CuMemFreeHost (void *p)
 CUresult CUDAAPI
 CuStreamCreate (CUstream * phStream, unsigned int Flags)
 {
-  g_assert (gst_cuda_vtable.CuStreamCreate != NULL);
+  g_assert (gst_cuda_vtable.CuStreamCreate != nullptr);

   return gst_cuda_vtable.CuStreamCreate (phStream, Flags);
 }
@@ -442,7 +436,7 @@ CuStreamCreate (CUstream * phStream, unsigned int Flags)
 CUresult CUDAAPI
 CuStreamDestroy (CUstream hStream)
 {
-  g_assert (gst_cuda_vtable.CuStreamDestroy != NULL);
+  g_assert (gst_cuda_vtable.CuStreamDestroy != nullptr);

   return gst_cuda_vtable.CuStreamDestroy (hStream);
 }
@@ -450,7 +444,7 @@ CuStreamDestroy (CUstream hStream)
 CUresult CUDAAPI
 CuStreamSynchronize (CUstream hStream)
 {
-  g_assert (gst_cuda_vtable.CuStreamSynchronize != NULL);
+  g_assert (gst_cuda_vtable.CuStreamSynchronize != nullptr);

   return gst_cuda_vtable.CuStreamSynchronize (hStream);
 }
@@ -458,7 +452,7 @@ CuStreamSynchronize (CUstream hStream)
 CUresult CUDAAPI
 CuDeviceGet (CUdevice * device, int ordinal)
 {
-  g_assert (gst_cuda_vtable.CuDeviceGet != NULL);
+  g_assert (gst_cuda_vtable.CuDeviceGet != nullptr);

   return gst_cuda_vtable.CuDeviceGet (device, ordinal);
 }
@@ -466,7 +460,7 @@ CuDeviceGet (CUdevice * device, int ordinal)
 CUresult CUDAAPI
 CuDeviceGetCount (int *count)
 {
-  g_assert (gst_cuda_vtable.CuDeviceGetCount != NULL);
+  g_assert (gst_cuda_vtable.CuDeviceGetCount != nullptr);

   return gst_cuda_vtable.CuDeviceGetCount (count);
 }
@@ -474,7 +468,7 @@ CuDeviceGetCount (int *count)
 CUresult CUDAAPI
 CuDeviceGetName (char *name, int len, CUdevice dev)
 {
-  g_assert (gst_cuda_vtable.CuDeviceGetName != NULL);
+  g_assert (gst_cuda_vtable.CuDeviceGetName != nullptr);

   return gst_cuda_vtable.CuDeviceGetName (name, len, dev);
 }
@@ -482,7 +476,7 @@ CuDeviceGetName (char *name, int len, CUdevice dev)
 CUresult CUDAAPI
 CuDeviceGetAttribute (int *pi, CUdevice_attribute attrib, CUdevice dev)
 {
-  g_assert (gst_cuda_vtable.CuDeviceGetAttribute != NULL);
+  g_assert (gst_cuda_vtable.CuDeviceGetAttribute != nullptr);

   return gst_cuda_vtable.CuDeviceGetAttribute (pi, attrib, dev);
 }
@@ -490,7 +484,7 @@ CuDeviceGetAttribute (int *pi, CUdevice_attribute attrib, CUdevice dev)
 CUresult CUDAAPI
 CuDeviceCanAccessPeer (int *canAccessPeer, CUdevice dev, CUdevice peerDev)
 {
-  g_assert (gst_cuda_vtable.CuDeviceCanAccessPeer != NULL);
+  g_assert (gst_cuda_vtable.CuDeviceCanAccessPeer != nullptr);

   return gst_cuda_vtable.CuDeviceCanAccessPeer (canAccessPeer, dev, peerDev);
 }
@@ -498,7 +492,7 @@ CuDeviceCanAccessPeer (int *canAccessPeer, CUdevice dev, CUdevice peerDev)
 CUresult CUDAAPI
 CuDriverGetVersion (int *driverVersion)
 {
-  g_assert (gst_cuda_vtable.CuDriverGetVersion != NULL);
+  g_assert (gst_cuda_vtable.CuDriverGetVersion != nullptr);

   return gst_cuda_vtable.CuDriverGetVersion (driverVersion);
 }
@@ -506,7 +500,7 @@ CuDriverGetVersion (int *driverVersion)
 CUresult CUDAAPI
 CuModuleLoadData (CUmodule * module, const void *image)
 {
-  g_assert (gst_cuda_vtable.CuModuleLoadData != NULL);
+  g_assert (gst_cuda_vtable.CuModuleLoadData != nullptr);

   return gst_cuda_vtable.CuModuleLoadData (module, image);
 }
@@ -514,7 +508,7 @@ CuModuleLoadData (CUmodule * module, const void *image)
 CUresult CUDAAPI
 CuModuleUnload (CUmodule module)
 {
-  g_assert (gst_cuda_vtable.CuModuleUnload != NULL);
+  g_assert (gst_cuda_vtable.CuModuleUnload != nullptr);

   return gst_cuda_vtable.CuModuleUnload (module);
 }
@@ -522,7 +516,7 @@ CuModuleUnload (CUmodule module)
 CUresult CUDAAPI
 CuModuleGetFunction (CUfunction * hfunc, CUmodule hmod, const char *name)
 {
-  g_assert (gst_cuda_vtable.CuModuleGetFunction != NULL);
+  g_assert (gst_cuda_vtable.CuModuleGetFunction != nullptr);

   return gst_cuda_vtable.CuModuleGetFunction (hfunc, hmod, name);
 }
@@ -532,7 +526,7 @@ CuTexObjectCreate (CUtexObject * pTexObject,
     const CUDA_RESOURCE_DESC * pResDesc, const CUDA_TEXTURE_DESC * pTexDesc,
     const CUDA_RESOURCE_VIEW_DESC * pResViewDesc)
 {
-  g_assert (gst_cuda_vtable.CuTexObjectCreate != NULL);
+  g_assert (gst_cuda_vtable.CuTexObjectCreate != nullptr);

   return gst_cuda_vtable.CuTexObjectCreate (pTexObject, pResDesc, pTexDesc,
       pResViewDesc);
@@ -541,7 +535,7 @@ CuTexObjectCreate (CUtexObject * pTexObject,
 CUresult CUDAAPI
 CuTexObjectDestroy (CUtexObject texObject)
 {
-  g_assert (gst_cuda_vtable.CuTexObjectDestroy != NULL);
+  g_assert (gst_cuda_vtable.CuTexObjectDestroy != nullptr);

   return gst_cuda_vtable.CuTexObjectDestroy (texObject);
 }
@@ -553,7 +547,7 @@ CuLaunchKernel (CUfunction f, unsigned int gridDimX,
     unsigned int sharedMemBytes, CUstream hStream, void **kernelParams,
     void **extra)
 {
-  g_assert (gst_cuda_vtable.CuLaunchKernel != NULL);
+  g_assert (gst_cuda_vtable.CuLaunchKernel != nullptr);

   return gst_cuda_vtable.CuLaunchKernel (f, gridDimX, gridDimY, gridDimZ,
       blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams,
@@ -565,7 +559,7 @@ CUresult CUDAAPI
 CuGraphicsGLRegisterImage (CUgraphicsResource * pCudaResource,
     unsigned int image, unsigned int target, unsigned int Flags)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsGLRegisterImage != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsGLRegisterImage != nullptr);

   return gst_cuda_vtable.CuGraphicsGLRegisterImage (pCudaResource, image,
       target, Flags);
@@ -575,7 +569,7 @@ CUresult CUDAAPI
 CuGraphicsGLRegisterBuffer (CUgraphicsResource * pCudaResource,
     unsigned int buffer, unsigned int Flags)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsGLRegisterBuffer != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsGLRegisterBuffer != nullptr);

   return gst_cuda_vtable.CuGraphicsGLRegisterBuffer (pCudaResource, buffer,
       Flags);
@@ -585,7 +579,7 @@ CUresult CUDAAPI
 CuGLGetDevices (unsigned int *pCudaDeviceCount, CUdevice * pCudaDevices,
     unsigned int cudaDeviceCount, CUGLDeviceList deviceList)
 {
-  g_assert (gst_cuda_vtable.CuGLGetDevices != NULL);
+  g_assert (gst_cuda_vtable.CuGLGetDevices != nullptr);

   return gst_cuda_vtable.CuGLGetDevices (pCudaDeviceCount, pCudaDevices,
       cudaDeviceCount, deviceList);
@@ -597,7 +591,7 @@ CUresult CUDAAPI
 CuGraphicsD3D11RegisterResource (CUgraphicsResource * pCudaResource,
     ID3D11Resource * pD3DResource, unsigned int Flags)
 {
-  g_assert (gst_cuda_vtable.CuGraphicsD3D11RegisterResource != NULL);
+  g_assert (gst_cuda_vtable.CuGraphicsD3D11RegisterResource != nullptr);

   return gst_cuda_vtable.CuGraphicsD3D11RegisterResource (pCudaResource,
       pD3DResource, Flags);
@@ -606,7 +600,7 @@ CuGraphicsD3D11RegisterResource (CUgraphicsResource * pCudaResource,
 CUresult CUDAAPI
 CuD3D11GetDevice (CUdevice * device, IDXGIAdapter * pAdapter)
 {
-  g_assert (gst_cuda_vtable.CuD3D11GetDevice != NULL);
+  g_assert (gst_cuda_vtable.CuD3D11GetDevice != nullptr);

   return gst_cuda_vtable.CuD3D11GetDevice (device, pAdapter);
 }
@@ -616,7 +610,7 @@ CuD3D11GetDevices (unsigned int *pCudaDeviceCount,
     CUdevice * pCudaDevices, unsigned int cudaDeviceCount,
     ID3D11Device * pD3D11Device, CUd3d11DeviceList deviceList)
 {
-  g_assert (gst_cuda_vtable.CuD3D11GetDevices != NULL);
+  g_assert (gst_cuda_vtable.CuD3D11GetDevices != nullptr);

   return gst_cuda_vtable.CuD3D11GetDevices (pCudaDeviceCount, pCudaDevices,
       cudaDeviceCount, pD3D11Device, deviceList);
@@ -23,28 +23,31 @@

 #include "gstcudamemory.h"
 #include "gstcudautils.h"
+#include "gstcuda-private.h"

 #include <string.h>

 GST_DEBUG_CATEGORY_STATIC (cuda_allocator_debug);
 #define GST_CAT_DEFAULT cuda_allocator_debug

-static GstAllocator *_gst_cuda_allocator = NULL;
+static GstAllocator *_gst_cuda_allocator = nullptr;

+/* *INDENT-OFF* */
 struct _GstCudaMemoryPrivate
 {
-  CUdeviceptr data;
-  void *staging;
+  CUdeviceptr data = 0;
+  void *staging = nullptr;

   /* params used for cuMemAllocPitch */
-  gsize pitch;
-  guint width_in_bytes;
-  guint height;
+  gsize pitch = 0;
+  guint width_in_bytes = 0;
+  guint height = 0;

-  GMutex lock;
+  std::mutex lock;

-  GstCudaStream *stream;
+  GstCudaStream *stream = nullptr;
 };
+/* *INDENT-ON* */

 struct _GstCudaAllocatorPrivate
 {
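Because GstCudaMemoryPrivate now holds a std::mutex, it can no longer be created with g_new0 () and freed with g_free () -- that would skip the constructor and destructor -- so the allocator below switches to new/delete, and the = 0 / = nullptr default member initializers take over the zero-filling that g_new0 () used to provide. A reduced sketch of the same idea (type and field names hypothetical):

#include <mutex>

struct ExamplePrivate
{
  /* Default member initializers replace g_new0 ()'s zero fill. */
  void *staging = nullptr;
  unsigned int height = 0;

  /* std::mutex is neither trivially constructible nor destructible, so the
   * struct has to be created with new and destroyed with delete. */
  std::mutex lock;
};

static ExamplePrivate *
example_private_new (void)
{
  return new ExamplePrivate ();
}

static void
example_private_free (ExamplePrivate * priv)
{
  delete priv;
}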
@@ -67,7 +70,7 @@ static GstMemory *
 gst_cuda_allocator_dummy_alloc (GstAllocator * allocator, gsize size,
     GstAllocationParams * params)
 {
-  g_return_val_if_reached (NULL);
+  g_return_val_if_reached (nullptr);
 }

 static void
@@ -88,7 +91,8 @@ gst_cuda_allocator_init (GstCudaAllocator * allocator)
   GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);
   GstCudaAllocatorPrivate *priv;

-  priv = allocator->priv = gst_cuda_allocator_get_instance_private (allocator);
+  priv = allocator->priv = (GstCudaAllocatorPrivate *)
+      gst_cuda_allocator_get_instance_private (allocator);

   alloc->mem_type = GST_CUDA_MEMORY_TYPE_NAME;

@@ -116,35 +120,35 @@ gst_cuda_allocator_alloc_internal (GstCudaAllocator * self,
   GstVideoInfo *alloc_info;

   if (!gst_cuda_context_push (context))
-    return NULL;
+    return nullptr;

   ret = gst_cuda_result (CuMemAllocPitch (&data, &pitch, width_in_bytes,
           alloc_height, 16));
-  gst_cuda_context_pop (NULL);
+  gst_cuda_context_pop (nullptr);

   if (!ret) {
     GST_ERROR_OBJECT (self, "Failed to allocate CUDA memory");
-    return NULL;
+    return nullptr;
   }

   mem = g_new0 (GstCudaMemory, 1);
-  mem->priv = priv = g_new0 (GstCudaMemoryPrivate, 1);
+  mem->priv = priv = new GstCudaMemoryPrivate ();

   priv->data = data;
   priv->pitch = pitch;
   priv->width_in_bytes = width_in_bytes;
   priv->height = alloc_height;
-  g_mutex_init (&priv->lock);
   if (stream)
     priv->stream = gst_cuda_stream_ref (stream);

-  mem->context = gst_object_ref (context);
+  mem->context = (GstCudaContext *) gst_object_ref (context);
   mem->info = *info;
   mem->info.size = pitch * alloc_height;

   alloc_info = &mem->info;
-  gst_memory_init (GST_MEMORY_CAST (mem), 0, GST_ALLOCATOR_CAST (self),
-      NULL, alloc_info->size, 0, 0, alloc_info->size);
+  gst_memory_init (GST_MEMORY_CAST (mem), (GstMemoryFlags) 0,
+      GST_ALLOCATOR_CAST (self), nullptr, alloc_info->size, 0, 0,
+      alloc_info->size);

   switch (GST_VIDEO_INFO_FORMAT (info)) {
     case GST_VIDEO_FORMAT_I420:
@@ -222,7 +226,7 @@ gst_cuda_allocator_alloc_internal (GstCudaAllocator * self,
           gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (info)));
       g_assert_not_reached ();
       gst_memory_unref (GST_MEMORY_CAST (mem));
-      return NULL;
+      return nullptr;
   }

   return GST_MEMORY_CAST (mem);
@@ -246,13 +250,13 @@ gst_cuda_allocator_free (GstAllocator * allocator, GstMemory * memory)

   if (priv->staging)
     gst_cuda_result (CuMemFreeHost (priv->staging));
-  gst_cuda_context_pop (NULL);
+  gst_cuda_context_pop (nullptr);

   gst_clear_cuda_stream (&priv->stream);
   gst_object_unref (mem->context);

-  g_mutex_clear (&priv->lock);
-  g_free (mem->priv);
+  delete mem->priv;

   g_free (mem);
 }
@@ -294,7 +298,7 @@ gst_cuda_memory_upload (GstCudaAllocator * self, GstCudaMemory * mem)
   } else {
     GST_MINI_OBJECT_FLAG_SET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_SYNC);
   }
-  gst_cuda_context_pop (NULL);
+  gst_cuda_context_pop (nullptr);

   if (!ret)
     GST_ERROR_OBJECT (self, "Failed to upload memory");
@@ -323,7 +327,7 @@ gst_cuda_memory_download (GstCudaAllocator * self, GstCudaMemory * mem)
             GST_MEMORY_CAST (mem)->size));
     if (!ret) {
       GST_ERROR_OBJECT (self, "Failed to allocate staging memory");
-      gst_cuda_context_pop (NULL);
+      gst_cuda_context_pop (nullptr);
       return FALSE;
     }
   }
@@ -341,7 +345,7 @@ gst_cuda_memory_download (GstCudaAllocator * self, GstCudaMemory * mem)
   ret = gst_cuda_result (CuMemcpy2DAsync (&param, stream));
   /* For CPU access, sync immediately */
   CuStreamSynchronize (stream);
-  gst_cuda_context_pop (NULL);
+  gst_cuda_context_pop (nullptr);
   GST_MINI_OBJECT_FLAG_UNSET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_SYNC);

   if (!ret)
@@ -356,12 +360,13 @@ cuda_mem_map (GstMemory * mem, gsize maxsize, GstMapFlags flags)
   GstCudaAllocator *self = GST_CUDA_ALLOCATOR (mem->allocator);
   GstCudaMemory *cmem = GST_CUDA_MEMORY_CAST (mem);
   GstCudaMemoryPrivate *priv = cmem->priv;
-  gpointer ret = NULL;
+  gpointer ret = nullptr;
+
+  std::lock_guard < std::mutex > lk (priv->lock);

-  g_mutex_lock (&priv->lock);
   if ((flags & GST_MAP_CUDA) == GST_MAP_CUDA) {
     if (!gst_cuda_memory_upload (self, cmem))
-      goto out;
+      return nullptr;

     GST_MEMORY_FLAG_UNSET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_UPLOAD);

@@ -372,8 +377,7 @@ cuda_mem_map (GstMemory * mem, gsize maxsize, GstMapFlags flags)
       GST_MINI_OBJECT_FLAG_SET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_SYNC);
     }

-    ret = (gpointer) priv->data;
-    goto out;
+    return (gpointer) priv->data;
   }

   /* First CPU access, must be downloaded */
@@ -381,7 +385,7 @@ cuda_mem_map (GstMemory * mem, gsize maxsize, GstMapFlags flags)
     GST_MINI_OBJECT_FLAG_SET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_DOWNLOAD);

   if (!gst_cuda_memory_download (self, cmem))
-    goto out;
+    return nullptr;

   ret = priv->staging;

@@ -390,9 +394,6 @@ cuda_mem_map (GstMemory * mem, gsize maxsize, GstMapFlags flags)

   GST_MEMORY_FLAG_UNSET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_DOWNLOAD);

-out:
-  g_mutex_unlock (&priv->lock);
-
   return ret;
 }

@@ -402,20 +403,16 @@ cuda_mem_unmap_full (GstMemory * mem, GstMapInfo * info)
   GstCudaMemory *cmem = GST_CUDA_MEMORY_CAST (mem);
   GstCudaMemoryPrivate *priv = cmem->priv;

-  g_mutex_lock (&priv->lock);
+  std::lock_guard < std::mutex > lk (priv->lock);
   if ((info->flags & GST_MAP_CUDA) == GST_MAP_CUDA) {
     if ((info->flags & GST_MAP_WRITE) == GST_MAP_WRITE)
       GST_MINI_OBJECT_FLAG_SET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_DOWNLOAD);

-    goto out;
+    return;
   }

   if ((info->flags & GST_MAP_WRITE) == GST_MAP_WRITE)
     GST_MINI_OBJECT_FLAG_SET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_UPLOAD);
-
-out:
-  g_mutex_unlock (&priv->lock);
-
-  return;
 }
@@ -427,7 +424,7 @@ cuda_mem_copy (GstMemory * mem, gssize offset, gssize size)
   GstCudaContext *context = src_mem->context;
   GstMapInfo src_info, dst_info;
   CUDA_MEMCPY2D param = { 0, };
-  GstMemory *copy = NULL;
+  GstMemory *copy = nullptr;
   gboolean ret;
   GstCudaStream *stream = src_mem->priv->stream;
   CUstream stream_handle = gst_cuda_stream_get_handle (stream);
@@ -450,20 +447,22 @@ cuda_mem_copy (GstMemory * mem, gssize offset, gssize size)

   if (!copy) {
     GST_ERROR_OBJECT (self, "Failed to allocate memory for copying");
-    return NULL;
+    return nullptr;
   }

-  if (!gst_memory_map (mem, &src_info, GST_MAP_READ | GST_MAP_CUDA)) {
+  if (!gst_memory_map (mem, &src_info,
+          (GstMapFlags) (GST_MAP_READ | GST_MAP_CUDA))) {
     GST_ERROR_OBJECT (self, "Failed to map src memory");
     gst_memory_unref (copy);
-    return NULL;
+    return nullptr;
   }

-  if (!gst_memory_map (copy, &dst_info, GST_MAP_WRITE | GST_MAP_CUDA)) {
+  if (!gst_memory_map (copy, &dst_info,
+          (GstMapFlags) (GST_MAP_WRITE | GST_MAP_CUDA))) {
     GST_ERROR_OBJECT (self, "Failed to map dst memory");
     gst_memory_unmap (mem, &src_info);
     gst_memory_unref (copy);
-    return NULL;
+    return nullptr;
   }

   if (!gst_cuda_context_push (context)) {
@@ -471,7 +470,7 @@ cuda_mem_copy (GstMemory * mem, gssize offset, gssize size)
     gst_memory_unmap (mem, &src_info);
     gst_memory_unmap (copy, &dst_info);

-    return NULL;
+    return nullptr;
   }

   param.srcMemoryType = CU_MEMORYTYPE_DEVICE;
@@ -486,7 +485,7 @@ cuda_mem_copy (GstMemory * mem, gssize offset, gssize size)

   ret = gst_cuda_result (CuMemcpy2DAsync (&param, stream_handle));
   CuStreamSynchronize (stream_handle);
-  gst_cuda_context_pop (NULL);
+  gst_cuda_context_pop (nullptr);

   gst_memory_unmap (mem, &src_info);
   gst_memory_unmap (copy, &dst_info);
@@ -494,7 +493,7 @@ cuda_mem_copy (GstMemory * mem, gssize offset, gssize size)
   if (!ret) {
     GST_ERROR_OBJECT (self, "Failed to copy memory");
     gst_memory_unref (copy);
-    return NULL;
+    return nullptr;
   }

   return copy;
@@ -510,17 +509,14 @@ cuda_mem_copy (GstMemory * mem, gssize offset, gssize size)
 void
 gst_cuda_memory_init_once (void)
 {
-  static gsize _init = 0;
-
-  if (g_once_init_enter (&_init)) {
+  GST_CUDA_CALL_ONCE_BEGIN {
     _gst_cuda_allocator =
-        (GstAllocator *) g_object_new (GST_TYPE_CUDA_ALLOCATOR, NULL);
+        (GstAllocator *) g_object_new (GST_TYPE_CUDA_ALLOCATOR, nullptr);
     gst_object_ref_sink (_gst_cuda_allocator);
     gst_object_ref (_gst_cuda_allocator);

     gst_allocator_register (GST_CUDA_MEMORY_TYPE_NAME, _gst_cuda_allocator);
-    g_once_init_leave (&_init, 1);
-  }
+  } GST_CUDA_CALL_ONCE_END;
 }

 /**
@@ -534,7 +530,7 @@ gst_cuda_memory_init_once (void)
 gboolean
 gst_is_cuda_memory (GstMemory * mem)
 {
-  return mem != NULL && mem->allocator != NULL &&
+  return mem != nullptr && mem->allocator != nullptr &&
       GST_IS_CUDA_ALLOCATOR (mem->allocator);
 }

@@ -552,7 +548,7 @@ gst_is_cuda_memory (GstMemory * mem)
 GstCudaStream *
 gst_cuda_memory_get_stream (GstCudaMemory * mem)
 {
-  g_return_val_if_fail (gst_is_cuda_memory ((GstMemory *) mem), NULL);
+  g_return_val_if_fail (gst_is_cuda_memory ((GstMemory *) mem), nullptr);

   return mem->priv->stream;
 }
@@ -576,16 +572,14 @@ gst_cuda_memory_sync (GstCudaMemory * mem)
   if (!priv->stream)
     return;

-  g_mutex_lock (&priv->lock);
+  std::lock_guard < std::mutex > lk (priv->lock);
   if (GST_MEMORY_FLAG_IS_SET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_SYNC)) {
     GST_MEMORY_FLAG_UNSET (mem, GST_CUDA_MEMORY_TRANSFER_NEED_SYNC);
     if (gst_cuda_context_push (mem->context)) {
       CuStreamSynchronize (gst_cuda_stream_get_handle (priv->stream));
-      gst_cuda_context_pop (NULL);
+      gst_cuda_context_pop (nullptr);
     }
   }
-
-  g_mutex_unlock (&priv->lock);
 }

 /**
@@ -605,14 +599,14 @@ gst_cuda_allocator_alloc (GstCudaAllocator * allocator,
 {
   guint alloc_height;

-  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), NULL);
-  g_return_val_if_fail (!stream || GST_IS_CUDA_STREAM (stream), NULL);
-  g_return_val_if_fail (info != NULL, NULL);
+  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), nullptr);
+  g_return_val_if_fail (!stream || GST_IS_CUDA_STREAM (stream), nullptr);
+  g_return_val_if_fail (info != nullptr, nullptr);

   if (stream && stream->context != context) {
     GST_ERROR_OBJECT (context,
         "stream object is holding different CUDA context");
-    return NULL;
+    return nullptr;
   }

   if (!allocator)
@@ -741,7 +735,7 @@ gst_cuda_pool_allocator_init (GstCudaPoolAllocator * allocator)
 {
   GstCudaPoolAllocatorPrivate *priv;

-  priv = allocator->priv =
+  priv = allocator->priv = (GstCudaPoolAllocatorPrivate *)
       gst_cuda_pool_allocator_get_instance_private (allocator);

   g_rec_mutex_init (&priv->lock);
@@ -878,7 +872,7 @@ gst_cuda_pool_allocator_free_memory (GstCudaPoolAllocator * self,
   g_atomic_int_add (&priv->cur_mems, -1);
   GST_LOG_OBJECT (self, "freeing memory %p (%u left)", mem, priv->cur_mems);

-  GST_MINI_OBJECT_CAST (mem)->dispose = NULL;
+  GST_MINI_OBJECT_CAST (mem)->dispose = nullptr;
   gst_memory_unref (mem);
 }

@@ -894,7 +888,7 @@ gst_cuda_pool_allocator_clear_queue (GstCudaPoolAllocator * self)
     /* Wait for outstanding operations */
     gst_cuda_context_push (self->context);
     CuStreamSynchronize (gst_cuda_stream_get_handle (self->stream));
-    gst_cuda_context_pop (NULL);
+    gst_cuda_context_pop (nullptr);
   }

   while ((memory = (GstMemory *) gst_atomic_queue_pop (priv->queue))) {
@@ -961,8 +955,8 @@ gst_cuda_pool_allocator_release_memory (GstCudaPoolAllocator * self,
 {
   GST_LOG_OBJECT (self, "Released memory %p", mem);

-  GST_MINI_OBJECT_CAST (mem)->dispose = NULL;
-  mem->allocator = gst_object_ref (_gst_cuda_allocator);
+  GST_MINI_OBJECT_CAST (mem)->dispose = nullptr;
+  mem->allocator = (GstAllocator *) gst_object_ref (_gst_cuda_allocator);

   /* keep it around in our queue */
   gst_atomic_queue_push (self->priv->queue, mem);
@@ -1121,13 +1115,14 @@ gst_cuda_pool_allocator_new (GstCudaContext * context, GstCudaStream * stream,
 {
   GstCudaPoolAllocator *self;

-  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), NULL);
-  g_return_val_if_fail (!stream || GST_IS_CUDA_STREAM (stream), NULL);
+  g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), nullptr);
+  g_return_val_if_fail (!stream || GST_IS_CUDA_STREAM (stream), nullptr);

-  self = g_object_new (GST_TYPE_CUDA_POOL_ALLOCATOR, NULL);
+  self = (GstCudaPoolAllocator *)
+      g_object_new (GST_TYPE_CUDA_POOL_ALLOCATOR, nullptr);
   gst_object_ref_sink (self);

-  self->context = gst_object_ref (context);
+  self->context = (GstCudaContext *) gst_object_ref (context);
   if (stream)
     self->stream = gst_cuda_stream_ref (stream);
   self->info = *info;
@@ -1167,7 +1162,7 @@ gst_cuda_pool_allocator_acquire_memory (GstCudaPoolAllocator * allocator,
     GstMemory *mem = *memory;
     /* Replace default allocator with ours */
     gst_object_unref (mem->allocator);
-    mem->allocator = gst_object_ref (allocator);
+    mem->allocator = (GstAllocator *) gst_object_ref (allocator);
     GST_MINI_OBJECT_CAST (mem)->dispose = gst_cuda_memory_release;
     allocator->priv->outstanding++;
   } else {
@ -26,6 +26,7 @@
|
|||
#include "gstcudaloader.h"
|
||||
#include <nvrtc.h>
|
||||
#include <gmodule.h>
|
||||
#include "gstcuda-private.h"
|
||||
|
||||
GST_DEBUG_CATEGORY_STATIC (gst_cuda_nvrtc_debug);
|
||||
#define GST_CAT_DEFAULT gst_cuda_nvrtc_debug
|
||||
|
@ -68,8 +69,8 @@ static GstCudaNvrtcVTable gst_cuda_nvrtc_vtable = { 0, };
|
|||
static GModule *
|
||||
gst_cuda_nvrtc_load_library_once_win32 (void)
|
||||
{
|
||||
gchar *dll_name = NULL;
|
||||
GModule *module = NULL;
|
||||
gchar *dll_name = nullptr;
|
||||
GModule *module = nullptr;
|
||||
gint cuda_version;
|
||||
gint cuda_major_version;
|
||||
gint cuda_minor_version;
|
||||
|
@ -79,7 +80,7 @@ gst_cuda_nvrtc_load_library_once_win32 (void)
|
|||
rst = CuDriverGetVersion (&cuda_version);
|
||||
if (rst != CUDA_SUCCESS) {
|
||||
GST_WARNING ("Couldn't get driver version, 0x%x", (guint) rst);
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
cuda_major_version = cuda_version / 1000;
|
||||
|
@ -121,14 +122,14 @@ gst_cuda_nvrtc_load_library_once_win32 (void)
|
|||
|
||||
g_free (dll_name);
|
||||
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
#endif
|
||||
|
||||
static gboolean
|
||||
gst_cuda_nvrtc_load_library_once (void)
|
||||
{
|
||||
GModule *module = NULL;
|
||||
GModule *module = nullptr;
|
||||
const gchar *filename_env;
|
||||
GstCudaNvrtcVTable *vtable;
|
||||
|
||||
|
@ -144,7 +145,7 @@ gst_cuda_nvrtc_load_library_once (void)
|
|||
#endif
|
||||
}
|
||||
|
||||
if (module == NULL) {
|
||||
if (module == nullptr) {
|
||||
GST_WARNING ("Could not open nvrtc library %s", g_module_error ());
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -181,15 +182,13 @@ error:
|
|||
gboolean
|
||||
gst_cuda_nvrtc_load_library (void)
|
||||
{
|
||||
static gsize init_once = 0;
|
||||
|
||||
if (g_once_init_enter (&init_once)) {
|
||||
GST_CUDA_CALL_ONCE_BEGIN {
|
||||
GST_DEBUG_CATEGORY_INIT (gst_cuda_nvrtc_debug, "cudanvrtc", 0,
|
||||
"CUDA runtime compiler");
|
||||
if (gst_cuda_load_library ())
|
||||
gst_cuda_nvrtc_load_library_once ();
|
||||
g_once_init_leave (&init_once, 1);
|
||||
}
|
||||
GST_CUDA_CALL_ONCE_END;
|
||||
|
||||
return gst_cuda_nvrtc_vtable.loaded;
|
||||
}
|
||||
|
@ -198,7 +197,7 @@ gst_cuda_nvrtc_load_library (void)
|
|||
static nvrtcResult
|
||||
NvrtcCompileProgram (nvrtcProgram prog, int numOptions, const char **options)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcCompileProgram != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcCompileProgram != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcCompileProgram (prog, numOptions, options);
|
||||
}
|
||||
|
@ -207,7 +206,7 @@ static nvrtcResult
|
|||
NvrtcCreateProgram (nvrtcProgram * prog, const char *src, const char *name,
|
||||
int numHeaders, const char **headers, const char **includeNames)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcCreateProgram != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcCreateProgram != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcCreateProgram (prog, src, name, numHeaders,
|
||||
headers, includeNames);
|
||||
|
@ -216,7 +215,7 @@ NvrtcCreateProgram (nvrtcProgram * prog, const char *src, const char *name,
|
|||
static nvrtcResult
|
||||
NvrtcDestroyProgram (nvrtcProgram * prog)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcDestroyProgram != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcDestroyProgram != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcDestroyProgram (prog);
|
||||
}
|
||||
|
@ -224,7 +223,7 @@ NvrtcDestroyProgram (nvrtcProgram * prog)
|
|||
static nvrtcResult
|
||||
NvrtcGetPTX (nvrtcProgram prog, char *ptx)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetPTX != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetPTX != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcGetPTX (prog, ptx);
|
||||
}
|
||||
|
@ -232,7 +231,7 @@ NvrtcGetPTX (nvrtcProgram prog, char *ptx)
|
|||
static nvrtcResult
|
||||
NvrtcGetPTXSize (nvrtcProgram prog, size_t *ptxSizeRet)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetPTXSize != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetPTXSize != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcGetPTXSize (prog, ptxSizeRet);
|
||||
}
|
||||
|
@ -240,7 +239,7 @@ NvrtcGetPTXSize (nvrtcProgram prog, size_t *ptxSizeRet)
|
|||
static nvrtcResult
|
||||
NvrtcGetProgramLog (nvrtcProgram prog, char *log)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetProgramLog != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetProgramLog != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcGetProgramLog (prog, log);
|
||||
}
|
||||
|
@ -248,7 +247,7 @@ NvrtcGetProgramLog (nvrtcProgram prog, char *log)
|
|||
static nvrtcResult
|
||||
NvrtcGetProgramLogSize (nvrtcProgram prog, size_t *logSizeRet)
|
||||
{
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetProgramLogSize != NULL);
|
||||
g_assert (gst_cuda_nvrtc_vtable.NvrtcGetProgramLogSize != nullptr);
|
||||
|
||||
return gst_cuda_nvrtc_vtable.NvrtcGetProgramLogSize (prog, logSizeRet);
|
||||
}
|
||||
|
@@ -268,13 +267,13 @@ gst_cuda_nvrtc_compile (const gchar * source)
CUresult curet;
const gchar *opts[] = { "--gpu-architecture=compute_30" };
gsize ptx_size;
gchar *ptx = NULL;
gchar *ptx = nullptr;
int driverVersion;

g_return_val_if_fail (source != NULL, NULL);
g_return_val_if_fail (source != nullptr, nullptr);

if (!gst_cuda_nvrtc_load_library ()) {
return NULL;
return nullptr;
}

GST_TRACE ("CUDA kernel source \n%s", source);

@@ -282,16 +281,16 @@ gst_cuda_nvrtc_compile (const gchar * source)
curet = CuDriverGetVersion (&driverVersion);
if (curet != CUDA_SUCCESS) {
GST_ERROR ("Failed to query CUDA Driver version, ret %d", curet);
return NULL;
return nullptr;
}

GST_DEBUG ("CUDA Driver Version %d.%d", driverVersion / 1000,
(driverVersion % 1000) / 10);

ret = NvrtcCreateProgram (&prog, source, NULL, 0, NULL, NULL);
ret = NvrtcCreateProgram (&prog, source, nullptr, 0, nullptr, nullptr);
if (ret != NVRTC_SUCCESS) {
GST_ERROR ("couldn't create nvrtc program, ret %d", ret);
return NULL;
return nullptr;
}

/* Starting from CUDA 11, the lowest supported architecture is 5.2 */

@@ -305,7 +304,7 @@ gst_cuda_nvrtc_compile (const gchar * source)
GST_ERROR ("couldn't compile nvrtc program, ret %d", ret);
if (NvrtcGetProgramLogSize (prog, &log_size) == NVRTC_SUCCESS &&
log_size > 0) {
gchar *compile_log = g_alloca (log_size);
gchar *compile_log = (gchar *) g_alloca (log_size);
if (NvrtcGetProgramLog (prog, compile_log) == NVRTC_SUCCESS) {
GST_ERROR ("nvrtc compile log %s", compile_log);
}

@@ -321,7 +320,7 @@ gst_cuda_nvrtc_compile (const gchar * source)
goto error;
}

ptx = g_malloc0 (ptx_size);
ptx = (gchar *) g_malloc0 (ptx_size);
ret = NvrtcGetPTX (prog, ptx);
if (ret != NVRTC_SUCCESS) {
GST_ERROR ("couldn't get ptx, ret %d", ret);

@@ -339,5 +338,5 @@ gst_cuda_nvrtc_compile (const gchar * source)
error:
NvrtcDestroyProgram (&prog);

return NULL;
return nullptr;
}
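For context on the NVRTC calls touched above, the compile path amounts to create-program, compile, fetch the log on failure, then fetch the PTX. A minimal standalone sketch of that flow, assuming a direct link against libnvrtc rather than resolving the symbols at runtime through the loader vtable as this code does; the compute_52 architecture option is only an illustrative choice:

#include <nvrtc.h>
#include <cstdio>
#include <string>
#include <vector>

/* Illustrative only: compile CUDA C source to PTX with the NVRTC C API. */
static std::string
compile_to_ptx (const char *source)
{
  nvrtcProgram prog;
  if (nvrtcCreateProgram (&prog, source, nullptr, 0, nullptr, nullptr) != NVRTC_SUCCESS)
    return {};

  const char *opts[] = { "--gpu-architecture=compute_52" };
  if (nvrtcCompileProgram (prog, 1, opts) != NVRTC_SUCCESS) {
    /* On failure, the program log carries the compiler diagnostics. */
    size_t log_size = 0;
    if (nvrtcGetProgramLogSize (prog, &log_size) == NVRTC_SUCCESS && log_size > 1) {
      std::vector<char> log (log_size);
      if (nvrtcGetProgramLog (prog, log.data ()) == NVRTC_SUCCESS)
        std::fprintf (stderr, "nvrtc compile log: %s\n", log.data ());
    }
    nvrtcDestroyProgram (&prog);
    return {};
  }

  /* On success, query the PTX size, then copy the PTX out. */
  size_t ptx_size = 0;
  std::string ptx;
  if (nvrtcGetPTXSize (prog, &ptx_size) == NVRTC_SUCCESS && ptx_size > 0) {
    ptx.resize (ptx_size);
    nvrtcGetPTX (prog, &ptx[0]);
  }

  nvrtcDestroyProgram (&prog);
  return ptx;
}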
@@ -24,21 +24,20 @@
#include "cuda-gst.h"
#include "gstcudastream.h"
#include "gstcudautils.h"
#include "gstcuda-private.h"

#ifndef GST_DISABLE_GST_DEBUG
#define GST_CAT_DEFAULT ensure_debug_category()
static GstDebugCategory *
ensure_debug_category (void)
{
static gsize cat_once = 0;
static GstDebugCategory *cat = nullptr;

if (g_once_init_enter (&cat_once)) {
gsize temp = (gsize) _gst_debug_category_new ("cudastream", 0,
"cudastream");
g_once_init_leave (&cat_once, temp);
}
GST_CUDA_CALL_ONCE_BEGIN {
cat = _gst_debug_category_new ("cudastream", 0, "cudastream");
} GST_CUDA_CALL_ONCE_END;

return (GstDebugCategory *) cat_once;
return cat;
}
#else
#define ensure_debug_category() /* NOOP */

@@ -58,7 +57,7 @@ gst_cuda_stream_init_once (GType type)
{
static GstValueTable table = {
0, (GstValueCompareFunc) gst_cuda_stream_compare_func,
NULL, NULL
nullptr, nullptr
};

table.type = type;

@@ -84,7 +83,7 @@ _gst_cuda_stream_free (GstCudaStream * stream)
if (priv->handle) {
gst_cuda_context_push (stream->context);
CuStreamDestroy (priv->handle);
gst_cuda_context_pop (NULL);
gst_cuda_context_pop (nullptr);
}

gst_object_unref (stream->context);
@@ -112,28 +111,28 @@ gst_cuda_stream_new (GstCudaContext * context)
CUresult cuda_ret;
CUstream stream;

g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), NULL);
g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), nullptr);

if (!gst_cuda_context_push (context)) {
GST_ERROR_OBJECT (context, "Couldn't push context");
return NULL;
return nullptr;
}

cuda_ret = CuStreamCreate (&stream, CU_STREAM_DEFAULT);
gst_cuda_context_pop (NULL);
gst_cuda_context_pop (nullptr);

if (!gst_cuda_result (cuda_ret)) {
GST_ERROR_OBJECT (context, "Couldn't create stream");
return NULL;
return nullptr;
}

self = g_new0 (GstCudaStream, 1);
self->context = gst_object_ref (context);
self->context = (GstCudaContext *) gst_object_ref (context);
self->priv = g_new0 (GstCudaStreamPrivate, 1);
self->priv->handle = stream;

gst_mini_object_init (GST_MINI_OBJECT_CAST (self), 0,
GST_TYPE_CUDA_STREAM, NULL, NULL,
GST_TYPE_CUDA_STREAM, nullptr, nullptr,
(GstMiniObjectFreeFunction) _gst_cuda_stream_free);

return self;

@@ -152,10 +151,10 @@ gst_cuda_stream_new (GstCudaContext * context)
CUstream
gst_cuda_stream_get_handle (GstCudaStream * stream)
{
g_return_val_if_fail (!stream || GST_IS_CUDA_STREAM (stream), NULL);
g_return_val_if_fail (!stream || GST_IS_CUDA_STREAM (stream), nullptr);

if (!stream)
return NULL;
return nullptr;

return stream->priv->handle;
}

@@ -203,6 +202,6 @@ gst_clear_cuda_stream (GstCudaStream ** stream)
{
if (stream && *stream) {
gst_cuda_stream_unref (*stream);
*stream = NULL;
*stream = nullptr;
}
}
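The ensure_debug_category () rewrite above is the recurring pattern in this port: the g_once_init_enter ()/g_once_init_leave () dance becomes a one-shot lambda guarded by GST_CUDA_CALL_ONCE_BEGIN/END. A minimal sketch of the same idiom in plain C++, assuming a std::call_once guard underneath and a placeholder initializer instead of the GStreamer debug category:

#include <mutex>
#include <cstdio>

static int *
ensure_resource (void)
{
  static int *resource = nullptr;
  static std::once_flag once;

  /* The lambda runs exactly once, even if several threads race here;
   * later callers fall straight through to the return. */
  std::call_once (once, [&]() {
    resource = new int (42);
    std::printf ("initialized once\n");
  });

  return resource;
}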
@@ -47,22 +47,18 @@ GST_DEBUG_CATEGORY_STATIC (GST_CAT_CONTEXT);
static void
_init_debug (void)
{
static gsize once_init = 0;

if (g_once_init_enter (&once_init)) {

GST_CUDA_CALL_ONCE_BEGIN {
GST_DEBUG_CATEGORY_INIT (gst_cuda_utils_debug, "cudautils", 0,
"CUDA utils");
GST_DEBUG_CATEGORY_GET (GST_CAT_CONTEXT, "GST_CONTEXT");
g_once_init_leave (&once_init, 1);
}
} GST_CUDA_CALL_ONCE_END;
}

static gboolean
pad_query (const GValue * item, GValue * value, gpointer user_data)
{
GstPad *pad = g_value_get_object (item);
GstQuery *query = user_data;
GstPad *pad = (GstPad *) g_value_get_object (item);
GstQuery *query = (GstQuery *) user_data;
gboolean res;

res = gst_pad_peer_query (pad, query);
@@ -122,7 +118,7 @@ find_cuda_context (GstElement * element, GstCudaContext ** cuda_ctx)

/* although we found cuda context above, the element does not want
* to use the context. Then try to find from the other direction */
if (*cuda_ctx == NULL && run_query (element, query, GST_PAD_SINK)) {
if (*cuda_ctx == nullptr && run_query (element, query, GST_PAD_SINK)) {
gst_query_parse_context (query, &ctxt);
if (ctxt) {
GST_CAT_INFO_OBJECT (GST_CAT_CONTEXT, element,

@@ -131,7 +127,7 @@ find_cuda_context (GstElement * element, GstCudaContext ** cuda_ctx)
}
}

if (*cuda_ctx == NULL) {
if (*cuda_ctx == nullptr) {
/* 3) Post a GST_MESSAGE_NEED_CONTEXT message on the bus with
* the required context type and afterwards check if a
* usable context was set now. The message could

@@ -162,9 +158,9 @@ context_set_cuda_context (GstContext * context, GstCudaContext * cuda_ctx)
GstStructure *s;
guint device_id;

g_return_if_fail (context != NULL);
g_return_if_fail (context != nullptr);

g_object_get (G_OBJECT (cuda_ctx), "cuda-device-id", &device_id, NULL);
g_object_get (G_OBJECT (cuda_ctx), "cuda-device-id", &device_id, nullptr);

GST_CAT_LOG (GST_CAT_CONTEXT,
"setting GstCudaContext(%" GST_PTR_FORMAT

@@ -173,7 +169,7 @@ context_set_cuda_context (GstContext * context, GstCudaContext * cuda_ctx)

s = gst_context_writable_structure (context);
gst_structure_set (s, GST_CUDA_CONTEXT_TYPE, GST_TYPE_CUDA_CONTEXT,
cuda_ctx, "cuda-device-id", G_TYPE_UINT, device_id, NULL);
cuda_ctx, "cuda-device-id", G_TYPE_UINT, device_id, nullptr);
}

/**
@@ -199,26 +195,21 @@ gst_cuda_ensure_element_context (GstElement * element, gint device_id,
{
guint target_device_id = 0;
gboolean ret = TRUE;
static GRecMutex lock;
static gsize init_lock_once = 0;
static std::recursive_mutex lock;

g_return_val_if_fail (element != NULL, FALSE);
g_return_val_if_fail (cuda_ctx != NULL, FALSE);
g_return_val_if_fail (element != nullptr, FALSE);
g_return_val_if_fail (cuda_ctx != nullptr, FALSE);

_init_debug ();
if (g_once_init_enter (&init_lock_once)) {
g_rec_mutex_init (&lock);
g_once_init_leave (&init_lock_once, 1);
}

g_rec_mutex_lock (&lock);
std::lock_guard < std::recursive_mutex > lk (lock);

if (*cuda_ctx)
goto out;
return TRUE;

find_cuda_context (element, cuda_ctx);
if (*cuda_ctx)
goto out;
return TRUE;

if (device_id > 0)
target_device_id = device_id;

@@ -226,7 +217,7 @@ gst_cuda_ensure_element_context (GstElement * element, gint device_id,
/* No available CUDA context in pipeline, create new one here */
*cuda_ctx = gst_cuda_context_new (target_device_id);

if (*cuda_ctx == NULL) {
if (*cuda_ctx == nullptr) {
GST_CAT_ERROR_OBJECT (GST_CAT_CONTEXT, element,
"Failed to create CUDA context with device-id %d", device_id);
ret = FALSE;

@@ -248,9 +239,6 @@ gst_cuda_ensure_element_context (GstElement * element, gint device_id,
gst_element_post_message (GST_ELEMENT_CAST (element), msg);
}

out:
g_rec_mutex_unlock (&lock);

return ret;
}
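The locking change above follows the same direction: the lazily initialized GRecMutex with explicit lock/unlock and the out: label give way to a function-local static std::recursive_mutex held by a scoped std::lock_guard, so every early return unlocks automatically. A small sketch of that shape, with a dummy predicate standing in for the context lookups:

#include <mutex>

static bool
ensure_thing (bool already_have_it)
{
  /* Initialized thread-safely on first use; no explicit init step needed. */
  static std::recursive_mutex lock;
  std::lock_guard<std::recursive_mutex> lk (lock);

  if (already_have_it)
    return true;                /* the guard's destructor releases the mutex */

  /* ... slow path; re-entering through the same mutex is allowed ... */
  return true;
}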
@@ -277,8 +265,8 @@ gst_cuda_handle_set_context (GstElement * element,
{
const gchar *context_type;

g_return_val_if_fail (element != NULL, FALSE);
g_return_val_if_fail (cuda_ctx != NULL, FALSE);
g_return_val_if_fail (element != nullptr, FALSE);
g_return_val_if_fail (cuda_ctx != nullptr, FALSE);

_init_debug ();

@@ -288,7 +276,7 @@ gst_cuda_handle_set_context (GstElement * element,
context_type = gst_context_get_context_type (context);
if (g_strcmp0 (context_type, GST_CUDA_CONTEXT_TYPE) == 0) {
const GstStructure *str;
GstCudaContext *other_ctx = NULL;
GstCudaContext *other_ctx = nullptr;
guint other_device_id = 0;

/* If we had context already, will not replace it */

@@ -297,10 +285,10 @@ gst_cuda_handle_set_context (GstElement * element,

str = gst_context_get_structure (context);
if (gst_structure_get (str, GST_CUDA_CONTEXT_TYPE, GST_TYPE_CUDA_CONTEXT,
&other_ctx, NULL)) {
g_object_get (other_ctx, "cuda-device-id", &other_device_id, NULL);
&other_ctx, nullptr)) {
g_object_get (other_ctx, "cuda-device-id", &other_device_id, nullptr);

if (device_id == -1 || other_device_id == device_id) {
if (device_id == -1 || other_device_id == (guint) device_id) {
GST_CAT_DEBUG_OBJECT (GST_CAT_CONTEXT, element, "Found CUDA context");
*cuda_ctx = other_ctx;

@@ -334,7 +322,7 @@ gst_cuda_handle_context_query (GstElement * element,

g_return_val_if_fail (GST_IS_ELEMENT (element), FALSE);
g_return_val_if_fail (GST_IS_QUERY (query), FALSE);
g_return_val_if_fail (cuda_ctx == NULL
g_return_val_if_fail (cuda_ctx == nullptr
|| GST_IS_CUDA_CONTEXT (cuda_ctx), FALSE);

_init_debug ();

@@ -377,7 +365,7 @@ gst_context_new_cuda_context (GstCudaContext * cuda_ctx)
{
GstContext *context;

g_return_val_if_fail (GST_IS_CUDA_CONTEXT (cuda_ctx), NULL);
g_return_val_if_fail (GST_IS_CUDA_CONTEXT (cuda_ctx), nullptr);

_init_debug ();
@@ -395,17 +383,13 @@ static GQuark gst_cuda_quark_table[GST_CUDA_QUARK_MAX];
static void
init_cuda_quark_once (void)
{
static gsize once_init = 0;

if (g_once_init_enter (&once_init)) {
gint i;

for (i = 0; i < GST_CUDA_QUARK_MAX; i++)
GST_CUDA_CALL_ONCE_BEGIN {
for (guint i = 0; i < GST_CUDA_QUARK_MAX; i++) {
gst_cuda_quark_table[i] =
g_quark_from_static_string (gst_cuda_quark_strings[i]);

g_once_init_leave (&once_init, 1);
}
}
GST_CUDA_CALL_ONCE_END;
}

/**

@@ -446,14 +430,16 @@ gst_cuda_graphics_resource_new (GstCudaContext *
{
GstCudaGraphicsResource *resource;

g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), NULL);
g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), nullptr);

_init_debug ();

resource = g_new0 (GstCudaGraphicsResource, 1);
resource->cuda_context = gst_object_ref (context);
if (graphics_context)
resource->graphics_context = gst_object_ref (graphics_context);
resource->cuda_context = (GstCudaContext *) gst_object_ref (context);
if (graphics_context) {
resource->graphics_context =
(GstObject *) gst_object_ref (graphics_context);
}

return resource;
}
@@ -478,7 +464,7 @@ gst_cuda_graphics_resource_register_gl_buffer (GstCudaGraphicsResource *
{
CUresult cuda_ret;

g_return_val_if_fail (resource != NULL, FALSE);
g_return_val_if_fail (resource != nullptr, FALSE);
g_return_val_if_fail (resource->registered == FALSE, FALSE);

_init_debug ();

@@ -516,7 +502,7 @@ gst_cuda_graphics_resource_register_d3d11_resource (GstCudaGraphicsResource *
{
CUresult cuda_ret;

g_return_val_if_fail (resource != NULL, FALSE);
g_return_val_if_fail (resource != nullptr, FALSE);
g_return_val_if_fail (resource->registered == FALSE, FALSE);

_init_debug ();

@@ -549,7 +535,7 @@ gst_cuda_graphics_resource_register_d3d11_resource (GstCudaGraphicsResource *
void
gst_cuda_graphics_resource_unregister (GstCudaGraphicsResource * resource)
{
g_return_if_fail (resource != NULL);
g_return_if_fail (resource != nullptr);

_init_debug ();

@@ -557,7 +543,7 @@ gst_cuda_graphics_resource_unregister (GstCudaGraphicsResource * resource)
return;

gst_cuda_result (CuGraphicsUnregisterResource (resource->resource));
resource->resource = NULL;
resource->resource = nullptr;
resource->registered = FALSE;

return;
@@ -581,18 +567,18 @@ gst_cuda_graphics_resource_map (GstCudaGraphicsResource * resource,
{
CUresult cuda_ret;

g_return_val_if_fail (resource != NULL, NULL);
g_return_val_if_fail (resource->registered != FALSE, NULL);
g_return_val_if_fail (resource != nullptr, nullptr);
g_return_val_if_fail (resource->registered != FALSE, nullptr);

_init_debug ();

cuda_ret = CuGraphicsResourceSetMapFlags (resource->resource, flags);
if (!gst_cuda_result (cuda_ret))
return NULL;
return nullptr;

cuda_ret = CuGraphicsMapResources (1, &resource->resource, stream);
if (!gst_cuda_result (cuda_ret))
return NULL;
return nullptr;

resource->mapped = TRUE;

@@ -612,7 +598,7 @@ void
gst_cuda_graphics_resource_unmap (GstCudaGraphicsResource * resource,
CUstream stream)
{
g_return_if_fail (resource != NULL);
g_return_if_fail (resource != nullptr);
g_return_if_fail (resource->registered != FALSE);

_init_debug ();

@@ -639,7 +625,7 @@ unregister_resource_from_gl_thread (GstGLContext * gl_context,

gst_cuda_graphics_resource_unregister (resource);

if (!gst_cuda_context_pop (NULL)) {
if (!gst_cuda_context_pop (nullptr)) {
GST_WARNING_OBJECT (cuda_context, "failed to pop CUDA context");
}
}

@@ -661,7 +647,7 @@ unregister_d3d11_resource (GstCudaGraphicsResource * resource)
gst_cuda_graphics_resource_unregister (resource);
gst_d3d11_device_unlock (device);

if (!gst_cuda_context_pop (NULL)) {
if (!gst_cuda_context_pop (nullptr)) {
GST_WARNING_OBJECT (cuda_context, "failed to pop CUDA context");
}
}

@@ -678,7 +664,7 @@ unregister_d3d11_resource (GstCudaGraphicsResource * resource)
void
gst_cuda_graphics_resource_free (GstCudaGraphicsResource * resource)
{
g_return_if_fail (resource != NULL);
g_return_if_fail (resource != nullptr);

if (resource->registered) {
#ifdef HAVE_NVCODEC_GST_GL
@@ -768,8 +754,8 @@ gst_cuda_buffer_fallback_copy (GstBuffer * dst, const GstVideoInfo * dst_info,
dst_stride = GST_VIDEO_FRAME_PLANE_STRIDE (&dst_frame, i);
src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (&src_frame, i);

dst_data = GST_VIDEO_FRAME_PLANE_DATA (&dst_frame, i);
src_data = GST_VIDEO_FRAME_PLANE_DATA (&src_frame, i);
dst_data = (guint8 *) GST_VIDEO_FRAME_PLANE_DATA (&dst_frame, i);
src_data = (guint8 *) GST_VIDEO_FRAME_PLANE_DATA (&src_frame, i);

for (j = 0; j < height; j++) {
memcpy (dst_data, src_data, width_in_bytes);

@@ -909,7 +895,7 @@ map_buffer_and_fill_copy2d (GstBuffer * buf, const GstVideoInfo * info,
map_flags = GST_MAP_WRITE;

if (copy_type == GST_CUDA_BUFFER_COPY_CUDA)
map_flags |= GST_MAP_CUDA;
map_flags = (GstMapFlags) (map_flags | GST_MAP_CUDA);

if (!gst_video_frame_map (frame, info, buf, map_flags)) {
GST_ERROR ("Failed to map buffer");

@@ -1023,7 +1009,7 @@ gst_cuda_buffer_copy_internal (GstBuffer * dst_buf,
}

gst_cuda_result (CuStreamSynchronize (stream));
gst_cuda_context_pop (NULL);
gst_cuda_context_pop (nullptr);

unmap_and_out:
unmap_buffer_or_frame (dst_buf, &src_frame, &src_map);
@@ -1067,12 +1053,12 @@ static GstCudaGraphicsResource *
ensure_cuda_gl_graphics_resource (GstCudaContext * context, GstMemory * mem)
{
GQuark quark;
GstCudaGraphicsResource *ret = NULL;
GstCudaGraphicsResource *ret = nullptr;

if (!gst_is_gl_memory_pbo (mem)) {
GST_WARNING_OBJECT (context, "memory is not GL PBO memory, %s",
mem->allocator->mem_type);
return NULL;
return nullptr;
}

quark = gst_cuda_quark_from_id (GST_CUDA_QUARK_GRAPHICS_RESOURCE);

@@ -1091,7 +1077,7 @@ ensure_cuda_gl_graphics_resource (GstCudaContext * context, GstMemory * mem)
if (!gst_memory_map (mem, &info, (GstMapFlags) (GST_MAP_READ | GST_MAP_GL))) {
GST_ERROR_OBJECT (context, "Failed to map gl memory");
gst_cuda_graphics_resource_free (ret);
return NULL;
return nullptr;
}

pbo = (GstGLMemoryPBO *) mem;

@@ -1103,7 +1089,7 @@ ensure_cuda_gl_graphics_resource (GstCudaContext * context, GstMemory * mem)
gst_memory_unmap (mem, &info);
gst_cuda_graphics_resource_free (ret);

return NULL;
return nullptr;
}

gst_memory_unmap (mem, &info);

@@ -1258,7 +1244,7 @@ gl_copy_thread_func (GstGLContext * gl_context, GLCopyData * data)

out:
gst_cuda_result (CuStreamSynchronize (stream));
gst_cuda_context_pop (NULL);
gst_cuda_context_pop (nullptr);
unmap_buffer_or_frame (cuda_buf, &cuda_frame, &cuda_map_info);
}
@@ -1299,7 +1285,7 @@ ensure_d3d11_interop (GstCudaContext * context, GstD3D11Device * device)
CUdevice device_list[1] = { 0, };
CUresult cuda_ret;

g_object_get (context, "cuda-device-id", &cuda_device_id, NULL);
g_object_get (context, "cuda-device-id", &cuda_device_id, nullptr);

cuda_ret = CuD3D11GetDevices (&device_count,
device_list, 1, gst_d3d11_device_get_device_handle (device),

@@ -1318,12 +1304,12 @@ static GstCudaGraphicsResource *
ensure_cuda_d3d11_graphics_resource (GstCudaContext * context, GstMemory * mem)
{
GQuark quark;
GstCudaGraphicsResource *ret = NULL;
GstCudaGraphicsResource *ret = nullptr;

if (!gst_is_d3d11_memory (mem)) {
GST_WARNING_OBJECT (context, "memory is not D3D11 memory, %s",
mem->allocator->mem_type);
return NULL;
return nullptr;
}

quark = gst_cuda_quark_from_id (GST_CUDA_QUARK_GRAPHICS_RESOURCE);

@@ -1341,7 +1327,7 @@ ensure_cuda_d3d11_graphics_resource (GstCudaContext * context, GstMemory * mem)
GST_ERROR_OBJECT (context, "failed to register d3d11 resource");
gst_cuda_graphics_resource_free (ret);

return NULL;
return nullptr;
}

gst_mini_object_set_qdata (GST_MINI_OBJECT (mem), quark, ret,
@@ -1378,7 +1364,7 @@ cuda_copy_d3d11_interop (GstBuffer * dst_buf, const GstVideoInfo * dst_info,
d3d11_buf = src_buf;
cuda_buf = dst_buf;
if (!gst_video_frame_map (&d3d11_frame, src_info, d3d11_buf,
GST_MAP_READ | GST_MAP_D3D11)) {
(GstMapFlags) (GST_MAP_READ | GST_MAP_D3D11))) {
GST_ERROR_OBJECT (context, "Failed to map input D3D11 buffer");
return FALSE;
}

@@ -1393,7 +1379,7 @@ cuda_copy_d3d11_interop (GstBuffer * dst_buf, const GstVideoInfo * dst_info,
d3d11_buf = dst_buf;
cuda_buf = src_buf;
if (!gst_video_frame_map (&d3d11_frame, dst_info, d3d11_buf,
GST_MAP_WRITE | GST_MAP_D3D11)) {
(GstMapFlags) (GST_MAP_WRITE | GST_MAP_D3D11))) {
GST_ERROR_OBJECT (context, "Failed to map output D3D11 buffer");
return FALSE;
}

@@ -1478,7 +1464,7 @@ cuda_copy_d3d11_interop (GstBuffer * dst_buf, const GstVideoInfo * dst_info,

out:
gst_cuda_result (CuStreamSynchronize (stream));
gst_cuda_context_pop (NULL);
gst_cuda_context_pop (nullptr);
gst_video_frame_unmap (&d3d11_frame);
unmap_buffer_or_frame (cuda_buf, &cuda_frame, &cuda_map_info);
@@ -1498,14 +1484,14 @@ gst_cuda_buffer_copy (GstBuffer * dst, GstCudaBufferCopyType dst_type,
D3D11_TEXTURE2D_DESC desc;
#endif
GstCudaContext *cuda_context = context;
GstCudaMemory *cmem = NULL;
GstCudaStream *mem_stream = NULL;
GstCudaMemory *cmem = nullptr;
GstCudaStream *mem_stream = nullptr;
gboolean ret;

g_return_val_if_fail (GST_IS_BUFFER (dst), FALSE);
g_return_val_if_fail (dst_info != NULL, FALSE);
g_return_val_if_fail (dst_info != nullptr, FALSE);
g_return_val_if_fail (GST_IS_BUFFER (src), FALSE);
g_return_val_if_fail (src_info != NULL, FALSE);
g_return_val_if_fail (src_info != nullptr, FALSE);
g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), FALSE);

_init_debug ();

@@ -1641,7 +1627,7 @@ gst_cuda_buffer_copy (GstBuffer * dst, GstCudaBufferCopyType dst_type,
} else if (gst_is_cuda_memory (src_mem)) {
cmem = GST_CUDA_MEMORY_CAST (src_mem);
} else {
cmem = NULL;
cmem = nullptr;
}

if (cmem) {
@@ -1,11 +1,11 @@
cuda_sources = files([
'gstcudabufferpool.c',
'gstcudacontext.c',
'gstcudaloader.c',
'gstcudamemory.c',
'gstcudanvrtc.c',
'gstcudastream.c',
'gstcudautils.c',
'gstcudabufferpool.cpp',
'gstcudacontext.cpp',
'gstcudaloader.cpp',
'gstcudamemory.cpp',
'gstcudanvrtc.cpp',
'gstcudastream.cpp',
'gstcudautils.cpp',
])

cuda_headers = files([

@@ -44,21 +44,23 @@ if host_system == 'windows'
endforeach
endif

extra_c_args = ['-DGST_USE_UNSTABLE_API']
extra_args = ['-DGST_USE_UNSTABLE_API',
'-DBUILDING_GST_CUDA',
'-DG_LOG_DOMAIN="GStreamer-Cuda"']

if gstgl_dep.found()
extra_c_args += ['-DHAVE_NVCODEC_GST_GL=1']
extra_args += ['-DHAVE_NVCODEC_GST_GL=1']
endif

if gstd3d11_dep.found()
extra_c_args += ['-DGST_CUDA_HAS_D3D=1', '-DCOBJMACROS']
extra_args += ['-DGST_CUDA_HAS_D3D']
endif

pkg_name = 'gstreamer-cuda-' + api_version
gstcuda= library('gstcuda-' + api_version,
cuda_sources,
c_args : gst_plugins_bad_args + extra_c_args + ['-DGST_USE_UNSTABLE_API', '-DBUILDING_GST_CUDA', '-DG_LOG_DOMAIN="GStreamer-Cuda"'],
cpp_args : gst_plugins_bad_args,
c_args : gst_plugins_bad_args + extra_args,
cpp_args : gst_plugins_bad_args + extra_args,
include_directories : [configinc, libsinc, cuda_stubinc],
version : libversion,
soversion : soversion,