/* GStreamer
 * Copyright (C) 2020 Seungha Yang <seungha.yang@navercorp.com>
 * Copyright (C) 2020 Seungha Yang <seungha@centricular.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "gstmfconfig.h"

#include <gst/gst.h>
#include "gstmftransform.h"
#include "gstmfutils.h"
#include "gstmfplatloader.h"
#include <string.h>
#include <wrl.h>

/* *INDENT-OFF* */
using namespace Microsoft::WRL;

GST_DEBUG_CATEGORY_EXTERN (gst_mf_transform_debug);
#define GST_CAT_DEFAULT gst_mf_transform_debug

/* Unlike software MFTs (Media Foundation Transforms), which process input
 * and produce output synchronously, hardware MFTs operate asynchronously:
 * output data might not be available right after input has been submitted.
 * An async MFT fires two events, "METransformNeedInput", raised when the MFT
 * can accept more input data, and "METransformHaveOutput", raised when
 * pending output can be pulled immediately.
 *
 * These events can be consumed either by waiting synchronously via
 * IMFMediaEventGenerator::GetEvent(), or asynchronously via an
 * IMFAsyncCallback object, in which case events are delivered on Media
 * Foundation's internal worker queue thread.
 *
 * Driving GetEvent() from the upstream streaming thread would mean that
 * already-available output could only be pushed downstream once the next
 * input buffer arrives, adding at least one frame of latency for live
 * streams. To avoid this, the GstMFTransformAsyncCallback object below
 * handles "METransformNeedInput" and "METransformHaveOutput" on Media
 * Foundation's thread, so encoded output is pushed downstream as soon as it
 * becomes available. */

typedef HRESULT (*GstMFTransformAsyncCallbackOnEvent) (MediaEventType event,
    GstObject * client);

class GstMFTransformAsyncCallback : public IMFAsyncCallback
{
public:
  static HRESULT
  CreateInstance (IMFTransform * mft,
      GstMFTransformAsyncCallbackOnEvent event_cb, GstObject * client,
      GstMFTransformAsyncCallback ** callback)
  {
    HRESULT hr;
    GstMFTransformAsyncCallback *self;

    if (!mft || !callback)
      return E_INVALIDARG;

    self = new GstMFTransformAsyncCallback ();

    if (!self)
      return E_OUTOFMEMORY;

    hr = self->Initialize (mft, event_cb, client);

    if (!gst_mf_result (hr)) {
      self->Release ();
      return hr;
    }

    *callback = self;

    return S_OK;
  }

  HRESULT
  BeginGetEvent (void)
  {
    if (!gen_)
      return E_FAIL;

    /* we are running already */
    if (running_)
      return S_OK;

    running_ = true;

    return gen_->BeginGetEvent (this, nullptr);
  }

  HRESULT
  Stop (void)
  {
    running_ = false;

    return S_OK;
  }

  /* IUnknown */
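  /* This object is created and used only internally, so no real
   * QueryInterface() implementation is needed; all interface queries are
   * rejected */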
  STDMETHODIMP
  QueryInterface (REFIID riid, void ** object)
  {
    return E_NOTIMPL;
  }

  STDMETHODIMP_ (ULONG)
  AddRef (void)
  {
    GST_TRACE ("%p, %d", this, ref_count_);
    return InterlockedIncrement (&ref_count_);
  }

  STDMETHODIMP_ (ULONG)
  Release (void)
  {
    ULONG ref_count;

    GST_TRACE ("%p, %d", this, ref_count_);
    ref_count = InterlockedDecrement (&ref_count_);

    if (ref_count == 0) {
      GST_TRACE ("Delete instance %p", this);
      delete this;
    }

    return ref_count;
  }

  /* IMFAsyncCallback */
  STDMETHODIMP
  GetParameters (DWORD * flags, DWORD * queue)
  {
    /* This callback may block; signal that via MFASYNC_BLOCKING_CALLBACK and
     * run it on the standard multithreaded work queue */
    *flags = MFASYNC_BLOCKING_CALLBACK;
    *queue = MFASYNC_CALLBACK_QUEUE_MULTITHREADED;
    return S_OK;
  }
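
  /* Invoked from Media Foundation's worker queue thread whenever an event
   * is ready: finish the pending request with EndGetEvent(), dispatch the
   * event to the client through the registered callback (via a weak
   * reference, so an already-disposed client is silently skipped), then
   * re-arm BeginGetEvent() unless a drain has just completed */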
  STDMETHODIMP
  Invoke (IMFAsyncResult * async_result)
  {
    ComPtr<IMFMediaEvent> event;
    HRESULT hr;
    bool do_next = true;

    hr = gen_->EndGetEvent (async_result, &event);

    if (!gst_mf_result (hr))
      return hr;

    if (event) {
      MediaEventType type;
      GstObject *client = nullptr;

      hr = event->GetType(&type);
      if (!gst_mf_result (hr))
        return hr;

      if (!event_cb_)
        return S_OK;

      client = (GstObject *) g_weak_ref_get (&client_);
      if (!client)
        return S_OK;

      hr = event_cb_ (type, client);
      gst_object_unref (client);
      if (!gst_mf_result (hr))
        return hr;

      /* On drain completion, stop calling BeginGetEvent() since there might
       * be no more events to follow. The client should call our
       * BeginGetEvent() method to start the event pump again */
      if (type == METransformDrainComplete)
        do_next = false;
    }

    if (do_next)
      gen_->BeginGetEvent(this, nullptr);

    return S_OK;
  }

private:
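  /* Only a weak reference to the client is held: a strong reference would
   * create a reference cycle, with the GstMFTransform owning this callback
   * object and the callback object keeping the GstMFTransform alive */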
  GstMFTransformAsyncCallback ()
    : ref_count_ (1)
    , running_ (false)
  {
    g_weak_ref_init (&client_, nullptr);
  }

  ~GstMFTransformAsyncCallback ()
  {
    g_weak_ref_clear (&client_);
  }

  HRESULT
  Initialize (IMFTransform * mft, GstMFTransformAsyncCallbackOnEvent event_cb,
      GstObject * client)
  {
    HRESULT hr = mft->QueryInterface(IID_PPV_ARGS(&gen_));

    if (!gst_mf_result (hr))
      return hr;

    event_cb_ = event_cb;
    g_weak_ref_set (&client_, client);

    return S_OK;
  }

private:
  ULONG ref_count_;
  ComPtr<IMFMediaEventGenerator> gen_;
  GstMFTransformAsyncCallbackOnEvent event_cb_;
  GWeakRef client_;

  bool running_;
};
/* *INDENT-ON* */

enum
{
  PROP_0,
  PROP_DEVICE_NAME,
  PROP_HARDWARE,
  PROP_ENUM_PARAMS,
  PROP_D3D11_AWARE,
};

struct _GstMFTransform
{
  GstObject object;
  gboolean initialized;

  GstMFTransformEnumParams enum_params;

  gchar *device_name;
  gboolean hardware;
  gboolean d3d11_aware;

  IMFActivate *activate;
  IMFTransform *transform;
  ICodecAPI *codec_api;
  GstMFTransformAsyncCallback *callback_object;

  GQueue *output_queue;

  DWORD input_id;
  DWORD output_id;

  gboolean running;

  gint pending_need_input;

  GThread *thread;
  GMutex lock;
  GCond cond;
  GMutex event_lock;
  GCond event_cond;
  GMainContext *context;
  GMainLoop *loop;
  gboolean draining;
  gboolean flushing;

  GstMFTransformNewSampleCallback callback;
  gpointer user_data;
};

#define gst_mf_transform_parent_class parent_class
G_DEFINE_TYPE (GstMFTransform, gst_mf_transform, GST_TYPE_OBJECT);

static void gst_mf_transform_constructed (GObject * object);
static void gst_mf_transform_finalize (GObject * object);
static void gst_mf_transform_get_property (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec);
static void gst_mf_transform_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec);

static gpointer gst_mf_transform_thread_func (GstMFTransform * self);
static gboolean gst_mf_transform_close (GstMFTransform * self);
static HRESULT gst_mf_transform_on_event (MediaEventType event,
    GstMFTransform * self);

static void
gst_mf_transform_class_init (GstMFTransformClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);

  gobject_class->constructed = gst_mf_transform_constructed;
  gobject_class->finalize = gst_mf_transform_finalize;
  gobject_class->get_property = gst_mf_transform_get_property;
  gobject_class->set_property = gst_mf_transform_set_property;

  g_object_class_install_property (gobject_class, PROP_DEVICE_NAME,
      g_param_spec_string ("device-name", "device-name",
          "Device name", nullptr,
          (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_HARDWARE,
      g_param_spec_boolean ("hardware", "Hardware",
          "Whether the MFT is a hardware device", FALSE,
          (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_ENUM_PARAMS,
      g_param_spec_pointer ("enum-params", "Enum Params",
          "GstMFTransformEnumParams for MFTEnumEx",
          (GParamFlags) (G_PARAM_WRITABLE | G_PARAM_CONSTRUCT_ONLY |
              G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_D3D11_AWARE,
      g_param_spec_boolean ("d3d11-aware", "D3D11 Aware",
          "Whether the MFT supports Direct3D11", FALSE,
          (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)));
}

static void
gst_mf_transform_init (GstMFTransform * self)
{
  self->output_queue = g_queue_new ();

  g_mutex_init (&self->lock);
  g_mutex_init (&self->event_lock);
  g_cond_init (&self->cond);
  g_cond_init (&self->event_cond);

  self->context = g_main_context_new ();
  self->loop = g_main_loop_new (self->context, FALSE);
}

static void
gst_mf_transform_constructed (GObject * object)
{
  GstMFTransform *self = GST_MF_TRANSFORM (object);

  /* Spawn a dedicated thread so that COM is guaranteed to be initialized
   * as MTA on it */
  g_mutex_lock (&self->lock);
  self->thread = g_thread_new ("GstMFTransform",
      (GThreadFunc) gst_mf_transform_thread_func, self);
  while (!g_main_loop_is_running (self->loop))
    g_cond_wait (&self->cond, &self->lock);
  g_mutex_unlock (&self->lock);

  G_OBJECT_CLASS (parent_class)->constructed (object);
}

static void
gst_mf_transform_clear_enum_params (GstMFTransformEnumParams * params)
{
  g_free (params->input_typeinfo);
  params->input_typeinfo = nullptr;

  g_free (params->output_typeinfo);
  params->output_typeinfo = nullptr;
}

static void
release_mf_sample (IMFSample * sample)
{
  if (sample)
    sample->Release ();
}

static void
gst_mf_transform_finalize (GObject * object)
{
  GstMFTransform *self = GST_MF_TRANSFORM (object);

  g_main_loop_quit (self->loop);
  g_thread_join (self->thread);
  g_main_loop_unref (self->loop);
  g_main_context_unref (self->context);

  g_queue_free_full (self->output_queue, (GDestroyNotify) release_mf_sample);
  gst_mf_transform_clear_enum_params (&self->enum_params);
  g_free (self->device_name);
  g_mutex_clear (&self->lock);
  g_mutex_clear (&self->event_lock);
  g_cond_clear (&self->cond);
  g_cond_clear (&self->event_cond);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

static void
gst_mf_transform_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstMFTransform *self = GST_MF_TRANSFORM (object);

  switch (prop_id) {
    case PROP_DEVICE_NAME:
      g_value_set_string (value, self->device_name);
      break;
    case PROP_HARDWARE:
      g_value_set_boolean (value, self->hardware);
      break;
    case PROP_D3D11_AWARE:
      g_value_set_boolean (value, self->d3d11_aware);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

static void
gst_mf_transform_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstMFTransform *self = GST_MF_TRANSFORM (object);

  switch (prop_id) {
    case PROP_ENUM_PARAMS:
    {
      GstMFTransformEnumParams *params;
      params = (GstMFTransformEnumParams *) g_value_get_pointer (value);

      gst_mf_transform_clear_enum_params (&self->enum_params);
      self->enum_params.category = params->category;
      self->enum_params.enum_flags = params->enum_flags;
      self->enum_params.device_index = params->device_index;
      self->enum_params.adapter_luid = params->adapter_luid;
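
      /* Deep-copy the register type info blobs; the caller's
       * GstMFTransformEnumParams may not outlive this object */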
      if (params->input_typeinfo) {
        self->enum_params.input_typeinfo = g_new0 (MFT_REGISTER_TYPE_INFO, 1);
        memcpy (self->enum_params.input_typeinfo, params->input_typeinfo,
            sizeof (MFT_REGISTER_TYPE_INFO));
      }

      if (params->output_typeinfo) {
        self->enum_params.output_typeinfo = g_new0 (MFT_REGISTER_TYPE_INFO, 1);
        memcpy (self->enum_params.output_typeinfo, params->output_typeinfo,
            sizeof (MFT_REGISTER_TYPE_INFO));
      }
      break;
    }
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
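
/* Idle callback dispatched once the GMainLoop is actually running; wakes up
 * gst_mf_transform_constructed(), which blocks until the loop has started */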
static gboolean
gst_mf_transform_main_loop_running_cb (GstMFTransform * self)
{
  GST_TRACE_OBJECT (self, "Main loop running now");

  g_mutex_lock (&self->lock);
  g_cond_signal (&self->cond);
  g_mutex_unlock (&self->lock);

  return G_SOURCE_REMOVE;
}

static gpointer
gst_mf_transform_thread_func (GstMFTransform * self)
{
  HRESULT hr = S_OK;
  IMFActivate **devices = nullptr;
  UINT32 num_devices, i;
  LPWSTR name = nullptr;
  GSource *source;

  CoInitializeEx (nullptr, COINIT_MULTITHREADED);

  g_main_context_push_thread_default (self->context);

  source = g_idle_source_new ();
  g_source_set_callback (source,
      (GSourceFunc) gst_mf_transform_main_loop_running_cb, self, nullptr);
  g_source_attach (source, self->context);
  g_source_unref (source);

  /* NOTE: MFTEnum2 is desktop only and requires Windows 10 */
#if GST_MF_HAVE_D3D11
  if (gst_mf_plat_load_library () && self->enum_params.adapter_luid &&
      (self->enum_params.enum_flags & MFT_ENUM_FLAG_HARDWARE) != 0) {
    ComPtr < IMFAttributes > attr;
    LUID luid;

    hr = MFCreateAttributes (&attr, 1);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't create IMFAttributes");
      goto run_loop;
    }

    GST_INFO_OBJECT (self,
        "Enumerating MFT for adapter-luid %" G_GINT64_FORMAT,
        self->enum_params.adapter_luid);
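
    /* Split the packed 64-bit adapter LUID back into the LowPart/HighPart
     * fields of the Windows LUID struct */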
    luid.LowPart = (DWORD) (self->enum_params.adapter_luid & 0xffffffff);
    luid.HighPart = (LONG) (self->enum_params.adapter_luid >> 32);

    hr = attr->SetBlob (GST_GUID_MFT_ENUM_ADAPTER_LUID, (BYTE *) & luid,
        sizeof (LUID));
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't set MFT_ENUM_ADAPTER_LUID");
      goto run_loop;
    }

    hr = GstMFTEnum2 (self->enum_params.category,
        self->enum_params.enum_flags, self->enum_params.input_typeinfo,
        self->enum_params.output_typeinfo, attr.Get (), &devices, &num_devices);
  } else
#endif
  {
    hr = MFTEnumEx (self->enum_params.category, self->enum_params.enum_flags,
        self->enum_params.input_typeinfo, self->enum_params.output_typeinfo,
        &devices, &num_devices);
  }

  if (!gst_mf_result (hr)) {
    GST_WARNING_OBJECT (self, "MFTEnumEx failure");
    goto run_loop;
  }

  if (num_devices == 0 || self->enum_params.device_index >= num_devices) {
    GST_WARNING_OBJECT (self, "No available device at index %d",
        self->enum_params.device_index);
    for (i = 0; i < num_devices; i++)
      devices[i]->Release ();

    CoTaskMemFree (devices);
    goto run_loop;
  }

  self->activate = devices[self->enum_params.device_index];
  self->activate->AddRef ();

  for (i = 0; i < num_devices; i++)
    devices[i]->Release ();

  hr = self->activate->GetAllocatedString (MFT_FRIENDLY_NAME_Attribute,
      &name, nullptr);

  if (gst_mf_result (hr)) {
    self->device_name = g_utf16_to_utf8 ((const gunichar2 *) name,
        -1, nullptr, nullptr, nullptr);

    GST_INFO_OBJECT (self, "Open device %s", self->device_name);
    CoTaskMemFree (name);
  }

  CoTaskMemFree (devices);

  if ((self->enum_params.enum_flags & MFT_ENUM_FLAG_HARDWARE) != 0)
    self->hardware = TRUE;
  else
    self->hardware = FALSE;

  self->initialized = TRUE;

run_loop:
  GST_TRACE_OBJECT (self, "Starting main loop");
  g_main_loop_run (self->loop);
  GST_TRACE_OBJECT (self, "Stopped main loop");

  g_main_context_pop_thread_default (self->context);

  /* cleanup internal COM object here */
  gst_mf_transform_close (self);

  if (self->activate) {
    self->activate->Release ();
    self->activate = nullptr;
  }

  CoUninitialize ();

  return nullptr;
}

static GstFlowReturn
gst_mf_transform_process_output (GstMFTransform * self)
{
  DWORD status;
  HRESULT hr;
  IMFTransform *transform = self->transform;
  DWORD stream_id = self->output_id;
  MFT_OUTPUT_STREAM_INFO out_stream_info = { 0 };
  MFT_OUTPUT_DATA_BUFFER out_data = { 0 };
  GstFlowReturn ret = GST_FLOW_OK;

  GST_TRACE_OBJECT (self, "Process output");

  hr = transform->GetOutputStreamInfo (stream_id, &out_stream_info);
  if (!gst_mf_result (hr)) {
    GST_ERROR_OBJECT (self, "Couldn't get output stream info");
    return GST_FLOW_ERROR;
  }
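
  /* If the MFT does not provide output samples itself, the caller must
   * allocate a sample of the size advertised in the output stream info and
   * attach it to the output data buffer */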
  if ((out_stream_info.dwFlags & (MFT_OUTPUT_STREAM_PROVIDES_SAMPLES |
              MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES)) == 0) {
    ComPtr < IMFMediaBuffer > buffer;
    ComPtr < IMFSample > new_sample;

    hr = MFCreateMemoryBuffer (out_stream_info.cbSize, &buffer);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't create memory buffer");
      return GST_FLOW_ERROR;
    }

    hr = MFCreateSample (&new_sample);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't create sample");
      return GST_FLOW_ERROR;
    }

    hr = new_sample->AddBuffer (buffer.Get ());
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't add buffer to sample");
      return GST_FLOW_ERROR;
    }

    out_data.pSample = new_sample.Detach ();
  }

  out_data.dwStreamID = stream_id;

  hr = transform->ProcessOutput (0, 1, &out_data, &status);

  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
    GST_LOG_OBJECT (self, "Need more input data");
    ret = GST_MF_TRANSFORM_FLOW_NEED_DATA;
  } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
    ComPtr < IMFMediaType > output_type;

    GST_DEBUG_OBJECT (self, "Stream change, set output type again");

    hr = transform->GetOutputAvailableType (stream_id, 0, &output_type);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't get available output type");
      ret = GST_FLOW_ERROR;
      goto done;
    }

    hr = transform->SetOutputType (stream_id, output_type.Get (), 0);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (self, "Couldn't set output type");
      ret = GST_FLOW_ERROR;
      goto done;
    }

    ret = GST_MF_TRANSFORM_FLOW_NEED_DATA;
  } else if (!gst_mf_result (hr)) {
    if (self->flushing) {
      GST_DEBUG_OBJECT (self, "Ignore error on flushing");
      ret = GST_FLOW_FLUSHING;
    } else {
      GST_ERROR_OBJECT (self, "ProcessOutput error, hr 0x%x", hr);
      ret = GST_FLOW_ERROR;
    }
  }

done:
  if (ret != GST_FLOW_OK) {
    if (out_data.pSample)
      out_data.pSample->Release ();

    return ret;
  }

  if (!out_data.pSample) {
    GST_WARNING_OBJECT (self, "No output sample");
    return GST_FLOW_OK;
  }
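
  /* In async mode a new-sample callback is installed; hand the sample over
   * right away (we are on Media Foundation's thread here). Otherwise queue
   * it for the streaming thread to pop later */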
  if (self->callback) {
    self->callback (self, out_data.pSample, self->user_data);
    out_data.pSample->Release ();
    return GST_FLOW_OK;
  }

  g_queue_push_tail (self->output_queue, out_data.pSample);

  return GST_FLOW_OK;
}
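/* For hardware (async) MFTs, pending_need_input counts outstanding
 * METransformNeedInput events; each successful ProcessInput() consumes
 * one of them */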
/* Must be called with event_lock */
static gboolean
gst_mf_transform_process_input_sync (GstMFTransform * self, IMFSample * sample)
{
  HRESULT hr;

  /* ProcessInput() takes the input stream id, not the output one */
  hr = self->transform->ProcessInput (self->input_id, sample, 0);

  if (self->hardware)
    self->pending_need_input--;

  return gst_mf_result (hr);
}

gboolean
gst_mf_transform_process_input (GstMFTransform * object, IMFSample * sample)
{
  HRESULT hr;
  gboolean ret = FALSE;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (sample != nullptr, FALSE);

  GST_TRACE_OBJECT (object, "Process input");

  if (!object->transform)
    return FALSE;
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
g_mutex_lock (&object->event_lock);
|
2020-05-18 09:12:38 +00:00
|
|
|
if (!object->running) {
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
object->pending_need_input = 0;
|
|
|
|
|
2020-01-07 08:45:22 +00:00
|
|
|
hr = object->transform->ProcessMessage (MFT_MESSAGE_NOTIFY_START_OF_STREAM,
|
|
|
|
0);
|
|
|
|
if (!gst_mf_result (hr)) {
|
|
|
|
GST_ERROR_OBJECT (object, "Cannot post start-of-stream message");
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
goto done;
|
2020-01-07 08:45:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
hr = object->transform->ProcessMessage (MFT_MESSAGE_NOTIFY_BEGIN_STREAMING,
|
|
|
|
0);
|
|
|
|
if (!gst_mf_result (hr)) {
|
|
|
|
GST_ERROR_OBJECT (object, "Cannot post begin-stream message");
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (object->callback_object) {
|
|
|
|
hr = object->callback_object->BeginGetEvent ();
|
|
|
|
if (!gst_mf_result (hr)) {
|
|
|
|
GST_ERROR_OBJECT (object, "BeginGetEvent failed");
|
|
|
|
goto done;
|
|
|
|
}
|
2020-01-07 08:45:22 +00:00
|
|
|
}
|
|
|
|
|
2020-06-08 10:22:07 +00:00
|
|
|
GST_DEBUG_OBJECT (object, "MFT is running now");
|
|
|
|
|
2020-05-18 09:12:38 +00:00
|
|
|
object->running = TRUE;
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
object->flushing = FALSE;
|
2020-01-07 08:45:22 +00:00
|
|
|
}
|
|
|
|
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
/* Wait METransformNeedInput event. While waiting METransformNeedInput
|
|
|
|
* event, we can still output data if MFT notifyes METransformHaveOutput
|
|
|
|
* event. */
|
2020-01-07 08:45:22 +00:00
|
|
|
if (object->hardware) {
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
while (object->pending_need_input == 0 && !object->flushing)
|
|
|
|
g_cond_wait (&object->event_cond, &object->event_lock);
|
|
|
|
}
|
2020-06-08 10:22:07 +00:00
|
|
|
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
if (object->flushing) {
|
|
|
|
GST_DEBUG_OBJECT (object, "We are flushing");
|
|
|
|
ret = TRUE;
|
|
|
|
goto done;
|
|
|
|
}
|
2020-01-07 08:45:22 +00:00
|
|
|
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
ret = gst_mf_transform_process_input_sync (object, sample);
|
2020-06-08 14:46:43 +00:00
|
|
|
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
done:
|
|
|
|
g_mutex_unlock (&object->event_lock);
|
2020-01-07 08:45:22 +00:00
|
|
|
|
mfvideoenc: Improve latency performance for hardware encoder
Unlike software MFT (Media Foundation Transform) which is synchronous
in terms of processing input and output data, hardware MFT works
in asynchronous mode. output data might not be available right after
we pushed one input data into MFT.
Note that async MFT will fire two events, one is "METransformNeedInput"
which happens when MFT can accept more input data,
and the other is "METransformHaveOutput", that's for signaling
there's pending data which can be outputted immediately.
To listen the events, we can wait synchronously via
IMFMediaEventGenerator::GetEvent() or make use of IMFAsyncCallback
object which is asynchronous way and the event will be notified
from Media Foundation's internal worker queue thread.
To handle such asynchronous operation, previous working flow was
as follows (IMFMediaEventGenerator::GetEvent() was used for now)
- Check if there is pending output data and push the data toward downstream.
- Pulling events (from streaming thread) until there's at least
one pending "METransformNeedInput" event
- Then, push one data into MFT from streaming thread
- Check if there is pending "METransformHaveOutput" again.
If there is, push new output data to downstream
(unlikely there is pending output data at this moment)
Above flow was processed from upstream streaming thread. That means
even if there's available output data, it could be outputted later
when the next buffer is pushed from upstream streaming thread.
It would introduce at least one frame latency in case of live stream.
To reduce such latency, this commit modifies the flow to be fully
asynchronous like hardware MFT was designed and to be able to
output encoded data whenever it's available. More specifically,
IMFAsyncCallback object will be used for handling
"METransformNeedInput" and "METransformHaveOutput" events from
Media Foundation's internal thread, and new output data will be
also outputted from the Media Foundation's thread.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1520>
2020-08-18 18:19:26 +00:00
|
|
|
return ret;
|
2020-01-07 08:45:22 +00:00
|
|
|
}
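
/* Software (synchronous) MFT only: runs ProcessOutput() once and hands back
 * one queued sample. GST_MF_TRANSFORM_FLOW_NEED_DATA means the transform
 * needs more input before it can produce output. */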
GstFlowReturn
gst_mf_transform_get_output (GstMFTransform * object, IMFSample ** sample)
{
  GstFlowReturn ret;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), GST_FLOW_ERROR);
  g_return_val_if_fail (sample != nullptr, GST_FLOW_ERROR);
  /* Hardware MFT must not call this method; instead, the client must install
   * a new-sample callback so that output data is delivered from Media
   * Foundation's worker thread */
  g_return_val_if_fail (!object->hardware, GST_FLOW_ERROR);

  if (!object->transform)
    return GST_FLOW_ERROR;

  ret = gst_mf_transform_process_output (object);
  if (ret != GST_MF_TRANSFORM_FLOW_NEED_DATA && ret != GST_FLOW_OK)
    return ret;

  if (g_queue_is_empty (object->output_queue))
    return GST_MF_TRANSFORM_FLOW_NEED_DATA;

  *sample = (IMFSample *) g_queue_pop_head (object->output_queue);

  return GST_FLOW_OK;
}
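
As a usage illustration, a caller might pump a software (synchronous) MFT as
sketched below; pump_transform_example() and its arguments are assumptions
for this example, not part of this file.

/* Illustrative sketch only: feed one input sample, then collect every output
 * sample that is ready. */
static GstFlowReturn
pump_transform_example (GstMFTransform * enc, IMFSample * input_sample)
{
  GstFlowReturn ret;

  if (!gst_mf_transform_process_input (enc, input_sample))
    return GST_FLOW_ERROR;

  do {
    IMFSample *output = nullptr;

    ret = gst_mf_transform_get_output (enc, &output);
    if (ret == GST_FLOW_OK) {
      /* ... use "output" (e.g. wrap it into a GstBuffer), then release ... */
      output->Release ();
    }
  } while (ret == GST_FLOW_OK);

  /* NEED_DATA simply means: feed the next input sample */
  if (ret == GST_MF_TRANSFORM_FLOW_NEED_DATA)
    return GST_FLOW_OK;

  return ret;
}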

gboolean
gst_mf_transform_flush (GstMFTransform * object)
{
  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);

  g_mutex_lock (&object->event_lock);
  object->flushing = TRUE;
  g_cond_broadcast (&object->event_cond);
  g_mutex_unlock (&object->event_lock);

  if (object->transform) {
    /* In case of async MFT, there would be no more event after FLUSH,
     * so the callback object shouldn't wait for another event.
     * Call Stop() so that our callback object can stop calling BeginGetEvent()
     * from its Invoke() method */
    if (object->callback_object)
      object->callback_object->Stop ();

    if (object->running) {
      object->transform->ProcessMessage (MFT_MESSAGE_COMMAND_FLUSH, 0);
    }

    object->pending_need_input = 0;
  }

  object->running = FALSE;

  while (!g_queue_is_empty (object->output_queue)) {
    IMFSample *sample = (IMFSample *) g_queue_pop_head (object->output_queue);
    sample->Release ();
  }

  return TRUE;
}
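
/* Sends MFT_MESSAGE_COMMAND_DRAIN and collects every remaining output.
 * For a hardware MFT the draining flag is cleared by the event callback on
 * Media Foundation's worker thread; for a software MFT we loop
 * ProcessOutput() on the calling thread until it stops returning
 * GST_FLOW_OK. */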
gboolean
gst_mf_transform_drain (GstMFTransform * object)
{
  GstFlowReturn ret;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);

  if (!object->transform || !object->running)
    return TRUE;

  object->running = FALSE;
  object->draining = TRUE;

  GST_DEBUG_OBJECT (object, "Start drain");

  object->transform->ProcessMessage (MFT_MESSAGE_COMMAND_DRAIN, 0);

  if (object->hardware) {
    g_mutex_lock (&object->event_lock);
    while (object->draining)
      g_cond_wait (&object->event_cond, &object->event_lock);
    g_mutex_unlock (&object->event_lock);
  } else {
    do {
      ret = gst_mf_transform_process_output (object);
    } while (ret == GST_FLOW_OK);
  }

  GST_DEBUG_OBJECT (object, "End drain");

  object->draining = FALSE;
  object->pending_need_input = 0;

  return TRUE;
}
|
|
|
|
|
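/* Illustrative sketch (not part of the original source): how a wrapper
 * element might call the drain routine above at EOS. The exact
 * gst_mf_transform_drain () signature is assumed from context; for hardware
 * MFTs it blocks on event_cond until the METransformDrainComplete handler
 * clears object->draining, while remaining samples are still delivered from
 * Media Foundation's worker thread. */
#if 0
static GstFlowReturn
example_finish (GstMFTransform * transform)
{
  /* Blocks until all queued input has been encoded and outputted */
  gst_mf_transform_drain (transform);

  return GST_FLOW_OK;
}
#endif
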
typedef struct
{
  GstMFTransform *object;
  gboolean invoked;
  gboolean ret;
} GstMFTransformOpenData;

static gboolean
gst_mf_transform_open_internal (GstMFTransformOpenData * data)
{
  GstMFTransform *object = data->object;
  HRESULT hr;

  data->ret = FALSE;

  gst_mf_transform_close (object);
  hr = object->activate->ActivateObject (IID_PPV_ARGS (&object->transform));

  if (!gst_mf_result (hr)) {
    GST_WARNING_OBJECT (object, "Couldn't open MFT");
    goto done;
  }

  if (object->hardware) {
    ComPtr < IMFAttributes > attr;
    UINT32 supports_d3d11 = 0;

    hr = object->transform->GetAttributes (&attr);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (object, "Couldn't get attribute object");
      goto done;
    }

    hr = attr->SetUINT32 (MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
    if (!gst_mf_result (hr)) {
      GST_ERROR_OBJECT (object, "MF_TRANSFORM_ASYNC_UNLOCK error");
      goto done;
    }

    hr = attr->GetUINT32 (GST_GUID_MF_SA_D3D11_AWARE, &supports_d3d11);
    if (gst_mf_result (hr) && supports_d3d11 != 0) {
      GST_DEBUG_OBJECT (object, "MFT supports direct3d11");
      object->d3d11_aware = TRUE;
    }

    /* Create our IMFAsyncCallback object so that we can listen for the
     * METransformNeedInput and METransformHaveOutput events. The event
     * callback will be invoked from Media Foundation's worker queue thread */
    hr = GstMFTransformAsyncCallback::CreateInstance (object->transform,
        (GstMFTransformAsyncCallbackOnEvent) gst_mf_transform_on_event,
        GST_OBJECT_CAST (object), &object->callback_object);

    if (!object->callback_object) {
      GST_ERROR_OBJECT (object, "IMFMediaEventGenerator unavailable");
      goto done;
    }
  }

  hr = object->transform->GetStreamIDs (1, &object->input_id, 1,
      &object->output_id);
  if (hr == E_NOTIMPL) {
    object->input_id = 0;
    object->output_id = 0;
  }

  hr = object->transform->QueryInterface (IID_PPV_ARGS (&object->codec_api));
  if (!gst_mf_result (hr)) {
    GST_WARNING_OBJECT (object, "ICodecAPI is unavailable");
  }

  data->ret = TRUE;

done:
  if (!data->ret)
    gst_mf_transform_close (object);

  g_mutex_lock (&object->lock);
  data->invoked = TRUE;
  g_cond_broadcast (&object->cond);
  g_mutex_unlock (&object->lock);

  return G_SOURCE_REMOVE;
}

gboolean
gst_mf_transform_open (GstMFTransform * object)
{
  GstMFTransformOpenData data;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (object->activate != nullptr, FALSE);

  data.object = object;
  data.invoked = FALSE;
  data.ret = FALSE;

  g_main_context_invoke (object->context,
      (GSourceFunc) gst_mf_transform_open_internal, &data);

  g_mutex_lock (&object->lock);
  while (!data.invoked)
    g_cond_wait (&object->cond, &object->lock);
  g_mutex_unlock (&object->lock);

  return data.ret;
}

gboolean
gst_mf_transform_set_device_manager (GstMFTransform * object,
    IMFDXGIDeviceManager * manager)
{
  HRESULT hr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);

  if (!object->transform) {
    GST_ERROR_OBJECT (object, "IMFTransform is not configured yet");
    return FALSE;
  }

  hr = object->transform->ProcessMessage (MFT_MESSAGE_SET_D3D_MANAGER,
      (ULONG_PTR) manager);
  if (!gst_mf_result (hr)) {
    GST_ERROR_OBJECT (object, "Couldn't set device manager");
    return FALSE;
  }

  return TRUE;
}

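/* Illustrative sketch (not part of the original source): feeding a D3D11
 * device to the MFT via the setter above. MFCreateDXGIDeviceManager () and
 * IMFDXGIDeviceManager::ResetDevice () are standard Media Foundation APIs;
 * the caller is assumed to own d3d11_device and to keep a reference to the
 * manager for as long as the MFT uses it. */
#if 0
static gboolean
example_setup_d3d11 (GstMFTransform * transform, ID3D11Device * d3d11_device)
{
  ComPtr < IMFDXGIDeviceManager > manager;
  UINT reset_token = 0;
  HRESULT hr;

  hr = MFCreateDXGIDeviceManager (&reset_token, &manager);
  if (!gst_mf_result (hr))
    return FALSE;

  /* Associate the caller's D3D11 device with the manager */
  hr = manager->ResetDevice (d3d11_device, reset_token);
  if (!gst_mf_result (hr))
    return FALSE;

  return gst_mf_transform_set_device_manager (transform, manager.Get ());
}
#endif
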
void
gst_mf_transform_set_new_sample_callback (GstMFTransform * object,
    GstMFTransformNewSampleCallback callback, gpointer user_data)
{
  g_return_if_fail (GST_IS_MF_TRANSFORM (object));

  object->callback = callback;
  object->user_data = user_data;
}

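/* Illustrative sketch (not part of the original source): registering a
 * new-sample callback. The GstMFTransformNewSampleCallback signature is
 * assumed here to be (transform, sample, user_data); check gstmftransform.h
 * for the authoritative declaration. */
#if 0
static HRESULT
example_on_new_sample (GstMFTransform * transform, IMFSample * sample,
    gpointer user_data)
{
  /* For hardware MFTs this runs on Media Foundation's worker queue thread;
   * convert the IMFSample into a GstBuffer and hand it downstream here */
  return S_OK;
}

static void
example_install_callback (GstMFTransform * transform, gpointer element)
{
  gst_mf_transform_set_new_sample_callback (transform,
      (GstMFTransformNewSampleCallback) example_on_new_sample, element);
}
#endif
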
static gboolean
gst_mf_transform_close (GstMFTransform * object)
{
  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);

  gst_mf_transform_flush (object);

  /* Shut down via IMFActivate first; otherwise the IMFTransform would stay
   * alive even after we release it below */
  if (object->activate)
    object->activate->ShutdownObject ();

  if (object->callback_object) {
    object->callback_object->Release ();
    object->callback_object = nullptr;
  }

  if (object->codec_api) {
    object->codec_api->Release ();
    object->codec_api = nullptr;
  }

  if (object->transform) {
    object->transform->Release ();
    object->transform = nullptr;
  }

  return TRUE;
}

static const gchar *
gst_mf_transform_event_type_to_string (MediaEventType event)
{
  switch (event) {
    case METransformNeedInput:
      return "METransformNeedInput";
    case METransformHaveOutput:
      return "METransformHaveOutput";
    case METransformDrainComplete:
      return "METransformDrainComplete";
    case METransformMarker:
      return "METransformMarker";
    case METransformInputStreamStateChanged:
      return "METransformInputStreamStateChanged";
    default:
      break;
  }

  return "Unknown";
}

static HRESULT
gst_mf_transform_on_event (MediaEventType event, GstMFTransform * self)
{
  GST_TRACE_OBJECT (self, "Have event %s (%d)",
      gst_mf_transform_event_type_to_string (event), (gint) event);

  switch (event) {
    case METransformNeedInput:
      g_mutex_lock (&self->event_lock);
      self->pending_need_input++;
      g_cond_broadcast (&self->event_cond);
      g_mutex_unlock (&self->event_lock);
      break;
    case METransformHaveOutput:
      gst_mf_transform_process_output (self);
      break;
    case METransformDrainComplete:
      g_mutex_lock (&self->event_lock);
      self->draining = FALSE;
      g_cond_broadcast (&self->event_cond);
      g_mutex_unlock (&self->event_lock);
      break;
    default:
      break;
  }

  return S_OK;
}

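/* Illustrative sketch (not part of the original source): the consumer side
 * of pending_need_input. A process-input path for a hardware MFT would wait
 * until the handler above has counted at least one METransformNeedInput
 * event before calling IMFTransform::ProcessInput (). */
#if 0
static void
example_wait_for_need_input (GstMFTransform * self)
{
  g_mutex_lock (&self->event_lock);
  while (self->pending_need_input == 0)
    g_cond_wait (&self->event_cond, &self->event_lock);
  /* Consume one credit; ProcessInput () may be called once per event */
  self->pending_need_input--;
  g_mutex_unlock (&self->event_lock);
}
#endif
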
IMFActivate *
gst_mf_transform_get_activate_handle (GstMFTransform * object)
{
  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), nullptr);

  return object->activate;
}

IMFTransform *
gst_mf_transform_get_transform_handle (GstMFTransform * object)
{
  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), nullptr);

  if (!object->transform) {
    GST_WARNING_OBJECT (object,
        "IMFTransform is not configured, open MFT first");
    return nullptr;
  }

  return object->transform;
}

ICodecAPI *
gst_mf_transform_get_codec_api_handle (GstMFTransform * object)
{
  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), nullptr);

  if (!object->codec_api) {
    GST_WARNING_OBJECT (object, "ICodecAPI is not configured, open MFT first");
    return nullptr;
  }

  return object->codec_api;
}

gboolean
gst_mf_transform_get_input_available_types (GstMFTransform * object,
    GList ** input_types)
{
  IMFTransform *transform;
  HRESULT hr;
  DWORD index = 0;
  GList *list = nullptr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (input_types != nullptr, FALSE);

  transform = object->transform;

  if (!transform) {
    GST_ERROR_OBJECT (object, "Should open first");
    return FALSE;
  }

  do {
    IMFMediaType *type = nullptr;

    hr = transform->GetInputAvailableType (object->input_id, index, &type);
    if (SUCCEEDED (hr))
      list = g_list_append (list, type);

    index++;
  } while (SUCCEEDED (hr));

  if (!list)
    return FALSE;

  *input_types = list;

  return TRUE;
}

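/* Illustrative sketch (not part of the original source): consuming the list
 * returned above. Each entry is an IMFMediaType the caller owns and must
 * Release (); a small GDestroyNotify wrapper keeps the cleanup in one
 * place. */
#if 0
static void
example_release_media_type (IMFMediaType * type)
{
  type->Release ();
}

static void
example_inspect_input_types (GstMFTransform * transform)
{
  GList *types = nullptr;
  GList *iter;

  if (!gst_mf_transform_get_input_available_types (transform, &types))
    return;

  for (iter = types; iter; iter = g_list_next (iter)) {
    IMFMediaType *type = (IMFMediaType *) iter->data;
    GUID subtype = GUID_NULL;

    /* Inspect or match the video subtype here */
    type->GetGUID (MF_MT_SUBTYPE, &subtype);
  }

  g_list_free_full (types, (GDestroyNotify) example_release_media_type);
}
#endif
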
gboolean
gst_mf_transform_get_output_available_types (GstMFTransform * object,
    GList ** output_types)
{
  IMFTransform *transform;
  HRESULT hr;
  DWORD index = 0;
  GList *list = nullptr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (output_types != nullptr, FALSE);

  transform = object->transform;

  if (!transform) {
    GST_ERROR_OBJECT (object, "Should open first");
    return FALSE;
  }

  do {
    IMFMediaType *type = nullptr;

    /* Query by the output stream ID; the original passed input_id here,
     * which only worked because both IDs are usually zero */
    hr = transform->GetOutputAvailableType (object->output_id, index, &type);
    if (SUCCEEDED (hr))
      list = g_list_append (list, type);

    index++;
  } while (SUCCEEDED (hr));

  if (!list)
    return FALSE;

  *output_types = list;

  return TRUE;
}

gboolean
gst_mf_transform_set_input_type (GstMFTransform * object,
    IMFMediaType * input_type)
{
  IMFTransform *transform;
  HRESULT hr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);

  transform = object->transform;

  if (!transform) {
    GST_ERROR_OBJECT (object, "Should open first");
    return FALSE;
  }

  hr = transform->SetInputType (object->input_id, input_type, 0);
  if (!gst_mf_result (hr))
    return FALSE;

  return TRUE;
}

gboolean
gst_mf_transform_set_output_type (GstMFTransform * object,
    IMFMediaType * output_type)
{
  IMFTransform *transform;
  HRESULT hr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);

  transform = object->transform;

  if (!transform) {
    GST_ERROR_OBJECT (object, "Should open first");
    return FALSE;
  }

  hr = transform->SetOutputType (object->output_id, output_type, 0);
  if (!gst_mf_result (hr))
    return FALSE;

  return TRUE;
}

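/* Illustrative sketch (not part of the original source): building an NV12
 * input type with standard Media Foundation attribute helpers and applying
 * it through the setter above. Width/height/fps values are placeholders;
 * for encoder MFTs the output type is conventionally configured first. */
#if 0
static gboolean
example_set_nv12_input (GstMFTransform * transform)
{
  ComPtr < IMFMediaType > type;
  HRESULT hr;

  hr = MFCreateMediaType (&type);
  if (!gst_mf_result (hr))
    return FALSE;

  type->SetGUID (MF_MT_MAJOR_TYPE, MFMediaType_Video);
  type->SetGUID (MF_MT_SUBTYPE, MFVideoFormat_NV12);
  MFSetAttributeSize (type.Get (), MF_MT_FRAME_SIZE, 1920, 1080);
  MFSetAttributeRatio (type.Get (), MF_MT_FRAME_RATE, 30, 1);

  return gst_mf_transform_set_input_type (transform, type.Get ());
}
#endif
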
gboolean
gst_mf_transform_get_input_current_type (GstMFTransform * object,
    IMFMediaType ** input_type)
{
  IMFTransform *transform;
  HRESULT hr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (input_type != nullptr, FALSE);

  transform = object->transform;

  if (!transform) {
    GST_ERROR_OBJECT (object, "Should open first");
    return FALSE;
  }

  hr = transform->GetInputCurrentType (object->input_id, input_type);
  if (!gst_mf_result (hr))
    return FALSE;

  return TRUE;
}

gboolean
gst_mf_transform_get_output_current_type (GstMFTransform * object,
    IMFMediaType ** output_type)
{
  IMFTransform *transform;
  HRESULT hr;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (output_type != nullptr, FALSE);

  transform = object->transform;

  if (!transform) {
    GST_ERROR_OBJECT (object, "Should open first");
    return FALSE;
  }

  hr = transform->GetOutputCurrentType (object->output_id, output_type);
  if (!gst_mf_result (hr))
    return FALSE;

  return TRUE;
}

GstMFTransform *
gst_mf_transform_new (GstMFTransformEnumParams * params)
{
  GstMFTransform *self;

  g_return_val_if_fail (params != nullptr, nullptr);

  self = (GstMFTransform *) g_object_new (GST_TYPE_MF_TRANSFORM_OBJECT,
      "enum-params", params, nullptr);

  if (!self->initialized) {
    gst_object_unref (self);
    return nullptr;
  }

  gst_object_ref_sink (self);

  return self;
}

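/* Illustrative sketch (not part of the original source): the overall object
 * lifecycle using only functions defined in this file. The contents of
 * GstMFTransformEnumParams are assumed to be filled in by the caller; see
 * gstmftransform.h for the real fields. */
#if 0
static GstMFTransform *
example_create_and_open (GstMFTransformEnumParams * params)
{
  GstMFTransform *transform;

  /* Enumerates and wraps the MFT described by params */
  transform = gst_mf_transform_new (params);
  if (!transform)
    return nullptr;

  /* ActivateObject () runs on the object's internal main context */
  if (!gst_mf_transform_open (transform)) {
    gst_object_unref (transform);
    return nullptr;
  }

  return transform;
}
#endif
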
gboolean
gst_mf_transform_set_codec_api_uint32 (GstMFTransform * object,
    const GUID * api, guint32 value)
{
  HRESULT hr;
  VARIANT var;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (api != nullptr, FALSE);

  if (!object->codec_api) {
    GST_WARNING_OBJECT (object, "codec api unavailable");
    return FALSE;
  }

  VariantInit (&var);
  var.vt = VT_UI4;
  var.ulVal = value;

  hr = object->codec_api->SetValue (api, &var);
  VariantClear (&var);

  return gst_mf_result (hr);
}

gboolean
gst_mf_transform_set_codec_api_uint64 (GstMFTransform * object,
    const GUID * api, guint64 value)
{
  HRESULT hr;
  VARIANT var;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (api != nullptr, FALSE);

  if (!object->codec_api) {
    GST_WARNING_OBJECT (object, "codec api unavailable");
    return FALSE;
  }

  VariantInit (&var);
  var.vt = VT_UI8;
  var.ullVal = value;

  hr = object->codec_api->SetValue (api, &var);
  VariantClear (&var);

  return gst_mf_result (hr);
}

gboolean
gst_mf_transform_set_codec_api_boolean (GstMFTransform * object,
    const GUID * api, gboolean value)
{
  HRESULT hr;
  VARIANT var;

  g_return_val_if_fail (GST_IS_MF_TRANSFORM (object), FALSE);
  g_return_val_if_fail (api != nullptr, FALSE);

  if (!object->codec_api) {
    GST_WARNING_OBJECT (object, "codec api unavailable");
    return FALSE;
  }

  VariantInit (&var);
  var.vt = VT_BOOL;
  var.boolVal = value ? VARIANT_TRUE : VARIANT_FALSE;

  hr = object->codec_api->SetValue (api, &var);
  VariantClear (&var);

  return gst_mf_result (hr);
}

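/* Illustrative sketch (not part of the original source): driving the setters
 * above with standard ICodecAPI properties from codecapi.h. The property
 * GUIDs are real Windows definitions; the chosen values are placeholders. */
#if 0
static void
example_configure_encoder (GstMFTransform * transform)
{
  /* Target bitrate in bits per second (a VT_UI4 property) */
  gst_mf_transform_set_codec_api_uint32 (transform,
      &CODECAPI_AVEncCommonMeanBitRate, 4000000);

  /* Prefer low-latency operation (a VT_BOOL property) */
  gst_mf_transform_set_codec_api_boolean (transform,
      &CODECAPI_AVLowLatencyMode, TRUE);
}
#endif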