/*
 * Copyright (C) 2008 Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>
 * Copyright (C) 2018 Centricular Ltd.
 *   Author: Nirbheek Chauhan <nirbheek@centricular.com>
 * Copyright (C) 2020 Seungha Yang <seungha@centricular.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:element-wasapi2src
 * @title: wasapi2src
 *
 * Provides audio capture from the Windows Audio Session API available with
 * Windows 10.
 *
 * ## Example pipelines
 * |[
 * gst-launch-1.0 -v wasapi2src ! fakesink
 * ]| Capture from the default audio device and render to fakesink.
 *
 * |[
 * gst-launch-1.0 -v wasapi2src low-latency=true ! fakesink
 * ]| Capture from the default audio device with the minimum possible latency and render to fakesink.
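 *
 * |[
 * gst-launch-1.0 -v wasapi2src loopback=true ! fakesink
 * ]| Capture what is being rendered on the default output device (loopback
 * recording via #GstWasapi2Src:loopback) and render to fakesink.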
 *
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "gstwasapi2src.h"
#include "gstwasapi2util.h"
#include "gstwasapi2ringbuffer.h"

GST_DEBUG_CATEGORY_STATIC (gst_wasapi2_src_debug);
#define GST_CAT_DEFAULT gst_wasapi2_src_debug

static GstStaticPadTemplate src_template = GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_WASAPI2_STATIC_CAPS));

/**
 * GstWasapi2SrcLoopbackMode:
 *
 * Loopback capture mode
 *
 * Since: 1.22
 */
typedef enum
{
  /**
   * GstWasapi2SrcLoopbackMode::default:
   *
   * Default loopback mode
   *
   * Since: 1.22
   */
  GST_WASAPI2_SRC_LOOPBACK_DEFAULT,

  /**
   * GstWasapi2SrcLoopbackMode::include-process-tree:
   *
   * Captures audio only from the specified process and its child processes
   *
   * Since: 1.22
   */
  GST_WASAPI2_SRC_LOOPBACK_INCLUDE_PROCESS_TREE,

  /**
   * GstWasapi2SrcLoopbackMode::exclude-process-tree:
   *
   * Excludes the specified process and its child processes from capture
   *
   * Since: 1.22
   */
  GST_WASAPI2_SRC_LOOPBACK_EXCLUDE_PROCESS_TREE,
} GstWasapi2SrcLoopbackMode;

#define GST_TYPE_WASAPI2_SRC_LOOPBACK_MODE (gst_wasapi2_src_loopback_mode_get_type ())
static GType
gst_wasapi2_src_loopback_mode_get_type (void)
{
  static GType loopback_type = 0;
  static const GEnumValue types[] = {
    {GST_WASAPI2_SRC_LOOPBACK_DEFAULT, "Default", "default"},
    {GST_WASAPI2_SRC_LOOPBACK_INCLUDE_PROCESS_TREE,
        "Include process and its child processes",
        "include-process-tree"},
    {GST_WASAPI2_SRC_LOOPBACK_EXCLUDE_PROCESS_TREE,
        "Exclude process and its child processes",
        "exclude-process-tree"},
    {0, NULL, NULL}
  };

  if (g_once_init_enter (&loopback_type)) {
    GType gtype = g_enum_register_static ("GstWasapi2SrcLoopbackMode", types);
    g_once_init_leave (&loopback_type, gtype);
  }

  return loopback_type;
}

#define DEFAULT_LOW_LATENCY FALSE
#define DEFAULT_MUTE FALSE
#define DEFAULT_VOLUME 1.0
#define DEFAULT_LOOPBACK FALSE
#define DEFAULT_LOOPBACK_MODE GST_WASAPI2_SRC_LOOPBACK_DEFAULT
#define DEFAULT_LOOPBACK_SILENCE_ON_DEVICE_MUTE FALSE

enum
{
  PROP_0,
  PROP_DEVICE,
  PROP_LOW_LATENCY,
  PROP_MUTE,
  PROP_VOLUME,
  PROP_DISPATCHER,
  PROP_LOOPBACK,
  PROP_LOOPBACK_MODE,
  PROP_LOOPBACK_TARGET_PID,
  PROP_LOOPBACK_SILENCE_ON_DEVICE_MUTE,
};

struct _GstWasapi2Src
{
  GstAudioBaseSrc parent;

  /* properties */
  gchar *device_id;
  gboolean low_latency;
  gboolean mute;
  gdouble volume;
  gpointer dispatcher;
  gboolean loopback;
  GstWasapi2SrcLoopbackMode loopback_mode;
  guint loopback_pid;
  gboolean loopback_silence_on_device_mute;

  gboolean mute_changed;
  gboolean volume_changed;
};

static void gst_wasapi2_src_finalize (GObject * object);
static void gst_wasapi2_src_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_wasapi2_src_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

static GstStateChangeReturn gst_wasapi2_src_change_state (GstElement *
    element, GstStateChange transition);

static GstCaps *gst_wasapi2_src_get_caps (GstBaseSrc * bsrc, GstCaps * filter);
static GstAudioRingBuffer *gst_wasapi2_src_create_ringbuffer (GstAudioBaseSrc *
    src);

static void gst_wasapi2_src_set_mute (GstWasapi2Src * self, gboolean mute);
static gboolean gst_wasapi2_src_get_mute (GstWasapi2Src * self);
static void gst_wasapi2_src_set_volume (GstWasapi2Src * self, gdouble volume);
static gdouble gst_wasapi2_src_get_volume (GstWasapi2Src * self);
static void gst_wasapi2_src_set_silence_on_mute (GstWasapi2Src * self,
    gboolean value);

#define gst_wasapi2_src_parent_class parent_class
G_DEFINE_TYPE_WITH_CODE (GstWasapi2Src, gst_wasapi2_src,
    GST_TYPE_AUDIO_BASE_SRC,
    G_IMPLEMENT_INTERFACE (GST_TYPE_STREAM_VOLUME, NULL));

static void
gst_wasapi2_src_class_init (GstWasapi2SrcClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstBaseSrcClass *basesrc_class = GST_BASE_SRC_CLASS (klass);
  GstAudioBaseSrcClass *audiobasesrc_class = GST_AUDIO_BASE_SRC_CLASS (klass);

  gobject_class->finalize = gst_wasapi2_src_finalize;
  gobject_class->set_property = gst_wasapi2_src_set_property;
  gobject_class->get_property = gst_wasapi2_src_get_property;

  g_object_class_install_property (gobject_class, PROP_DEVICE,
      g_param_spec_string ("device", "Device",
          "Audio device ID as provided by "
          "Windows.Devices.Enumeration.DeviceInformation.Id",
          NULL, GST_PARAM_MUTABLE_READY | G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_LOW_LATENCY,
      g_param_spec_boolean ("low-latency", "Low latency",
          "Optimize all settings for lowest latency. Always safe to enable.",
          DEFAULT_LOW_LATENCY, GST_PARAM_MUTABLE_READY | G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_MUTE,
      g_param_spec_boolean ("mute", "Mute", "Mute state of this stream",
          DEFAULT_MUTE, GST_PARAM_MUTABLE_PLAYING | G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_VOLUME,
      g_param_spec_double ("volume", "Volume", "Volume of this stream",
          0.0, 1.0, DEFAULT_VOLUME,
          GST_PARAM_MUTABLE_PLAYING | G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS));

  /**
   * GstWasapi2Src:dispatcher:
   *
   * ICoreDispatcher COM object used for activating device from UI thread.
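   *
   * A minimal usage sketch; the `wasapi2src` and `dispatcher` variables below
   * are assumed to be the element instance and an ICoreDispatcher pointer
   * that the application obtained on its UI thread:
   * |[<!-- language="C" -->
   * g_object_set (wasapi2src, "dispatcher", (gpointer) dispatcher, NULL);
   * ]|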
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_DISPATCHER,
      g_param_spec_pointer ("dispatcher", "Dispatcher",
          "ICoreDispatcher COM object to use. In order for the application "
          "to ask for permission to use the audio device, device activation "
          "should be running on the UI thread via ICoreDispatcher. This "
          "element will increase the reference count of the given "
          "ICoreDispatcher and release it after use, so the caller does not "
          "need to perform any additional reference count management",
          GST_PARAM_MUTABLE_READY | G_PARAM_WRITABLE | G_PARAM_STATIC_STRINGS));

  /**
   * GstWasapi2Src:loopback:
   *
   * Open render device for loopback recording
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class, PROP_LOOPBACK,
      g_param_spec_boolean ("loopback", "Loopback recording",
          "Open render device for loopback recording", DEFAULT_LOOPBACK,
          GST_PARAM_MUTABLE_READY | G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS));

  if (gst_wasapi2_can_process_loopback ()) {
    /**
     * GstWasapi2Src:loopback-mode:
     *
     * Loopback mode. #GstWasapi2Src:loopback-target-pid must be specified
     * in case of process loopback modes.
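     *
     * For example, to capture only the audio of a given process tree (the
     * PID `1234` below is a placeholder for the target process id):
     * |[
     * gst-launch-1.0 -v wasapi2src loopback-mode=include-process-tree loopback-target-pid=1234 ! fakesink
     * ]|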
     *
     * This feature requires Windows 10 build 20348 or newer.
     *
     * Since: 1.22
     */
    g_object_class_install_property (gobject_class, PROP_LOOPBACK_MODE,
        g_param_spec_enum ("loopback-mode", "Loopback Mode",
            "Loopback mode to use", GST_TYPE_WASAPI2_SRC_LOOPBACK_MODE,
            DEFAULT_LOOPBACK_MODE,
            GST_PARAM_CONDITIONALLY_AVAILABLE | GST_PARAM_MUTABLE_READY |
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

    /**
     * GstWasapi2Src:loopback-target-pid:
     *
     * Target process id to be recorded or excluded depending on loopback mode
     *
     * This feature requires Windows 10 build 20348 or newer.
     *
     * Since: 1.22
     */
    g_object_class_install_property (gobject_class, PROP_LOOPBACK_TARGET_PID,
        g_param_spec_uint ("loopback-target-pid", "Loopback Target PID",
            "Process ID to be recorded or excluded for process loopback mode",
            0, G_MAXUINT32, 0,
            GST_PARAM_CONDITIONALLY_AVAILABLE | GST_PARAM_MUTABLE_READY |
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  }

  /**
   * GstWasapi2Src:loopback-silence-on-device-mute:
   *
   * When loopback recording, if the device is muted, inject silence in the pipeline
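   *
   * Example pipeline for loopback recording that keeps producing silence
   * while the device is muted:
   * |[
   * gst-launch-1.0 -v wasapi2src loopback=true loopback-silence-on-device-mute=true ! fakesink
   * ]|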
   *
   * Since: 1.24
   */
  g_object_class_install_property (gobject_class,
      PROP_LOOPBACK_SILENCE_ON_DEVICE_MUTE,
      g_param_spec_boolean ("loopback-silence-on-device-mute",
          "Loopback Silence On Device Mute",
          "When loopback recording, if the device is muted, inject silence in the pipeline",
          DEFAULT_LOOPBACK_SILENCE_ON_DEVICE_MUTE,
          GST_PARAM_MUTABLE_PLAYING | G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS));

  gst_element_class_add_static_pad_template (element_class, &src_template);
  gst_element_class_set_static_metadata (element_class, "Wasapi2Src",
      "Source/Audio/Hardware",
      "Stream audio from an audio capture device through WASAPI",
      "Nirbheek Chauhan <nirbheek@centricular.com>, "
      "Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>, "
      "Seungha Yang <seungha@centricular.com>");

  element_class->change_state =
      GST_DEBUG_FUNCPTR (gst_wasapi2_src_change_state);

  basesrc_class->get_caps = GST_DEBUG_FUNCPTR (gst_wasapi2_src_get_caps);

  audiobasesrc_class->create_ringbuffer =
      GST_DEBUG_FUNCPTR (gst_wasapi2_src_create_ringbuffer);

  GST_DEBUG_CATEGORY_INIT (gst_wasapi2_src_debug, "wasapi2src",
      0, "Windows audio session API source");

  if (gst_wasapi2_can_process_loopback ())
    gst_type_mark_as_plugin_api (GST_TYPE_WASAPI2_SRC_LOOPBACK_MODE, 0);
}

static void
gst_wasapi2_src_init (GstWasapi2Src * self)
{
  self->mute = DEFAULT_MUTE;
  self->volume = DEFAULT_VOLUME;
  self->low_latency = DEFAULT_LOW_LATENCY;
  self->loopback = DEFAULT_LOOPBACK;
  self->loopback_silence_on_device_mute =
      DEFAULT_LOOPBACK_SILENCE_ON_DEVICE_MUTE;
}

static void
gst_wasapi2_src_finalize (GObject * object)
{
  GstWasapi2Src *self = GST_WASAPI2_SRC (object);

  g_free (self->device_id);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

static void
gst_wasapi2_src_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstWasapi2Src *self = GST_WASAPI2_SRC (object);

  switch (prop_id) {
    case PROP_DEVICE:
      g_free (self->device_id);
      self->device_id = g_value_dup_string (value);
      break;
    case PROP_LOW_LATENCY:
      self->low_latency = g_value_get_boolean (value);
      break;
    case PROP_MUTE:
      gst_wasapi2_src_set_mute (self, g_value_get_boolean (value));
      break;
    case PROP_VOLUME:
      gst_wasapi2_src_set_volume (self, g_value_get_double (value));
      break;
    case PROP_DISPATCHER:
      self->dispatcher = g_value_get_pointer (value);
      break;
    case PROP_LOOPBACK:
      self->loopback = g_value_get_boolean (value);
      break;
    case PROP_LOOPBACK_MODE:
      self->loopback_mode = g_value_get_enum (value);
      break;
    case PROP_LOOPBACK_TARGET_PID:
      self->loopback_pid = g_value_get_uint (value);
      break;
    case PROP_LOOPBACK_SILENCE_ON_DEVICE_MUTE:
      gst_wasapi2_src_set_silence_on_mute (self, g_value_get_boolean (value));
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

static void
gst_wasapi2_src_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstWasapi2Src *self = GST_WASAPI2_SRC (object);

  switch (prop_id) {
    case PROP_DEVICE:
      g_value_set_string (value, self->device_id);
      break;
    case PROP_LOW_LATENCY:
      g_value_set_boolean (value, self->low_latency);
      break;
    case PROP_MUTE:
      g_value_set_boolean (value, gst_wasapi2_src_get_mute (self));
      break;
    case PROP_VOLUME:
      g_value_set_double (value, gst_wasapi2_src_get_volume (self));
      break;
    case PROP_LOOPBACK:
      g_value_set_boolean (value, self->loopback);
      break;
    case PROP_LOOPBACK_MODE:
      g_value_set_enum (value, self->loopback_mode);
      break;
    case PROP_LOOPBACK_TARGET_PID:
      g_value_set_uint (value, self->loopback_pid);
      break;
    case PROP_LOOPBACK_SILENCE_ON_DEVICE_MUTE:
      g_value_set_boolean (value, self->loopback_silence_on_device_mute);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

static GstStateChangeReturn
gst_wasapi2_src_change_state (GstElement * element, GstStateChange transition)
{
  GstWasapi2Src *self = GST_WASAPI2_SRC (element);
  GstAudioBaseSrc *asrc = GST_AUDIO_BASE_SRC_CAST (element);

  switch (transition) {
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      /* If we have pending volume/mute values to set, do it here */
      GST_OBJECT_LOCK (self);
      if (asrc->ringbuffer) {
        GstWasapi2RingBuffer *ringbuffer =
            GST_WASAPI2_RING_BUFFER (asrc->ringbuffer);

        if (self->volume_changed) {
          gst_wasapi2_ring_buffer_set_volume (ringbuffer, self->volume);
          self->volume_changed = FALSE;
        }

        if (self->mute_changed) {
          gst_wasapi2_ring_buffer_set_mute (ringbuffer, self->mute);
          self->mute_changed = FALSE;
        }
      }
      GST_OBJECT_UNLOCK (self);
      break;
    default:
      break;
  }

  return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
}

static GstCaps *
gst_wasapi2_src_get_caps (GstBaseSrc * bsrc, GstCaps * filter)
{
  GstAudioBaseSrc *asrc = GST_AUDIO_BASE_SRC_CAST (bsrc);
  GstCaps *caps = NULL;

  GST_OBJECT_LOCK (bsrc);
  if (asrc->ringbuffer) {
    GstWasapi2RingBuffer *ringbuffer =
        GST_WASAPI2_RING_BUFFER (asrc->ringbuffer);

    gst_object_ref (ringbuffer);
    GST_OBJECT_UNLOCK (bsrc);

    /* Getting caps might block if the device is not activated yet */
    caps = gst_wasapi2_ring_buffer_get_caps (ringbuffer);
    gst_object_unref (ringbuffer);
  } else {
    GST_OBJECT_UNLOCK (bsrc);
  }

  if (!caps)
    caps = gst_pad_get_pad_template_caps (bsrc->srcpad);

  if (filter) {
    GstCaps *filtered =
        gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (caps);
    caps = filtered;
  }

  GST_DEBUG_OBJECT (bsrc, "returning caps %" GST_PTR_FORMAT, caps);

  return caps;
}

static GstAudioRingBuffer *
gst_wasapi2_src_create_ringbuffer (GstAudioBaseSrc * src)
{
  GstWasapi2Src *self = GST_WASAPI2_SRC (src);
  GstAudioRingBuffer *ringbuffer;
  gchar *name;
  GstWasapi2ClientDeviceClass device_class =
      GST_WASAPI2_CLIENT_DEVICE_CLASS_CAPTURE;

  if (self->loopback_pid) {
    if (self->loopback_mode == GST_WASAPI2_SRC_LOOPBACK_INCLUDE_PROCESS_TREE) {
      device_class =
          GST_WASAPI2_CLIENT_DEVICE_CLASS_INCLUDE_PROCESS_LOOPBACK_CAPTURE;
    } else if (self->loopback_mode ==
        GST_WASAPI2_SRC_LOOPBACK_EXCLUDE_PROCESS_TREE) {
      device_class =
          GST_WASAPI2_CLIENT_DEVICE_CLASS_EXCLUDE_PROCESS_LOOPBACK_CAPTURE;
    }
  } else if (self->loopback) {
    device_class = GST_WASAPI2_CLIENT_DEVICE_CLASS_LOOPBACK_CAPTURE;
  }

  GST_DEBUG_OBJECT (self, "Device class %d", device_class);

  name = g_strdup_printf ("%s-ringbuffer", GST_OBJECT_NAME (src));

  ringbuffer =
      gst_wasapi2_ring_buffer_new (device_class,
      self->low_latency, self->device_id, self->dispatcher, name,
      self->loopback_pid);

  g_free (name);

  if (self->loopback) {
    gst_wasapi2_ring_buffer_set_device_mute_monitoring (GST_WASAPI2_RING_BUFFER
        (ringbuffer), self->loopback_silence_on_device_mute);
  }

  return ringbuffer;
}

static void
gst_wasapi2_src_set_mute (GstWasapi2Src * self, gboolean mute)
{
  GstAudioBaseSrc *bsrc = GST_AUDIO_BASE_SRC_CAST (self);
  HRESULT hr;

  GST_OBJECT_LOCK (self);

  self->mute = mute;
  self->mute_changed = TRUE;

  if (bsrc->ringbuffer) {
    GstWasapi2RingBuffer *ringbuffer =
        GST_WASAPI2_RING_BUFFER (bsrc->ringbuffer);

    hr = gst_wasapi2_ring_buffer_set_mute (ringbuffer, mute);
    if (FAILED (hr)) {
      GST_INFO_OBJECT (self, "Couldn't set mute");
    } else {
      self->mute_changed = FALSE;
    }
  }

  GST_OBJECT_UNLOCK (self);
}
|
|
|
|
|
|
|
|
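/* Reads the cached mute flag; when a ring buffer is active, the value is
 * refreshed from the device first. */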
static gboolean
gst_wasapi2_src_get_mute (GstWasapi2Src * self)
{
  GstAudioBaseSrc *bsrc = GST_AUDIO_BASE_SRC_CAST (self);
  gboolean mute;
  HRESULT hr;

  GST_OBJECT_LOCK (self);

  mute = self->mute;

  if (bsrc->ringbuffer) {
    GstWasapi2RingBuffer *ringbuffer =
        GST_WASAPI2_RING_BUFFER (bsrc->ringbuffer);

    hr = gst_wasapi2_ring_buffer_get_mute (ringbuffer, &mute);
    if (FAILED (hr)) {
      GST_INFO_OBJECT (self, "Couldn't get mute");
    } else {
      self->mute = mute;
    }
  }

  GST_OBJECT_UNLOCK (self);

  return mute;
}

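/* Clamps the requested volume to [0.0, 1.0], caches it and forwards it to
 * the ring buffer when one is available; volume_changed stays set until the
 * device has accepted the value. */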
static void
gst_wasapi2_src_set_volume (GstWasapi2Src * self, gdouble volume)
{
  GstAudioBaseSrc *bsrc = GST_AUDIO_BASE_SRC_CAST (self);
  HRESULT hr;

  GST_OBJECT_LOCK (self);

  self->volume = volume;
  /* clip volume value */
  self->volume = MAX (0.0, self->volume);
  self->volume = MIN (1.0, self->volume);
  self->volume_changed = TRUE;

  if (bsrc->ringbuffer) {
    GstWasapi2RingBuffer *ringbuffer =
        GST_WASAPI2_RING_BUFFER (bsrc->ringbuffer);

    hr = gst_wasapi2_ring_buffer_set_volume (ringbuffer, (gfloat) self->volume);
    if (FAILED (hr)) {
      GST_INFO_OBJECT (self, "Couldn't set volume");
    } else {
      self->volume_changed = FALSE;
    }
  }

  GST_OBJECT_UNLOCK (self);
}

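/* Reads the cached volume; when a ring buffer is active, the value is
 * refreshed from the device first, then clamped to [0.0, 1.0]. */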
static gdouble
gst_wasapi2_src_get_volume (GstWasapi2Src * self)
{
  GstAudioBaseSrc *bsrc = GST_AUDIO_BASE_SRC_CAST (self);
  gfloat volume;
  HRESULT hr;

  GST_OBJECT_LOCK (self);

  volume = (gfloat) self->volume;

  if (bsrc->ringbuffer) {
    GstWasapi2RingBuffer *ringbuffer =
        GST_WASAPI2_RING_BUFFER (bsrc->ringbuffer);

    hr = gst_wasapi2_ring_buffer_get_volume (ringbuffer, &volume);
    if (FAILED (hr)) {
      GST_INFO_OBJECT (self, "Couldn't get volume");
    } else {
      self->volume = volume;
    }
  }

  GST_OBJECT_UNLOCK (self);

  volume = MAX (0.0, volume);
  volume = MIN (1.0, volume);

  return volume;
}

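/* Caches the silence-on-device-mute setting and, when capturing in loopback
 * mode with an active ring buffer, toggles device mute monitoring
 * accordingly. */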
static void
gst_wasapi2_src_set_silence_on_mute (GstWasapi2Src * self, gboolean value)
{
  GstAudioBaseSrc *bsrc = GST_AUDIO_BASE_SRC_CAST (self);

  GST_OBJECT_LOCK (self);

  self->loopback_silence_on_device_mute = value;

  if (self->loopback && bsrc->ringbuffer) {
    GstWasapi2RingBuffer *ringbuffer =
        GST_WASAPI2_RING_BUFFER (bsrc->ringbuffer);

    gst_wasapi2_ring_buffer_set_device_mute_monitoring (ringbuffer, value);
  }

  GST_OBJECT_UNLOCK (self);
}