gstreamer/subprojects/gst-plugins-bad/sys/wasapi2/gstwasapi2util.h


/* GStreamer
* Copyright (C) 2020 Seungha Yang <seungha@centricular.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef __GST_WASAPI2_UTIL_H__
#define __GST_WASAPI2_UTIL_H__

#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <windows.h>
#include <initguid.h>
#include <audioclient.h>
#include <endpointvolume.h>

G_BEGIN_DECLS

/* Static Caps shared between source, sink, and device provider */
#define GST_WASAPI2_STATIC_CAPS "audio/x-raw, " \
"format = (string) " GST_AUDIO_FORMATS_ALL ", " \
"layout = (string) interleaved, " \
"rate = " GST_AUDIO_RATE_RANGE ", " \
"channels = " GST_AUDIO_CHANNELS_RANGE
#define GST_WASAPI2_CLEAR_COM(obj) G_STMT_START { \
if (obj) { \
(obj)->Release (); \
(obj) = NULL; \
} \
} G_STMT_END
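
/* Usage sketch (illustrative; "client" is a hypothetical local): releases a
 * COM interface pointer and resets it to NULL, so the macro is safe to call
 * on an already-cleared pointer.
 *
 *   IAudioClient *client = NULL;
 *   ... obtain and use the interface ...
 *   GST_WASAPI2_CLEAR_COM (client);
 */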
gboolean _gst_wasapi2_result (HRESULT hr,
GstDebugCategory * cat,
const gchar * file,
const gchar * function,
gint line);
#ifndef GST_DISABLE_GST_DEBUG
#define gst_wasapi2_result(result) \
_gst_wasapi2_result (result, GST_CAT_DEFAULT, __FILE__, GST_FUNCTION, __LINE__)
#else
#define gst_wasapi2_result(result) \
_gst_wasapi2_result (result, NULL, __FILE__, GST_FUNCTION, __LINE__)
#endif
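
/* Usage sketch (illustrative; "client" and "buffer_frames" are hypothetical
 * locals): the macro evaluates to whether the HRESULT indicates success and,
 * in debug builds, logs the translated error against GST_CAT_DEFAULT.
 *
 *   UINT32 buffer_frames = 0;
 *   hr = client->GetBufferSize (&buffer_frames);
 *   if (!gst_wasapi2_result (hr))
 *     return FALSE;
 */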
guint64 gst_wasapi2_util_waveformatex_to_channel_mask (WAVEFORMATEX * format,
    GstAudioChannelPosition ** out_position);

const gchar * gst_wasapi2_util_waveformatex_to_audio_format (WAVEFORMATEX * format);

gboolean gst_wasapi2_util_parse_waveformatex (WAVEFORMATEX * format,
    GstCaps * template_caps,
    GstCaps ** out_caps,
    GstAudioChannelPosition ** out_positions);
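
/* Usage sketch (illustrative; "mix_format" and "template_caps" are
 * hypothetical locals): converts a device WAVEFORMATEX into caps constrained
 * by a template, with the matching channel positions returned separately.
 *
 *   GstCaps *caps = NULL;
 *   GstAudioChannelPosition *positions = NULL;
 *   if (gst_wasapi2_util_parse_waveformatex (mix_format, template_caps,
 *           &caps, &positions)) {
 *     ... use caps / positions ...
 *   }
 */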
gchar * gst_wasapi2_util_get_error_message (HRESULT hr);

gboolean gst_wasapi2_can_automatic_stream_routing (void);

gboolean gst_wasapi2_can_process_loopback (void);

WAVEFORMATEX * gst_wasapi2_get_default_mix_format (void);

G_END_DECLS
#endif /* __GST_WASAPI2_UTIL_H__ */