webrtcdsp: Bump to WebRTC AudioProcessing 2.1
Keep 1.0 support around so distros can manage this bump more easily.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/8270>
parent 432ada66f2
commit 9b647a0bbd

3 changed files with 58 additions and 11 deletions
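How the dual support fits together: the build first probes for webrtc-audio-processing-2 and only falls back to webrtc-audio-processing-1, defining HAVE_WEBRTC1 in that case (see the meson.build hunk further down). The element source, gstwebrtcdsp.cpp, then branches on that define wherever the two API generations differ. Below is a condensed sketch of those branches, not the element code itself; the setup()/teardown() helpers are illustrative, and the include path assumes the usual webrtc-audio-processing pkg-config include directory.

/* Condensed sketch of the compile-time branches the patch introduces.
 * HAVE_WEBRTC1 means only webrtc-audio-processing-1 was found at build time. */
#include <modules/audio_processing/include/audio_processing.h>

/* The processor is held through a ref-counted handle in both cases. */
static rtc::scoped_refptr<webrtc::AudioProcessing> apm;

static void
setup (int rate, int channels)
{
#ifdef HAVE_WEBRTC1
  /* 1.x: Create() returns a raw AudioProcessing *, so wrap it explicitly. */
  apm = rtc::scoped_refptr (webrtc::AudioProcessingBuilder ().Create ());
  /* 1.x stream configs still take the has_keyboard flag. */
  webrtc::StreamConfig config (rate, channels, false);
#else
  /* 2.x: Create() already returns rtc::scoped_refptr<AudioProcessing>,
   * and StreamConfig lost its third parameter. */
  apm = webrtc::AudioProcessingBuilder ().Create ();
  webrtc::StreamConfig config (rate, channels);
#endif
  (void) config;
}

static void
teardown (void)
{
  /* No manual delete with scoped_refptr: dropping the last reference
   * releases the processor, hence "self->apm = nullptr" in the hunks below. */
  apm = nullptr;
}

The gstwebrtcdsp.cpp hunks follow.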
@@ -241,12 +241,14 @@ struct _GstWebrtcDsp
   gboolean interleaved;
   guint period_size;
   guint period_samples;
+#ifdef HAVE_WEBRTC1
   gboolean stream_has_voice;
+#endif
 
   /* Protected by the stream lock */
   GstAdapter *adapter;
   GstPlanarAudioAdapter *padapter;
-  webrtc::AudioProcessing *apm;
+  rtc::scoped_refptr<webrtc::AudioProcessing> apm;
 
   /* Protected by the object lock */
   gchar *probe_name;
@@ -263,7 +265,9 @@ struct _GstWebrtcDsp
   gint startup_min_volume;
   gboolean limiter;
   webrtc::AudioProcessing::Config::GainController1::Mode gain_control_mode;
+#ifdef HAVE_WEBRTC1
   gboolean voice_detection;
+#endif
 };
 
 G_DEFINE_TYPE_WITH_CODE (GstWebrtcDsp, gst_webrtc_dsp, GST_TYPE_AUDIO_FILTER,
@@ -368,7 +372,7 @@ gst_webrtc_dsp_analyze_reverse_stream (GstWebrtcDsp * self,
     GstClockTime rec_time)
 {
   GstWebrtcEchoProbe *probe = NULL;
-  webrtc::AudioProcessing *apm;
+  rtc::scoped_refptr<webrtc::AudioProcessing> apm;
   GstBuffer *buf = NULL;
   GstAudioInfo info;
   gboolean interleaved = self->interleaved;
@@ -392,7 +396,11 @@ gst_webrtc_dsp_analyze_reverse_stream (GstWebrtcDsp * self,
   apm = self->apm;
   apm->set_stream_delay_ms (delay);
 
+#ifdef HAVE_WEBRTC1
   webrtc::StreamConfig config (info.rate, info.channels, false);
+#else
+  webrtc::StreamConfig config (info.rate, info.channels);
+#endif
 
   g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);
 
@@ -433,6 +441,7 @@ done:
   return ret;
 }
 
+#ifdef HAVE_WEBRTC1
 static void
 gst_webrtc_vad_post_activity (GstWebrtcDsp *self, GstBuffer *buffer,
     gboolean stream_has_voice, guint8 level)
@@ -464,14 +473,19 @@ gst_webrtc_vad_post_activity (GstWebrtcDsp *self, GstBuffer *buffer,
   gst_element_post_message (GST_ELEMENT (self),
       gst_message_new_element (GST_OBJECT (self), s));
 }
+#endif
 
 static GstFlowReturn
 gst_webrtc_dsp_process_stream (GstWebrtcDsp * self,
     GstBuffer * buffer)
 {
   GstAudioBuffer abuf;
-  webrtc::AudioProcessing * apm = self->apm;
+  rtc::scoped_refptr<webrtc::AudioProcessing> apm = self->apm;
+#ifdef HAVE_WEBRTC1
   webrtc::StreamConfig config (self->info.rate, self->info.channels, false);
+#else
+  webrtc::StreamConfig config (self->info.rate, self->info.channels);
+#endif
   gint err;
 
   if (!gst_audio_buffer_map (&abuf, &self->info, buffer,
@@ -492,6 +506,7 @@ gst_webrtc_dsp_process_stream (GstWebrtcDsp * self,
     GST_WARNING_OBJECT (self, "Failed to filter the audio: %s.",
         webrtc_error_to_string (err));
   } else {
+#ifdef HAVE_WEBRTC1
     if (self->voice_detection) {
      webrtc::AudioProcessingStats stats = apm->GetStatistics ();
      gboolean stream_has_voice = stats.voice_detected && *stats.voice_detected;
@@ -503,6 +518,7 @@ gst_webrtc_dsp_process_stream (GstWebrtcDsp * self,
 
      self->stream_has_voice = stream_has_voice;
     }
+#endif
   }
 
   gst_audio_buffer_unmap (&abuf);
@@ -604,7 +620,11 @@ gst_webrtc_dsp_setup (GstAudioFilter * filter, const GstAudioInfo * info)
 
   self->info = *info;
   self->interleaved = (info->layout == GST_AUDIO_LAYOUT_INTERLEAVED);
+#ifdef HAVE_WEBRTC1
+  self->apm = rtc::scoped_refptr(webrtc::AudioProcessingBuilder().Create());
+#else
   self->apm = webrtc::AudioProcessingBuilder().Create();
+#endif
 
   if (!self->interleaved)
     gst_planar_audio_adapter_configure (self->padapter, info);
@@ -650,13 +670,13 @@ gst_webrtc_dsp_setup (GstAudioFilter * filter, const GstAudioInfo * info)
     config.noise_suppression.level = self->noise_suppression_level;
   }
 
-  // TODO: expose transient suppression
-
+#ifdef HAVE_WEBRTC1
   if (self->voice_detection) {
     GST_DEBUG_OBJECT (self, "Enabling Voice Activity Detection");
     config.voice_detection.enabled = true;
     self->stream_has_voice = FALSE;
   }
+#endif
 
   if (self->gain_control) {
     GEnumClass *mode_class = (GEnumClass *)
@@ -674,7 +694,9 @@ gst_webrtc_dsp_setup (GstAudioFilter * filter, const GstAudioInfo * info)
     config.gain_controller1.target_level_dbfs = self->target_level_dbfs;
     config.gain_controller1.compression_gain_db = self->compression_gain_db;
     config.gain_controller1.enable_limiter = self->limiter;
+#ifdef HAVE_WEBRTC1
     config.level_estimation.enabled = true;
+#endif
   }
 
   // TODO: expose gain controller 2
@@ -719,8 +741,7 @@ gst_webrtc_dsp_stop (GstBaseTransform * btrans)
     self->probe = NULL;
   }
 
-  delete self->apm;
-  self->apm = NULL;
+  self->apm = nullptr;
 
   GST_OBJECT_UNLOCK (self);
 
@@ -780,7 +801,11 @@ gst_webrtc_dsp_set_property (GObject * object,
          (GstWebrtcGainControlMode) g_value_get_enum (value);
      break;
    case PROP_VOICE_DETECTION:
+#ifdef HAVE_WEBRTC1
      self->voice_detection = g_value_get_boolean (value);
+#else
+      GST_WARNING_OBJECT (self, "Voice activity detection is no longer supported");
+#endif
      break;
    case PROP_VOICE_DETECTION_FRAME_SIZE_MS:
      break;
@@ -847,7 +872,11 @@ gst_webrtc_dsp_get_property (GObject * object,
      g_value_set_enum (value, self->gain_control_mode);
      break;
    case PROP_VOICE_DETECTION:
+#ifdef HAVE_WEBRTC1
      g_value_set_boolean (value, self->voice_detection);
+#else
+      g_value_set_boolean (value, FALSE);
+#endif
      break;
    case PROP_VOICE_DETECTION_FRAME_SIZE_MS:
      g_value_set_int (value, 0);
@@ -1041,7 +1070,11 @@ gst_webrtc_dsp_class_init (GstWebrtcDspClass * klass)
   g_object_class_install_property (gobject_class,
      PROP_VOICE_DETECTION,
      g_param_spec_boolean ("voice-detection", "Voice Detection",
+#ifdef HAVE_WEBRTC1
          "Enable or disable the voice activity detector",
+#else
+          "Enable or disable the voice activity detector (deprecated)",
+#endif
          DEFAULT_VOICE_DETECTION, (GParamFlags) (G_PARAM_READWRITE |
              G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT)));
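On 2.x builds the voice-detection property is kept so existing pipelines keep working, but it becomes a no-op: setting it only logs a warning and reading it back always yields FALSE, as the set_property/get_property hunks above show. A minimal application-side check follows; the element name "webrtcdsp" and the property name come from the patch, while the rest of the program is illustrative only.

/* Sketch: what the property change looks like from application code. */
#include <gst/gst.h>

int
main (int argc, char **argv)
{
  gst_init (&argc, &argv);

  GstElement *dsp = gst_element_factory_make ("webrtcdsp", NULL);
  if (dsp == NULL)
    return 1;                   /* plugin not available in this build */

  /* On a HAVE_WEBRTC1 (1.x) build this enables the voice activity detector;
   * on a 2.x build the element logs that VAD is no longer supported. */
  g_object_set (dsp, "voice-detection", TRUE, NULL);

  gboolean enabled = FALSE;
  g_object_get (dsp, "voice-detection", &enabled, NULL);
  g_print ("voice-detection reads back as %s\n", enabled ? "TRUE" : "FALSE");

  gst_object_unref (dsp);
  return 0;
}

The meson.build change that implements the detection order comes next.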
@@ -17,8 +17,22 @@ plugin_sources += {
   'webrtcdsp': pathsep.join(doc_sources)
 }
 
-webrtc_dep = dependency('webrtc-audio-processing-1', version : ['>= 1.0'],
-  required : get_option('webrtcdsp'))
+webrtc_dep = dependency('webrtc-audio-processing-2', version : ['>= 2.0'],
+  required : false)
+if not webrtc_dep.found()
+  webrtc_dep = dependency('webrtc-audio-processing-1', version : ['>= 1.0'],
+    required : false)
+  if webrtc_dep.found()
+    cdata.set('HAVE_WEBRTC1', 1)
+  endif
+endif
+
+if not webrtc_dep.found()
+  # Try again, and this time use fallback if requested and possible
+  webrtc_dep = dependency('webrtc-audio-processing-2', version : ['>= 2.0'],
+    allow_fallback : true,
+    required : get_option('webrtcdsp'))
+endif
 
 if webrtc_dep.found()
   gstwebrtcdsp = library('gstwebrtcdsp',
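The cdata.set('HAVE_WEBRTC1', 1) line is what drives the #ifdef HAVE_WEBRTC1 branches in the source hunks above: gst-plugins-bad writes cdata out to a generated config header that the plugin sources pull in. As a toy illustration only (the config-header plumbing is assumed here, it is not part of this diff), a translation unit built the same way could report which backend was chosen:

/* Toy translation unit showing how the define set by meson is consumed.
 * Assumes the define reaches the compiler through the generated config
 * header, as for other gst-plugins-bad build options. */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <cstdio>

int
main ()
{
#ifdef HAVE_WEBRTC1
  std::puts ("webrtcdsp was built against webrtc-audio-processing-1");
#else
  std::puts ("webrtcdsp was built against webrtc-audio-processing-2");
#endif
  return 0;
}

Finally, the subproject wrap is bumped so the fallback path provides the 2.x API as well.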
@@ -2,7 +2,7 @@
 directory = webrtc-audio-processing
 url = https://gitlab.freedesktop.org/pulseaudio/webrtc-audio-processing.git
 push-url = git@gitlab.freedesktop.org:pulseaudio/webrtc-audio-processing.git
-revision = v1.3
+revision = v2.1
 
 [provide]
-dependency_names = webrtc-audio-coding-1, webrtc-audio-processing-1
+dependency_names = webrtc-audio-processing-2