/* Mirror of https://gitlab.freedesktop.org/gstreamer/gstreamer.git
 * synced 2024-11-18, commit b333e1204e:
 * "Init the AVFrame with the right method. This sets the extended_data
 *  field correctly that is needed for some formats (G726 for example)."
 */
/* GStreamer
 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
 * Copyright (C) <2012> Collabora Ltd.
 *  Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
|
|
|
|
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <string.h>
/* for stats file handling */
#include <stdio.h>
#include <glib/gstdio.h>
#include <errno.h>

#include <libavcodec/avcodec.h>

#include <gst/gst.h>

#include "gstav.h"
#include "gstavcodecmap.h"
#include "gstavutils.h"
#include "gstavaudenc.h"

/* Default target bitrate in bits/second when the "bitrate" property
 * is left unset. */
#define DEFAULT_AUDIO_BITRATE 128000
|
/* Element signals (none defined yet; LAST_SIGNAL keeps the enum valid). */
enum
{
  /* FILL ME */
  LAST_SIGNAL
};
|
|
|
|
/* GObject property IDs.  PROP_0 is the mandatory GObject placeholder. */
enum
{
  PROP_0,
  PROP_BIT_RATE,
  PROP_RTP_PAYLOAD_SIZE,
};
|
|
|
|
/* Forward declarations of the GType boilerplate and the GstAudioEncoder /
 * GObject vmethod implementations defined below. */
static void gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass);
static void gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass);
static void gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc);
static void gst_ffmpegaudenc_finalize (GObject * object);

static GstCaps *gst_ffmpegaudenc_getcaps (GstAudioEncoder * encoder,
    GstCaps * filter);
static gboolean gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder,
    GstAudioInfo * info);
static GstFlowReturn gst_ffmpegaudenc_handle_frame (GstAudioEncoder * encoder,
    GstBuffer * inbuf);
static gboolean gst_ffmpegaudenc_stop (GstAudioEncoder * encoder);
static void gst_ffmpegaudenc_flush (GstAudioEncoder * encoder);

static void gst_ffmpegaudenc_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec);
static void gst_ffmpegaudenc_get_property (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec);

/* Quark under which the AVCodec pointer is attached to each registered
 * GType; base_init() reads it back (see gst_ffmpegaudenc_register()). */
#define GST_FFENC_PARAMS_QDATA g_quark_from_static_string("avenc-params")

static GstElementClass *parent_class = NULL;

/*static guint gst_ffmpegaudenc_signals[LAST_SIGNAL] = { 0 }; */
|
|
|
|
static void
|
|
gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass)
|
|
{
|
|
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
|
|
AVCodec *in_plugin;
|
|
GstPadTemplate *srctempl = NULL, *sinktempl = NULL;
|
|
GstCaps *srccaps = NULL, *sinkcaps = NULL;
|
|
gchar *longname, *description;
|
|
|
|
in_plugin =
|
|
(AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
|
|
GST_FFENC_PARAMS_QDATA);
|
|
g_assert (in_plugin != NULL);
|
|
|
|
/* construct the element details struct */
|
|
longname = g_strdup_printf ("libav %s encoder", in_plugin->long_name);
|
|
description = g_strdup_printf ("libav %s encoder", in_plugin->name);
|
|
gst_element_class_set_metadata (element_class, longname,
|
|
"Codec/Encoder/Audio", description,
|
|
"Wim Taymans <wim.taymans@gmail.com>, "
|
|
"Ronald Bultje <rbultje@ronald.bitfreak.net>");
|
|
g_free (longname);
|
|
g_free (description);
|
|
|
|
if (!(srccaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, TRUE))) {
|
|
GST_DEBUG ("Couldn't get source caps for encoder '%s'", in_plugin->name);
|
|
srccaps = gst_caps_new_empty_simple ("unknown/unknown");
|
|
}
|
|
|
|
sinkcaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
|
|
in_plugin->id, TRUE, in_plugin);
|
|
if (!sinkcaps) {
|
|
GST_DEBUG ("Couldn't get sink caps for encoder '%s'", in_plugin->name);
|
|
sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
|
|
}
|
|
|
|
/* pad templates */
|
|
sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
|
|
GST_PAD_ALWAYS, sinkcaps);
|
|
srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
|
|
|
|
gst_element_class_add_pad_template (element_class, srctempl);
|
|
gst_element_class_add_pad_template (element_class, sinktempl);
|
|
|
|
klass->in_plugin = in_plugin;
|
|
klass->srctempl = srctempl;
|
|
klass->sinktempl = sinktempl;
|
|
|
|
return;
|
|
}
|
|
|
|
static void
|
|
gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass)
|
|
{
|
|
GObjectClass *gobject_class;
|
|
GstAudioEncoderClass *gstaudioencoder_class;
|
|
|
|
gobject_class = (GObjectClass *) klass;
|
|
gstaudioencoder_class = (GstAudioEncoderClass *) klass;
|
|
|
|
parent_class = g_type_class_peek_parent (klass);
|
|
|
|
gobject_class->set_property = gst_ffmpegaudenc_set_property;
|
|
gobject_class->get_property = gst_ffmpegaudenc_get_property;
|
|
|
|
/* FIXME: could use -1 for a sensible per-codec defaults */
|
|
g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BIT_RATE,
|
|
g_param_spec_int ("bitrate", "Bit Rate",
|
|
"Target Audio Bitrate", 0, G_MAXINT, DEFAULT_AUDIO_BITRATE,
|
|
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
|
|
|
|
gobject_class->finalize = gst_ffmpegaudenc_finalize;
|
|
|
|
gstaudioencoder_class->stop = GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_stop);
|
|
gstaudioencoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_getcaps);
|
|
gstaudioencoder_class->flush = GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_flush);
|
|
gstaudioencoder_class->set_format =
|
|
GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_set_format);
|
|
gstaudioencoder_class->handle_frame =
|
|
GST_DEBUG_FUNCPTR (gst_ffmpegaudenc_handle_frame);
|
|
}
|
|
|
|
static void
|
|
gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc)
|
|
{
|
|
GstFFMpegAudEncClass *klass =
|
|
(GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
|
|
|
|
/* ffmpeg objects */
|
|
ffmpegaudenc->context = avcodec_alloc_context3 (klass->in_plugin);
|
|
ffmpegaudenc->opened = FALSE;
|
|
|
|
gst_audio_encoder_set_drainable (GST_AUDIO_ENCODER (ffmpegaudenc), TRUE);
|
|
}
|
|
|
|
static void
|
|
gst_ffmpegaudenc_finalize (GObject * object)
|
|
{
|
|
GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) object;
|
|
|
|
/* clean up remaining allocated data */
|
|
av_free (ffmpegaudenc->context);
|
|
|
|
G_OBJECT_CLASS (parent_class)->finalize (object);
|
|
}
|
|
|
|
static gboolean
|
|
gst_ffmpegaudenc_stop (GstAudioEncoder * encoder)
|
|
{
|
|
GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
|
|
|
|
/* close old session */
|
|
if (ffmpegaudenc->opened) {
|
|
gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
|
|
ffmpegaudenc->opened = FALSE;
|
|
}
|
|
|
|
return TRUE;
|
|
}
|
|
|
|
static void
|
|
gst_ffmpegaudenc_flush (GstAudioEncoder * encoder)
|
|
{
|
|
GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
|
|
|
|
if (ffmpegaudenc->opened) {
|
|
avcodec_flush_buffers (ffmpegaudenc->context);
|
|
}
|
|
}
|
|
|
|
static GstCaps *
|
|
gst_ffmpegaudenc_getcaps (GstAudioEncoder * encoder, GstCaps * filter)
|
|
{
|
|
GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
|
|
GstCaps *caps = NULL;
|
|
|
|
GST_DEBUG_OBJECT (ffmpegaudenc, "getting caps");
|
|
|
|
/* audio needs no special care */
|
|
caps = gst_audio_encoder_proxy_getcaps (encoder, NULL, filter);
|
|
|
|
GST_DEBUG_OBJECT (ffmpegaudenc,
|
|
"audio caps, return template %" GST_PTR_FORMAT, caps);
|
|
|
|
return caps;
|
|
}
|
|
|
|
/* GstAudioEncoder::set_format implementation: (re)configures and opens the
 * libav codec context for the negotiated input @info, negotiates output
 * caps downstream, and tells the base class how many samples per frame
 * the codec expects.  Returns FALSE on any negotiation/open failure. */
static gboolean
gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info)
{
  GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
  GstCaps *other_caps;
  GstCaps *allowed_caps;
  GstCaps *icaps;
  gsize frame_size;
  GstFFMpegAudEncClass *oclass =
      (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);

  /* close old session */
  if (ffmpegaudenc->opened) {
    gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    ffmpegaudenc->opened = FALSE;
  }

  /* set defaults */
  avcodec_get_context_defaults3 (ffmpegaudenc->context, oclass->in_plugin);

  /* if we set it in _getcaps we should set it also in _link */
  ffmpegaudenc->context->strict_std_compliance = -1;

  /* user defined properties */
  if (ffmpegaudenc->bitrate > 0) {
    GST_INFO_OBJECT (ffmpegaudenc, "Setting avcontext to bitrate %d",
        ffmpegaudenc->bitrate);
    ffmpegaudenc->context->bit_rate = ffmpegaudenc->bitrate;
    ffmpegaudenc->context->bit_rate_tolerance = ffmpegaudenc->bitrate;
  } else {
    GST_INFO_OBJECT (ffmpegaudenc, "Using avcontext default bitrate %d",
        ffmpegaudenc->context->bit_rate);
  }

  /* RTP payload used for GOB production (for Asterisk) */
  if (ffmpegaudenc->rtp_payload_size) {
    ffmpegaudenc->context->rtp_payload_size = ffmpegaudenc->rtp_payload_size;
  }

  /* some other defaults */
  ffmpegaudenc->context->rc_strategy = 2;
  ffmpegaudenc->context->b_frame_strategy = 0;
  ffmpegaudenc->context->coder_type = 0;
  ffmpegaudenc->context->context_model = 0;
  ffmpegaudenc->context->scenechange_threshold = 0;
  ffmpegaudenc->context->inter_threshold = 0;

  /* fetch pix_fmt and so on */
  gst_ffmpeg_audioinfo_to_context (info, ffmpegaudenc->context);
  /* Fall back to a sample-rate based time base if the mapping set none. */
  if (!ffmpegaudenc->context->time_base.den) {
    ffmpegaudenc->context->time_base.den = GST_AUDIO_INFO_RATE (info);
    ffmpegaudenc->context->time_base.num = 1;
    ffmpegaudenc->context->ticks_per_frame = 1;
  }

  /* Remember whether the input channel order differs from the one the
   * codec expects; handle_frame reorders each buffer if so. */
  if (ffmpegaudenc->context->channel_layout) {
    gst_ffmpeg_channel_layout_to_gst (ffmpegaudenc->context->channel_layout,
        ffmpegaudenc->context->channels, ffmpegaudenc->ffmpeg_layout);
    ffmpegaudenc->needs_reorder =
        (memcmp (ffmpegaudenc->ffmpeg_layout, info->position,
            sizeof (GstAudioChannelPosition) *
            ffmpegaudenc->context->channels) != 0);
  }

  /* open codec */
  if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
    if (ffmpegaudenc->context->priv_data)
      gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
        oclass->in_plugin->name);
    return FALSE;
  }

  /* some codecs support more than one format, first auto-choose one */
  GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
    /* we need to copy because get_allowed_caps returns a ref, and
     * get_pad_template_caps doesn't */
    allowed_caps =
        gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
  }
  GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
      oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);

  /* try to set this caps on the other side */
  other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
      ffmpegaudenc->context, TRUE);

  if (!other_caps) {
    gst_caps_unref (allowed_caps);
    gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    GST_DEBUG ("Unsupported codec - no caps found");
    return FALSE;
  }

  /* Intersect what downstream accepts with what the codec can produce and
   * fix the result to a single output format. */
  icaps = gst_caps_intersect (allowed_caps, other_caps);
  gst_caps_unref (allowed_caps);
  gst_caps_unref (other_caps);
  if (gst_caps_is_empty (icaps)) {
    gst_caps_unref (icaps);
    return FALSE;
  }
  icaps = gst_caps_truncate (icaps);

  if (!gst_audio_encoder_set_output_format (GST_AUDIO_ENCODER (ffmpegaudenc),
          icaps)) {
    gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    gst_caps_unref (icaps);
    return FALSE;
  }
  gst_caps_unref (icaps);

  /* Tell the base class how to chunk input: a fixed sample count per frame
   * when the codec demands one, arbitrary sizes otherwise. */
  frame_size = ffmpegaudenc->context->frame_size;
  if (frame_size > 1) {
    gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
        frame_size);
    gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
        frame_size);
    gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 1);
  } else {
    gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
        0);
    gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
        0);
    gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 0);
  }

  /* success! */
  ffmpegaudenc->opened = TRUE;

  return TRUE;
}
|
|
|
|
|
|
static GstFlowReturn
|
|
gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
|
|
guint8 * audio_in, guint in_size, gint * have_data)
|
|
{
|
|
GstAudioEncoder *enc;
|
|
AVCodecContext *ctx;
|
|
gint res;
|
|
GstFlowReturn ret;
|
|
GstAudioInfo *info;
|
|
AVPacket pkt;
|
|
AVFrame frame;
|
|
gboolean planar;
|
|
|
|
enc = GST_AUDIO_ENCODER (ffmpegaudenc);
|
|
|
|
ctx = ffmpegaudenc->context;
|
|
|
|
GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer ");
|
|
|
|
memset (&pkt, 0, sizeof (pkt));
|
|
memset (&frame, 0, sizeof (frame));
|
|
avcodec_get_frame_defaults (&frame);
|
|
|
|
info = gst_audio_encoder_get_audio_info (enc);
|
|
planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);
|
|
|
|
if (planar && info->channels > 1) {
|
|
gint channels, nsamples;
|
|
gint i, j;
|
|
|
|
nsamples = frame.nb_samples = in_size / info->bpf;
|
|
channels = info->channels;
|
|
|
|
if (info->channels > AV_NUM_DATA_POINTERS) {
|
|
frame.extended_data = g_new (uint8_t *, info->channels);
|
|
} else {
|
|
frame.extended_data = frame.data;
|
|
}
|
|
|
|
frame.extended_data[0] = g_malloc (in_size);
|
|
frame.linesize[0] = in_size / channels;
|
|
for (i = 1; i < channels; i++)
|
|
frame.extended_data[i] = frame.extended_data[i - 1] + frame.linesize[0];
|
|
|
|
switch (info->finfo->width) {
|
|
case 8:{
|
|
const guint8 *idata = (const guint8 *) audio_in;
|
|
|
|
for (i = 0; i < nsamples; i++) {
|
|
for (j = 0; j < channels; j++) {
|
|
((guint8 *) frame.extended_data[j])[i] = idata[j];
|
|
}
|
|
idata += channels;
|
|
}
|
|
break;
|
|
}
|
|
case 16:{
|
|
const guint16 *idata = (const guint16 *) audio_in;
|
|
|
|
for (i = 0; i < nsamples; i++) {
|
|
for (j = 0; j < channels; j++) {
|
|
((guint16 *) frame.extended_data[j])[i] = idata[j];
|
|
}
|
|
idata += channels;
|
|
}
|
|
break;
|
|
}
|
|
case 32:{
|
|
const guint32 *idata = (const guint32 *) audio_in;
|
|
|
|
for (i = 0; i < nsamples; i++) {
|
|
for (j = 0; j < channels; j++) {
|
|
((guint32 *) frame.extended_data[j])[i] = idata[j];
|
|
}
|
|
idata += channels;
|
|
}
|
|
|
|
break;
|
|
}
|
|
case 64:{
|
|
const guint64 *idata = (const guint64 *) audio_in;
|
|
|
|
for (i = 0; i < nsamples; i++) {
|
|
for (j = 0; j < channels; j++) {
|
|
((guint64 *) frame.extended_data[j])[i] = idata[j];
|
|
}
|
|
idata += channels;
|
|
}
|
|
|
|
break;
|
|
}
|
|
default:
|
|
g_assert_not_reached ();
|
|
break;
|
|
}
|
|
|
|
} else {
|
|
frame.data[0] = audio_in;
|
|
frame.extended_data = frame.data;
|
|
frame.linesize[0] = in_size;
|
|
frame.nb_samples = in_size / info->bpf;
|
|
}
|
|
|
|
res = avcodec_encode_audio2 (ctx, &pkt, &frame, have_data);
|
|
if (planar && info->channels > 1)
|
|
g_free (frame.data[0]);
|
|
if (frame.extended_data != frame.data)
|
|
g_free (frame.extended_data);
|
|
|
|
if (res < 0) {
|
|
char error_str[128] = { 0, };
|
|
|
|
av_strerror (res, error_str, sizeof (error_str));
|
|
GST_ERROR_OBJECT (enc, "Failed to encode buffer: %d - %s", res, error_str);
|
|
return GST_FLOW_OK;
|
|
}
|
|
GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);
|
|
|
|
if (*have_data) {
|
|
GstBuffer *outbuf;
|
|
const AVCodec *codec;
|
|
|
|
GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d", pkt.size);
|
|
|
|
outbuf =
|
|
gst_buffer_new_wrapped_full (0, pkt.data, pkt.size, 0, pkt.size,
|
|
pkt.data, av_free);
|
|
|
|
codec = ffmpegaudenc->context->codec;
|
|
if ((codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
|
|
ret = gst_audio_encoder_finish_frame (enc, outbuf, -1);
|
|
} else {
|
|
ret = gst_audio_encoder_finish_frame (enc, outbuf, frame.nb_samples);
|
|
}
|
|
} else {
|
|
GST_LOG_OBJECT (ffmpegaudenc, "no output produced");
|
|
ret = GST_FLOW_OK;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
static void
|
|
gst_ffmpegaudenc_drain (GstFFMpegAudEnc * ffmpegaudenc)
|
|
{
|
|
GstFFMpegAudEncClass *oclass;
|
|
|
|
oclass = (GstFFMpegAudEncClass *) (G_OBJECT_GET_CLASS (ffmpegaudenc));
|
|
|
|
if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
|
|
gint have_data, try = 0;
|
|
|
|
GST_LOG_OBJECT (ffmpegaudenc,
|
|
"codec has delay capabilities, calling until libav has drained everything");
|
|
|
|
do {
|
|
GstFlowReturn ret;
|
|
|
|
ret = gst_ffmpegaudenc_encode_audio (ffmpegaudenc, NULL, 0, &have_data);
|
|
if (ret != GST_FLOW_OK || have_data == 0)
|
|
break;
|
|
} while (try++ < 10);
|
|
}
|
|
}
|
|
|
|
/* GstAudioEncoder::handle_frame implementation.
 *
 * @inbuf is NULL at EOS, in which case pending packets are drained out of
 * libav.  Otherwise the buffer is (if needed) reordered to the channel
 * layout the codec expects, mapped, and handed to the encode helper. */
static GstFlowReturn
gst_ffmpegaudenc_handle_frame (GstAudioEncoder * encoder, GstBuffer * inbuf)
{
  GstFFMpegAudEnc *ffmpegaudenc;
  gsize size;
  GstFlowReturn ret;
  guint8 *in_data;
  GstMapInfo map;
  gint have_data;

  ffmpegaudenc = (GstFFMpegAudEnc *) encoder;

  if (G_UNLIKELY (!ffmpegaudenc->opened))
    goto not_negotiated;

  /* NULL input means "flush whatever the codec still buffers". */
  if (!inbuf) {
    gst_ffmpegaudenc_drain (ffmpegaudenc);
    return GST_FLOW_OK;
  }

  /* Take our own ref so the buffer may be made writable below. */
  inbuf = gst_buffer_ref (inbuf);

  GST_DEBUG_OBJECT (ffmpegaudenc,
      "Received time %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
      ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)), gst_buffer_get_size (inbuf));

  /* Reorder channels to the GStreamer channel order */
  if (ffmpegaudenc->needs_reorder) {
    GstAudioInfo *info = gst_audio_encoder_get_audio_info (encoder);

    inbuf = gst_buffer_make_writable (inbuf);
    gst_audio_buffer_reorder_channels (inbuf, info->finfo->format,
        info->channels, info->position, ffmpegaudenc->ffmpeg_layout);
  }

  gst_buffer_map (inbuf, &map, GST_MAP_READ);
  in_data = map.data;
  size = map.size;
  ret = gst_ffmpegaudenc_encode_audio (ffmpegaudenc, in_data, size, &have_data);
  gst_buffer_unmap (inbuf, &map);
  gst_buffer_unref (inbuf);

  if (ret != GST_FLOW_OK)
    goto push_failed;

  return GST_FLOW_OK;

  /* ERRORS */
not_negotiated:
  {
    GST_ELEMENT_ERROR (ffmpegaudenc, CORE, NEGOTIATION, (NULL),
        ("not configured to input format before data start"));
    /* NOTE(review): this unrefs @inbuf without the extra ref taken above --
     * confirm against the GstAudioEncoder handle_frame ownership contract. */
    gst_buffer_unref (inbuf);
    return GST_FLOW_NOT_NEGOTIATED;
  }
push_failed:
  {
    GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to push buffer %d (%s)", ret,
        gst_flow_get_name (ret));
    return ret;
  }
}
|
|
|
|
static void
|
|
gst_ffmpegaudenc_set_property (GObject * object,
|
|
guint prop_id, const GValue * value, GParamSpec * pspec)
|
|
{
|
|
GstFFMpegAudEnc *ffmpegaudenc;
|
|
|
|
/* Get a pointer of the right type. */
|
|
ffmpegaudenc = (GstFFMpegAudEnc *) (object);
|
|
|
|
if (ffmpegaudenc->opened) {
|
|
GST_WARNING_OBJECT (ffmpegaudenc,
|
|
"Can't change properties once decoder is setup !");
|
|
return;
|
|
}
|
|
|
|
/* Check the argument id to see which argument we're setting. */
|
|
switch (prop_id) {
|
|
case PROP_BIT_RATE:
|
|
ffmpegaudenc->bitrate = g_value_get_int (value);
|
|
break;
|
|
case PROP_RTP_PAYLOAD_SIZE:
|
|
ffmpegaudenc->rtp_payload_size = g_value_get_int (value);
|
|
break;
|
|
default:
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
|
|
break;
|
|
}
|
|
}
|
|
|
|
/* The get function is simply the inverse of the set function. */
|
|
static void
|
|
gst_ffmpegaudenc_get_property (GObject * object,
|
|
guint prop_id, GValue * value, GParamSpec * pspec)
|
|
{
|
|
GstFFMpegAudEnc *ffmpegaudenc;
|
|
|
|
/* It's not null if we got it, but it might not be ours */
|
|
ffmpegaudenc = (GstFFMpegAudEnc *) (object);
|
|
|
|
switch (prop_id) {
|
|
case PROP_BIT_RATE:
|
|
g_value_set_int (value, ffmpegaudenc->bitrate);
|
|
break;
|
|
break;
|
|
case PROP_RTP_PAYLOAD_SIZE:
|
|
g_value_set_int (value, ffmpegaudenc->rtp_payload_size);
|
|
break;
|
|
default:
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
|
|
break;
|
|
}
|
|
}
|
|
|
|
/* Walks libav's codec list and registers an "avenc_<codecname>" element
 * (with an empty GstPreset interface) for every usable audio encoder.
 * Returns FALSE only when gst_element_register() itself fails. */
gboolean
gst_ffmpegaudenc_register (GstPlugin * plugin)
{
  GTypeInfo typeinfo = {
    sizeof (GstFFMpegAudEncClass),
    (GBaseInitFunc) gst_ffmpegaudenc_base_init,
    NULL,
    (GClassInitFunc) gst_ffmpegaudenc_class_init,
    NULL,
    NULL,
    sizeof (GstFFMpegAudEnc),
    0,
    (GInstanceInitFunc) gst_ffmpegaudenc_init,
  };
  GType type;
  AVCodec *in_plugin;

  GST_LOG ("Registering encoders");

  in_plugin = av_codec_next (NULL);
  while (in_plugin) {
    gchar *type_name;

    /* Only audio codecs are handled here */
    if (in_plugin->type != AVMEDIA_TYPE_AUDIO)
      goto next;

    /* no quasi codecs, please */
    if ((in_plugin->id >= CODEC_ID_PCM_S16LE &&
            in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
      goto next;
    }

    /* No encoders depending on external libraries (we don't build them, but
     * people who build against an external ffmpeg might have them.
     * We have native gstreamer plugins for all of those libraries anyway. */
    if (!strncmp (in_plugin->name, "lib", 3)) {
      GST_DEBUG
          ("Not using external library encoder %s. Use the gstreamer-native ones instead.",
          in_plugin->name);
      goto next;
    }

    /* only encoders */
    if (!av_codec_is_encoder (in_plugin)) {
      goto next;
    }

    /* FIXME : We should have a method to know cheaply whether we have a mapping
     * for the given plugin or not */

    GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);

    /* no codecs for which we're GUARANTEED to have better alternatives */
    if (!strcmp (in_plugin->name, "vorbis")
        || !strcmp (in_plugin->name, "flac")) {
      GST_LOG ("Ignoring encoder %s", in_plugin->name);
      goto next;
    }

    /* construct the type */
    type_name = g_strdup_printf ("avenc_%s", in_plugin->name);

    type = g_type_from_name (type_name);

    if (!type) {

      /* create the glib type now */
      type =
          g_type_register_static (GST_TYPE_AUDIO_ENCODER, type_name, &typeinfo,
          0);
      /* attach the AVCodec so base_init can find it again via qdata */
      g_type_set_qdata (type, GST_FFENC_PARAMS_QDATA, (gpointer) in_plugin);

      {
        static const GInterfaceInfo preset_info = {
          NULL,
          NULL,
          NULL
        };
        g_type_add_interface_static (type, GST_TYPE_PRESET, &preset_info);
      }
    }

    if (!gst_element_register (plugin, type_name, GST_RANK_SECONDARY, type)) {
      g_free (type_name);
      return FALSE;
    }

    g_free (type_name);

  next:
    in_plugin = av_codec_next (in_plugin);
  }

  GST_LOG ("Finished registering encoders");

  return TRUE;
}
|