basevideoencoder: add documentation and related cosmetics

Mark Nauwelaerts 2011-03-25 09:28:24 +01:00
parent d15b8c7ad3
commit 5a8bc266c8
2 changed files with 219 additions and 31 deletions

gstbasevideoencoder.c

@@ -1,5 +1,8 @@
/* GStreamer
* Copyright (C) 2008 David Schleef <ds@schleef.org>
* Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
* Copyright (C) 2011 Nokia Corporation. All rights reserved.
* Contact: Stefan Kost <stefan.kost@nokia.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -17,6 +20,87 @@
* Boston, MA 02111-1307, USA.
*/
/**
* SECTION:gstbasevideoencoder
* @short_description: Base class for video encoders
* @see_also: #GstBaseTransform
*
* This base class is for video encoders turning raw video into
* encoded video data.
*
 * GstBaseVideoEncoder and its subclass should cooperate as follows.
* <orderedlist>
* <listitem>
* <itemizedlist><title>Configuration</title>
* <listitem><para>
 * Initially, GstBaseVideoEncoder calls @start when the encoder element
 * is activated, which allows the subclass to perform any global setup.
 * </para></listitem>
 * <listitem><para>
 * GstBaseVideoEncoder calls @set_format to inform the subclass of the format
 * of the input video data that it is about to receive. The subclass should
 * set up for encoding and configure the base class as appropriate
 * (e.g. latency). While unlikely, it might be called more than once
 * if changing input parameters requires reconfiguration. The base class
 * will ensure that processing of the current configuration is finished.
* </para></listitem>
* <listitem><para>
* GstBaseVideoEncoder calls @stop at end of all processing.
* </para></listitem>
* </itemizedlist>
* </listitem>
* <listitem>
* <itemizedlist>
* <title>Data processing</title>
* <listitem><para>
 * The base class collects input data and metadata into a frame and hands
 * this to the subclass' @handle_frame.
* </para></listitem>
* <listitem><para>
 * If codec processing results in encoded data, the subclass should call
 * @gst_base_video_encoder_finish_frame to have the encoded data pushed
 * downstream.
* </para></listitem>
* <listitem><para>
 * If implemented, the base class calls the subclass' @shape_output, which then
 * sends the data downstream in the desired form. Otherwise, the data is sent as-is.
* </para></listitem>
* <listitem><para>
* GstBaseVideoEncoderClass will handle both srcpad and sinkpad events.
 * Sink events will be passed to the subclass if the @event callback has been
 * provided.
* </para></listitem>
* </itemizedlist>
* </listitem>
* <listitem>
* <itemizedlist><title>Shutdown phase</title>
* <listitem><para>
 * The GstBaseVideoEncoder class calls @stop to inform the subclass that data
 * processing will be stopped.
* </para></listitem>
* </itemizedlist>
* </listitem>
* </orderedlist>
*
 * The subclass is responsible for providing pad template caps for
 * source and sink pads. The pads need to be named "sink" and "src". It should
 * also be able to provide fixed src pad caps in @get_caps by the time it calls
 * @gst_base_video_encoder_finish_frame.
*
 * Things that the subclass needs to take care of (a sketch follows below):
* <itemizedlist>
* <listitem><para>Provide pad templates</para></listitem>
* <listitem><para>
* Provide source pad caps in @get_caps.
* </para></listitem>
* <listitem><para>
* Accept data in @handle_frame and provide encoded results to
* @gst_base_video_encoder_finish_frame.
* </para></listitem>
* </itemizedlist>
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
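
To make the lifecycle above concrete, here is a rough sketch of how a subclass could wire up the virtual methods. The GstFooEnc / gst_foo_enc_* names are purely illustrative and not part of this commit; only @handle_frame is mandatory, the rest is optional.

/* hypothetical subclass; the gst_foo_enc_* callbacks are assumed to be
 * defined elsewhere with the signatures from GstBaseVideoEncoderClass */
static void
gst_foo_enc_class_init (GstFooEncClass * klass)
{
  GstBaseVideoEncoderClass *encoder_class = GST_BASE_VIDEO_ENCODER_CLASS (klass);

  /* mandatory: receives each collected input frame */
  encoder_class->handle_frame = GST_DEBUG_FUNCPTR (gst_foo_enc_handle_frame);
  /* recommended: input format notification and fixed src pad caps */
  encoder_class->set_format = GST_DEBUG_FUNCPTR (gst_foo_enc_set_format);
  encoder_class->get_caps = GST_DEBUG_FUNCPTR (gst_foo_enc_get_caps);
  /* optional: global resource setup and teardown */
  encoder_class->start = GST_DEBUG_FUNCPTR (gst_foo_enc_start);
  encoder_class->stop = GST_DEBUG_FUNCPTR (gst_foo_enc_stop);

  /* pad templates named "sink" and "src" remain the subclass'
   * responsibility (added in base_init in the 0.10 scheme) */
}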
@@ -595,6 +679,18 @@ gst_base_video_encoder_change_state (GstElement * element,
return ret;
}
/**
* gst_base_video_encoder_finish_frame:
* @base_video_encoder: a #GstBaseVideoEncoder
* @frame: an encoded #GstVideoFrame
*
* @frame must have a valid encoded data buffer, whose metadata fields
* are then appropriately set according to frame data.
* It is subsequently pushed downstream or provided to @shape_output.
* In any case, the frame is considered finished and released.
*
* Returns: a #GstFlowReturn resulting from sending data downstream
*/
GstFlowReturn
gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
GstVideoFrame * frame)
@@ -602,6 +698,8 @@ gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
GstFlowReturn ret;
GstBaseVideoEncoderClass *base_video_encoder_class;
g_return_val_if_fail (frame->src_buffer != NULL, GST_FLOW_ERROR);
base_video_encoder_class =
GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);
@@ -635,6 +733,10 @@ gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
GST_BASE_VIDEO_CODEC (base_video_encoder)->frames =
g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame);
/* FIXME get rid of this ?
* seems a roundabout way that adds little benefit to simply get
* and subsequently set. subclass is adult enough to set_caps itself ...
* so simply check/ensure/assert that src pad caps are set by now */
if (!base_video_encoder->set_output_caps) {
GstCaps *caps;
@@ -699,12 +801,26 @@ gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
return ret;
}
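
As a usage sketch for gst_base_video_encoder_finish_frame(): assuming the sink_buffer/src_buffer fields of #GstVideoFrame and a hypothetical foo_codec_encode() helper (neither introduced by this commit), a subclass @handle_frame could look as follows.

/* hypothetical codec helper: consumes raw input, returns an encoded buffer
 * or NULL when more input is needed before output can be produced */
static GstBuffer *foo_codec_encode (const guint8 * data, guint size);

static GstFlowReturn
gst_foo_enc_handle_frame (GstBaseVideoEncoder * encoder, GstVideoFrame * frame)
{
  GstBuffer *out;

  out = foo_codec_encode (GST_BUFFER_DATA (frame->sink_buffer),
      GST_BUFFER_SIZE (frame->sink_buffer));
  if (out == NULL)
    return GST_FLOW_OK;

  /* attach the encoded data; the base class sets metadata, pushes it
   * downstream (or hands it to @shape_output) and releases the frame */
  frame->src_buffer = out;
  return gst_base_video_encoder_finish_frame (encoder, frame);
}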
/**
* gst_base_video_encoder_get_state:
* @base_video_encoder: a #GstBaseVideoEncoder
*
* Returns: #GstVideoState describing format of video data.
*/
const GstVideoState *
gst_base_video_encoder_get_state (GstBaseVideoEncoder * base_video_encoder)
{
return &GST_BASE_VIDEO_CODEC (base_video_encoder)->state;
}
/**
* gst_base_video_encoder_set_latency:
* @base_video_encoder: a #GstBaseVideoEncoder
* @min_latency: minimum latency
* @max_latency: maximum latency
*
 * Informs the base class of the encoding latency.
*/
void
gst_base_video_encoder_set_latency (GstBaseVideoEncoder * base_video_encoder,
GstClockTime min_latency, GstClockTime max_latency)
@@ -721,6 +837,14 @@ gst_base_video_encoder_set_latency (GstBaseVideoEncoder * base_video_encoder,
gst_message_new_latency (GST_OBJECT_CAST (base_video_encoder)));
}
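
For example, a subclass with a fixed algorithmic delay could report it from @set_format. A sketch, assuming the fps_n/fps_d fields of #GstVideoState describe the input framerate and a hypothetical codec that buffers two frames internally:

static gboolean
gst_foo_enc_set_format (GstBaseVideoEncoder * encoder, GstVideoState * state)
{
  if (state->fps_n > 0) {
    /* two frame durations of latency, min == max */
    GstClockTime latency =
        gst_util_uint64_scale (2 * GST_SECOND, state->fps_d, state->fps_n);

    gst_base_video_encoder_set_latency (encoder, latency, latency);
  }

  return TRUE;
}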
/**
* gst_base_video_encoder_set_latency_fields:
* @base_video_encoder: a #GstBaseVideoEncoder
 * @n_fields: latency in fields
 *
 * Informs the base class of the encoding latency in terms of fields (applied
 * to both min and max latency).
*/
void
gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder *
base_video_encoder, int n_fields)
@@ -735,6 +859,12 @@ gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder *
}
/**
* gst_base_video_encoder_get_oldest_frame:
* @base_video_encoder: a #GstBaseVideoEncoder
*
* Returns: oldest unfinished pending #GstVideoFrame
*/
GstVideoFrame *
gst_base_video_encoder_get_oldest_frame (GstBaseVideoEncoder *
base_video_encoder)
@@ -747,3 +877,6 @@ gst_base_video_encoder_get_oldest_frame (GstBaseVideoEncoder *
return NULL;
return (GstVideoFrame *) (g->data);
}
/* FIXME there could probably be more of these;
* get by presentation_number, by presentation_time ? */
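
A typical use of gst_base_video_encoder_get_oldest_frame() is an encoder whose codec delivers output with a delay: each encoded payload is paired with the oldest still-pending input frame. A rough sketch with hypothetical names:

static void
gst_foo_enc_output_available (GstBaseVideoEncoder * encoder, GstBuffer * payload)
{
  GstVideoFrame *frame;

  frame = gst_base_video_encoder_get_oldest_frame (encoder);
  if (frame == NULL) {
    /* nothing pending; drop the stray payload */
    gst_buffer_unref (payload);
    return;
  }

  frame->src_buffer = payload;
  gst_base_video_encoder_finish_frame (encoder, frame);
}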

gstbasevideoencoder.h

@@ -1,5 +1,8 @@
/* GStreamer
* Copyright (C) 2008 David Schleef <ds@schleef.org>
* Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
* Copyright (C) 2011 Nokia Corporation. All rights reserved.
* Contact: Stefan Kost <stefan.kost@nokia.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -59,27 +62,34 @@ G_BEGIN_DECLS
typedef struct _GstBaseVideoEncoder GstBaseVideoEncoder;
typedef struct _GstBaseVideoEncoderClass GstBaseVideoEncoderClass;
/**
* GstBaseVideoEncoder:
 * @base_video_codec: the parent #GstBaseVideoCodec.
*
* The opaque #GstBaseVideoEncoder data structure.
*/
struct _GstBaseVideoEncoder
{
GstBaseVideoCodec base_video_codec;
/*< protected >*/
  gboolean sink_clipping;

  guint64 presentation_frame_number;
  int distance_from_sync;

  gboolean force_keyframe;

  /*< private >*/
  /* FIXME move to real private part ?
   * (and introduce a context ?) */
  gboolean set_output_caps;
  gboolean drained;

  gint64 min_latency;
  gint64 max_latency;

  GstEvent *force_keyunit_event;
union {
void *padding;
@@ -87,39 +97,84 @@ struct _GstBaseVideoEncoder
} a;
/* FIXME before moving to base */
  void *padding[GST_PADDING_LARGE-1];
};
/**
* GstBaseVideoEncoderClass:
* @start: Optional.
* Called when the element starts processing.
* Allows opening external resources.
* @stop: Optional.
* Called when the element stops processing.
* Allows closing external resources.
* @set_format: Optional.
* Notifies subclass of incoming data format.
* GstVideoState fields have already been
* set according to provided caps.
* @handle_frame: Provides input frame to subclass.
* @finish: Optional.
* Called to request subclass to dispatch any pending remaining
* data (e.g. at EOS).
 * @shape_output: Optional.
 * Allows the subclass to push the frame downstream in whatever
 * shape or form it deems appropriate. If not provided, the
 * encoded frame data is simply pushed downstream as-is.
 * @event: Optional.
 * Event handler on the sink pad. This function should return
 * TRUE if the event was handled and should be discarded
 * (i.e. not unref'ed).
 * @get_caps: Optional, but recommended.
 * Provides src pad caps to the base class.
*
* Subclasses can override any of the available virtual methods or not, as
* needed. At minimum @handle_frame needs to be overridden, and @set_format
* and @get_caps are likely needed as well.
*/
struct _GstBaseVideoEncoderClass
{
  GstBaseVideoCodecClass base_video_codec_class;
/*< public >*/
/* virtual methods for subclasses */
gboolean (*start) (GstBaseVideoEncoder *coder);
gboolean (*stop) (GstBaseVideoEncoder *coder);
gboolean (*set_format) (GstBaseVideoEncoder *coder,
GstVideoState *state);
GstFlowReturn (*handle_frame) (GstBaseVideoEncoder *coder,
GstVideoFrame *frame);
gboolean (*finish) (GstBaseVideoEncoder *coder);
GstFlowReturn (*shape_output) (GstBaseVideoEncoder *coder,
GstVideoFrame *frame);
gboolean (*event) (GstBaseVideoEncoder *coder,
GstEvent *event);
GstCaps * (*get_caps) (GstBaseVideoEncoder *coder);
/*< private >*/
/* FIXME before moving to base */
  gpointer _gst_reserved[GST_PADDING_LARGE];
};
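
As a sketch of the recommended @get_caps, a subclass can derive fixed src pad caps from the configured state; the "video/x-foo" media type is illustrative, and the width/height/fps fields of #GstVideoState are assumed here.

static GstCaps *
gst_foo_enc_get_caps (GstBaseVideoEncoder * encoder)
{
  const GstVideoState *state = gst_base_video_encoder_get_state (encoder);

  /* fixed caps describing the (hypothetical) encoded format */
  return gst_caps_new_simple ("video/x-foo",
      "width", G_TYPE_INT, state->width,
      "height", G_TYPE_INT, state->height,
      "framerate", GST_TYPE_FRACTION, state->fps_n, state->fps_d, NULL);
}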
GType gst_base_video_encoder_get_type (void);

const GstVideoState* gst_base_video_encoder_get_state (GstBaseVideoEncoder *coder);

GstVideoFrame* gst_base_video_encoder_get_oldest_frame (GstBaseVideoEncoder *coder);

GstFlowReturn gst_base_video_encoder_finish_frame (GstBaseVideoEncoder *base_video_encoder,
    GstVideoFrame *frame);

void gst_base_video_encoder_set_latency (GstBaseVideoEncoder *base_video_encoder,
    GstClockTime min_latency, GstClockTime max_latency);

void gst_base_video_encoder_set_latency_fields (GstBaseVideoEncoder *base_video_encoder,
    int n_fields);
G_END_DECLS