videoaggregator: Move aggregated_frame and the pad buffer into the private struct
The aggregated_frame is now called prepared_frame and is passed directly to the prepare_frame and clean_frame virtual methods. The currently queued buffer is now accessed through a getter on the video aggregator pad.
commit 1c8110ab17
parent 404b802846
6 changed files with 204 additions and 140 deletions
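For subclasses, the practical effect is the new prepare_frame()/clean_frame() signatures: the queued buffer and the GstVideoFrame to fill are handed in directly instead of being read from and stored on the pad. A minimal sketch of a subclass pad against the new signatures follows; the GstExamplePad naming and the plain GST_MAP_READ mapping are illustrative only and not part of this commit.

/* Hypothetical subclass pad: map the queued buffer into prepared_frame.
 * The base class passes buffer/prepared_frame in and clears the frame
 * struct again after clean_frame has run. */
static gboolean
gst_example_pad_prepare_frame (GstVideoAggregatorPad * vpad,
    GstVideoAggregator * vagg, GstBuffer * buffer,
    GstVideoFrame * prepared_frame)
{
  /* The mapped frame now lives in the pad's private struct, not in a
   * heap-allocated GstVideoFrame. */
  if (!gst_video_frame_map (prepared_frame, &vpad->info, buffer, GST_MAP_READ)) {
    GST_WARNING_OBJECT (vpad, "Could not map input buffer");
    return FALSE;
  }

  return TRUE;
}

static void
gst_example_pad_clean_frame (GstVideoAggregatorPad * vpad,
    GstVideoAggregator * vagg, GstVideoFrame * prepared_frame)
{
  /* Only unmap if prepare_frame actually mapped something. */
  if (prepared_frame->buffer)
    gst_video_frame_unmap (prepared_frame);
}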
@@ -39,9 +39,10 @@ static void gst_gl_mixer_pad_get_property (GObject * object, guint prop_id,
 static void gst_gl_mixer_pad_set_property (GObject * object, guint prop_id,
     const GValue * value, GParamSpec * pspec);
 static gboolean gst_gl_mixer_pad_prepare_frame (GstVideoAggregatorPad * vpad,
-    GstVideoAggregator * vagg);
+    GstVideoAggregator * vagg, GstBuffer * buffer,
+    GstVideoFrame * prepared_frame);
 static void gst_gl_mixer_pad_clean_frame (GstVideoAggregatorPad * vpad,
-    GstVideoAggregator * vagg);
+    GstVideoAggregator * vagg, GstVideoFrame * prepared_frame);
 
 enum
 {
@@ -101,54 +102,45 @@ gst_gl_mixer_pad_set_property (GObject * object, guint prop_id,
 
 static gboolean
 gst_gl_mixer_pad_prepare_frame (GstVideoAggregatorPad * vpad,
-    GstVideoAggregator * vagg)
+    GstVideoAggregator * vagg, GstBuffer * buffer,
+    GstVideoFrame * prepared_frame)
 {
   GstGLMixerPad *pad = GST_GL_MIXER_PAD (vpad);
   GstGLMixer *mix = GST_GL_MIXER (vagg);
+  GstVideoInfo gl_info;
+  GstGLSyncMeta *sync_meta;
 
   pad->current_texture = 0;
-  vpad->aggregated_frame = NULL;
-
-  if (vpad->buffer != NULL) {
-    GstVideoInfo gl_info;
-    GstVideoFrame aggregated_frame;
-    GstGLSyncMeta *sync_meta;
-
-    gst_video_info_set_format (&gl_info,
-        GST_VIDEO_FORMAT_RGBA,
-        GST_VIDEO_INFO_WIDTH (&vpad->info),
-        GST_VIDEO_INFO_HEIGHT (&vpad->info));
+
+  gst_video_info_set_format (&gl_info,
+      GST_VIDEO_FORMAT_RGBA,
+      GST_VIDEO_INFO_WIDTH (&vpad->info), GST_VIDEO_INFO_HEIGHT (&vpad->info));
 
-    sync_meta = gst_buffer_get_gl_sync_meta (vpad->buffer);
-    if (sync_meta)
-      gst_gl_sync_meta_wait (sync_meta, GST_GL_BASE_MIXER (mix)->context);
+  sync_meta = gst_buffer_get_gl_sync_meta (buffer);
+  if (sync_meta)
+    gst_gl_sync_meta_wait (sync_meta, GST_GL_BASE_MIXER (mix)->context);
 
-    if (!gst_video_frame_map (&aggregated_frame, &gl_info, vpad->buffer,
-            GST_MAP_READ | GST_MAP_GL)) {
-      GST_ERROR_OBJECT (pad, "Failed to map input frame");
-      return FALSE;
-    }
-
-    pad->current_texture = *(guint *) aggregated_frame.data[0];
-
-    vpad->aggregated_frame = g_slice_new0 (GstVideoFrame);
-    *vpad->aggregated_frame = aggregated_frame;
+  if (!gst_video_frame_map (prepared_frame, &gl_info, buffer,
+          GST_MAP_READ | GST_MAP_GL)) {
+    GST_ERROR_OBJECT (pad, "Failed to map input frame");
+    return FALSE;
+  }
+
+  pad->current_texture = *(guint *) prepared_frame->data[0];
 
   return TRUE;
 }
 
 static void
 gst_gl_mixer_pad_clean_frame (GstVideoAggregatorPad * vpad,
-    GstVideoAggregator * vagg)
+    GstVideoAggregator * vagg, GstVideoFrame * prepared_frame)
 {
   GstGLMixerPad *pad = GST_GL_MIXER_PAD (vpad);
 
   pad->current_texture = 0;
-  if (vpad->aggregated_frame) {
-    gst_video_frame_unmap (vpad->aggregated_frame);
-    g_slice_free (GstVideoFrame, vpad->aggregated_frame);
-    vpad->aggregated_frame = NULL;
+  if (prepared_frame->buffer) {
+    gst_video_frame_unmap (prepared_frame);
+    memset (prepared_frame, 0, sizeof (GstVideoFrame));
   }
 }
@@ -88,7 +88,7 @@ G_DEFINE_TYPE_WITH_CODE (GstGLStereoMix, gst_gl_stereo_mix, GST_TYPE_GL_MIXER,
 
 static GstCaps *_update_caps (GstVideoAggregator * vagg, GstCaps * caps);
 static gboolean _negotiated_caps (GstAggregator * aggregator, GstCaps * caps);
-gboolean gst_gl_stereo_mix_make_output (GstGLStereoMix * mix);
+static gboolean gst_gl_stereo_mix_make_output (GstGLStereoMix * mix);
 static gboolean gst_gl_stereo_mix_process_frames (GstGLStereoMix * mixer);
 
 #define DEFAULT_DOWNMIX GST_GL_STEREO_DOWNMIX_ANAGLYPH_GREEN_MAGENTA_DUBOIS
@@ -301,7 +301,7 @@ gst_gl_stereo_mix_get_output_buffer (GstVideoAggregator * videoaggregator,
   return ret;
 }
 
-gboolean
+static gboolean
 gst_gl_stereo_mix_make_output (GstGLStereoMix * mix)
 {
   GList *walk;
@@ -316,11 +316,12 @@ gst_gl_stereo_mix_make_output (GstGLStereoMix * mix)
   while (walk) {
     GstVideoAggregatorPad *vaggpad = walk->data;
     GstGLStereoMixPad *pad = walk->data;
+    GstBuffer *buffer = gst_video_aggregator_pad_get_current_buffer (vaggpad);
 
     GST_LOG_OBJECT (mix, "Checking pad %" GST_PTR_FORMAT, vaggpad);
 
-    if (vaggpad->buffer != NULL) {
-      pad->current_buffer = vaggpad->buffer;
+    if (buffer != NULL) {
+      pad->current_buffer = buffer;
 
       GST_DEBUG_OBJECT (pad, "Got buffer %" GST_PTR_FORMAT,
           pad->current_buffer);
@@ -1583,9 +1583,10 @@ gst_gl_video_mixer_callback (gpointer stuff)
     {
       GstVideoAffineTransformationMeta *af_meta;
       gfloat matrix[16];
+      GstBuffer *buffer =
+          gst_video_aggregator_pad_get_current_buffer (vagg_pad);
 
-      af_meta =
-          gst_buffer_get_video_affine_transformation_meta (vagg_pad->buffer);
+      af_meta = gst_buffer_get_video_affine_transformation_meta (buffer);
       gst_gl_get_affine_transformation_meta_as_ndc_ext (af_meta, matrix);
       gst_gl_shader_set_uniform_matrix_4fv (video_mixer->shader,
          "u_transformation", 1, FALSE, matrix);
@@ -65,6 +65,9 @@ enum
 
 struct _GstVideoAggregatorPadPrivate
 {
+  GstBuffer *buffer;
+  GstVideoFrame prepared_frame;
+
   /* properties */
   guint zorder;
   gboolean repeat_after_eos;
@@ -146,7 +149,7 @@ _flush_pad (GstAggregatorPad * aggpad, GstAggregator * aggregator)
   GstVideoAggregatorPad *pad = GST_VIDEO_AGGREGATOR_PAD (aggpad);
 
   gst_video_aggregator_reset_qos (vagg);
-  gst_buffer_replace (&pad->buffer, NULL);
+  gst_buffer_replace (&pad->priv->buffer, NULL);
   pad->priv->start_time = -1;
   pad->priv->end_time = -1;
 
@@ -259,28 +262,26 @@ gst_video_aggregator_pad_finalize (GObject * o)
 
 static gboolean
 gst_video_aggregator_pad_prepare_frame (GstVideoAggregatorPad * pad,
-    GstVideoAggregator * vagg)
+    GstVideoAggregator * vagg, GstBuffer * buffer,
+    GstVideoFrame * prepared_frame)
 {
-  guint outsize;
-  GstVideoFrame *converted_frame;
-  GstBuffer *converted_buf = NULL;
-  GstVideoFrame *frame;
-  static GstAllocationParams params = { 0, 15, 0, 0, };
+  GstVideoFrame frame;
 
-  if (!pad->buffer)
+  if (!pad->priv->buffer)
     return TRUE;
 
-  frame = g_slice_new0 (GstVideoFrame);
-
-  if (!gst_video_frame_map (frame, &pad->info, pad->buffer, GST_MAP_READ)) {
+  if (!gst_video_frame_map (&frame, &pad->info, pad->priv->buffer,
+          GST_MAP_READ)) {
     GST_WARNING_OBJECT (vagg, "Could not map input buffer");
     return FALSE;
   }
 
   if (pad->priv->convert) {
+    GstVideoFrame converted_frame;
+    GstBuffer *converted_buf = NULL;
+    static GstAllocationParams params = { 0, 15, 0, 0, };
     gint converted_size;
+    guint outsize;
 
-    converted_frame = g_slice_new0 (GstVideoFrame);
-
     /* We wait until here to set the conversion infos, in case vagg->info changed */
     converted_size = pad->priv->conversion_info.size;
@@ -288,37 +289,32 @@ gst_video_aggregator_pad_prepare_frame (GstVideoAggregatorPad * pad,
     converted_size = converted_size > outsize ? converted_size : outsize;
     converted_buf = gst_buffer_new_allocate (NULL, converted_size, &params);
 
-    if (!gst_video_frame_map (converted_frame, &(pad->priv->conversion_info),
+    if (!gst_video_frame_map (&converted_frame, &(pad->priv->conversion_info),
            converted_buf, GST_MAP_READWRITE)) {
       GST_WARNING_OBJECT (vagg, "Could not map converted frame");
 
-      g_slice_free (GstVideoFrame, converted_frame);
-      gst_video_frame_unmap (frame);
-      g_slice_free (GstVideoFrame, frame);
+      gst_video_frame_unmap (&frame);
       return FALSE;
     }
 
-    gst_video_converter_frame (pad->priv->convert, frame, converted_frame);
+    gst_video_converter_frame (pad->priv->convert, &frame, &converted_frame);
     pad->priv->converted_buffer = converted_buf;
-    gst_video_frame_unmap (frame);
-    g_slice_free (GstVideoFrame, frame);
+    gst_video_frame_unmap (&frame);
+    *prepared_frame = converted_frame;
   } else {
-    converted_frame = frame;
+    *prepared_frame = frame;
   }
 
-  pad->aggregated_frame = converted_frame;
-
   return TRUE;
 }
 
 static void
 gst_video_aggregator_pad_clean_frame (GstVideoAggregatorPad * pad,
-    GstVideoAggregator * vagg)
+    GstVideoAggregator * vagg, GstVideoFrame * prepared_frame)
 {
-  if (pad->aggregated_frame) {
-    gst_video_frame_unmap (pad->aggregated_frame);
-    g_slice_free (GstVideoFrame, pad->aggregated_frame);
-    pad->aggregated_frame = NULL;
+  if (prepared_frame->buffer) {
+    gst_video_frame_unmap (prepared_frame);
+    memset (prepared_frame, 0, sizeof (GstVideoFrame));
   }
 
   if (pad->priv->converted_buffer) {
@@ -368,11 +364,77 @@ gst_video_aggregator_pad_init (GstVideoAggregatorPad * vaggpad)
   vaggpad->priv->zorder = DEFAULT_PAD_ZORDER;
   vaggpad->priv->repeat_after_eos = DEFAULT_PAD_REPEAT_AFTER_EOS;
   vaggpad->priv->converted_buffer = NULL;
-  vaggpad->aggregated_frame = NULL;
+  memset (&vaggpad->priv->prepared_frame, 0, sizeof (GstVideoFrame));
 
   vaggpad->priv->convert = NULL;
 }
 
+/**
+ * gst_video_aggregator_pad_has_current_buffer:
+ * @pad: a #GstVideoAggregatorPad
+ *
+ * Checks if the pad currently has a buffer queued that is going to be used
+ * for the current output frame.
+ *
+ * This must only be called from the aggregate_frames() virtual method,
+ * or from the prepare_frame() virtual method of the aggregator pads.
+ *
+ * Returns: %TRUE if the pad has currently a buffer queued
+ */
+gboolean
+gst_video_aggregator_pad_has_current_buffer (GstVideoAggregatorPad * pad)
+{
+  g_return_val_if_fail (GST_IS_VIDEO_AGGREGATOR_PAD (pad), FALSE);
+
+  return pad->priv->buffer != NULL;
+}
+
+/**
+ * gst_video_aggregator_pad_get_current_buffer:
+ * @pad: a #GstVideoAggregatorPad
+ *
+ * Returns the currently queued buffer that is going to be used
+ * for the current output frame.
+ *
+ * This must only be called from the aggregate_frames() virtual method,
+ * or from the prepare_frame() virtual method of the aggregator pads.
+ *
+ * The return value is only valid until aggregate_frames() or prepare_frames()
+ * returns.
+ *
+ * Returns: (transfer none): The currently queued buffer
+ */
+GstBuffer *
+gst_video_aggregator_pad_get_current_buffer (GstVideoAggregatorPad * pad)
+{
+  g_return_val_if_fail (GST_IS_VIDEO_AGGREGATOR_PAD (pad), NULL);
+
+  return pad->priv->buffer;
+}
+
+/**
+ * gst_video_aggregator_pad_get_prepared_frame:
+ * @pad: a #GstVideoAggregatorPad
+ *
+ * Returns the currently prepared video frame that has to be aggregated into
+ * the current output frame.
+ *
+ * This must only be called from the aggregate_frames() virtual method,
+ * or from the prepare_frame() virtual method of the aggregator pads.
+ *
+ * The return value is only valid until aggregate_frames() or prepare_frames()
+ * returns.
+ *
+ * Returns: (transfer none): The currently prepared video frame
+ */
+GstVideoFrame *
+gst_video_aggregator_pad_get_prepared_frame (GstVideoAggregatorPad * pad)
+{
+  g_return_val_if_fail (GST_IS_VIDEO_AGGREGATOR_PAD (pad), NULL);
+
+  return pad->priv->prepared_frame.buffer ? &pad->priv->prepared_frame : NULL;
+}
+
 /**************************************
  * GstVideoAggregator implementation *
  **************************************/
@@ -1017,7 +1079,7 @@ gst_video_aggregator_reset (GstVideoAggregator * vagg)
   for (l = GST_ELEMENT (vagg)->sinkpads; l; l = l->next) {
     GstVideoAggregatorPad *p = l->data;
 
-    gst_buffer_replace (&p->buffer, NULL);
+    gst_buffer_replace (&p->priv->buffer, NULL);
     p->priv->start_time = -1;
     p->priv->end_time = -1;
 
@@ -1038,7 +1100,7 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
   gboolean need_reconfigure = FALSE;
   GstSegment *agg_segment = &GST_AGGREGATOR_PAD (agg->srcpad)->segment;
 
-  /* get a set of buffers into pad->buffer that are within output_start_running_time
+  /* get a set of buffers into pad->priv->buffer that are within output_start_running_time
    * and output_end_running_time taking into account finished and unresponsive pads */
 
   GST_OBJECT_LOCK (vagg);
@@ -1078,7 +1140,7 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
           gst_segment_to_running_time (&segment, GST_FORMAT_TIME, start_time);
 
       if (start_time >= output_end_running_time) {
-        if (pad->buffer) {
+        if (pad->priv->buffer) {
          GST_DEBUG_OBJECT (pad, "buffer duration is -1, start_time >= "
              "output_end_running_time. Keeping previous buffer");
        } else {
@@ -1090,7 +1152,7 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
       } else if (start_time < output_start_running_time) {
        GST_DEBUG_OBJECT (pad, "buffer duration is -1, start_time < "
            "output_start_running_time. Discarding old buffer");
-        gst_buffer_replace (&pad->buffer, buf);
+        gst_buffer_replace (&pad->priv->buffer, buf);
        if (pad->priv->pending_vinfo.finfo) {
          pad->info = pad->priv->pending_vinfo;
          need_reconfigure = TRUE;
@@ -1103,7 +1165,7 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
        }
        gst_buffer_unref (buf);
        buf = gst_aggregator_pad_pop_buffer (bpad);
-        gst_buffer_replace (&pad->buffer, buf);
+        gst_buffer_replace (&pad->priv->buffer, buf);
        if (pad->priv->pending_vinfo.finfo) {
          pad->info = pad->priv->pending_vinfo;
          need_reconfigure = TRUE;
@@ -1168,7 +1230,7 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
        GST_DEBUG_OBJECT (pad,
            "Taking new buffer with start time %" GST_TIME_FORMAT,
            GST_TIME_ARGS (start_time));
-        gst_buffer_replace (&pad->buffer, buf);
+        gst_buffer_replace (&pad->priv->buffer, buf);
        if (pad->priv->pending_vinfo.finfo) {
          pad->info = pad->priv->pending_vinfo;
          need_reconfigure = TRUE;
@@ -1186,7 +1248,7 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
        gst_buffer_unref (buf);
        eos = FALSE;
      } else {
-        gst_buffer_replace (&pad->buffer, buf);
+        gst_buffer_replace (&pad->priv->buffer, buf);
        if (pad->priv->pending_vinfo.finfo) {
          pad->info = pad->priv->pending_vinfo;
          need_reconfigure = TRUE;
@@ -1218,13 +1280,13 @@ gst_video_aggregator_fill_queues (GstVideoAggregator * vagg,
          GST_DEBUG ("I just need more data");
          need_more_data = TRUE;
        } else {
-          gst_buffer_replace (&pad->buffer, NULL);
+          gst_buffer_replace (&pad->priv->buffer, NULL);
        }
      } else if (is_eos) {
        eos = FALSE;
      }
    } else if (is_eos) {
-      gst_buffer_replace (&pad->buffer, NULL);
+      gst_buffer_replace (&pad->priv->buffer, NULL);
    }
  }
 }
@@ -1249,10 +1311,10 @@ sync_pad_values (GstElement * vagg, GstPad * pad, gpointer user_data)
   GstClockTime timestamp;
   gint64 stream_time;
 
-  if (vpad->buffer == NULL)
+  if (vpad->priv->buffer == NULL)
     return TRUE;
 
-  timestamp = GST_BUFFER_TIMESTAMP (vpad->buffer);
+  timestamp = GST_BUFFER_TIMESTAMP (vpad->priv->buffer);
   GST_OBJECT_LOCK (bpad);
   stream_time = gst_segment_to_stream_time (&bpad->segment, GST_FORMAT_TIME,
       timestamp);
@@ -1272,10 +1334,13 @@ prepare_frames (GstElement * agg, GstPad * pad, gpointer user_data)
   GstVideoAggregatorPadClass *vaggpad_class =
       GST_VIDEO_AGGREGATOR_PAD_GET_CLASS (pad);
 
-  if (vpad->buffer == NULL || !vaggpad_class->prepare_frame)
+  memset (&vpad->priv->prepared_frame, 0, sizeof (GstVideoFrame));
+
+  if (vpad->priv->buffer == NULL || !vaggpad_class->prepare_frame)
     return TRUE;
 
-  return vaggpad_class->prepare_frame (vpad, GST_VIDEO_AGGREGATOR_CAST (agg));
+  return vaggpad_class->prepare_frame (vpad, GST_VIDEO_AGGREGATOR_CAST (agg),
+      vpad->priv->buffer, &vpad->priv->prepared_frame);
 }
 
 static gboolean
@@ -1287,7 +1352,9 @@ clean_pad (GstElement * agg, GstPad * pad, gpointer user_data)
       GST_VIDEO_AGGREGATOR_PAD_GET_CLASS (pad);
 
   if (vaggpad_class->clean_frame)
-    vaggpad_class->clean_frame (vpad, vagg);
+    vaggpad_class->clean_frame (vpad, vagg, &vpad->priv->prepared_frame);
+
+  memset (&vpad->priv->prepared_frame, 0, sizeof (GstVideoFrame));
 
   return TRUE;
 }
@@ -1749,11 +1816,11 @@ gst_video_aggregator_flush (GstAggregator * agg)
 
     /* Convert to the output segment rate */
     if (ABS (agg_segment->rate) != abs_rate) {
-      if (ABS (agg_segment->rate) != 1.0 && p->buffer) {
+      if (ABS (agg_segment->rate) != 1.0 && p->priv->buffer) {
        p->priv->start_time /= ABS (agg_segment->rate);
        p->priv->end_time /= ABS (agg_segment->rate);
      }
-      if (abs_rate != 1.0 && p->buffer) {
+      if (abs_rate != 1.0 && p->priv->buffer) {
        p->priv->start_time *= abs_rate;
        p->priv->end_time *= abs_rate;
      }
@@ -1879,7 +1946,7 @@ gst_video_aggregator_release_pad (GstElement * element, GstPad * pad)
   if (last_pad)
     gst_video_aggregator_reset (vagg);
 
-  gst_buffer_replace (&vaggpad->buffer, NULL);
+  gst_buffer_replace (&vaggpad->priv->buffer, NULL);
 
   GST_ELEMENT_CLASS (gst_video_aggregator_parent_class)->release_pad
       (GST_ELEMENT (vagg), pad);
@@ -71,9 +71,6 @@ struct _GstVideoAggregatorPad
   /* read-only, with OBJECT_LOCK */
   GstVideoInfo info;
 
-  GstBuffer *buffer;
-  GstVideoFrame *aggregated_frame;
-
   /* Subclasses can force an alpha channel in the (input thus output)
    * colorspace format */
   gboolean needs_alpha;
@@ -102,10 +99,13 @@ struct _GstVideoAggregatorPadClass
       GstVideoInfo * wanted_info);
 
   gboolean (*prepare_frame) (GstVideoAggregatorPad * pad,
-      GstVideoAggregator * videoaggregator);
+      GstVideoAggregator * videoaggregator,
+      GstBuffer * buffer,
+      GstVideoFrame * prepared_frame);
 
   void (*clean_frame) (GstVideoAggregatorPad * pad,
-      GstVideoAggregator * videoaggregator);
+      GstVideoAggregator * videoaggregator,
+      GstVideoFrame * prepared_frame);
 
   gpointer _gst_reserved[GST_PADDING_LARGE];
 };
@@ -113,6 +113,15 @@ struct _GstVideoAggregatorPadClass
 GST_VIDEO_BAD_API
 GType gst_video_aggregator_pad_get_type (void);
 
+GST_VIDEO_BAD_API
+gboolean gst_video_aggregator_pad_has_current_buffer (GstVideoAggregatorPad *pad);
+
+GST_VIDEO_BAD_API
+GstBuffer * gst_video_aggregator_pad_get_current_buffer (GstVideoAggregatorPad *pad);
+
+GST_VIDEO_BAD_API
+GstVideoFrame * gst_video_aggregator_pad_get_prepared_frame (GstVideoAggregatorPad *pad);
+
 #define GST_TYPE_VIDEO_AGGREGATOR (gst_video_aggregator_get_type())
 #define GST_VIDEO_AGGREGATOR(obj) \
   (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_AGGREGATOR, GstVideoAggregator))
@@ -376,14 +376,13 @@ clamp_rectangle (gint x, gint y, gint w, gint h, gint outer_width,
 
 static gboolean
 gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
-    GstVideoAggregator * vagg)
+    GstVideoAggregator * vagg, GstBuffer * buffer,
+    GstVideoFrame * prepared_frame)
 {
   GstCompositor *comp = GST_COMPOSITOR (vagg);
   GstCompositorPad *cpad = GST_COMPOSITOR_PAD (pad);
   guint outsize;
-  GstVideoFrame *converted_frame;
-  GstBuffer *converted_buf = NULL;
-  GstVideoFrame *frame;
+  GstVideoFrame frame;
   static GstAllocationParams params = { 0, 15, 0, 0, };
   gint width, height;
   gboolean frame_obscured = FALSE;
@@ -392,9 +391,6 @@ gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
    * Due to the clamping, this is different from the frame width/height above. */
   GstVideoRectangle frame_rect;
 
-  if (!pad->buffer)
-    return TRUE;
-
   /* There's three types of width/height here:
    * 1. GST_VIDEO_FRAME_WIDTH/HEIGHT:
    *     The frame width/height (same as pad->info.height/width;
@@ -476,7 +472,6 @@ gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
 
   if (cpad->alpha == 0.0) {
     GST_DEBUG_OBJECT (vagg, "Pad has alpha 0.0, not converting frame");
-    converted_frame = NULL;
     goto done;
   }
 
@@ -486,7 +481,6 @@ gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
   if (frame_rect.w == 0 || frame_rect.h == 0) {
     GST_DEBUG_OBJECT (vagg, "Resulting frame is zero-width or zero-height "
         "(w: %i, h: %i), skipping", frame_rect.w, frame_rect.h);
-    converted_frame = NULL;
     goto done;
   }
 
@@ -523,9 +517,9 @@ gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
 
     /* Check if there's a buffer to be aggregated, ensure it can't have an alpha
      * channel, then check opacity and frame boundaries */
-    if (pad2->buffer && cpad2->alpha == 1.0 &&
-        !GST_VIDEO_INFO_HAS_ALPHA (&pad2->info) &&
-        is_rectangle_contained (frame_rect, frame2_rect)) {
+    if (gst_video_aggregator_pad_has_current_buffer (pad2)
+        && cpad2->alpha == 1.0 && !GST_VIDEO_INFO_HAS_ALPHA (&pad2->info)
+        && is_rectangle_contained (frame_rect, frame2_rect)) {
       frame_obscured = TRUE;
       GST_DEBUG_OBJECT (pad, "%ix%i@(%i,%i) obscured by %s %ix%i@(%i,%i) "
           "in output of size %ix%i; skipping frame", frame_rect.w, frame_rect.h,
@@ -538,22 +532,18 @@ gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
   }
   GST_OBJECT_UNLOCK (vagg);
 
-  if (frame_obscured) {
-    converted_frame = NULL;
+  if (frame_obscured)
     goto done;
-  }
-
-  frame = g_slice_new0 (GstVideoFrame);
 
-  if (!gst_video_frame_map (frame, &pad->info, pad->buffer, GST_MAP_READ)) {
+  if (!gst_video_frame_map (&frame, &pad->info, buffer, GST_MAP_READ)) {
     GST_WARNING_OBJECT (vagg, "Could not map input buffer");
     return FALSE;
   }
 
   if (cpad->convert) {
     gint converted_size;
+    GstVideoFrame converted_frame;
+    GstBuffer *converted_buf = NULL;
 
-    converted_frame = g_slice_new0 (GstVideoFrame);
-
     /* We wait until here to set the conversion infos, in case vagg->info changed */
     converted_size = GST_VIDEO_INFO_SIZE (&cpad->conversion_info);
@@ -561,40 +551,36 @@ gst_compositor_pad_prepare_frame (GstVideoAggregatorPad * pad,
     converted_size = converted_size > outsize ? converted_size : outsize;
     converted_buf = gst_buffer_new_allocate (NULL, converted_size, &params);
 
-    if (!gst_video_frame_map (converted_frame, &(cpad->conversion_info),
+    if (!gst_video_frame_map (&converted_frame, &(cpad->conversion_info),
            converted_buf, GST_MAP_READWRITE)) {
       GST_WARNING_OBJECT (vagg, "Could not map converted frame");
 
-      g_slice_free (GstVideoFrame, converted_frame);
-      gst_video_frame_unmap (frame);
-      g_slice_free (GstVideoFrame, frame);
+      gst_video_frame_unmap (&frame);
       return FALSE;
     }
 
-    gst_video_converter_frame (cpad->convert, frame, converted_frame);
+    gst_video_converter_frame (cpad->convert, &frame, &converted_frame);
     cpad->converted_buffer = converted_buf;
-    gst_video_frame_unmap (frame);
-    g_slice_free (GstVideoFrame, frame);
+    gst_video_frame_unmap (&frame);
+    *prepared_frame = converted_frame;
   } else {
-    converted_frame = frame;
+    *prepared_frame = frame;
   }
 
 done:
-  pad->aggregated_frame = converted_frame;
-
   return TRUE;
 }
 
 static void
 gst_compositor_pad_clean_frame (GstVideoAggregatorPad * pad,
-    GstVideoAggregator * vagg)
+    GstVideoAggregator * vagg, GstVideoFrame * prepared_frame)
 {
   GstCompositorPad *cpad = GST_COMPOSITOR_PAD (pad);
 
-  if (pad->aggregated_frame) {
-    gst_video_frame_unmap (pad->aggregated_frame);
-    g_slice_free (GstVideoFrame, pad->aggregated_frame);
-    pad->aggregated_frame = NULL;
+  if (prepared_frame->buffer) {
+    gst_video_frame_unmap (prepared_frame);
+    memset (prepared_frame, 0, sizeof (GstVideoFrame));
   }
 
   if (cpad->converted_buffer) {
@@ -1065,44 +1051,50 @@ gst_compositor_crossfade_frames (GstCompositor * self, GstVideoFrame * outframe)
   for (l = GST_ELEMENT (self)->sinkpads; l; l = l->next) {
     GstVideoAggregatorPad *pad = l->data;
     GstCompositorPad *compo_pad = GST_COMPOSITOR_PAD (pad);
+    GstVideoFrame *prepared_frame =
+        gst_video_aggregator_pad_get_prepared_frame (pad);
 
-    if (compo_pad->crossfade >= 0.0f && pad->aggregated_frame) {
+    if (compo_pad->crossfade >= 0.0f && prepared_frame) {
       gfloat alpha = compo_pad->crossfade * compo_pad->alpha;
       GstVideoAggregatorPad *npad = l->next ? l->next->data : NULL;
-      GstVideoFrame *nframe;
+      GstVideoFrame *next_prepared_frame;
+      GstVideoFrame nframe;
+
+      next_prepared_frame =
+          npad ? gst_video_aggregator_pad_get_prepared_frame (npad) : NULL;
 
       if (!all_crossfading) {
-        nframe = g_slice_new0 (GstVideoFrame);
-        gst_compositor_fill_transparent (self, outframe, nframe);
+        gst_compositor_fill_transparent (self, outframe, &nframe);
       } else {
-        nframe = outframe;
+        nframe = *outframe;
       }
 
-      self->overlay (pad->aggregated_frame,
+      self->overlay (prepared_frame,
          compo_pad->crossfaded ? 0 : compo_pad->xpos,
          compo_pad->crossfaded ? 0 : compo_pad->ypos,
-          alpha, nframe, COMPOSITOR_BLEND_MODE_ADDITIVE);
+          alpha, &nframe, COMPOSITOR_BLEND_MODE_ADDITIVE);
 
-      if (npad && npad->aggregated_frame) {
+      if (npad && next_prepared_frame) {
        GstCompositorPad *next_compo_pad = GST_COMPOSITOR_PAD (npad);
 
        alpha = (1.0 - compo_pad->crossfade) * next_compo_pad->alpha;
-        self->overlay (npad->aggregated_frame, next_compo_pad->xpos,
-            next_compo_pad->ypos, alpha, nframe,
+        self->overlay (next_prepared_frame, next_compo_pad->xpos,
+            next_compo_pad->ypos, alpha, &nframe,
            COMPOSITOR_BLEND_MODE_ADDITIVE);
 
        /* Replace frame with current frame */
-        gst_compositor_pad_clean_frame (npad, vagg);
-        npad->aggregated_frame = !all_crossfading ? nframe : NULL;
+        gst_compositor_pad_clean_frame (npad, vagg, next_prepared_frame);
+        if (!all_crossfading)
+          *next_prepared_frame = nframe;
        next_compo_pad->crossfaded = TRUE;
 
        /* Frame is now consumed, clean it up */
-        gst_compositor_pad_clean_frame (pad, vagg);
-        pad->aggregated_frame = NULL;
+        gst_compositor_pad_clean_frame (pad, vagg, prepared_frame);
      } else {
        GST_LOG_OBJECT (self, "Simply fading out as no following pad found");
-        gst_compositor_pad_clean_frame (pad, vagg);
-        pad->aggregated_frame = !all_crossfading ? nframe : NULL;
+        gst_compositor_pad_clean_frame (pad, vagg, prepared_frame);
+        if (!all_crossfading)
+          *prepared_frame = nframe;
        compo_pad->crossfaded = TRUE;
      }
    }
@@ -1156,9 +1148,11 @@ gst_compositor_aggregate_frames (GstVideoAggregator * vagg, GstBuffer * outbuf)
   for (l = GST_ELEMENT (vagg)->sinkpads; l; l = l->next) {
     GstVideoAggregatorPad *pad = l->data;
     GstCompositorPad *compo_pad = GST_COMPOSITOR_PAD (pad);
+    GstVideoFrame *prepared_frame =
+        gst_video_aggregator_pad_get_prepared_frame (pad);
 
-    if (pad->aggregated_frame != NULL) {
-      composite (pad->aggregated_frame,
+    if (prepared_frame != NULL) {
+      composite (prepared_frame,
          compo_pad->crossfaded ? 0 : compo_pad->xpos,
          compo_pad->crossfaded ? 0 : compo_pad->ypos, compo_pad->alpha,
          outframe, COMPOSITOR_BLEND_MODE_NORMAL);
|
Loading…
Reference in a new issue