dvdspu: render to AYUV overlay

Instead of only supporting direct writing of the SPU data into YUV frames,
render the SPU data to an intermediate AYUV overlay buffer. The overlay
data is then blended onto the video frame.

For the PGS format, the overlay buffer size is set to the size of the
Composition Window, and its position in the overlay composition is set
to the window position. The objects to render are now cropped when the
cropping flag is set.

For the Vobsub format, the overlay buffer size is set to the size of the
Display Area.

Once rendered, the overlay composition rectangle is moved and scaled
to fit the video output size, to avoid clipping.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/5827>
Author: Arnaud Vrac, 2013-01-23 17:59:01 +01:00 (committed by GStreamer Marge Bot)
Parent: 087e39564d
Commit: b0ce390d50
10 changed files with 416 additions and 483 deletions
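
The flow the new rendering path follows can be summarised with the stock
GstVideoOverlayComposition API. The sketch below is illustrative only and is
not part of the patch; the function name and the point where the SPU renderer
would fill the AYUV pixels are placeholders. It assumes a premultiplied-alpha
AYUV overlay placed at the window position (x, y) and blended onto an
already-mapped video frame, as the commit message describes.

/* Illustrative sketch, not part of the patch. */
#include <string.h>
#include <gst/video/video.h>
#include <gst/video/video-overlay-composition.h>

static void
blend_overlay_example (GstVideoFrame * video_frame, gint x, gint y,
    gint w, gint h)
{
  GstVideoInfo info;
  GstVideoFrame ovl_frame;
  GstBuffer *buffer;
  GstVideoOverlayRectangle *rect;
  GstVideoOverlayComposition *comp;

  /* Allocate an AYUV buffer the size of the SPU window */
  gst_video_info_init (&info);
  gst_video_info_set_format (&info, GST_VIDEO_OVERLAY_COMPOSITION_FORMAT_YUV,
      w, h);
  buffer = gst_buffer_new_and_alloc (GST_VIDEO_INFO_SIZE (&info));
  gst_buffer_add_video_meta (buffer, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_OVERLAY_COMPOSITION_FORMAT_YUV, w, h);

  /* Clear to fully transparent, then render premultiplied AYUV pixels */
  if (gst_video_frame_map (&ovl_frame, &info, buffer, GST_MAP_WRITE)) {
    memset (GST_VIDEO_FRAME_PLANE_DATA (&ovl_frame, 0), 0,
        GST_VIDEO_FRAME_SIZE (&ovl_frame));
    /* ... here the PGS/VobSub renderer would draw into ovl_frame ... */
    gst_video_frame_unmap (&ovl_frame);
  }

  /* Wrap the buffer in an overlay rectangle at the window position and
   * blend the resulting composition onto the video frame */
  rect = gst_video_overlay_rectangle_new_raw (buffer, x, y, w, h,
      GST_VIDEO_OVERLAY_FORMAT_FLAG_PREMULTIPLIED_ALPHA);
  gst_buffer_unref (buffer);
  comp = gst_video_overlay_composition_new (rect);
  gst_video_overlay_rectangle_unref (rect);

  gst_video_overlay_composition_blend (comp, video_frame);
  gst_video_overlay_composition_unref (comp);
}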

@ -1,99 +0,0 @@
/* GStreamer DVD Sub-Picture Unit
* Copyright (C) 2007 Fluendo S.A. <info@fluendo.com>
* Copyright (C) 2009 Jan Schmidt <thaytan@noraisin.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <string.h>
#include <gst/gst.h>
#include "gstdvdspu.h"
GST_DEBUG_CATEGORY_EXTERN (dvdspu_debug);
#define GST_CAT_DEFAULT dvdspu_debug
void
gstspu_clear_comp_buffers (SpuState * state)
{
/* The area to clear is the line inside the disp_rect, each entry 4 bytes,
* of the sub-sampled UV planes. */
gint16 left = state->comp_left / 2;
gint16 right = state->comp_right / 2;
gint16 uv_width = sizeof (guint32) * (right - left + 1);
memset (state->comp_bufs[0] + left, 0, uv_width);
memset (state->comp_bufs[1] + left, 0, uv_width);
memset (state->comp_bufs[2] + left, 0, uv_width);
}
void
gstspu_blend_comp_buffers (SpuState * state, guint8 * planes[3])
{
gint16 uv_end;
gint16 left, x;
guint8 *out_U;
guint8 *out_V;
guint32 *in_U;
guint32 *in_V;
guint32 *in_A;
gint16 comp_last_x = state->comp_right;
if (comp_last_x < state->comp_left)
return; /* Didn't draw in the comp buffers, nothing to do... */
#if 0
GST_LOG ("Blending comp buffers from x=%d to x=%d",
state->comp_left, state->comp_right);
#endif
/* Set up the output pointers */
out_U = planes[1]; /* U plane */
out_V = planes[2]; /* V plane */
/* Input starts at the first pixel of the compositing buffer */
in_U = state->comp_bufs[0]; /* U comp buffer */
in_V = state->comp_bufs[1]; /* V comp buffer */
in_A = state->comp_bufs[2]; /* A comp buffer */
/* Calculate how many pixels to blend based on the maximum X value that was
* drawn in the render_line function, divided by 2 (rounding up) to account
* for UV sub-sampling */
uv_end = (comp_last_x + 1) / 2;
left = state->comp_left / 2;
out_U += left * GST_VIDEO_INFO_COMP_PSTRIDE (&state->info, 1);
out_V += left * GST_VIDEO_INFO_COMP_PSTRIDE (&state->info, 2);
for (x = left; x < uv_end; x++) {
guint32 tmp;
/* Each entry in the compositing buffer is 4 summed pixels, so the
* inverse alpha is (4 * 0xff) - in_A[x] */
guint16 inv_A = (4 * 0xff) - in_A[x];
tmp = in_U[x] + inv_A * *out_U;
*out_U = (guint8) (tmp / (4 * 0xff));
tmp = in_V[x] + inv_A * *out_V;
*out_V = (guint8) (tmp / (4 * 0xff));
out_U += GST_VIDEO_INFO_COMP_PSTRIDE (&state->info, 1);
out_V += GST_VIDEO_INFO_COMP_PSTRIDE (&state->info, 2);
}
}

@ -34,6 +34,9 @@
#include <glib/gi18n-lib.h>
#include <gst/video/video.h>
#include <gst/video/video-overlay-composition.h>
#include <gst/video/gstvideometa.h>
#include <gst/video/gstvideosink.h>
#include <string.h>
@ -53,19 +56,19 @@ enum
LAST_SIGNAL
};
#define VIDEO_FORMATS GST_VIDEO_OVERLAY_COMPOSITION_BLEND_FORMATS
static GstStaticPadTemplate video_sink_factory =
GST_STATIC_PAD_TEMPLATE ("video",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS ("video/x-raw, " "format = (string) { I420, NV12, YV12 }, "
"width = (int) [ 16, 4096 ], " "height = (int) [ 16, 4096 ]")
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE (VIDEO_FORMATS))
);
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS ("video/x-raw, " "format = (string) { I420, NV12, YV12 }, "
"width = (int) [ 16, 4096 ], " "height = (int) [ 16, 4096 ]")
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE (VIDEO_FORMATS))
);
static GstStaticPadTemplate subpic_sink_factory =
@ -211,14 +214,7 @@ static void
gst_dvd_spu_finalize (GObject * object)
{
GstDVDSpu *dvdspu = GST_DVD_SPU (object);
gint i;
for (i = 0; i < 3; i++) {
if (dvdspu->spu_state.comp_bufs[i] != NULL) {
g_free (dvdspu->spu_state.comp_bufs[i]);
dvdspu->spu_state.comp_bufs[i] = NULL;
}
}
g_queue_free (dvdspu->pending_spus);
g_mutex_clear (&dvdspu->spu_lock);
@ -323,21 +319,14 @@ gst_dvd_spu_video_set_caps (GstDVDSpu * dvdspu, GstPad * pad, GstCaps * caps)
{
gboolean res = FALSE;
GstVideoInfo info;
gint i;
SpuState *state;
if (!gst_video_info_from_caps (&info, caps))
goto done;
DVD_SPU_LOCK (dvdspu);
state = &dvdspu->spu_state;
state->info = info;
for (i = 0; i < 3; i++) {
state->comp_bufs[i] = g_realloc (state->comp_bufs[i],
sizeof (guint32) * info.width);
}
DVD_SPU_UNLOCK (dvdspu);
res = TRUE;
@ -662,14 +651,131 @@ no_ref_frame:
}
static void
gstspu_render (GstDVDSpu * dvdspu, GstBuffer * buf)
/*
* Transform the overlay composition rectangle to fit completely in the video.
* This is needed to work with ripped videos, which might be cropped and scaled
* compared to the original (for example to remove black borders). The same
* transformations were probably not applied to the SPU data, so we need to fit
* the rendered SPU to the video.
*/
static gboolean
gstspu_fit_overlay_rectangle (GstDVDSpu * dvdspu, GstVideoRectangle * rect,
gint spu_width, gint spu_height, gboolean keep_aspect)
{
GstVideoFrame frame;
gint video_width = GST_VIDEO_INFO_WIDTH (&dvdspu->spu_state.info);
gint video_height = GST_VIDEO_INFO_HEIGHT (&dvdspu->spu_state.info);
GstVideoRectangle r;
if (!gst_video_frame_map (&frame, &dvdspu->spu_state.info, buf,
GST_MAP_READWRITE))
return;
r = *rect;
/*
* Compute scale first, so that the SPU window size matches the video size.
* If @keep_aspect is %TRUE, the overlay rectangle aspect is kept and
* centered around the video.
*/
if (spu_width != video_width || spu_height != video_height) {
gdouble hscale, vscale;
hscale = (gdouble) video_width / (gdouble) spu_width;
vscale = (gdouble) video_height / (gdouble) spu_height;
if (keep_aspect) {
if (vscale < hscale)
vscale = hscale;
else if (hscale < vscale)
hscale = vscale;
}
r.x *= hscale;
r.y *= vscale;
r.w *= hscale;
r.h *= vscale;
if (keep_aspect) {
r.x += (video_width - (spu_width * hscale)) / 2;
r.y += (video_height - (spu_height * vscale)) / 2;
}
}
/*
* Next fit the overlay rectangle inside the video, to avoid cropping.
*/
if (r.x + r.w > video_width)
r.x = video_width - r.w;
if (r.x < 0) {
r.x = 0;
if (r.w > video_width)
r.w = video_width;
}
if (r.y + r.h > video_height)
r.y = video_height - r.h;
if (r.y < 0) {
r.y = 0;
if (r.h > video_height)
r.h = video_height;
}
if (r.x != rect->x || r.y != rect->y || r.w != rect->w || r.h != rect->h) {
*rect = r;
return TRUE;
}
return FALSE;
}
static GstVideoOverlayComposition *
gstspu_render_composition (GstDVDSpu * dvdspu)
{
GstBuffer *buffer;
GstVideoInfo overlay_info;
GstVideoFormat format;
GstVideoFrame frame;
GstVideoOverlayRectangle *rectangle;
GstVideoOverlayComposition *composition;
GstVideoRectangle win;
gint spu_w, spu_h;
gsize size;
format = GST_VIDEO_OVERLAY_COMPOSITION_FORMAT_YUV;
switch (dvdspu->spu_input_type) {
case SPU_INPUT_TYPE_PGS:
gstspu_pgs_get_render_geometry (dvdspu, &spu_w, &spu_h, &win);
break;
case SPU_INPUT_TYPE_VOBSUB:
gstspu_vobsub_get_render_geometry (dvdspu, &spu_w, &spu_h, &win);
break;
default:
return NULL;
}
if (win.w <= 0 || win.h <= 0 || spu_w <= 0 || spu_h <= 0) {
GST_DEBUG_OBJECT (dvdspu, "skip render of empty window");
return NULL;
}
gst_video_info_init (&overlay_info);
gst_video_info_set_format (&overlay_info, format, win.w, win.h);
size = GST_VIDEO_INFO_SIZE (&overlay_info);
buffer = gst_buffer_new_and_alloc (size);
if (!buffer) {
GST_WARNING_OBJECT (dvdspu, "failed to allocate overlay buffer");
return NULL;
}
gst_buffer_add_video_meta (buffer, GST_VIDEO_FRAME_FLAG_NONE,
format, win.w, win.h);
if (!gst_video_frame_map (&frame, &overlay_info, buffer, GST_MAP_READWRITE))
goto map_failed;
memset (GST_VIDEO_FRAME_PLANE_DATA (&frame, 0), 0,
GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0) *
GST_VIDEO_FRAME_HEIGHT (&frame));
switch (dvdspu->spu_input_type) {
case SPU_INPUT_TYPE_VOBSUB:
@ -681,7 +787,57 @@ gstspu_render (GstDVDSpu * dvdspu, GstBuffer * buf)
default:
break;
}
gst_video_frame_unmap (&frame);
GST_DEBUG_OBJECT (dvdspu, "Overlay rendered for video size %dx%d, "
"spu display size %dx%d, window geometry %dx%d+%d%+d",
GST_VIDEO_INFO_WIDTH (&dvdspu->spu_state.info),
GST_VIDEO_INFO_HEIGHT (&dvdspu->spu_state.info),
spu_w, spu_h, win.w, win.h, win.x, win.y);
if (gstspu_fit_overlay_rectangle (dvdspu, &win, spu_w, spu_h,
dvdspu->spu_input_type == SPU_INPUT_TYPE_PGS))
GST_DEBUG_OBJECT (dvdspu, "Adjusted window to fit video: %dx%d%+d%+d",
win.w, win.h, win.x, win.y);
rectangle = gst_video_overlay_rectangle_new_raw (buffer, win.x, win.y,
win.w, win.h, GST_VIDEO_OVERLAY_FORMAT_FLAG_PREMULTIPLIED_ALPHA);
gst_buffer_unref (buffer);
composition = gst_video_overlay_composition_new (rectangle);
gst_video_overlay_rectangle_unref (rectangle);
return composition;
map_failed:
GST_ERROR_OBJECT (dvdspu, "failed to map buffer");
gst_buffer_unref (buffer);
return NULL;
}
static void
gstspu_render (GstDVDSpu * dvdspu, GstBuffer * buf)
{
GstVideoOverlayComposition *composition;
GstVideoFrame frame;
composition = gstspu_render_composition (dvdspu);
if (!composition)
return;
if (!gst_video_frame_map (&frame, &dvdspu->spu_state.info, buf,
GST_MAP_READWRITE)) {
GST_WARNING_OBJECT (dvdspu, "failed to map video frame for blending");
goto done;
}
gst_video_overlay_composition_blend (composition, &frame);
gst_video_frame_unmap (&frame);
done:
gst_video_overlay_composition_unref (composition);
}
/* With SPU LOCK */

@ -71,10 +71,6 @@ struct SpuState {
GstVideoInfo info;
guint32 *comp_bufs[3]; /* Compositing buffers for U+V & A */
guint16 comp_left;
guint16 comp_right;
SpuVobsubState vobsub;
SpuPgsState pgs;
};

@ -39,19 +39,21 @@ struct SpuRect {
gint16 bottom;
};
/* Store a pre-multiplied colour value. The YUV fields hold the YUV values
* multiplied by the 8-bit alpha, to save computing it while rendering */
/* Store a pre-multiplied YUV colour value */
struct SpuColour {
guint16 Y;
guint16 U;
guint16 V;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
guint8 A;
guint8 Y;
guint8 U;
guint8 V;
#else
guint8 V;
guint8 U;
guint8 Y;
guint8 A;
#endif
};
void gstspu_clear_comp_buffers (SpuState * state);
void gstspu_blend_comp_buffers (SpuState * state, guint8 * planes[3]);
G_END_DECLS
#endif /* __GSTSPU_COMMON_H__ */

@ -171,15 +171,19 @@ dump_rle_data (GstDVDSpu * dvdspu, guint8 * data, guint32 len)
static void
pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state,
GstVideoFrame * frame)
GstVideoFrame * window)
{
SpuColour *colour;
guint8 *planes[3]; /* YUV frame pointers */
gint strides[3];
guint8 *pixels, *p;
gint stride;
gint win_w;
gint win_h;
guint8 *data, *end;
guint16 obj_w;
guint16 obj_h G_GNUC_UNUSED;
guint x, y, i, min_x, max_x;
guint16 obj_w, obj_h;
gint obj_x, obj_y;
gint min_x, max_x;
gint min_y, max_y;
gint x, y, i;
if (G_UNLIKELY (obj->rle_data == NULL || obj->rle_data_size == 0
|| obj->rle_data_used != obj->rle_data_size))
@ -191,37 +195,47 @@ pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state,
if (data + 4 > end)
return;
/* FIXME: Calculate and use the cropping window for the output, as the
* intersection of the crop rectangle for this object (if any) and the
* window specified by the object's window_id */
pixels = GST_VIDEO_FRAME_PLANE_DATA (window, 0);
stride = GST_VIDEO_FRAME_PLANE_STRIDE (window, 0);
win_w = GST_VIDEO_FRAME_WIDTH (window);
win_h = GST_VIDEO_FRAME_HEIGHT (window);
/* Store the start of each plane */
planes[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
planes[1] = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
planes[2] = GST_VIDEO_FRAME_COMP_DATA (frame, 2);
strides[0] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
strides[1] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
strides[2] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
y = MIN (obj->y, state->info.height);
planes[0] += strides[0] * y;
planes[1] += strides[1] * (y / 2);
planes[2] += strides[2] * (y / 2);
/* RLE data: */
obj_w = GST_READ_UINT16_BE (data);
obj_h = GST_READ_UINT16_BE (data + 2);
data += 4;
min_x = MIN (obj->x, strides[0]);
max_x = MIN (obj->x + obj_w, strides[0]);
/* Calculate object coordinates relative to the window */
min_x = obj_x = (gint) obj->x - (gint) state->pgs.win_x;
min_y = obj_y = (gint) obj->y - (gint) state->pgs.win_y;
state->comp_left = x = min_x;
state->comp_right = max_x;
if (obj->flags & PGS_COMPOSITION_OBJECT_FLAG_CROPPED) {
obj_x -= obj->crop_x;
obj_y -= obj->crop_y;
obj_w = MIN (obj_w, obj->crop_w);
obj_h = MIN (obj_h, obj->crop_h);
}
gstspu_clear_comp_buffers (state);
max_x = min_x + obj_w;
max_y = min_y + obj_h;
/* Early out if object is out of the window */
if (max_x <= 0 || max_y < 0 || min_x >= win_w || min_y >= win_h)
return;
/* Crop inside window */
if (min_x < 0)
min_x = 0;
if (max_x > win_w)
max_x = win_w;
if (min_y < 0)
min_y = 0;
if (max_y > win_h)
max_y = win_h;
/* Write RLE data to the plane */
x = obj_x;
y = obj_y;
p = pixels + y * stride;
while (data < end) {
guint8 pal_id;
@ -264,43 +278,56 @@ pgs_composition_object_render (PgsCompositionObject * obj, SpuState * state,
}
}
if (!run_len) {
x = obj_x;
y++;
if (y >= max_y)
break;
p = pixels + y * stride;
continue;
}
if (y < min_y)
continue;
if (x >= max_x)
continue;
if (x < min_x) {
if (x + run_len <= min_x) {
x += run_len;
continue;
} else {
run_len -= min_x - x;
x = min_x;
}
}
colour = &state->pgs.palette[pal_id];
if (colour->A) {
guint32 inv_A = 0xff - colour->A;
if (colour->A > 0) {
guint8 inv_A = 255 - colour->A;
if (G_UNLIKELY (x + run_len > max_x))
run_len = (max_x - x);
run_len = max_x - x;
for (i = 0; i < run_len; i++) {
planes[0][x] = (inv_A * planes[0][x] + colour->Y) / 0xff;
SpuColour *pix = &((SpuColour *) p)[x++];
state->comp_bufs[0][x / 2] += colour->U;
state->comp_bufs[1][x / 2] += colour->V;
state->comp_bufs[2][x / 2] += colour->A;
x++;
if (pix->A == 0) {
memcpy (pix, colour, sizeof (*pix));
} else {
pix->A = colour->A;
pix->Y = colour->Y + pix->Y * inv_A / 255;
pix->U = colour->U + pix->U * inv_A / 255;
pix->V = colour->V + pix->V * inv_A / 255;
}
}
} else {
x += run_len;
}
if (!run_len || x > max_x) {
x = min_x;
planes[0] += strides[0];
if (y % 2) {
gstspu_blend_comp_buffers (state, planes);
gstspu_clear_comp_buffers (state);
planes[1] += strides[1];
planes[2] += strides[2];
}
y++;
if (y >= state->info.height)
return; /* Hit the bottom */
}
}
if (y % 2)
gstspu_blend_comp_buffers (state, planes);
}
static void
@ -429,8 +456,10 @@ parse_presentation_segment (GstDVDSpu * dvdspu, guint8 type, guint8 * payload,
"x %u y %u\n", i, obj->id, obj->win_id, obj->flags, obj->x, obj->y);
if (obj->flags & PGS_COMPOSITION_OBJECT_FLAG_CROPPED) {
if (payload + 8 > end)
if (payload + 8 > end) {
obj->flags &= ~PGS_COMPOSITION_OBJECT_FLAG_CROPPED;
break;
}
obj->crop_x = GST_READ_UINT16_BE (payload);
obj->crop_y = GST_READ_UINT16_BE (payload + 2);
@ -497,10 +526,10 @@ parse_set_palette (GstDVDSpu * dvdspu, guint8 type, guint8 * payload,
#endif
/* Premultiply the palette entries by the alpha */
state->pgs.palette[n].Y = Y * A;
state->pgs.palette[n].U = U * A;
state->pgs.palette[n].V = V * A;
state->pgs.palette[n].A = A;
state->pgs.palette[n].Y = Y * A / 255;
state->pgs.palette[n].U = U * A / 255;
state->pgs.palette[n].V = V * A / 255;
payload += PGS_PALETTE_ENTRY_SIZE;
}
@ -767,7 +796,7 @@ gstspu_pgs_execute_event (GstDVDSpu * dvdspu)
}
void
gstspu_pgs_render (GstDVDSpu * dvdspu, GstVideoFrame * frame)
gstspu_pgs_render (GstDVDSpu * dvdspu, GstVideoFrame * window)
{
SpuState *state = &dvdspu->spu_state;
PgsPresentationSegment *ps = &state->pgs.pres_seg;
@ -779,7 +808,7 @@ gstspu_pgs_render (GstDVDSpu * dvdspu, GstVideoFrame * frame)
for (i = 0; i < ps->objects->len; i++) {
PgsCompositionObject *cur =
&g_array_index (ps->objects, PgsCompositionObject, i);
pgs_composition_object_render (cur, state, frame);
pgs_composition_object_render (cur, state, window);
}
}
@ -790,6 +819,27 @@ gstspu_pgs_handle_dvd_event (GstDVDSpu * dvdspu, GstEvent * event)
return FALSE;
}
void
gstspu_pgs_get_render_geometry (GstDVDSpu * dvdspu,
gint * display_width, gint * display_height,
GstVideoRectangle * window_rect)
{
SpuPgsState *pgs_state = &dvdspu->spu_state.pgs;
if (window_rect) {
window_rect->x = pgs_state->win_x;
window_rect->y = pgs_state->win_y;
window_rect->w = pgs_state->win_w;
window_rect->h = pgs_state->win_h;
}
if (display_width)
*display_width = pgs_state->pres_seg.vid_w;
if (display_height)
*display_height = pgs_state->pres_seg.vid_h;
}
void
gstspu_pgs_flush (GstDVDSpu * dvdspu)
{

@ -99,8 +99,11 @@ struct SpuPgsState {
void gstspu_pgs_handle_new_buf (GstDVDSpu * dvdspu, GstClockTime event_ts, GstBuffer *buf);
gboolean gstspu_pgs_execute_event (GstDVDSpu *dvdspu);
void gstspu_pgs_render (GstDVDSpu *dvdspu, GstVideoFrame *frame);
void gstspu_pgs_render (GstDVDSpu *dvdspu, GstVideoFrame *window);
gboolean gstspu_pgs_handle_dvd_event (GstDVDSpu *dvdspu, GstEvent *event);
void gstspu_pgs_get_render_geometry (GstDVDSpu *dvdspu,
gint *display_width, gint *display_height,
GstVideoRectangle *window_rect);
void gstspu_pgs_flush (GstDVDSpu *dvdspu);
#endif

@ -43,10 +43,10 @@ gstspu_vobsub_recalc_palette (GstDVDSpu * dvdspu,
/* Convert incoming 4-bit alpha to 8 bit for blending */
dest->A = (alpha[i] << 4) | alpha[i];
dest->Y = ((guint16) ((col >> 16) & 0xff)) * dest->A;
dest->Y = ((col >> 16) & 0xff) * dest->A / 255;
/* U/V are stored as V/U in the clut words, so switch them */
dest->V = ((guint16) ((col >> 8) & 0xff)) * dest->A;
dest->U = ((guint16) (col & 0xff)) * dest->A;
dest->V = ((col >> 8) & 0xff) * dest->A / 255;
dest->U = (col & 0xff) * dest->A / 255;
}
} else {
int y = 240;
@ -56,13 +56,13 @@ gstspu_vobsub_recalc_palette (GstDVDSpu * dvdspu,
for (i = 0; i < 4; i++, dest++) {
dest->A = (alpha[i] << 4) | alpha[i];
if (alpha[i] != 0) {
dest[0].Y = y * dest[0].A;
dest[0].Y = y * dest[0].A / 255;
y -= 112;
if (y < 0)
y = 0;
}
dest[0].U = 128 * dest[0].A;
dest[0].V = 128 * dest[0].A;
dest[0].U = 128 * dest[0].A / 255;
dest[0].V = 128 * dest[0].A / 255;
}
}
}
@ -169,28 +169,36 @@ gstspu_vobsub_get_rle_code (SpuState * state, guint16 * rle_offset)
}
static inline gboolean
gstspu_vobsub_draw_rle_run (SpuState * state, gint16 x, gint16 end,
SpuColour * colour)
gstspu_vobsub_draw_rle_run (SpuState * state, GstVideoFrame * frame,
gint16 x, gint16 end, SpuColour * colour)
{
#if 0
GST_LOG ("Y: %d x: %d end %d col %d %d %d %d",
GST_TRACE ("Y: %d x: %d end %d %d %d %d %d",
state->vobsub.cur_Y, x, end, colour->Y, colour->U, colour->V, colour->A);
#endif
if (colour->A != 0) {
guint32 inv_A = 0xff - colour->A;
if (colour->A > 0) {
gint i;
guint8 *data;
guint8 inv_A = 255 - colour->A;
/* FIXME: This could be more efficient */
while (x < end) {
state->vobsub.out_Y[x] =
(inv_A * state->vobsub.out_Y[x] + colour->Y) / 0xff;
state->vobsub.out_U[x / 2] += colour->U;
state->vobsub.out_V[x / 2] += colour->V;
state->vobsub.out_A[x / 2] += colour->A;
x++;
data = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
data += GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0) *
(state->vobsub.cur_Y - state->vobsub.disp_rect.top);
x -= state->vobsub.disp_rect.left;
end -= state->vobsub.disp_rect.left;
for (i = x; i < end; i++) {
SpuColour *pix = &((SpuColour *) data)[x++];
if (pix->A == 0) {
memcpy (pix, colour, sizeof (*pix));
} else {
pix->A = colour->A;
pix->Y = colour->Y + pix->Y * inv_A / 255;
pix->U = colour->U + pix->U * inv_A / 255;
pix->V = colour->V + pix->V * inv_A / 255;
}
}
/* Update the compositing buffer so we know how much to blend later */
*(state->vobsub.comp_last_x_ptr) = end - 1; /* end is the start of the *next* run */
return TRUE;
}
@ -208,11 +216,11 @@ rle_end_x (guint16 rle_code, gint16 x, gint16 end)
}
static gboolean gstspu_vobsub_render_line_with_chgcol (SpuState * state,
guint8 * planes[3], guint16 * rle_offset);
GstVideoFrame * frame, guint16 * rle_offset);
static gboolean gstspu_vobsub_update_chgcol (SpuState * state);
static gboolean
gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3],
gstspu_vobsub_render_line (SpuState * state, GstVideoFrame * frame,
guint16 * rle_offset)
{
gint16 x, next_x, end, rle_code, next_draw_x;
@ -226,19 +234,13 @@ gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3],
/* Check the top & bottom, because we might not be within the region yet */
if (state->vobsub.cur_Y >= state->vobsub.cur_chg_col->top &&
state->vobsub.cur_Y <= state->vobsub.cur_chg_col->bottom) {
return gstspu_vobsub_render_line_with_chgcol (state, planes,
rle_offset);
return gstspu_vobsub_render_line_with_chgcol (state, frame, rle_offset);
}
}
}
/* No special case. Render as normal */
/* Set up our output pointers */
state->vobsub.out_Y = planes[0];
state->vobsub.out_U = state->comp_bufs[0];
state->vobsub.out_V = state->comp_bufs[1];
state->vobsub.out_A = state->comp_bufs[2];
/* We always need to start our RLE decoding byte_aligned */
*rle_offset = GST_ROUND_UP_2 (*rle_offset);
@ -249,12 +251,11 @@ gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3],
colour = &state->vobsub.main_pal[rle_code & 3];
next_x = rle_end_x (rle_code, x, end);
next_draw_x = next_x;
if (next_draw_x > state->vobsub.clip_rect.right)
next_draw_x = state->vobsub.clip_rect.right; /* ensure no overflow */
if (next_draw_x > state->vobsub.disp_rect.right)
next_draw_x = state->vobsub.disp_rect.right; /* ensure no overflow */
/* Now draw the run between [x,next_x) */
if (state->vobsub.cur_Y >= state->vobsub.clip_rect.top &&
state->vobsub.cur_Y <= state->vobsub.clip_rect.bottom)
visible |= gstspu_vobsub_draw_rle_run (state, x, next_draw_x, colour);
visible |=
gstspu_vobsub_draw_rle_run (state, frame, x, next_draw_x, colour);
x = next_x;
}
@ -289,7 +290,7 @@ gstspu_vobsub_update_chgcol (SpuState * state)
}
static gboolean
gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3],
gstspu_vobsub_render_line_with_chgcol (SpuState * state, GstVideoFrame * frame,
guint16 * rle_offset)
{
SpuVobsubLineCtrlI *chg_col = state->vobsub.cur_chg_col;
@ -304,11 +305,6 @@ gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3],
gint16 cur_reg_end;
gint i;
state->vobsub.out_Y = planes[0];
state->vobsub.out_U = state->comp_bufs[0];
state->vobsub.out_V = state->comp_bufs[1];
state->vobsub.out_A = state->comp_bufs[2];
/* We always need to start our RLE decoding byte_aligned */
*rle_offset = GST_ROUND_UP_2 (*rle_offset);
@ -344,12 +340,13 @@ gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3],
run_end = MIN (next_x, cur_reg_end);
run_draw_end = run_end;
if (run_draw_end > state->vobsub.clip_rect.right)
run_draw_end = state->vobsub.clip_rect.right; /* ensure no overflow */
if (run_draw_end > state->vobsub.disp_rect.right)
run_draw_end = state->vobsub.disp_rect.right; /* ensure no overflow */
if (G_LIKELY (x < run_end)) {
colour = &cur_pix_ctrl->pal_cache[rle_code & 3];
visible |= gstspu_vobsub_draw_rle_run (state, x, run_draw_end, colour);
visible |= gstspu_vobsub_draw_rle_run (state, frame, x,
run_draw_end, colour);
x = run_end;
}
@ -369,52 +366,37 @@ gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3],
return visible;
}
static void
gstspu_vobsub_blend_comp_buffers (SpuState * state, guint8 * planes[3])
{
state->comp_left = state->vobsub.disp_rect.left;
state->comp_right =
MAX (state->vobsub.comp_last_x[0], state->vobsub.comp_last_x[1]);
state->comp_left = MAX (state->comp_left, state->vobsub.clip_rect.left);
state->comp_right = MIN (state->comp_right, state->vobsub.clip_rect.right);
gstspu_blend_comp_buffers (state, planes);
}
static void
gstspu_vobsub_clear_comp_buffers (SpuState * state)
{
state->comp_left = state->vobsub.clip_rect.left;
state->comp_right = state->vobsub.clip_rect.right;
gstspu_clear_comp_buffers (state);
state->vobsub.comp_last_x[0] = -1;
state->vobsub.comp_last_x[1] = -1;
}
static void
gstspu_vobsub_draw_highlight (SpuState * state,
GstVideoFrame * frame, SpuRect * rect)
{
guint8 *cur;
SpuColour *cur;
SpuRect r;
guint8 *data;
guint stride;
gint16 pos;
gint ystride;
ystride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
r.left = rect->left - state->vobsub.disp_rect.left;
r.right = rect->right - state->vobsub.disp_rect.left;
r.top = rect->top - state->vobsub.disp_rect.top;
r.bottom = rect->bottom - state->vobsub.disp_rect.top;
rect = &r;
cur = GST_VIDEO_FRAME_COMP_DATA (frame, 0) + ystride * rect->top;
for (pos = rect->left + 1; pos < rect->right; pos++)
cur[pos] = (cur[pos] / 2) + 0x8;
cur = GST_VIDEO_FRAME_COMP_DATA (frame, 0) + ystride * rect->bottom;
for (pos = rect->left + 1; pos < rect->right; pos++)
cur[pos] = (cur[pos] / 2) + 0x8;
cur = GST_VIDEO_FRAME_COMP_DATA (frame, 0) + ystride * rect->top;
for (pos = rect->top; pos <= rect->bottom; pos++) {
cur[rect->left] = (cur[rect->left] / 2) + 0x8;
cur[rect->right] = (cur[rect->right] / 2) + 0x8;
cur += ystride;
data = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
cur = (SpuColour *) (data + stride * rect->top);
for (pos = rect->left; pos < rect->right; pos++)
cur[pos].A = 0x80;
cur = (SpuColour *) (data + stride * (rect->bottom - 1));
for (pos = rect->left; pos < rect->right; pos++)
cur[pos].A = 0x80;
for (pos = rect->top; pos < rect->bottom; pos++) {
cur = (SpuColour *) (data + stride * pos);
cur[rect->left].A = 0x80;
cur[rect->right - 1].A = 0x80;
}
}
@ -422,11 +404,8 @@ void
gstspu_vobsub_render (GstDVDSpu * dvdspu, GstVideoFrame * frame)
{
SpuState *state = &dvdspu->spu_state;
guint8 *planes[3]; /* YUV frame pointers */
gint y, last_y;
gint width, height;
gint strides[3];
gint offset_index = 0;
guint16 cur_offsets[2];
/* Set up our initial state */
if (G_UNLIKELY (state->vobsub.pix_buf == NULL))
@ -436,18 +415,6 @@ gstspu_vobsub_render (GstDVDSpu * dvdspu, GstVideoFrame * frame)
GST_MAP_READ))
return;
/* Store the start of each plane */
planes[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
planes[1] = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
planes[2] = GST_VIDEO_FRAME_COMP_DATA (frame, 2);
strides[0] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
strides[1] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
strides[2] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
width = GST_VIDEO_FRAME_WIDTH (frame);
height = GST_VIDEO_FRAME_HEIGHT (frame);
GST_DEBUG_OBJECT (dvdspu,
"Rendering SPU. disp_rect %d,%d to %d,%d. hl_rect %d,%d to %d,%d",
state->vobsub.disp_rect.left, state->vobsub.disp_rect.top,
@ -455,13 +422,6 @@ gstspu_vobsub_render (GstDVDSpu * dvdspu, GstVideoFrame * frame)
state->vobsub.hl_rect.left, state->vobsub.hl_rect.top,
state->vobsub.hl_rect.right, state->vobsub.hl_rect.bottom);
GST_DEBUG_OBJECT (dvdspu, "video size %d,%d", width, height);
/* When reading RLE data, we track the offset in nibbles... */
state->vobsub.cur_offsets[0] = state->vobsub.pix_data[0] * 2;
state->vobsub.cur_offsets[1] = state->vobsub.pix_data[1] * 2;
state->vobsub.max_offset = state->vobsub.pix_buf_map.size * 2;
/* Update all the palette caches */
gstspu_vobsub_update_palettes (dvdspu, state);
@ -476,175 +436,25 @@ gstspu_vobsub_render (GstDVDSpu * dvdspu, GstVideoFrame * frame)
} else
state->vobsub.cur_chg_col = NULL;
state->vobsub.clip_rect.left = state->vobsub.disp_rect.left;
state->vobsub.clip_rect.right = state->vobsub.disp_rect.right;
/* center the image when display rectangle exceeds the video width */
if (width <= state->vobsub.disp_rect.right) {
gint left, disp_width;
disp_width = state->vobsub.disp_rect.right - state->vobsub.disp_rect.left
+ 1;
left = (width - disp_width) / 2;
state->vobsub.disp_rect.left = left;
state->vobsub.disp_rect.right = left + disp_width - 1;
/* if it clips to the right, shift it left, but only till zero */
if (state->vobsub.disp_rect.right >= width) {
gint shift = state->vobsub.disp_rect.right - width - 1;
if (shift > state->vobsub.disp_rect.left)
shift = state->vobsub.disp_rect.left;
state->vobsub.disp_rect.left -= shift;
state->vobsub.disp_rect.right -= shift;
}
/* init clip to disp */
state->vobsub.clip_rect.left = state->vobsub.disp_rect.left;
state->vobsub.clip_rect.right = state->vobsub.disp_rect.right;
/* clip right after the shift */
if (state->vobsub.clip_rect.right >= width)
state->vobsub.clip_rect.right = width - 1;
GST_DEBUG_OBJECT (dvdspu,
"clipping width to %d,%d", state->vobsub.clip_rect.left,
state->vobsub.clip_rect.right);
}
/* for the height, bring it up till it fits as well as it can. We
* assume the picture is in the lower part. We should better check where it
* is and do something more clever. */
state->vobsub.clip_rect.top = state->vobsub.disp_rect.top;
state->vobsub.clip_rect.bottom = state->vobsub.disp_rect.bottom;
if (height <= state->vobsub.disp_rect.bottom) {
/* shift it up, but only till zero */
gint shift = state->vobsub.disp_rect.bottom - height - 1;
if (shift > state->vobsub.disp_rect.top)
shift = state->vobsub.disp_rect.top;
state->vobsub.disp_rect.top -= shift;
state->vobsub.disp_rect.bottom -= shift;
/* start on even line */
if (state->vobsub.disp_rect.top & 1) {
state->vobsub.disp_rect.top--;
state->vobsub.disp_rect.bottom--;
}
/* init clip to disp */
state->vobsub.clip_rect.top = state->vobsub.disp_rect.top;
state->vobsub.clip_rect.bottom = state->vobsub.disp_rect.bottom;
/* clip bottom after the shift */
if (state->vobsub.clip_rect.bottom >= height)
state->vobsub.clip_rect.bottom = height - 1;
GST_DEBUG_OBJECT (dvdspu,
"clipping height to %d,%d", state->vobsub.clip_rect.top,
state->vobsub.clip_rect.bottom);
}
/* We start rendering from the first line of the display rect */
y = state->vobsub.disp_rect.top;
/* We render most lines in pairs starting from an even y,
* accumulating 2 lines of chroma then blending it. We might need to render a
* single line at the start and end if the display rect starts on an odd line
* or ends on an even one */
if (y > state->vobsub.disp_rect.bottom)
return; /* Empty clip rect, nothing to do */
last_y = state->vobsub.disp_rect.bottom;
/* Update our plane references to the first line of the disp_rect */
planes[0] += strides[0] * y;
planes[1] += strides[1] * (y / 2);
planes[2] += strides[2] * (y / 2);
/* If the render rect starts on an odd line, render that only to start */
state->vobsub.cur_Y = y;
if (state->vobsub.cur_Y & 0x1) {
gboolean clip, visible = FALSE;
clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top
|| state->vobsub.cur_Y > state->vobsub.clip_rect.bottom);
if (!clip) {
/* Render a first odd line. */
gstspu_vobsub_clear_comp_buffers (state);
state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x + 1;
visible |=
gstspu_vobsub_render_line (state, planes,
&state->vobsub.cur_offsets[offset_index]);
if (visible)
gstspu_vobsub_blend_comp_buffers (state, planes);
/* When reading RLE data, we track the offset in nibbles... */
state->vobsub.max_offset = state->vobsub.pix_buf_map.size * 2;
if (y & 1) {
cur_offsets[1] = state->vobsub.pix_data[0] * 2;
cur_offsets[0] = state->vobsub.pix_data[1] * 2;
} else {
cur_offsets[0] = state->vobsub.pix_data[0] * 2;
cur_offsets[1] = state->vobsub.pix_data[1] * 2;
}
/* Update all the output pointers */
state->vobsub.cur_Y++;
planes[0] += strides[0];
planes[1] += strides[1];
planes[2] += strides[2];
/* Switch the offset index 0 <=> 1 */
offset_index ^= 0x1;
}
last_y = (state->vobsub.disp_rect.bottom - 1) & ~(0x01);
for (; state->vobsub.cur_Y <= last_y; state->vobsub.cur_Y++) {
gboolean clip, visible = FALSE;
clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top
|| state->vobsub.cur_Y > state->vobsub.clip_rect.bottom);
/* Reset the compositing buffer */
gstspu_vobsub_clear_comp_buffers (state);
/* Render even line */
state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x;
gstspu_vobsub_render_line (state, planes,
&state->vobsub.cur_offsets[offset_index]);
/* Advance the luminance output pointer */
planes[0] += strides[0];
/* Switch the offset index 0 <=> 1 */
offset_index ^= 0x1;
state->vobsub.cur_Y++;
/* Render odd line */
state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x + 1;
visible |=
gstspu_vobsub_render_line (state, planes,
&state->vobsub.cur_offsets[offset_index]);
if (visible && !clip) {
/* Blend the accumulated UV compositing buffers onto the output */
gstspu_vobsub_blend_comp_buffers (state, planes);
}
/* Update all the output pointers */
planes[0] += strides[0];
planes[1] += strides[1];
planes[2] += strides[2];
/* Switch the offset index 0 <=> 1 */
offset_index ^= 0x1;
}
if (state->vobsub.cur_Y == state->vobsub.disp_rect.bottom) {
gboolean clip, visible = FALSE;
clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top
|| state->vobsub.cur_Y > state->vobsub.clip_rect.bottom);
g_return_if_fail ((state->vobsub.disp_rect.bottom & 0x01) == 0);
if (!clip) {
/* Render a remaining lone last even line. y already has the correct value
* after the above loop exited. */
gstspu_vobsub_clear_comp_buffers (state);
state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x;
visible |=
gstspu_vobsub_render_line (state, planes,
&state->vobsub.cur_offsets[offset_index]);
if (visible)
gstspu_vobsub_blend_comp_buffers (state, planes);
}
/* Render line by line */
for (state->vobsub.cur_Y = y; state->vobsub.cur_Y <= last_y;
state->vobsub.cur_Y++) {
gstspu_vobsub_render_line (state, frame,
&cur_offsets[state->vobsub.cur_Y & 1]);
}
/* for debugging purposes, draw a faint rectangle at the edges of the disp_rect */

@ -492,6 +492,29 @@ gstspu_vobsub_handle_dvd_event (GstDVDSpu * dvdspu, GstEvent * event)
return hl_change;
}
void
gstspu_vobsub_get_render_geometry (GstDVDSpu * dvdspu,
gint * display_width, gint * display_height,
GstVideoRectangle * window_rect)
{
SpuState *state = &dvdspu->spu_state;
if (window_rect) {
window_rect->x = state->vobsub.disp_rect.left;
window_rect->y = state->vobsub.disp_rect.top;
window_rect->w = state->vobsub.disp_rect.right -
state->vobsub.disp_rect.left + 1;
window_rect->h = state->vobsub.disp_rect.bottom -
state->vobsub.disp_rect.top + 1;
}
if (display_width)
*display_width = state->info.width;
if (display_height)
*display_height = state->info.height;
}
void
gstspu_vobsub_flush (GstDVDSpu * dvdspu)
{

@ -55,7 +55,6 @@ struct SpuVobsubState {
GstMapInfo pix_buf_map; /* Mapped buffer info */
SpuRect disp_rect;
SpuRect clip_rect;
SpuRect hl_rect;
guint32 current_clut[16]; /* Colour lookup table from incoming events */
@ -81,31 +80,25 @@ struct SpuVobsubState {
* need recalculating */
/* Rendering state vars below */
gint16 comp_last_x[2]; /* Maximum X values we rendered into the comp buffer (odd & even) */
gint16 *comp_last_x_ptr; /* Ptr to the current comp_last_x value to be updated by the render */
/* Current Y Position */
gint16 cur_Y;
/* Current offset in nibbles into the pix_data */
guint16 cur_offsets[2];
guint16 max_offset;
/* current ChgColCon Line Info */
SpuVobsubLineCtrlI *cur_chg_col;
SpuVobsubLineCtrlI *cur_chg_col_end;
/* Output position tracking */
guint8 *out_Y;
guint32 *out_U;
guint32 *out_V;
guint32 *out_A;
};
void gstspu_vobsub_handle_new_buf (GstDVDSpu * dvdspu, GstClockTime event_ts, GstBuffer *buf);
gboolean gstspu_vobsub_execute_event (GstDVDSpu *dvdspu);
void gstspu_vobsub_render (GstDVDSpu *dvdspu, GstVideoFrame *frame);
gboolean gstspu_vobsub_handle_dvd_event (GstDVDSpu *dvdspu, GstEvent *event);
void gstspu_vobsub_get_render_geometry (GstDVDSpu *dvdspu,
gint *display_width, gint *display_height,
GstVideoRectangle *window_rect);
void gstspu_vobsub_flush (GstDVDSpu *dvdspu);
#endif

@ -1,6 +1,5 @@
dvdspu_sources = [
'gstdvdspu.c',
'gstdvdspu-render.c',
'gstspu-vobsub.c',
'gstspu-vobsub-render.c',
'gstspu-pgs.c',