-base: port to GstVideoFrame API

This commit is contained in:
Wim Taymans 2011-06-17 15:31:59 +02:00
parent 42abb6672e
commit d93129d8da
14 changed files with 844 additions and 779 deletions

View file

@@ -308,8 +308,9 @@ static GstStateChangeReturn gst_base_text_overlay_change_state (GstElement *
element, GstStateChange transition);
static GstCaps *gst_base_text_overlay_getcaps (GstPad * pad, GstCaps * filter);
static gboolean gst_base_text_overlay_setcaps (GstPad * pad, GstCaps * caps);
static gboolean gst_base_text_overlay_setcaps_txt (GstPad * pad,
static gboolean gst_base_text_overlay_setcaps (GstBaseTextOverlay * overlay,
GstCaps * caps);
static gboolean gst_base_text_overlay_setcaps_txt (GstBaseTextOverlay * overlay,
GstCaps * caps);
static gboolean gst_base_text_overlay_src_event (GstPad * pad,
GstEvent * event);
@@ -687,9 +688,6 @@ gst_base_text_overlay_init (GstBaseTextOverlay * overlay,
overlay->use_vertical_render = DEFAULT_PROP_VERTICAL_RENDER;
gst_base_text_overlay_update_render_mode (overlay);
overlay->fps_n = 0;
overlay->fps_d = 1;
overlay->text_buffer = NULL;
overlay->text_linked = FALSE;
overlay->cond = g_cond_new ();
@@ -743,52 +741,34 @@ gst_base_text_overlay_update_render_mode (GstBaseTextOverlay * overlay)
}
static gboolean
gst_base_text_overlay_setcaps_txt (GstPad * pad, GstCaps * caps)
gst_base_text_overlay_setcaps_txt (GstBaseTextOverlay * overlay, GstCaps * caps)
{
GstBaseTextOverlay *overlay;
GstStructure *structure;
overlay = GST_BASE_TEXT_OVERLAY (gst_pad_get_parent (pad));
structure = gst_caps_get_structure (caps, 0);
overlay->have_pango_markup =
gst_structure_has_name (structure, "text/x-pango-markup");
gst_object_unref (overlay);
return TRUE;
}
/* FIXME: upstream nego (e.g. when the video window is resized) */
static gboolean
gst_base_text_overlay_setcaps (GstPad * pad, GstCaps * caps)
gst_base_text_overlay_setcaps (GstBaseTextOverlay * overlay, GstCaps * caps)
{
GstBaseTextOverlay *overlay;
GstStructure *structure;
GstVideoInfo info;
gboolean ret = FALSE;
const GValue *fps;
if (!GST_PAD_IS_SINK (pad))
return TRUE;
if (!gst_video_info_from_caps (&info, caps))
goto invalid_caps;
g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);
overlay->info = info;
overlay->format = info.format;
overlay->width = info.width;
overlay->height = info.height;
overlay = GST_BASE_TEXT_OVERLAY (gst_pad_get_parent (pad));
overlay->width = 0;
overlay->height = 0;
structure = gst_caps_get_structure (caps, 0);
fps = gst_structure_get_value (structure, "framerate");
if (fps
&& gst_video_format_parse_caps (caps, &overlay->format, &overlay->width,
&overlay->height)) {
ret = gst_pad_push_event (overlay->srcpad, gst_event_new_caps (caps));
}
overlay->fps_n = gst_value_get_fraction_numerator (fps);
overlay->fps_d = gst_value_get_fraction_denominator (fps);
ret = gst_pad_push_event (overlay->srcpad, gst_event_new_caps (caps));
if (ret) {
GST_OBJECT_LOCK (overlay);
@@ -798,9 +778,14 @@ gst_base_text_overlay_setcaps (GstPad * pad, GstCaps * caps)
GST_OBJECT_UNLOCK (overlay);
}
gst_object_unref (overlay);
return ret;
/* ERRORS */
invalid_caps:
{
GST_DEBUG_OBJECT (overlay, "could not parse caps");
return FALSE;
}
}
static void
@@ -1452,12 +1437,13 @@ gst_base_text_overlay_render_pangocairo (GstBaseTextOverlay * overlay,
static inline void
gst_base_text_overlay_shade_planar_Y (GstBaseTextOverlay * overlay,
guchar * dest, gint x0, gint x1, gint y0, gint y1)
GstVideoFrame * dest, gint x0, gint x1, gint y0, gint y1)
{
gint i, j, dest_stride;
guint8 *dest_ptr;
dest_stride = gst_video_format_get_row_stride (overlay->format, 0,
overlay->width);
dest_stride = dest->info.plane[0].stride;
dest_ptr = dest->data[0];
x0 = CLAMP (x0 - BOX_XPAD, 0, overlay->width);
x1 = CLAMP (x1 + BOX_XPAD, 0, overlay->width);
@@ -1467,26 +1453,28 @@ gst_base_text_overlay_shade_planar_Y (GstBaseTextOverlay * overlay,
for (i = y0; i < y1; ++i) {
for (j = x0; j < x1; ++j) {
gint y = dest[(i * dest_stride) + j] + overlay->shading_value;
gint y = dest_ptr[(i * dest_stride) + j] + overlay->shading_value;
dest[(i * dest_stride) + j] = CLAMP (y, 0, 255);
dest_ptr[(i * dest_stride) + j] = CLAMP (y, 0, 255);
}
}
}
static inline void
gst_base_text_overlay_shade_packed_Y (GstBaseTextOverlay * overlay,
guchar * dest, gint x0, gint x1, gint y0, gint y1)
GstVideoFrame * dest, gint x0, gint x1, gint y0, gint y1)
{
gint i, j;
guint dest_stride, pixel_stride, component_offset;
guint8 *dest_ptr;
dest_stride = gst_video_format_get_row_stride (overlay->format, 0,
overlay->width);
pixel_stride = gst_video_format_get_pixel_stride (overlay->format, 0);
dest_stride = dest->info.plane[0].stride;
dest_ptr = dest->data[0];
pixel_stride = gst_video_format_get_pixel_stride (dest->info.format, 0);
component_offset =
gst_video_format_get_component_offset (overlay->format, 0, overlay->width,
overlay->height);
gst_video_format_get_component_offset (dest->info.format, 0,
overlay->width, overlay->height);
x0 = CLAMP (x0 - BOX_XPAD, 0, overlay->width);
x1 = CLAMP (x1 + BOX_XPAD, 0, overlay->width);
@@ -1510,9 +1498,9 @@ gst_base_text_overlay_shade_packed_Y (GstBaseTextOverlay * overlay,
gint y_pos;
y_pos = (i * dest_stride) + j * pixel_stride + component_offset;
y = dest[y_pos] + overlay->shading_value;
y = dest_ptr[y_pos] + overlay->shading_value;
dest[y_pos] = CLAMP (y, 0, 255);
dest_ptr[y_pos] = CLAMP (y, 0, 255);
}
}
}
@@ -1521,10 +1509,13 @@ gst_base_text_overlay_shade_packed_Y (GstBaseTextOverlay * overlay,
#define gst_base_text_overlay_shade_RGBx gst_base_text_overlay_shade_xRGB
#define gst_base_text_overlay_shade_xBGR gst_base_text_overlay_shade_xRGB
static inline void
gst_base_text_overlay_shade_xRGB (GstBaseTextOverlay * overlay, guchar * dest,
gint x0, gint x1, gint y0, gint y1)
gst_base_text_overlay_shade_xRGB (GstBaseTextOverlay * overlay,
GstVideoFrame * dest, gint x0, gint x1, gint y0, gint y1)
{
gint i, j;
guint8 *dest_ptr;
dest_ptr = dest->data[0];
x0 = CLAMP (x0 - BOX_XPAD, 0, overlay->width);
x1 = CLAMP (x1 + BOX_XPAD, 0, overlay->width);
@@ -1538,8 +1529,8 @@ gst_base_text_overlay_shade_xRGB (GstBaseTextOverlay * overlay, guchar * dest,
y_pos = (i * 4 * overlay->width) + j * 4;
for (k = 0; k < 4; k++) {
y = dest[y_pos + k] + overlay->shading_value;
dest[y_pos + k] = CLAMP (y, 0, 255);
y = dest_ptr[y_pos + k] + overlay->shading_value;
dest_ptr[y_pos + k] = CLAMP (y, 0, 255);
}
}
}
@@ -1547,10 +1538,13 @@ gst_base_text_overlay_shade_xRGB (GstBaseTextOverlay * overlay, guchar * dest,
#define ARGB_SHADE_FUNCTION(name, OFFSET) \
static inline void \
gst_base_text_overlay_shade_##name (GstBaseTextOverlay * overlay, guchar * dest, \
gst_base_text_overlay_shade_##name (GstBaseTextOverlay * overlay, GstVideoFrame * dest, \
gint x0, gint x1, gint y0, gint y1) \
{ \
gint i, j;\
guint8 *dest_ptr;\
\
dest_ptr = dest->data[0];\
\
x0 = CLAMP (x0 - BOX_XPAD, 0, overlay->width);\
x1 = CLAMP (x1 + BOX_XPAD, 0, overlay->width);\
@@ -1563,8 +1557,8 @@ gint x0, gint x1, gint y0, gint y1) \
gint y, y_pos, k;\
y_pos = (i * 4 * overlay->width) + j * 4;\
for (k = OFFSET; k < 3+OFFSET; k++) {\
y = dest[y_pos + k] + overlay->shading_value;\
dest[y_pos + k] = CLAMP (y, 0, 255);\
y = dest_ptr[y_pos + k] + overlay->shading_value;\
dest_ptr[y_pos + k] = CLAMP (y, 0, 255);\
}\
}\
}\
@@ -1583,11 +1577,10 @@ ARGB_SHADE_FUNCTION (BGRA, 0);
static inline void
gst_base_text_overlay_blit_NV12_NV21 (GstBaseTextOverlay * overlay,
guint8 * yuv_pixels, gint xpos, gint ypos)
GstVideoFrame * dest, gint xpos, gint ypos)
{
int y_stride, uv_stride;
int u_offset, v_offset;
int h, w;
int y_stride, u_stride, v_stride;
guint8 *y_pixels, *u_pixels, *v_pixels;
/* because U/V is 2x2 subsampled, we need to round, either up or down,
* to a boundary of integer number of U/V pixels:
@@ -1595,28 +1588,25 @@ gst_base_text_overlay_blit_NV12_NV21 (GstBaseTextOverlay * overlay,
xpos = GST_ROUND_UP_2 (xpos);
ypos = GST_ROUND_UP_2 (ypos);
w = overlay->width;
h = overlay->height;
y_pixels = dest->data[0];
u_pixels = dest->data[1];
v_pixels = dest->data[2];
y_stride = dest->info.plane[0].stride;
u_stride = dest->info.plane[1].stride;
v_stride = dest->info.plane[2].stride;
y_stride = gst_video_format_get_row_stride (overlay->format, 0, w);
uv_stride = gst_video_format_get_row_stride (overlay->format, 1, w);
u_offset = gst_video_format_get_component_offset (overlay->format, 1, w, h);
v_offset = gst_video_format_get_component_offset (overlay->format, 2, w, h);
gst_base_text_overlay_blit_1 (overlay, yuv_pixels, xpos, ypos,
gst_base_text_overlay_blit_1 (overlay, y_pixels, xpos, ypos,
overlay->text_image, y_stride);
gst_base_text_overlay_blit_sub2x2cbcr (overlay, yuv_pixels + u_offset,
yuv_pixels + v_offset, xpos, ypos, overlay->text_image, uv_stride,
uv_stride, 2);
gst_base_text_overlay_blit_sub2x2cbcr (overlay, u_pixels,
v_pixels, xpos, ypos, overlay->text_image, u_stride, v_stride, 2);
}
static inline void
gst_base_text_overlay_blit_I420 (GstBaseTextOverlay * overlay,
guint8 * yuv_pixels, gint xpos, gint ypos)
GstVideoFrame * dest, gint xpos, gint ypos)
{
int y_stride, u_stride, v_stride;
int u_offset, v_offset;
int h, w;
guint8 *y_pixels, *u_pixels, *v_pixels;
/* because U/V is 2x2 subsampled, we need to round, either up or down,
* to a boundary of integer number of U/V pixels:
@@ -1624,34 +1614,32 @@ gst_base_text_overlay_blit_I420 (GstBaseTextOverlay * overlay,
xpos = GST_ROUND_UP_2 (xpos);
ypos = GST_ROUND_UP_2 (ypos);
w = overlay->width;
h = overlay->height;
y_pixels = dest->data[0];
u_pixels = dest->data[1];
v_pixels = dest->data[2];
y_stride = dest->info.plane[0].stride;
u_stride = dest->info.plane[1].stride;
v_stride = dest->info.plane[2].stride;
y_stride = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, w);
u_stride = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, w);
v_stride = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 2, w);
u_offset =
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 1, w, h);
v_offset =
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 2, w, h);
gst_base_text_overlay_blit_1 (overlay, yuv_pixels, xpos, ypos,
gst_base_text_overlay_blit_1 (overlay, y_pixels, xpos, ypos,
overlay->text_image, y_stride);
gst_base_text_overlay_blit_sub2x2cbcr (overlay, yuv_pixels + u_offset,
yuv_pixels + v_offset, xpos, ypos, overlay->text_image, u_stride,
v_stride, 1);
gst_base_text_overlay_blit_sub2x2cbcr (overlay, u_pixels,
v_pixels, xpos, ypos, overlay->text_image, u_stride, v_stride, 1);
}
static inline void
gst_base_text_overlay_blit_UYVY (GstBaseTextOverlay * overlay,
guint8 * yuv_pixels, gint xpos, gint ypos)
GstVideoFrame * dest, gint xpos, gint ypos)
{
int a0, r0, g0, b0;
int a1, r1, g1, b1;
int y0, y1, u, v;
int i, j;
int h, w;
guchar *pimage, *dest;
guchar *pimage, *dest_ptr;
guint8 *yuv_pixels;
yuv_pixels = dest->data[0];
/* because U/V is 2x horizontally subsampled, we need to round to a
* boundary of integer number of U/V pixels in x dimension:
@@ -1675,7 +1663,7 @@ gst_base_text_overlay_blit_UYVY (GstBaseTextOverlay * overlay,
for (i = 0; i < h; i++) {
pimage = overlay->text_image + i * overlay->image_width * 4;
dest = yuv_pixels + (i + ypos) * overlay->width * 2 + xpos * 2;
dest_ptr = yuv_pixels + (i + ypos) * overlay->width * 2 + xpos * 2;
for (j = 0; j < w; j += 2) {
b0 = pimage[CAIRO_ARGB_B];
g0 = pimage[CAIRO_ARGB_G];
@@ -1694,7 +1682,7 @@ gst_base_text_overlay_blit_UYVY (GstBaseTextOverlay * overlay,
a0 += a1 + 2;
a0 /= 2;
if (a0 == 0) {
dest += 4;
dest_ptr += 4;
continue;
}
@@ -1712,27 +1700,30 @@
COMP_U (u, r0, g0, b0);
COMP_V (v, r0, g0, b0);
BLEND (*dest, a0, u, *dest);
dest++;
BLEND (*dest, a0, y0, *dest);
dest++;
BLEND (*dest, a0, v, *dest);
dest++;
BLEND (*dest, a0, y1, *dest);
dest++;
BLEND (*dest_ptr, a0, u, *dest_ptr);
dest_ptr++;
BLEND (*dest_ptr, a0, y0, *dest_ptr);
dest_ptr++;
BLEND (*dest_ptr, a0, v, *dest_ptr);
dest_ptr++;
BLEND (*dest_ptr, a0, y1, *dest_ptr);
dest_ptr++;
}
}
}
static inline void
gst_base_text_overlay_blit_AYUV (GstBaseTextOverlay * overlay,
guint8 * rgb_pixels, gint xpos, gint ypos)
GstVideoFrame * dest, gint xpos, gint ypos)
{
int a, r, g, b, a1;
int y, u, v;
int i, j;
int h, w;
guchar *pimage, *dest;
guchar *pimage, *dest_ptr;
guint8 *rgb_pixels;
rgb_pixels = dest->data[0];
w = overlay->image_width;
h = overlay->image_height;
@@ -1751,7 +1742,7 @@ gst_base_text_overlay_blit_AYUV (GstBaseTextOverlay * overlay,
for (i = 0; i < h; i++) {
pimage = overlay->text_image + i * overlay->image_width * 4;
dest = rgb_pixels + (i + ypos) * 4 * overlay->width + xpos * 4;
dest_ptr = rgb_pixels + (i + ypos) * 4 * overlay->width + xpos * 4;
for (j = 0; j < w; j++) {
a = pimage[CAIRO_ARGB_A];
b = pimage[CAIRO_ARGB_B];
@@ -1766,14 +1757,14 @@ gst_base_text_overlay_blit_AYUV (GstBaseTextOverlay * overlay,
COMP_V (v, r, g, b);
// preform text "OVER" background alpha compositing
a1 = a + (dest[0] * (255 - a)) / 255 + 1; // add 1 to prevent divide by 0
OVER (dest[1], a, y, dest[0], dest[1], a1);
OVER (dest[2], a, u, dest[0], dest[2], a1);
OVER (dest[3], a, v, dest[0], dest[3], a1);
dest[0] = a1 - 1; // remove the temporary 1 we added
a1 = a + (dest_ptr[0] * (255 - a)) / 255 + 1; // add 1 to prevent divide by 0
OVER (dest_ptr[1], a, y, dest_ptr[0], dest_ptr[1], a1);
OVER (dest_ptr[2], a, u, dest_ptr[0], dest_ptr[2], a1);
OVER (dest_ptr[3], a, v, dest_ptr[0], dest_ptr[3], a1);
dest_ptr[0] = a1 - 1; // remove the temporary 1 we added
pimage += 4;
dest += 4;
dest_ptr += 4;
}
}
}
@@ -1781,12 +1772,15 @@ gst_base_text_overlay_blit_AYUV (GstBaseTextOverlay * overlay,
#define xRGB_BLIT_FUNCTION(name, R, G, B) \
static inline void \
gst_base_text_overlay_blit_##name (GstBaseTextOverlay * overlay, \
guint8 * rgb_pixels, gint xpos, gint ypos) \
GstVideoFrame * dest, gint xpos, gint ypos) \
{ \
int a, r, g, b; \
int i, j; \
int h, w; \
guchar *pimage, *dest; \
guchar *pimage, *dest_ptr; \
guint8 *rgb_pixels;\
\
rgb_pixels = dest->data[0];\
\
w = overlay->image_width; \
h = overlay->image_height; \
@@ -1805,22 +1799,22 @@ gst_base_text_overlay_blit_##name (GstBaseTextOverlay * overlay, \
\
for (i = 0; i < h; i++) { \
pimage = overlay->text_image + i * overlay->image_width * 4; \
dest = rgb_pixels + (i + ypos) * 4 * overlay->width + xpos * 4; \
dest_ptr = rgb_pixels + (i + ypos) * 4 * overlay->width + xpos * 4; \
for (j = 0; j < w; j++) { \
a = pimage[CAIRO_ARGB_A]; \
b = pimage[CAIRO_ARGB_B]; \
g = pimage[CAIRO_ARGB_G]; \
r = pimage[CAIRO_ARGB_R]; \
CAIRO_UNPREMULTIPLY (a, r, g, b); \
b = (b*a + dest[B] * (255-a)) / 255; \
g = (g*a + dest[G] * (255-a)) / 255; \
r = (r*a + dest[R] * (255-a)) / 255; \
b = (b*a + dest_ptr[B] * (255-a)) / 255; \
g = (g*a + dest_ptr[G] * (255-a)) / 255; \
r = (r*a + dest_ptr[R] * (255-a)) / 255; \
\
dest[B] = b; \
dest[G] = g; \
dest[R] = r; \
dest_ptr[B] = b; \
dest_ptr[G] = g; \
dest_ptr[R] = r; \
pimage += 4; \
dest += 4; \
dest_ptr += 4; \
} \
} \
}
@@ -1832,12 +1826,15 @@ xRGB_BLIT_FUNCTION (RGBx, 0, 1, 2);
#define ARGB_BLIT_FUNCTION(name, A, R, G, B) \
static inline void \
gst_base_text_overlay_blit_##name (GstBaseTextOverlay * overlay, \
guint8 * rgb_pixels, gint xpos, gint ypos) \
GstVideoFrame * dest, gint xpos, gint ypos) \
{ \
int a, r, g, b, a1; \
int i, j; \
int h, w; \
guchar *pimage, *dest; \
guchar *pimage, *dest_ptr; \
guint8 *rgb_pixels;\
\
rgb_pixels = dest->data[0];\
\
w = overlay->image_width; \
h = overlay->image_height; \
@@ -1856,20 +1853,20 @@ gst_base_text_overlay_blit_##name (GstBaseTextOverlay * overlay, \
\
for (i = 0; i < h; i++) { \
pimage = overlay->text_image + i * overlay->image_width * 4; \
dest = rgb_pixels + (i + ypos) * 4 * overlay->width + xpos * 4; \
dest_ptr = rgb_pixels + (i + ypos) * 4 * overlay->width + xpos * 4; \
for (j = 0; j < w; j++) { \
a = pimage[CAIRO_ARGB_A]; \
b = pimage[CAIRO_ARGB_B]; \
g = pimage[CAIRO_ARGB_G]; \
r = pimage[CAIRO_ARGB_R]; \
CAIRO_UNPREMULTIPLY (a, r, g, b); \
a1 = a + (dest[A] * (255 - a)) / 255 + 1; \
OVER (dest[R], a, r, dest[0], dest[R], a1); \
OVER (dest[G], a, g, dest[0], dest[G], a1); \
OVER (dest[B], a, b, dest[0], dest[B], a1); \
dest[A] = a1 - 1; \
a1 = a + (dest_ptr[A] * (255 - a)) / 255 + 1; \
OVER (dest_ptr[R], a, r, dest_ptr[0], dest_ptr[R], a1); \
OVER (dest_ptr[G], a, g, dest_ptr[0], dest_ptr[G], a1); \
OVER (dest_ptr[B], a, b, dest_ptr[0], dest_ptr[B], a1); \
dest_ptr[A] = a1 - 1; \
pimage += 4; \
dest += 4; \
dest_ptr += 4; \
} \
} \
}
@@ -1920,15 +1917,15 @@ gst_base_text_overlay_push_frame (GstBaseTextOverlay * overlay,
gint width, height;
GstBaseTextOverlayVAlign valign;
GstBaseTextOverlayHAlign halign;
guint8 *data;
gsize size;
GstVideoFrame frame;
width = overlay->image_width;
height = overlay->image_height;
video_frame = gst_buffer_make_writable (video_frame);
data = gst_buffer_map (video_frame, &size, NULL, GST_MAP_WRITE);
if (!gst_video_frame_map (&frame, &overlay->info, video_frame, GST_MAP_WRITE))
goto invalid_frame;
if (overlay->use_vertical_render)
halign = GST_BASE_TEXT_OVERLAY_HALIGN_RIGHT;
@@ -1990,53 +1987,53 @@ gst_base_text_overlay_push_frame (GstBaseTextOverlay * overlay,
case GST_VIDEO_FORMAT_I420:
case GST_VIDEO_FORMAT_NV12:
case GST_VIDEO_FORMAT_NV21:
gst_base_text_overlay_shade_planar_Y (overlay, data,
gst_base_text_overlay_shade_planar_Y (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_AYUV:
case GST_VIDEO_FORMAT_UYVY:
gst_base_text_overlay_shade_packed_Y (overlay, data,
gst_base_text_overlay_shade_packed_Y (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_xRGB:
gst_base_text_overlay_shade_xRGB (overlay, data,
gst_base_text_overlay_shade_xRGB (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_xBGR:
gst_base_text_overlay_shade_xBGR (overlay, data,
gst_base_text_overlay_shade_xBGR (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_BGRx:
gst_base_text_overlay_shade_BGRx (overlay, data,
gst_base_text_overlay_shade_BGRx (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_RGBx:
gst_base_text_overlay_shade_RGBx (overlay, data,
gst_base_text_overlay_shade_RGBx (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_ARGB:
gst_base_text_overlay_shade_ARGB (overlay, data,
gst_base_text_overlay_shade_ARGB (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_ABGR:
gst_base_text_overlay_shade_ABGR (overlay, data,
gst_base_text_overlay_shade_ABGR (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_RGBA:
gst_base_text_overlay_shade_RGBA (overlay, data,
gst_base_text_overlay_shade_RGBA (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
case GST_VIDEO_FORMAT_BGRA:
gst_base_text_overlay_shade_BGRA (overlay, data,
gst_base_text_overlay_shade_BGRA (overlay, &frame,
xpos, xpos + overlay->image_width,
ypos, ypos + overlay->image_height);
break;
@@ -2051,49 +2048,56 @@ gst_base_text_overlay_push_frame (GstBaseTextOverlay * overlay,
if (overlay->text_image) {
switch (overlay->format) {
case GST_VIDEO_FORMAT_I420:
gst_base_text_overlay_blit_I420 (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_I420 (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_NV12:
case GST_VIDEO_FORMAT_NV21:
gst_base_text_overlay_blit_NV12_NV21 (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_NV12_NV21 (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_UYVY:
gst_base_text_overlay_blit_UYVY (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_UYVY (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_AYUV:
gst_base_text_overlay_blit_AYUV (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_AYUV (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_BGRx:
gst_base_text_overlay_blit_BGRx (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_BGRx (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_xRGB:
gst_base_text_overlay_blit_xRGB (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_xRGB (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_RGBx:
gst_base_text_overlay_blit_RGBx (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_RGBx (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_xBGR:
gst_base_text_overlay_blit_xBGR (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_xBGR (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_ARGB:
gst_base_text_overlay_blit_ARGB (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_ARGB (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_ABGR:
gst_base_text_overlay_blit_ABGR (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_ABGR (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_RGBA:
gst_base_text_overlay_blit_RGBA (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_RGBA (overlay, &frame, xpos, ypos);
break;
case GST_VIDEO_FORMAT_BGRA:
gst_base_text_overlay_blit_BGRA (overlay, data, xpos, ypos);
gst_base_text_overlay_blit_BGRA (overlay, &frame, xpos, ypos);
break;
default:
g_assert_not_reached ();
}
}
gst_buffer_unmap (video_frame, data, size);
gst_video_frame_unmap (&frame);
return gst_pad_push (overlay->srcpad, video_frame);
/* ERRORS */
invalid_frame:
{
GST_DEBUG_OBJECT (overlay, "received invalid buffer");
return GST_FLOW_OK;
}
}
static GstPadLinkReturn
@@ -2143,7 +2147,7 @@ gst_base_text_overlay_text_event (GstPad * pad, GstEvent * event)
GstCaps *caps;
gst_event_parse_caps (event, &caps);
ret = gst_base_text_overlay_setcaps_txt (pad, caps);
ret = gst_base_text_overlay_setcaps_txt (overlay, caps);
gst_event_unref (event);
break;
}
@@ -2233,7 +2237,7 @@ gst_base_text_overlay_video_event (GstPad * pad, GstEvent * event)
GstCaps *caps;
gst_event_parse_caps (event, &caps);
ret = gst_base_text_overlay_setcaps (pad, caps);
ret = gst_base_text_overlay_setcaps (overlay, caps);
gst_event_unref (event);
break;
}

View file

@@ -115,11 +115,10 @@ struct _GstBaseTextOverlay {
* a text segment update, or a change
* in status (e.g. shutdown, flushing) */
GstVideoInfo info;
GstVideoFormat format;
gint width;
gint height;
gint fps_n;
gint fps_d;
GstVideoFormat format;
GstBaseTextOverlayVAlign valign;
GstBaseTextOverlayHAlign halign;

View file

@@ -790,7 +790,7 @@ theora_handle_comment_packet (GstTheoraDec * dec, ogg_packet * packet)
}
static GstFlowReturn
theora_negotiate_pool (GstTheoraDec * dec, GstCaps * caps)
theora_negotiate_pool (GstTheoraDec * dec, GstCaps * caps, GstVideoInfo * info)
{
GstQuery *query;
GstBufferPool *pool = NULL;
@@ -806,7 +806,7 @@ theora_negotiate_pool (GstTheoraDec * dec, GstCaps * caps)
&alignment, &pool);
} else {
GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints");
size = gst_video_format_get_size (dec->format, dec->width, dec->height);
size = info->size;
min = max = 0;
prefix = 0;
alignment = 0;
@@ -841,6 +843,8 @@ static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
{
GstCaps *caps;
GstVideoFormat format;
gint width, height;
gint par_num, par_den;
GstFlowReturn ret = GST_FLOW_OK;
GList *walk;
@@ -881,23 +883,23 @@ theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
switch (dec->info.pixel_fmt) {
case TH_PF_444:
dec->output_bpp = 24;
dec->format = GST_VIDEO_FORMAT_Y444;
format = GST_VIDEO_FORMAT_Y444;
break;
case TH_PF_420:
dec->output_bpp = 12; /* Average bits per pixel. */
dec->format = GST_VIDEO_FORMAT_I420;
format = GST_VIDEO_FORMAT_I420;
break;
case TH_PF_422:
dec->output_bpp = 16;
dec->format = GST_VIDEO_FORMAT_Y42B;
format = GST_VIDEO_FORMAT_Y42B;
break;
default:
goto invalid_format;
}
if (dec->crop) {
dec->width = dec->info.pic_width;
dec->height = dec->info.pic_height;
width = dec->info.pic_width;
height = dec->info.pic_height;
dec->offset_x = dec->info.pic_x;
dec->offset_y = dec->info.pic_y;
/* Ensure correct offsets in chroma for formats that need it
@@ -905,22 +907,22 @@ theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
* so no need to handle them ourselves. */
if (dec->offset_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
dec->offset_x--;
dec->width++;
width++;
}
if (dec->offset_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
dec->offset_y--;
dec->height++;
height++;
}
} else {
/* no cropping, use the encoded dimensions */
dec->width = dec->info.frame_width;
dec->height = dec->info.frame_height;
width = dec->info.frame_width;
height = dec->info.frame_height;
dec->offset_x = 0;
dec->offset_y = 0;
}
GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
dec->width, dec->height, dec->offset_x, dec->offset_y);
width, height, dec->offset_x, dec->offset_y);
/* done */
dec->decoder = th_decode_alloc (&dec->info, dec->setup);
@@ -942,19 +944,20 @@ theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
}
caps = gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, gst_video_format_to_string (dec->format),
"framerate", GST_TYPE_FRACTION,
dec->info.fps_numerator, dec->info.fps_denominator,
"pixel-aspect-ratio", GST_TYPE_FRACTION, par_num, par_den,
"width", G_TYPE_INT, dec->width, "height", G_TYPE_INT, dec->height,
"color-matrix", G_TYPE_STRING, "sdtv",
"chroma-site", G_TYPE_STRING, "jpeg", NULL);
gst_video_info_set_format (&dec->vinfo, format, width, height);
dec->vinfo.fps_n = dec->info.fps_numerator;
dec->vinfo.fps_d = dec->info.fps_denominator;
dec->vinfo.par_n = par_num;
dec->vinfo.par_d = par_den;
dec->vinfo.chroma_site = "jpeg";
dec->vinfo.color_matrix = "sdtv";
caps = gst_video_info_to_caps (&dec->vinfo);
gst_pad_set_caps (dec->srcpad, caps);
gst_caps_unref (caps);
/* negotiate a bufferpool */
if ((ret = theora_negotiate_pool (dec, caps)) != GST_FLOW_OK)
if ((ret = theora_negotiate_pool (dec, caps, &dec->vinfo)) != GST_FLOW_OK)
goto no_bufferpool;
dec->have_header = TRUE;
@@ -1105,14 +1108,13 @@ theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
GstFlowReturn result;
int i, plane;
guint8 *dest, *src;
gsize size;
guint8 *data;
GstVideoFrame frame;
if (gst_pad_check_reconfigure (dec->srcpad)) {
GstCaps *caps;
caps = gst_pad_get_current_caps (dec->srcpad);
theora_negotiate_pool (dec, caps);
theora_negotiate_pool (dec, caps, &dec->vinfo);
gst_caps_unref (caps);
}
@@ -1120,24 +1122,27 @@ theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
if (G_UNLIKELY (result != GST_FLOW_OK))
goto no_buffer;
data = gst_buffer_map (*out, &size, NULL, GST_MAP_WRITE);
if (!gst_video_frame_map (&frame, &dec->vinfo, *out, GST_MAP_WRITE))
goto invalid_frame;
/* FIXME, we can do things slightly more efficient when we know that
* downstream understands clipping and video metadata */
for (plane = 0; plane < 3; plane++) {
width =
gst_video_format_get_component_width (dec->format, plane, dec->width);
gst_video_format_get_component_width (frame.info.format, plane,
dec->vinfo.width);
height =
gst_video_format_get_component_height (dec->format, plane, dec->height);
stride = gst_video_format_get_row_stride (dec->format, plane, dec->width);
gst_video_format_get_component_height (frame.info.format, plane,
dec->vinfo.height);
stride = frame.info.plane[plane].stride;
dest = frame.data[plane];
dest = data + gst_video_format_get_component_offset (dec->format,
plane, dec->width, dec->height);
src = buf[plane].data;
src += ((height == dec->height) ? dec->offset_y : dec->offset_y / 2)
src += ((height == dec->vinfo.height) ? dec->offset_y : dec->offset_y / 2)
* buf[plane].stride;
src += (width == dec->width) ? dec->offset_x : dec->offset_x / 2;
src += (width == dec->vinfo.width) ? dec->offset_x : dec->offset_x / 2;
for (i = 0; i < height; i++) {
memcpy (dest, src, width);
@@ -1146,7 +1151,7 @@ theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
src += buf[plane].stride;
}
}
gst_buffer_unmap (*out, data, size);
gst_video_frame_unmap (&frame);
return GST_FLOW_OK;
@@ -1157,6 +1162,11 @@ no_buffer:
gst_flow_get_name (result));
return result;
}
invalid_frame:
{
GST_DEBUG_OBJECT (dec, "could not map video frame");
return GST_FLOW_ERROR;
}
}
static GstFlowReturn

View file

@@ -69,8 +69,8 @@ struct _GstTheoraDec
GstClockTime last_timestamp;
guint64 frame_nr;
gboolean need_keyframe;
GstVideoFormat format;
gint width, height;
GstVideoInfo vinfo;
gint offset_x, offset_y;
gint output_bpp;
GstBufferPool *pool;

View file

@@ -61,7 +61,6 @@
#include <stdlib.h> /* free */
#include <gst/tag/tag.h>
#include <gst/video/video.h>
#define GST_CAT_DEFAULT theoraenc_debug
GST_DEBUG_CATEGORY_STATIC (GST_CAT_DEFAULT);
@@ -652,54 +651,50 @@ theora_enc_sink_getcaps (GstPad * pad, GstCaps * filter)
static gboolean
theora_enc_sink_setcaps (GstTheoraEnc * enc, GstCaps * caps)
{
GstStructure *structure = gst_caps_get_structure (caps, 0);
guint32 fourcc;
const GValue *par;
gint fps_n, fps_d;
gst_structure_get_fourcc (structure, "format", &fourcc);
gst_structure_get_int (structure, "width", &enc->width);
gst_structure_get_int (structure, "height", &enc->height);
gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);
par = gst_structure_get_value (structure, "pixel-aspect-ratio");
GstVideoInfo info;
th_info_clear (&enc->info);
th_info_init (&enc->info);
if (!gst_video_info_from_caps (&info, caps))
goto invalid_caps;
enc->vinfo = info;
/* Theora has a divisible-by-sixteen restriction for the encoded video size but
* we can define a picture area using pic_width/pic_height */
enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
enc->info.pic_width = enc->width;
enc->info.pic_height = enc->height;
switch (fourcc) {
case GST_MAKE_FOURCC ('I', '4', '2', '0'):
enc->info.frame_width = GST_ROUND_UP_16 (info.width);
enc->info.frame_height = GST_ROUND_UP_16 (info.height);
enc->info.pic_width = info.width;
enc->info.pic_height = info.height;
switch (info.format) {
case GST_VIDEO_FORMAT_I420:
enc->info.pixel_fmt = TH_PF_420;
break;
case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
case GST_VIDEO_FORMAT_Y42B:
enc->info.pixel_fmt = TH_PF_422;
break;
case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
case GST_VIDEO_FORMAT_Y444:
enc->info.pixel_fmt = TH_PF_444;
break;
default:
g_assert_not_reached ();
}
enc->info.fps_numerator = enc->fps_n = fps_n;
enc->info.fps_denominator = enc->fps_d = fps_d;
if (par) {
enc->info.aspect_numerator = gst_value_get_fraction_numerator (par);
enc->par_n = gst_value_get_fraction_numerator (par);
enc->info.aspect_denominator = gst_value_get_fraction_denominator (par);
enc->par_d = gst_value_get_fraction_denominator (par);
} else {
/* setting them to 0 indicates that the decoder can chose a good aspect
* ratio, defaulting to 1/1 */
enc->info.aspect_numerator = 0;
enc->par_n = 1;
enc->info.aspect_denominator = 0;
enc->par_d = 1;
}
enc->info.fps_numerator = info.fps_n;
enc->info.fps_denominator = info.fps_d;
enc->info.aspect_numerator = info.par_n;
enc->info.aspect_denominator = info.par_d;
#if 0
/* setting them to 0 indicates that the decoder can chose a good aspect
* ratio, defaulting to 1/1 */
enc->info.aspect_numerator = 0;
enc->par_n = 1;
enc->info.aspect_denominator = 0;
enc->par_d = 1;
#endif
enc->info.colorspace = TH_CS_UNSPECIFIED;
@@ -713,6 +708,13 @@ theora_enc_sink_setcaps (GstTheoraEnc * enc, GstCaps * caps)
enc->initialised = TRUE;
return TRUE;
/* ERRORS */
invalid_caps:
{
GST_DEBUG_OBJECT (enc, "could not parse caps");
return FALSE;
}
}
static guint64
@@ -853,7 +855,8 @@ theora_enc_force_keyframe (GstTheoraEnc * enc)
theora_enc_reset (enc);
enc->granulepos_offset =
gst_util_uint64_scale (next_ts, enc->fps_n, GST_SECOND * enc->fps_d);
gst_util_uint64_scale (next_ts, enc->vinfo.fps_n,
GST_SECOND * enc->vinfo.fps_d);
enc->timestamp_offset = next_ts;
enc->next_ts = 0;
}
@ -992,7 +995,8 @@ theora_enc_is_discontinuous (GstTheoraEnc * enc, GstClockTime timestamp,
}
static void
theora_enc_init_buffer (th_ycbcr_buffer buf, th_info * info, guint8 * data)
theora_enc_init_buffer (th_ycbcr_buffer buf, th_info * info,
GstVideoFrame * frame)
{
GstVideoFormat format;
guint i;
@ -1024,11 +1028,8 @@ theora_enc_init_buffer (th_ycbcr_buffer buf, th_info * info, guint8 * data)
buf[i].height =
gst_video_format_get_component_height (format, i, info->frame_height);
buf[i].data =
data + gst_video_format_get_component_offset (format, i,
info->pic_width, info->pic_height);
buf[i].stride =
gst_video_format_get_row_stride (format, i, info->pic_width);
buf[i].data = frame->data[i];
buf[i].stride = frame->info.plane[i].stride;
}
}
@ -1143,17 +1144,16 @@ theora_enc_encode_and_push (GstTheoraEnc * enc, ogg_packet op,
GstFlowReturn ret;
th_ycbcr_buffer ycbcr;
gint res;
guint8 *data;
gsize size;
GstVideoFrame frame;
data = gst_buffer_map (buffer, &size, NULL, GST_MAP_READ);
theora_enc_init_buffer (ycbcr, &enc->info, data);
gst_video_frame_map (&frame, &enc->vinfo, buffer, GST_MAP_READ);
theora_enc_init_buffer (ycbcr, &enc->info, &frame);
if (theora_enc_is_discontinuous (enc, running_time, duration)) {
theora_enc_reset (enc);
enc->granulepos_offset =
gst_util_uint64_scale (running_time, enc->fps_n,
GST_SECOND * enc->fps_d);
gst_util_uint64_scale (running_time, enc->vinfo.fps_n,
GST_SECOND * enc->vinfo.fps_d);
enc->timestamp_offset = running_time;
enc->next_ts = 0;
enc->next_discont = TRUE;
@ -1195,7 +1195,7 @@ theora_enc_encode_and_push (GstTheoraEnc * enc, ogg_packet op,
}
done:
gst_buffer_unmap (buffer, data, size);
gst_video_frame_unmap (&frame);
gst_buffer_unref (buffer);
return ret;
@ -1336,10 +1336,11 @@ theora_enc_chain (GstPad * pad, GstBuffer * buffer)
/* mark buffers and put on caps */
caps = gst_caps_new_simple ("video/x-theora",
"width", G_TYPE_INT, enc->width,
"height", G_TYPE_INT, enc->height,
"framerate", GST_TYPE_FRACTION, enc->fps_n, enc->fps_d,
"pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_n, enc->par_d, NULL);
"width", G_TYPE_INT, enc->vinfo.width,
"height", G_TYPE_INT, enc->vinfo.height,
"framerate", GST_TYPE_FRACTION, enc->vinfo.fps_n, enc->vinfo.fps_d,
"pixel-aspect-ratio", GST_TYPE_FRACTION, enc->vinfo.par_n,
enc->vinfo.par_d, NULL);
caps = theora_set_header_on_caps (caps, buffers);
GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
gst_pad_set_caps (enc->srcpad, caps);
@ -1357,8 +1358,8 @@ theora_enc_chain (GstPad * pad, GstBuffer * buffer)
}
enc->granulepos_offset =
gst_util_uint64_scale (running_time, enc->fps_n,
GST_SECOND * enc->fps_d);
gst_util_uint64_scale (running_time, enc->vinfo.fps_n,
GST_SECOND * enc->vinfo.fps_d);
enc->timestamp_offset = running_time;
enc->next_ts = 0;
}

View file

@ -24,6 +24,8 @@
#include <gst/base/gstadapter.h>
#include <theora/theoraenc.h>
#include <gst/video/video.h>
G_BEGIN_DECLS
#define GST_TYPE_THEORA_ENC \
@ -99,10 +101,8 @@ struct _GstTheoraEnc
gint keyframe_freq;
gint keyframe_force;
GstVideoInfo vinfo;
gint info_width, info_height;
gint width, height;
gint fps_n, fps_d;
gint par_n, par_d;
GstClockTime next_ts;
GstClockTime expected_ts;

View file

@ -169,6 +169,7 @@ gst_video_convert_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
GstVideoInfo out_info;
gboolean ret;
ColorSpaceColorSpec in_spec, out_spec;
gboolean interlaced;
space = GST_VIDEO_CONVERT_CAST (btrans);
@ -228,16 +229,17 @@ gst_video_convert_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
space->from_spec = in_spec;
space->to_info = out_info;
space->to_spec = out_spec;
space->width = in_info.width;
space->height = in_info.height;
space->interlaced = (in_info.flags & GST_VIDEO_FLAG_INTERLACED) != 0;
interlaced = (in_info.flags & GST_VIDEO_FLAG_INTERLACED) != 0;
space->convert =
videoconvert_convert_new (out_info.format, out_spec, in_info.format,
in_spec, in_info.width, in_info.height);
if (space->convert) {
videoconvert_convert_set_interlaced (space->convert, space->interlaced);
}
if (space->convert == NULL)
goto no_convert;
videoconvert_convert_set_interlaced (space->convert, interlaced);
/* palette, only for from data */
if (space->from_info.format == GST_VIDEO_FORMAT_RGB8_PALETTED &&
space->to_info.format == GST_VIDEO_FORMAT_RGB8_PALETTED) {
@ -291,6 +293,13 @@ format_mismatch:
space->to_info.format = GST_VIDEO_FORMAT_UNKNOWN;
return FALSE;
}
no_convert:
{
GST_ERROR_OBJECT (space, "could not create converter");
space->from_info.format = GST_VIDEO_FORMAT_UNKNOWN;
space->to_info.format = GST_VIDEO_FORMAT_UNKNOWN;
return FALSE;
}
invalid_palette:
{
GST_ERROR_OBJECT (space, "invalid palette");
@ -419,9 +428,7 @@ gst_video_convert_transform (GstBaseTransform * btrans, GstBuffer * inbuf,
GstBuffer * outbuf)
{
GstVideoConvert *space;
guint8 *indata, *outdata;
gsize insize, outsize;
gint i;
GstVideoFrame in_frame, out_frame;
space = GST_VIDEO_CONVERT_CAST (btrans);
@ -434,21 +441,16 @@ gst_video_convert_transform (GstBaseTransform * btrans, GstBuffer * inbuf,
videoconvert_convert_set_dither (space->convert, space->dither);
indata = gst_buffer_map (inbuf, &insize, NULL, GST_MAP_READ);
outdata = gst_buffer_map (outbuf, &outsize, NULL, GST_MAP_WRITE);
if (!gst_video_frame_map (&in_frame, &space->from_info, inbuf, GST_MAP_READ))
goto invalid_buffer;
for (i = 0; i < space->to_info.n_planes; i++) {
space->convert->dest_stride[i] = space->to_info.plane[i].stride;
space->convert->dest_offset[i] = space->to_info.plane[i].offset;
if (!gst_video_frame_map (&out_frame, &space->to_info, outbuf, GST_MAP_WRITE))
goto invalid_buffer;
space->convert->src_stride[i] = space->from_info.plane[i].stride;
space->convert->src_offset[i] = space->from_info.plane[i].offset;
}
videoconvert_convert_convert (space->convert, &out_frame, &in_frame);
videoconvert_convert_convert (space->convert, outdata, indata);
gst_buffer_unmap (outbuf, outdata, outsize);
gst_buffer_unmap (inbuf, indata, insize);
gst_video_frame_unmap (&out_frame);
gst_video_frame_unmap (&in_frame);
/* baseclass copies timestamps */
GST_DEBUG ("from %d -> to %d done", space->from_info.format,
@ -463,6 +465,12 @@ unknown_format:
("attempting to convert colorspaces between unknown formats"));
return GST_FLOW_NOT_NEGOTIATED;
}
invalid_buffer:
{
GST_ELEMENT_WARNING (space, CORE, NOT_IMPLEMENTED, (NULL),
("invalid video buffer received"));
return GST_FLOW_OK;
}
#if 0
not_supported:
{

View file

@ -47,9 +47,6 @@ typedef struct _GstVideoConvertClass GstVideoConvertClass;
struct _GstVideoConvert {
GstVideoFilter element;
gint width, height;
gboolean interlaced;
GstVideoInfo from_info;
GstVideoInfo to_info;

File diff suppressed because it is too large Load diff

View file

@ -25,7 +25,6 @@
G_BEGIN_DECLS
typedef struct _VideoConvert VideoConvert;
typedef struct _VideoFrame VideoComponent;
typedef enum {
COLOR_SPEC_NONE = 0,
@ -41,11 +40,6 @@ typedef enum {
DITHER_HALFTONE
} ColorSpaceDitherMethod;
struct _VideoComponent {
int offset;
int stride;
};
struct _VideoConvert {
gint width, height;
gboolean interlaced;
@ -62,34 +56,34 @@ struct _VideoConvert {
guint16 *tmpline16;
guint16 *errline;
int dest_offset[4];
int dest_stride[4];
int src_offset[4];
int src_stride[4];
void (*convert) (VideoConvert *convert, guint8 *dest, const guint8 *src);
void (*getline) (VideoConvert *convert, guint8 *dest, const guint8 *src, int j);
void (*putline) (VideoConvert *convert, guint8 *dest, const guint8 *src, int j);
void (*convert) (VideoConvert *convert, GstVideoFrame *dest, const GstVideoFrame *src);
void (*getline) (VideoConvert *convert, guint8 *dest, const GstVideoFrame *src, int j);
void (*putline) (VideoConvert *convert, GstVideoFrame *dest, const guint8 *src, int j);
void (*matrix) (VideoConvert *convert);
void (*getline16) (VideoConvert *convert, guint16 *dest, const guint8 *src, int j);
void (*putline16) (VideoConvert *convert, guint8 *dest, const guint16 *src, int j);
void (*getline16) (VideoConvert *convert, guint16 *dest, const GstVideoFrame *src, int j);
void (*putline16) (VideoConvert *convert, GstVideoFrame *dest, const guint16 *src, int j);
void (*matrix16) (VideoConvert *convert);
void (*dither16) (VideoConvert *convert, int j);
};
VideoConvert * videoconvert_convert_new (GstVideoFormat to_format,
ColorSpaceColorSpec from_spec, GstVideoFormat from_format,
ColorSpaceColorSpec to_spec, int width, int height);
void videoconvert_convert_set_dither (VideoConvert * convert, int type);
void videoconvert_convert_set_interlaced (VideoConvert *convert,
gboolean interlaced);
void videoconvert_convert_set_palette (VideoConvert *convert,
const guint32 *palette);
const guint32 * videoconvert_convert_get_palette (VideoConvert *convert);
void videoconvert_convert_free (VideoConvert * convert);
void videoconvert_convert_convert (VideoConvert * convert,
guint8 *dest, const guint8 *src);
VideoConvert * videoconvert_convert_new (GstVideoFormat to_format,
ColorSpaceColorSpec from_spec,
GstVideoFormat from_format,
ColorSpaceColorSpec to_spec,
int width, int height);
void videoconvert_convert_free (VideoConvert * convert);
void videoconvert_convert_set_dither (VideoConvert * convert, int type);
void videoconvert_convert_set_interlaced (VideoConvert *convert,
gboolean interlaced);
void videoconvert_convert_set_palette (VideoConvert *convert,
const guint32 *palette);
const guint32 * videoconvert_convert_get_palette (VideoConvert *convert);
void videoconvert_convert_convert (VideoConvert * convert,
GstVideoFrame *dest, const GstVideoFrame *src);
G_END_DECLS

View file

@ -351,36 +351,23 @@ gst_video_scale_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
{
GstVideoScale *videoscale = GST_VIDEO_SCALE (trans);
gboolean ret;
GstVideoInfo in_info, out_info;
gint from_dar_n, from_dar_d, to_dar_n, to_dar_d;
gint from_par_n, from_par_d, to_par_n, to_par_d;
ret =
gst_video_format_parse_caps (in, &videoscale->format,
&videoscale->from_width, &videoscale->from_height);
ret &=
gst_video_format_parse_caps (out, NULL, &videoscale->to_width,
&videoscale->to_height);
ret = gst_video_info_from_caps (&in_info, in);
ret &= gst_video_info_from_caps (&out_info, out);
if (!ret)
goto done;
goto invalid_formats;
videoscale->src_size = gst_video_format_get_size (videoscale->format,
videoscale->from_width, videoscale->from_height);
videoscale->dest_size = gst_video_format_get_size (videoscale->format,
videoscale->to_width, videoscale->to_height);
if (!gst_video_parse_caps_pixel_aspect_ratio (in, &from_par_n, &from_par_d))
from_par_n = from_par_d = 1;
if (!gst_video_parse_caps_pixel_aspect_ratio (out, &to_par_n, &to_par_d))
to_par_n = to_par_d = 1;
if (!gst_util_fraction_multiply (videoscale->from_width,
videoscale->from_height, from_par_n, from_par_d, &from_dar_n,
if (!gst_util_fraction_multiply (in_info.width,
in_info.height, out_info.par_n, out_info.par_d, &from_dar_n,
&from_dar_d)) {
from_dar_n = from_dar_d = -1;
}
if (!gst_util_fraction_multiply (videoscale->to_width,
videoscale->to_height, to_par_n, to_par_d, &to_dar_n, &to_dar_d)) {
if (!gst_util_fraction_multiply (out_info.width,
out_info.height, out_info.par_n, out_info.par_d, &to_dar_n,
&to_dar_d)) {
to_dar_n = to_dar_d = -1;
}
@ -390,17 +377,17 @@ gst_video_scale_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
gint n, d, to_h, to_w;
if (from_dar_n != -1 && from_dar_d != -1
&& gst_util_fraction_multiply (from_dar_n, from_dar_d, to_par_n,
to_par_d, &n, &d)) {
to_h = gst_util_uint64_scale_int (videoscale->to_width, d, n);
if (to_h <= videoscale->to_height) {
videoscale->borders_h = videoscale->to_height - to_h;
&& gst_util_fraction_multiply (from_dar_n, from_dar_d, out_info.par_n,
out_info.par_d, &n, &d)) {
to_h = gst_util_uint64_scale_int (out_info.width, d, n);
if (to_h <= out_info.height) {
videoscale->borders_h = out_info.height - to_h;
videoscale->borders_w = 0;
} else {
to_w = gst_util_uint64_scale_int (videoscale->to_height, n, d);
g_assert (to_w <= videoscale->to_width);
to_w = gst_util_uint64_scale_int (out_info.height, n, d);
g_assert (to_w <= out_info.width);
videoscale->borders_h = 0;
videoscale->borders_w = videoscale->to_width - to_w;
videoscale->borders_w = out_info.width - to_w;
}
} else {
GST_WARNING_OBJECT (videoscale, "Can't calculate borders");
@ -412,34 +399,41 @@ gst_video_scale_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
if (videoscale->tmp_buf)
g_free (videoscale->tmp_buf);
videoscale->tmp_buf = g_malloc (videoscale->to_width * 8 * 4);
videoscale->tmp_buf = g_malloc (out_info.width * 8 * 4);
gst_base_transform_set_passthrough (trans,
(videoscale->from_width == videoscale->to_width
&& videoscale->from_height == videoscale->to_height));
(in_info.width == out_info.width && in_info.height == out_info.height));
GST_DEBUG_OBJECT (videoscale, "from=%dx%d (par=%d/%d dar=%d/%d), size %d "
"-> to=%dx%d (par=%d/%d dar=%d/%d borders=%d:%d), size %d",
videoscale->from_width, videoscale->from_height, from_par_n, from_par_d,
from_dar_n, from_dar_d, videoscale->src_size, videoscale->to_width,
videoscale->to_height, to_par_n, to_par_d, to_dar_n, to_dar_d,
videoscale->borders_w, videoscale->borders_h, videoscale->dest_size);
in_info.width, in_info.height, out_info.par_n, out_info.par_d,
from_dar_n, from_dar_d, in_info.size, out_info.width,
out_info.height, out_info.par_n, out_info.par_d, to_dar_n, to_dar_d,
videoscale->borders_w, videoscale->borders_h, out_info.size);
done:
return ret;
videoscale->from_info = in_info;
videoscale->to_info = out_info;
return TRUE;
/* ERRORS */
invalid_formats:
{
GST_DEBUG_OBJECT (videoscale, "could not parse formats");
return FALSE;
}
}
static gboolean
gst_video_scale_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
gsize * size)
{
GstVideoFormat format;
gint width, height;
GstVideoInfo info;
if (!gst_video_format_parse_caps (caps, &format, &width, &height))
if (!gst_video_info_from_caps (&info, caps))
return FALSE;
*size = gst_video_format_get_size (format, width, height);
*size = info.size;
return TRUE;
}
@ -879,9 +873,16 @@ done:
}
static void
gst_video_scale_setup_vs_image (VSImage * image, GstVideoFormat format,
gint component, gint width, gint height, gint b_w, gint b_h, uint8_t * data)
gst_video_scale_setup_vs_image (VSImage * image, GstVideoFrame * frame,
gint component, gint b_w, gint b_h)
{
GstVideoFormat format;
gint width, height;
format = frame->info.format;
width = frame->info.width;
height = frame->info.height;
image->real_width =
gst_video_format_get_component_width (format, component, width);
image->real_height =
@ -892,7 +893,6 @@ gst_video_scale_setup_vs_image (VSImage * image, GstVideoFormat format,
image->height =
gst_video_format_get_component_height (format, component, MAX (1,
height - b_h));
image->stride = gst_video_format_get_row_stride (format, component, width);
image->border_top = (image->real_height - image->height) / 2;
image->border_bottom = image->real_height - image->height - image->border_top;
@ -911,16 +911,8 @@ gst_video_scale_setup_vs_image (VSImage * image, GstVideoFormat format,
image->border_right = image->real_width - image->width - image->border_left;
}
if (format == GST_VIDEO_FORMAT_I420
|| format == GST_VIDEO_FORMAT_YV12
|| format == GST_VIDEO_FORMAT_Y444
|| format == GST_VIDEO_FORMAT_Y42B || format == GST_VIDEO_FORMAT_Y41B) {
image->real_pixels = data + gst_video_format_get_component_offset (format,
component, width, height);
} else {
g_assert (component == 0);
image->real_pixels = data;
}
image->real_pixels = frame->data[component];
image->stride = frame->info.plane[component].stride;
image->pixels =
image->real_pixels + image->border_top * image->stride +
@ -995,58 +987,41 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
{
GstVideoScale *videoscale = GST_VIDEO_SCALE (trans);
GstFlowReturn ret = GST_FLOW_OK;
VSImage dest = { NULL, };
VSImage src = { NULL, };
VSImage dest_u = { NULL, };
VSImage dest_v = { NULL, };
VSImage src_u = { NULL, };
VSImage src_v = { NULL, };
GstVideoFrame in_frame, out_frame;
VSImage dest[4] = { {NULL,}, };
VSImage src[4] = { {NULL,}, };
gint method;
const guint8 *black = _get_black_for_format (videoscale->format);
const guint8 *black;
gboolean add_borders;
guint8 *in_data, *out_data;
gsize in_size, out_size;
GstVideoFormat format;
gint i;
GST_OBJECT_LOCK (videoscale);
method = videoscale->method;
add_borders = videoscale->add_borders;
GST_OBJECT_UNLOCK (videoscale);
if (videoscale->from_width == 1) {
format = videoscale->from_info.format;
black = _get_black_for_format (format);
if (videoscale->from_info.width == 1) {
method = GST_VIDEO_SCALE_NEAREST;
}
if (method == GST_VIDEO_SCALE_4TAP &&
(videoscale->from_width < 4 || videoscale->from_height < 4)) {
(videoscale->from_info.width < 4 || videoscale->from_info.height < 4)) {
method = GST_VIDEO_SCALE_BILINEAR;
}
in_data = gst_buffer_map (in, &in_size, NULL, GST_MAP_READ);
out_data = gst_buffer_map (out, &out_size, NULL, GST_MAP_WRITE);
gst_video_frame_map (&in_frame, &videoscale->from_info, in, GST_MAP_READ);
gst_video_frame_map (&out_frame, &videoscale->to_info, out, GST_MAP_WRITE);
gst_video_scale_setup_vs_image (&src, videoscale->format, 0,
videoscale->from_width, videoscale->from_height, 0, 0, in_data);
gst_video_scale_setup_vs_image (&dest, videoscale->format, 0,
videoscale->to_width, videoscale->to_height, videoscale->borders_w,
videoscale->borders_h, out_data);
if (videoscale->format == GST_VIDEO_FORMAT_I420
|| videoscale->format == GST_VIDEO_FORMAT_YV12
|| videoscale->format == GST_VIDEO_FORMAT_Y444
|| videoscale->format == GST_VIDEO_FORMAT_Y42B
|| videoscale->format == GST_VIDEO_FORMAT_Y41B) {
gst_video_scale_setup_vs_image (&src_u, videoscale->format, 1,
videoscale->from_width, videoscale->from_height, 0, 0, in_data);
gst_video_scale_setup_vs_image (&src_v, videoscale->format, 2,
videoscale->from_width, videoscale->from_height, 0, 0, in_data);
gst_video_scale_setup_vs_image (&dest_u, videoscale->format, 1,
videoscale->to_width, videoscale->to_height, videoscale->borders_w,
videoscale->borders_h, out_data);
gst_video_scale_setup_vs_image (&dest_v, videoscale->format, 2,
videoscale->to_width, videoscale->to_height, videoscale->borders_w,
videoscale->borders_h, out_data);
for (i = 0; i < in_frame.info.n_planes; i++) {
gst_video_scale_setup_vs_image (&src[i], &in_frame, i, 0, 0);
gst_video_scale_setup_vs_image (&dest[i], &out_frame, i,
videoscale->borders_w, videoscale->borders_h);
}
switch (videoscale->format) {
switch (format) {
case GST_VIDEO_FORMAT_RGBx:
case GST_VIDEO_FORMAT_xRGB:
case GST_VIDEO_FORMAT_BGRx:
@ -1057,16 +1032,16 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_ABGR:
case GST_VIDEO_FORMAT_AYUV:
if (add_borders)
vs_fill_borders_RGBA (&dest, black);
vs_fill_borders_RGBA (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_RGBA (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_RGBA (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_RGBA (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_RGBA (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_RGBA (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_RGBA (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1075,16 +1050,17 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_ARGB64:
case GST_VIDEO_FORMAT_AYUV64:
if (add_borders)
vs_fill_borders_AYUV64 (&dest, black);
vs_fill_borders_AYUV64 (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_AYUV64 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_AYUV64 (&dest[0], &src[0],
videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_AYUV64 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_AYUV64 (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_AYUV64 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_AYUV64 (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1094,16 +1070,16 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_BGR:
case GST_VIDEO_FORMAT_v308:
if (add_borders)
vs_fill_borders_RGB (&dest, black);
vs_fill_borders_RGB (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_RGB (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_RGB (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_RGB (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_RGB (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_RGB (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_RGB (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1112,16 +1088,16 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_YUY2:
case GST_VIDEO_FORMAT_YVYU:
if (add_borders)
vs_fill_borders_YUYV (&dest, black);
vs_fill_borders_YUYV (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_YUYV (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_YUYV (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_YUYV (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_YUYV (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_YUYV (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_YUYV (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1129,16 +1105,16 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
break;
case GST_VIDEO_FORMAT_UYVY:
if (add_borders)
vs_fill_borders_UYVY (&dest, black);
vs_fill_borders_UYVY (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_UYVY (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_UYVY (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_UYVY (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_UYVY (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_UYVY (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_UYVY (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1147,16 +1123,16 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_Y800:
case GST_VIDEO_FORMAT_GRAY8:
if (add_borders)
vs_fill_borders_Y (&dest, black);
vs_fill_borders_Y (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_Y (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_Y (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_Y (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_Y (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_Y (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_Y (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1166,16 +1142,16 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_GRAY16_BE:
case GST_VIDEO_FORMAT_Y16:
if (add_borders)
vs_fill_borders_Y16 (&dest, 0);
vs_fill_borders_Y16 (&dest[0], 0);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_Y16 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_Y16 (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_Y16 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_Y16 (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_Y16 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_Y16 (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1187,25 +1163,25 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
case GST_VIDEO_FORMAT_Y42B:
case GST_VIDEO_FORMAT_Y41B:
if (add_borders) {
vs_fill_borders_Y (&dest, black);
vs_fill_borders_Y (&dest_u, black + 1);
vs_fill_borders_Y (&dest_v, black + 2);
vs_fill_borders_Y (&dest[0], black);
vs_fill_borders_Y (&dest[1], black + 1);
vs_fill_borders_Y (&dest[2], black + 2);
}
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_Y (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_Y (&dest_u, &src_u, videoscale->tmp_buf);
vs_image_scale_nearest_Y (&dest_v, &src_v, videoscale->tmp_buf);
vs_image_scale_nearest_Y (&dest[0], &src[0], videoscale->tmp_buf);
vs_image_scale_nearest_Y (&dest[1], &src[1], videoscale->tmp_buf);
vs_image_scale_nearest_Y (&dest[2], &src[2], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_Y (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_Y (&dest_u, &src_u, videoscale->tmp_buf);
vs_image_scale_linear_Y (&dest_v, &src_v, videoscale->tmp_buf);
vs_image_scale_linear_Y (&dest[0], &src[0], videoscale->tmp_buf);
vs_image_scale_linear_Y (&dest[1], &src[1], videoscale->tmp_buf);
vs_image_scale_linear_Y (&dest[2], &src[2], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_Y (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_Y (&dest_u, &src_u, videoscale->tmp_buf);
vs_image_scale_4tap_Y (&dest_v, &src_v, videoscale->tmp_buf);
vs_image_scale_4tap_Y (&dest[0], &src[0], videoscale->tmp_buf);
vs_image_scale_4tap_Y (&dest[1], &src[1], videoscale->tmp_buf);
vs_image_scale_4tap_Y (&dest[2], &src[2], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1213,16 +1189,17 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
break;
case GST_VIDEO_FORMAT_RGB16:
if (add_borders)
vs_fill_borders_RGB565 (&dest, black);
vs_fill_borders_RGB565 (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_RGB565 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_RGB565 (&dest[0], &src[0],
videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_RGB565 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_RGB565 (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_RGB565 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_RGB565 (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1230,16 +1207,17 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
break;
case GST_VIDEO_FORMAT_RGB15:
if (add_borders)
vs_fill_borders_RGB555 (&dest, black);
vs_fill_borders_RGB555 (&dest[0], black);
switch (method) {
case GST_VIDEO_SCALE_NEAREST:
vs_image_scale_nearest_RGB555 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_nearest_RGB555 (&dest[0], &src[0],
videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_BILINEAR:
vs_image_scale_linear_RGB555 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_linear_RGB555 (&dest[0], &src[0], videoscale->tmp_buf);
break;
case GST_VIDEO_SCALE_4TAP:
vs_image_scale_4tap_RGB555 (&dest, &src, videoscale->tmp_buf);
vs_image_scale_4tap_RGB555 (&dest[0], &src[0], videoscale->tmp_buf);
break;
default:
goto unknown_mode;
@ -1253,8 +1231,8 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
gst_buffer_get_size (out));
done:
gst_buffer_unmap (in, in_data, in_size);
gst_buffer_unmap (out, out_data, out_size);
gst_video_frame_unmap (&out_frame);
gst_video_frame_unmap (&in_frame);
return ret;
@ -1262,8 +1240,7 @@ done:
unsupported:
{
GST_ELEMENT_ERROR (videoscale, STREAM, NOT_IMPLEMENTED, (NULL),
("Unsupported format %d for scaling method %d",
videoscale->format, method));
("Unsupported format %d for scaling method %d", format, method));
ret = GST_FLOW_ERROR;
goto done;
}
@ -1295,11 +1272,12 @@ gst_video_scale_src_event (GstBaseTransform * trans, GstEvent * event)
structure = (GstStructure *) gst_event_get_structure (event);
if (gst_structure_get_double (structure, "pointer_x", &a)) {
gst_structure_set (structure, "pointer_x", G_TYPE_DOUBLE,
a * videoscale->from_width / videoscale->to_width, NULL);
a * videoscale->from_info.width / videoscale->to_info.width, NULL);
}
if (gst_structure_get_double (structure, "pointer_y", &a)) {
gst_structure_set (structure, "pointer_y", G_TYPE_DOUBLE,
a * videoscale->from_height / videoscale->to_height, NULL);
a * videoscale->from_info.height / videoscale->to_info.height,
NULL);
}
break;
default:

View file

@ -71,13 +71,8 @@ struct _GstVideoScale {
gboolean add_borders;
/* negotiated stuff */
GstVideoFormat format;
gint to_width;
gint to_height;
gint from_width;
gint from_height;
guint src_size;
guint dest_size;
GstVideoInfo from_info;
GstVideoInfo to_info;
gint borders_h;
gint borders_w;

View file

@ -1059,11 +1059,9 @@ gst_ximagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
{
GstXImageSink *ximagesink;
GstStructure *structure;
GstVideoInfo info;
GstBufferPool *newpool, *oldpool;
gboolean ret = TRUE;
const GValue *par;
gint new_width, new_height;
const GValue *fps;
gint size;
ximagesink = GST_XIMAGESINK (bsink);
@ -1079,19 +1077,12 @@ gst_ximagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
if (!gst_caps_can_intersect (ximagesink->xcontext->caps, caps))
goto incompatible_caps;
if (!gst_video_info_from_caps (&info, caps))
goto invalid_format;
size = info.size;
structure = gst_caps_get_structure (caps, 0);
ret &= gst_structure_get_int (structure, "width", &new_width);
ret &= gst_structure_get_int (structure, "height", &new_height);
fps = gst_structure_get_value (structure, "framerate");
ret &= (fps != NULL);
if (!ret)
return FALSE;
if (!gst_video_get_size_from_caps (caps, &size))
return FALSE;
/* if the caps contain pixel-aspect-ratio, they have to match ours,
* otherwise linking should fail */
par = gst_structure_get_value (structure, "pixel-aspect-ratio");
@ -1107,10 +1098,10 @@ gst_ximagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
}
}
GST_VIDEO_SINK_WIDTH (ximagesink) = new_width;
GST_VIDEO_SINK_HEIGHT (ximagesink) = new_height;
ximagesink->fps_n = gst_value_get_fraction_numerator (fps);
ximagesink->fps_d = gst_value_get_fraction_denominator (fps);
GST_VIDEO_SINK_WIDTH (ximagesink) = info.width;
GST_VIDEO_SINK_HEIGHT (ximagesink) = info.height;
ximagesink->fps_n = info.fps_n;
ximagesink->fps_d = info.fps_d;
/* Notify application to set xwindow id now */
g_mutex_lock (ximagesink->flow_lock);
@ -1164,6 +1155,11 @@ incompatible_caps:
GST_ERROR_OBJECT (ximagesink, "caps incompatible");
return FALSE;
}
invalid_format:
{
GST_ERROR_OBJECT (ximagesink, "caps invalid");
return FALSE;
}
wrong_aspect:
{
GST_INFO_OBJECT (ximagesink, "pixel aspect ratio does not match");
@ -1446,17 +1442,16 @@ gst_ximagesink_sink_query (GstPad * sinkpad, GstQuery * query)
}
}
if (pool == NULL && need_pool) {
GstVideoFormat format;
gint width, height;
GstVideoInfo info;
GST_DEBUG_OBJECT (ximagesink, "create new pool");
pool = gst_ximage_buffer_pool_new (ximagesink);
if (!gst_video_format_parse_caps (caps, &format, &width, &height))
if (!gst_video_info_from_caps (&info, caps))
goto invalid_caps;
/* the normal size of a frame */
size = gst_video_format_get_size (format, width, height);
size = info.size;
config = gst_buffer_pool_get_config (pool);
gst_buffer_pool_config_set (config, caps, size, 0, 0, 0, 15);

View file

@ -1530,16 +1530,13 @@ gst_xvimagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
GstXvImageSink *xvimagesink;
GstStructure *structure;
GstBufferPool *newpool, *oldpool;
gboolean ret;
GstVideoInfo info;
guint32 im_format = 0;
gint video_width, video_height;
gint disp_x, disp_y;
gint disp_width, disp_height;
gint video_par_n, video_par_d; /* video's PAR */
gint display_par_n, display_par_d; /* display's PAR */
const GValue *caps_par;
const GValue *caps_disp_reg;
const GValue *fps;
guint num, den;
gint size;
@ -1552,41 +1549,31 @@ gst_xvimagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
if (!gst_caps_can_intersect (xvimagesink->xcontext->caps, caps))
goto incompatible_caps;
if (!gst_video_info_from_caps (&info, caps))
goto invalid_format;
structure = gst_caps_get_structure (caps, 0);
ret = gst_structure_get_int (structure, "width", &video_width);
ret &= gst_structure_get_int (structure, "height", &video_height);
fps = gst_structure_get_value (structure, "framerate");
ret &= (fps != NULL);
if (!ret)
goto incomplete_caps;
xvimagesink->fps_n = info.fps_n;
xvimagesink->fps_d = info.fps_d;
xvimagesink->fps_n = gst_value_get_fraction_numerator (fps);
xvimagesink->fps_d = gst_value_get_fraction_denominator (fps);
xvimagesink->video_width = video_width;
xvimagesink->video_height = video_height;
xvimagesink->video_width = info.width;
xvimagesink->video_height = info.height;
im_format = gst_xvimagesink_get_format_from_caps (xvimagesink, caps);
if (im_format == -1)
goto invalid_format;
if (!gst_video_get_size_from_caps (caps, &size))
goto invalid_format;
size = info.size;
/* get aspect ratio from caps if it's present, and
* convert video width and height to a display width and height
* using wd / hd = wv / hv * PARv / PARd */
/* get video's PAR */
caps_par = gst_structure_get_value (structure, "pixel-aspect-ratio");
if (caps_par) {
video_par_n = gst_value_get_fraction_numerator (caps_par);
video_par_d = gst_value_get_fraction_denominator (caps_par);
} else {
video_par_n = 1;
video_par_d = 1;
}
video_par_n = info.par_n;
video_par_d = info.par_d;
/* get display's PAR */
if (xvimagesink->par) {
display_par_n = gst_value_get_fraction_numerator (xvimagesink->par);
@ -1606,12 +1593,12 @@ gst_xvimagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
g_value_get_int (gst_value_array_get_value (caps_disp_reg, 3));
} else {
disp_x = disp_y = 0;
disp_width = video_width;
disp_height = video_height;
disp_width = info.width;
disp_height = info.height;
}
if (!gst_video_calculate_display_ratio (&num, &den, video_width,
video_height, video_par_n, video_par_d, display_par_n, display_par_d))
if (!gst_video_calculate_display_ratio (&num, &den, info.width,
info.height, video_par_n, video_par_d, display_par_n, display_par_d))
goto no_disp_ratio;
xvimagesink->disp_x = disp_x;
@ -1621,7 +1608,7 @@ gst_xvimagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
GST_DEBUG_OBJECT (xvimagesink,
"video width/height: %dx%d, calculated display ratio: %d/%d",
video_width, video_height, num, den);
info.width, info.height, num, den);
/* now find a width x height that respects this display ratio.
* prefer those that have one of w/h the same as the incoming video
@ -1629,21 +1616,21 @@ gst_xvimagesink_setcaps (GstBaseSink * bsink, GstCaps * caps)
/* start with same height, because of interlaced video */
/* check hd / den is an integer scale factor, and scale wd with the PAR */
if (video_height % den == 0) {
if (info.height % den == 0) {
GST_DEBUG_OBJECT (xvimagesink, "keeping video height");
GST_VIDEO_SINK_WIDTH (xvimagesink) = (guint)
gst_util_uint64_scale_int (video_height, num, den);
GST_VIDEO_SINK_HEIGHT (xvimagesink) = video_height;
} else if (video_width % num == 0) {
gst_util_uint64_scale_int (info.height, num, den);
GST_VIDEO_SINK_HEIGHT (xvimagesink) = info.height;
} else if (info.width % num == 0) {
GST_DEBUG_OBJECT (xvimagesink, "keeping video width");
GST_VIDEO_SINK_WIDTH (xvimagesink) = video_width;
GST_VIDEO_SINK_WIDTH (xvimagesink) = info.width;
GST_VIDEO_SINK_HEIGHT (xvimagesink) = (guint)
gst_util_uint64_scale_int (video_width, den, num);
gst_util_uint64_scale_int (info.width, den, num);
} else {
GST_DEBUG_OBJECT (xvimagesink, "approximating while keeping video height");
GST_VIDEO_SINK_WIDTH (xvimagesink) = (guint)
gst_util_uint64_scale_int (video_height, num, den);
GST_VIDEO_SINK_HEIGHT (xvimagesink) = video_height;
gst_util_uint64_scale_int (info.height, num, den);
GST_VIDEO_SINK_HEIGHT (xvimagesink) = info.height;
}
GST_DEBUG_OBJECT (xvimagesink, "scaling to %dx%d",
GST_VIDEO_SINK_WIDTH (xvimagesink), GST_VIDEO_SINK_HEIGHT (xvimagesink));
@ -1703,12 +1690,6 @@ incompatible_caps:
GST_ERROR_OBJECT (xvimagesink, "caps incompatible");
return FALSE;
}
incomplete_caps:
{
GST_DEBUG_OBJECT (xvimagesink, "Failed to retrieve either width, "
"height or framerate from intersected caps");
return FALSE;
}
invalid_format:
{
GST_DEBUG_OBJECT (xvimagesink,
@ -1999,17 +1980,16 @@ gst_xvimagesink_sink_query (GstPad * sinkpad, GstQuery * query)
}
}
if (pool == NULL && need_pool) {
GstVideoFormat format;
gint width, height;
GstVideoInfo info;
GST_DEBUG_OBJECT (xvimagesink, "create new pool");
pool = gst_xvimage_buffer_pool_new (xvimagesink);
if (!gst_video_format_parse_caps (caps, &format, &width, &height))
if (!gst_video_info_from_caps (&info, caps))
goto invalid_caps;
/* the normal size of a frame */
size = gst_video_format_get_size (format, width, height);
size = info.size;
config = gst_buffer_pool_get_config (pool);
gst_buffer_pool_config_set (config, caps, size, 0, 0, 0, 15);