Mirror of https://gitlab.freedesktop.org/gstreamer/gstreamer.git (synced 2024-12-18 14:26:43 +00:00)
videoscale: Add 16-bit-channel support
This commit is contained in:
parent 99d8339289
commit e1149f52c1
12 changed files with 505 additions and 8 deletions
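
All of the new code paths in this diff operate on packed 16-bit-per-channel pixels (ARGB64 / AYUV64): four 16-bit components per pixel, 8 bytes per pixel. A minimal standalone C sketch of that layout and the resulting scanline size (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: one 16-bit-per-channel pixel (ARGB64 or AYUV64) is
     * four uint16_t components packed together, i.e. 8 bytes per pixel. */
    typedef struct {
      uint16_t comp[4];             /* A,R,G,B or A,Y,U,V */
    } pixel64;

    int main (void)
    {
      int width = 320;

      printf ("bytes per pixel: %zu\n", sizeof (pixel64));
      printf ("bytes per %d-pixel scanline: %zu\n", width,
          width * sizeof (pixel64));
      return 0;
    }
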
@@ -129,7 +129,9 @@ static GstStaticCaps gst_video_scale_format_caps[] = {
   GST_STATIC_CAPS (GST_VIDEO_CAPS_GRAY8),
   GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("Y800")),
   GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("Y8 ")),
-  GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("GREY"))
+  GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("GREY")),
+  GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("AY64")),
+  GST_STATIC_CAPS (GST_VIDEO_CAPS_ARGB_64)
 };
 
 #define GST_TYPE_VIDEO_SCALE_METHOD (gst_video_scale_method_get_type())
@@ -422,9 +424,7 @@ gst_video_scale_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
 
   if (videoscale->tmp_buf)
     g_free (videoscale->tmp_buf);
-  videoscale->tmp_buf =
-      g_malloc (gst_video_format_get_row_stride (videoscale->format, 0,
-          videoscale->to_width) * 4);
+  videoscale->tmp_buf = g_malloc (videoscale->to_width * 8 * 4);
 
   gst_base_transform_set_passthrough (trans,
       (videoscale->from_width == videoscale->to_width
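
The new allocation sizes the temporary buffer for the widest supported pixel (8 bytes for the 64-bit formats) times the four scanlines the 4-tap scaler keeps in flight, independent of the negotiated format. A hedged sketch of that sizing arithmetic (function and variable names here are illustrative, not part of the element):

    #include <stdlib.h>

    /* Illustrative sizing only: 8 bytes per pixel (four 16-bit components)
     * times 4 temporary scanlines used by the 4-tap scaling path. */
    void *
    alloc_tmp_scanlines (int to_width)
    {
      size_t bytes_per_pixel = 8;
      size_t scanlines = 4;

      return malloc ((size_t) to_width * bytes_per_pixel * scanlines);
    }
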
@@ -960,6 +960,7 @@ _get_black_for_format (GstVideoFormat format)
     case GST_VIDEO_FORMAT_ABGR:
     case GST_VIDEO_FORMAT_xRGB:
     case GST_VIDEO_FORMAT_xBGR:
+    case GST_VIDEO_FORMAT_ARGB64:
       return black[0];
     case GST_VIDEO_FORMAT_RGBA:
     case GST_VIDEO_FORMAT_BGRA:
@@ -967,6 +968,7 @@ _get_black_for_format (GstVideoFormat format)
     case GST_VIDEO_FORMAT_BGRx:
       return black[1];
     case GST_VIDEO_FORMAT_AYUV:
+    case GST_VIDEO_FORMAT_AYUV64:
       return black[2];
     case GST_VIDEO_FORMAT_RGB:
     case GST_VIDEO_FORMAT_BGR:
@@ -1072,6 +1074,24 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in,
           goto unknown_mode;
       }
       break;
+    case GST_VIDEO_FORMAT_ARGB64:
+    case GST_VIDEO_FORMAT_AYUV64:
+      if (add_borders)
+        vs_fill_borders_AYUV64 (&dest, black);
+      switch (method) {
+        case GST_VIDEO_SCALE_NEAREST:
+          vs_image_scale_nearest_AYUV64 (&dest, &src, videoscale->tmp_buf);
+          break;
+        case GST_VIDEO_SCALE_BILINEAR:
+          vs_image_scale_linear_AYUV64 (&dest, &src, videoscale->tmp_buf);
+          break;
+        case GST_VIDEO_SCALE_4TAP:
+          vs_image_scale_4tap_AYUV64 (&dest, &src, videoscale->tmp_buf);
+          break;
+        default:
+          goto unknown_mode;
+      }
+      break;
     case GST_VIDEO_FORMAT_RGB:
     case GST_VIDEO_FORMAT_BGR:
     case GST_VIDEO_FORMAT_v308:

@@ -32,6 +32,7 @@ typedef unsigned __int16 orc_uint16;
 typedef unsigned __int32 orc_uint32;
 typedef unsigned __int64 orc_uint64;
+#define ORC_UINT64_C(x) (x##Ui64)
 #define inline __inline
 #else
 #include <limits.h>
 typedef signed char orc_int8;
@@ -78,6 +79,7 @@ void orc_merge_linear_u16 (orc_uint16 * d1, const orc_uint16 * s1,
     const orc_uint16 * s2, int p1, int p2, int n);
 void orc_splat_u16 (orc_uint16 * d1, int p1, int n);
 void orc_splat_u32 (orc_uint32 * d1, int p1, int n);
+void orc_splat_u64 (orc_uint64 * d1, orc_int64 p1, int n);
 void orc_downsample_u8 (guint8 * d1, const guint8 * s1, int n);
 void orc_downsample_u16 (guint16 * d1, const guint16 * s1, int n);
 void gst_videoscale_orc_downsample_u32 (guint8 * d1, const guint8 * s1, int n);
@@ -171,7 +173,7 @@ orc_merge_linear_u8 (orc_uint8 * d1, const orc_uint8 * s1, const orc_uint8 * s2,
   /* 6: loadpw */
   var38.i = p1;
   /* 8: loadpw */
-  var39.i = 0x00000080; /* 128 or 6.32404e-322f */
+  var39.i = (int) 0x00000080; /* 128 or 6.32404e-322f */
 
   for (i = 0; i < n; i++) {
     /* 0: loadb */
@@ -229,7 +231,7 @@ _backup_orc_merge_linear_u8 (OrcExecutor * ORC_RESTRICT ex)
   /* 6: loadpw */
   var38.i = ex->params[24];
   /* 8: loadpw */
-  var39.i = 0x00000080; /* 128 or 6.32404e-322f */
+  var39.i = (int) 0x00000080; /* 128 or 6.32404e-322f */
 
   for (i = 0; i < n; i++) {
     /* 0: loadb */
@@ -540,6 +542,81 @@ orc_splat_u32 (orc_uint32 * d1, int p1, int n)
 #endif
 
 
+/* orc_splat_u64 */
+#ifdef DISABLE_ORC
+void
+orc_splat_u64 (orc_uint64 * d1, orc_int64 p1, int n)
+{
+  int i;
+  orc_union64 *ORC_RESTRICT ptr0;
+  orc_union64 var32;
+  orc_union64 var33;
+
+  ptr0 = (orc_union64 *) d1;
+
+  /* 0: loadpq */
+  var32.i = p1;
+
+  for (i = 0; i < n; i++) {
+    /* 1: copyq */
+    var33.i = var32.i;
+    /* 2: storeq */
+    ptr0[i] = var33;
+  }
+
+}
+
+#else
+static void
+_backup_orc_splat_u64 (OrcExecutor * ORC_RESTRICT ex)
+{
+  int i;
+  int n = ex->n;
+  orc_union64 *ORC_RESTRICT ptr0;
+  orc_union64 var32;
+  orc_union64 var33;
+
+  ptr0 = (orc_union64 *) ex->arrays[0];
+
+  /* 0: loadpq */
+  var32.i =
+      (ex->params[24] & 0xffffffff) | ((orc_uint64) (ex->params[24 +
+              (ORC_VAR_T1 - ORC_VAR_P1)]) << 32);
+
+  for (i = 0; i < n; i++) {
+    /* 1: copyq */
+    var33.i = var32.i;
+    /* 2: storeq */
+    ptr0[i] = var33;
+  }
+
+}
+
+static OrcProgram *_orc_program_orc_splat_u64;
+void
+orc_splat_u64 (orc_uint64 * d1, orc_int64 p1, int n)
+{
+  OrcExecutor _ex, *ex = &_ex;
+  OrcProgram *p = _orc_program_orc_splat_u64;
+  void (*func) (OrcExecutor *);
+
+  ex->program = p;
+
+  ex->n = n;
+  ex->arrays[ORC_VAR_D1] = d1;
+  {
+    orc_union64 tmp;
+    tmp.i = p1;
+    ex->params[ORC_VAR_P1] = tmp.x2[0];
+    ex->params[ORC_VAR_T1] = tmp.x2[1];
+  }
+
+  func = p->code_exec;
+  func (ex);
+}
+#endif
+
+
 /* orc_downsample_u8 */
 #ifdef DISABLE_ORC
 void
@@ -1510,7 +1587,7 @@ gst_videoscale_orc_merge_bicubic_u8 (guint8 * d1, const guint8 * s1,
   /* 12: loadpb */
   var41 = p4;
   /* 15: loadpw */
-  var42.i = 0x00000020; /* 32 or 1.58101e-322f */
+  var42.i = (int) 0x00000020; /* 32 or 1.58101e-322f */
 
   for (i = 0; i < n; i++) {
     /* 0: loadb */
@@ -1593,7 +1670,7 @@ _backup_gst_videoscale_orc_merge_bicubic_u8 (OrcExecutor * ORC_RESTRICT ex)
   /* 12: loadpb */
   var41 = ex->params[27];
   /* 15: loadpw */
-  var42.i = 0x00000020; /* 32 or 1.58101e-322f */
+  var42.i = (int) 0x00000020; /* 32 or 1.58101e-322f */
 
   for (i = 0; i < n; i++) {
     /* 0: loadb */
@@ -1770,6 +1847,24 @@ gst_videoscale_orc_init (void)
 
     _orc_program_orc_splat_u32 = p;
   }
+  {
+    /* orc_splat_u64 */
+    OrcProgram *p;
+    OrcCompileResult result;
+
+    p = orc_program_new ();
+    orc_program_set_name (p, "orc_splat_u64");
+    orc_program_set_backup_function (p, _backup_orc_splat_u64);
+    orc_program_add_destination (p, 8, "d1");
+    orc_program_add_parameter_int64 (p, 8, "p1");
+
+    orc_program_append_2 (p, "copyq", 0, ORC_VAR_D1, ORC_VAR_P1, ORC_VAR_D1,
+        ORC_VAR_D1);
+
+    result = orc_program_compile (p);
+
+    _orc_program_orc_splat_u64 = p;
+  }
   {
     /* orc_downsample_u8 */
     OrcProgram *p;

@@ -37,6 +37,7 @@ typedef unsigned __int16 orc_uint16;
 typedef unsigned __int32 orc_uint32;
 typedef unsigned __int64 orc_uint64;
+#define ORC_UINT64_C(x) (x##Ui64)
 #define inline __inline
 #else
 #include <limits.h>
 typedef signed char orc_int8;
@@ -63,6 +64,7 @@ void orc_merge_linear_u8 (orc_uint8 * d1, const orc_uint8 * s1, const orc_uint8
 void orc_merge_linear_u16 (orc_uint16 * d1, const orc_uint16 * s1, const orc_uint16 * s2, int p1, int p2, int n);
 void orc_splat_u16 (orc_uint16 * d1, int p1, int n);
 void orc_splat_u32 (orc_uint32 * d1, int p1, int n);
+void orc_splat_u64 (orc_uint64 * d1, orc_int64 p1, int n);
 void orc_downsample_u8 (guint8 * d1, const guint8 * s1, int n);
 void orc_downsample_u16 (guint16 * d1, const guint16 * s1, int n);
 void gst_videoscale_orc_downsample_u32 (guint8 * d1, const guint8 * s1, int n);

@@ -54,6 +54,13 @@ copyw d1, p1
 copyl d1, p1
 
 
+.function orc_splat_u64
+.dest 8 d1
+.longparam 8 p1
+
+copyq d1, p1
+
+
 .function orc_downsample_u8
 .dest 1 d1 guint8
 .source 2 s1 guint8

@@ -1324,3 +1324,128 @@ vs_image_scale_4tap_RGB555 (const VSImage * dest, const VSImage * src,
     yacc += y_increment;
   }
 }
+
+void
+vs_scanline_resample_4tap_AYUV64 (uint16_t * dest, uint16_t * src,
+    int n, int src_width, int *xacc, int increment)
+{
+  int i;
+  int j;
+  int acc;
+  int x;
+  int y;
+  int off;
+
+  acc = *xacc;
+  for (i = 0; i < n; i++) {
+    j = acc >> 16;
+    x = (acc & 0xffff) >> 8;
+
+    for (off = 0; off < 4; off++) {
+      if (j - 1 >= 0 && j + 2 < src_width) {
+        y = vs_4tap_taps[x][0] * src[MAX ((j - 1) * 4 + off, 0)];
+        y += vs_4tap_taps[x][1] * src[j * 4 + off];
+        y += vs_4tap_taps[x][2] * src[(j + 1) * 4 + off];
+        y += vs_4tap_taps[x][3] * src[(j + 2) * 4 + off];
+      } else {
+        y = vs_4tap_taps[x][0] * src[CLAMP ((j - 1) * 4 + off, 0,
+                4 * (src_width - 1) + off)];
+        y += vs_4tap_taps[x][1] * src[CLAMP (j * 4 + off, 0,
+                4 * (src_width - 1) + off)];
+        y += vs_4tap_taps[x][2] * src[CLAMP ((j + 1) * 4 + off, 0,
+                4 * (src_width - 1) + off)];
+        y += vs_4tap_taps[x][3] * src[CLAMP ((j + 2) * 4 + off, 0,
+                4 * (src_width - 1) + off)];
+      }
+      y += (1 << (SHIFT - 1));
+      dest[i * 4 + off] = CLAMP (y >> SHIFT, 0, 255);
+    }
+    acc += increment;
+  }
+  *xacc = acc;
+}
+
+void
+vs_scanline_merge_4tap_AYUV64 (uint16_t * dest, uint16_t * src1,
+    uint16_t * src2, uint16_t * src3, uint16_t * src4, int n, int acc)
+{
+  int i;
+  int y;
+  int off;
+  int a, b, c, d;
+
+  acc = (acc >> 8) & 0xff;
+  a = vs_4tap_taps[acc][0];
+  b = vs_4tap_taps[acc][1];
+  c = vs_4tap_taps[acc][2];
+  d = vs_4tap_taps[acc][3];
+  for (i = 0; i < n; i++) {
+    for (off = 0; off < 4; off++) {
+      y = a * src1[i * 4 + off];
+      y += b * src2[i * 4 + off];
+      y += c * src3[i * 4 + off];
+      y += d * src4[i * 4 + off];
+      y += (1 << (SHIFT - 1));
+      dest[i * 4 + off] = CLAMP (y >> SHIFT, 0, 65535);
+    }
+  }
+}
+
+void
+vs_image_scale_4tap_AYUV64 (const VSImage * dest, const VSImage * src,
+    uint8_t * tmpbuf8)
+{
+  int yacc;
+  int y_increment;
+  int x_increment;
+  int i;
+  int j;
+  int xacc;
+  int k;
+  guint16 *tmpbuf = (guint16 *) tmpbuf8;
+
+  if (dest->height == 1)
+    y_increment = 0;
+  else
+    y_increment = ((src->height - 1) << 16) / (dest->height - 1);
+
+  if (dest->width == 1)
+    x_increment = 0;
+  else
+    x_increment = ((src->width - 1) << 16) / (dest->width - 1);
+
+  k = 0;
+  for (i = 0; i < 4; i++) {
+    xacc = 0;
+    vs_scanline_resample_4tap_AYUV64 ((guint16 *) (tmpbuf + i * dest->stride),
+        (guint16 *) (src->pixels + i * src->stride), dest->width, src->width,
+        &xacc, x_increment);
+  }
+
+  yacc = 0;
+  for (i = 0; i < dest->height; i++) {
+    uint16_t *t0, *t1, *t2, *t3;
+
+    j = yacc >> 16;
+
+    while (j > k) {
+      k++;
+      if (k + 3 < src->height) {
+        xacc = 0;
+        vs_scanline_resample_4tap_AYUV64 ((guint16 *) (tmpbuf + ((k +
+                    3) & 3) * dest->stride),
+            (guint16 *) (src->pixels + (k + 3) * src->stride), dest->width,
+            src->width, &xacc, x_increment);
+      }
+    }
+
+    t0 = tmpbuf + (CLAMP (j - 1, 0, src->height - 1) & 3) * dest->stride;
+    t1 = tmpbuf + (CLAMP (j, 0, src->height - 1) & 3) * dest->stride;
+    t2 = tmpbuf + (CLAMP (j + 1, 0, src->height - 1) & 3) * dest->stride;
+    t3 = tmpbuf + (CLAMP (j + 2, 0, src->height - 1) & 3) * dest->stride;
+    vs_scanline_merge_4tap_AYUV64 ((guint16 *) (dest->pixels +
+            i * dest->stride), t0, t1, t2, t3, dest->width, yacc & 0xffff);
+
+    yacc += y_increment;
+  }
+}

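
Each output sample in the 4-tap path above is a weighted sum of four neighbouring input samples; the weights come from a precomputed tap table indexed by the sub-pixel position, and the sum is rounded and shifted back down. A self-contained sketch of that per-sample step (the tap values and the 10-bit SHIFT below are assumptions for illustration, not the actual vs_4tap_taps table):

    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT 10                /* assumed fixed-point precision */

    /* Illustrative 4-tap filtering of one 16-bit sample; the taps are assumed
     * to sum to 1 << SHIFT so the result stays in range before clamping. */
    static uint16_t
    filter_4tap (const uint16_t s[4], const int taps[4])
    {
      int32_t y = taps[0] * s[0] + taps[1] * s[1] + taps[2] * s[2] + taps[3] * s[3];

      y += 1 << (SHIFT - 1);        /* round */
      y >>= SHIFT;
      if (y < 0)
        y = 0;
      if (y > 65535)
        y = 65535;
      return (uint16_t) y;
    }

    int main (void)
    {
      uint16_t samples[4] = { 1000, 2000, 3000, 4000 };
      int taps[4] = { -64, 576, 576, -64 };   /* made-up taps summing to 1024 */

      printf ("%u\n", filter_4tap (samples, taps));   /* prints 2500 */
      return 0;
    }
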
@@ -88,5 +88,12 @@ void vs_scanline_merge_4tap_Y16 (uint8_t *dest, uint8_t *src1, uint8_t *src2,
 void vs_image_scale_4tap_Y16 (const VSImage * dest, const VSImage * src,
     uint8_t * tmpbuf);
 
+void vs_scanline_resample_4tap_AYUV64 (uint16_t *dest, uint16_t *src,
+    int n, int src_width, int *xacc, int increment);
+void vs_scanline_merge_4tap_AYUV64 (uint16_t *dest, uint16_t *src1, uint16_t *src2,
+    uint16_t *src3, uint16_t *src4, int n, int acc);
+void vs_image_scale_4tap_AYUV64 (const VSImage * dest, const VSImage * src,
+    uint8_t * tmpbuf);
+
 #endif
 
@@ -379,3 +379,44 @@ vs_fill_borders_RGB555 (const VSImage * dest, const uint8_t * val)
     data += stride;
   }
 }
+
+void
+vs_fill_borders_AYUV64 (const VSImage * dest, const uint8_t * val)
+{
+  int i;
+  int top = dest->border_top, bottom = dest->border_bottom;
+  int left = dest->border_left, right = dest->border_right;
+  int width = dest->width;
+  int height = dest->height;
+  int real_width = dest->real_width;
+  int stride = dest->stride;
+  int tmp, tmp2;
+  uint8_t *data;
+  uint64_t v;
+
+  v = (val[0] << 8) | (val[1] << 24) | (((guint64) val[2]) << 40) | (((guint64)
+          val[3]) << 56);
+
+  data = dest->real_pixels;
+  for (i = 0; i < top; i++) {
+    orc_splat_u64 ((uint64_t *) data, v, real_width);
+    data += stride;
+  }
+
+  if (left || right) {
+    tmp = height;
+    tmp2 = (left + width) * 8;
+    for (i = 0; i < tmp; i++) {
+      orc_splat_u64 ((uint64_t *) data, v, left);
+      orc_splat_u64 ((uint64_t *) (data + tmp2), v, right);
+      data += stride;
+    }
+  } else {
+    data += stride * height;
+  }
+
+  for (i = 0; i < bottom; i++) {
+    orc_splat_u64 ((uint64_t *) data, v, real_width);
+    data += stride;
+  }
+}

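
vs_fill_borders_AYUV64 widens the four 8-bit border components into one 64-bit pixel by placing each component in the high byte of its 16-bit lane, then splats that value across the border rows and columns with orc_splat_u64. A hedged sketch of the packing step (helper name and sample values are illustrative; little-endian lane order assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: widen four 8-bit components to one packed 64-bit pixel by
     * putting each component into the high byte of its 16-bit lane (lane i is
     * bits 16*i .. 16*i+15, i.e. the i-th uint16_t on a little-endian CPU). */
    static uint64_t
    pack_border_pixel (const uint8_t val[4])
    {
      return ((uint64_t) val[0] << 8) |
          ((uint64_t) val[1] << 24) |
          ((uint64_t) val[2] << 40) |
          ((uint64_t) val[3] << 56);
    }

    int main (void)
    {
      uint8_t border[4] = { 0xff, 0x00, 0x80, 0x80 };   /* made-up A,Y,U,V */
      uint64_t v = pack_border_pixel (border);
      int i;

      for (i = 0; i < 4; i++)
        printf ("lane %d: 0x%04x\n", i, (unsigned) ((v >> (16 * i)) & 0xffff));
      /* prints 0xff00, 0x0000, 0x8000, 0x8000 */
      return 0;
    }
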
@@ -39,5 +39,6 @@ void vs_fill_borders_Y (const VSImage *dest, const uint8_t *val);
 void vs_fill_borders_Y16 (const VSImage *dest, const uint16_t val);
 void vs_fill_borders_RGB565 (const VSImage *dest, const uint8_t *val);
 void vs_fill_borders_RGB555 (const VSImage *dest, const uint8_t *val);
+void vs_fill_borders_AYUV64 (const VSImage *dest, const uint8_t *val);
 
 #endif /* __VS_FILL_BORDERS_H__ */

@@ -1047,3 +1047,121 @@ vs_image_scale_linear_RGB555 (const VSImage * dest, const VSImage * src,
     acc += y_increment;
   }
 }
+
+void
+vs_image_scale_nearest_AYUV64 (const VSImage * dest, const VSImage * src,
+    uint8_t * tmpbuf8)
+{
+  int acc;
+  int y_increment;
+  int x_increment;
+  int i;
+  int j;
+  int prev_j;
+
+  if (dest->height == 1)
+    y_increment = 0;
+  else
+    y_increment = ((src->height - 1) << 16) / (dest->height - 1);
+
+  if (dest->width == 1)
+    x_increment = 0;
+  else
+    x_increment = ((src->width - 1) << 16) / (dest->width - 1);
+
+
+  acc = 0;
+  prev_j = -1;
+  for (i = 0; i < dest->height; i++) {
+    j = acc >> 16;
+
+    if (j == prev_j) {
+      memcpy (dest->pixels + i * dest->stride,
+          dest->pixels + (i - 1) * dest->stride, dest->width * 8);
+    } else {
+      int xacc = 0;
+      vs_scanline_resample_nearest_AYUV64 (dest->pixels + i * dest->stride,
+          src->pixels + j * src->stride, src->width, dest->width, &xacc,
+          x_increment);
+    }
+
+    prev_j = j;
+    acc += y_increment;
+  }
+}
+
+void
+vs_image_scale_linear_AYUV64 (const VSImage * dest, const VSImage * src,
+    uint8_t * tmpbuf)
+{
+  int acc;
+  int y_increment;
+  int x_increment;
+  int y1;
+  int y2;
+  int i;
+  int j;
+  int x;
+  int dest_size;
+  int xacc;
+
+  if (dest->height == 1)
+    y_increment = 0;
+  else
+    y_increment = ((src->height - 1) << 16) / (dest->height - 1);
+
+  if (dest->width == 1)
+    x_increment = 0;
+  else
+    x_increment = ((src->width - 1) << 16) / (dest->width - 1);
+
+  dest_size = dest->width * 8;
+
+#undef LINE
+#define LINE(x) ((guint16 *)((tmpbuf) + (dest_size)*((x)&1)))
+
+  acc = 0;
+  y2 = -1;
+  //gst_videoscale_orc_resample_bilinear_u64 (LINE (0), src->pixels,
+  //    0, x_increment, dest->width);
+  xacc = 0;
+  vs_scanline_resample_linear_AYUV64 ((guint8 *) LINE (0),
+      src->pixels, src->width, dest->width, &xacc, x_increment);
+  y1 = 0;
+  for (i = 0; i < dest->height; i++) {
+    j = acc >> 16;
+    x = acc & 0xffff;
+
+    if (x == 0) {
+      memcpy (dest->pixels + i * dest->stride, LINE (j), dest_size);
+    } else {
+      if (j > y1) {
+        xacc = 0;
+        vs_scanline_resample_linear_AYUV64 ((guint8 *) LINE (j),
+            src->pixels + j * src->stride, src->width, dest->width, &xacc,
+            x_increment);
+        //gst_videoscale_orc_resample_bilinear_u64 (LINE (j),
+        //    src->pixels + j * src->stride, 0, x_increment, dest->width);
+        y1++;
+      }
+      if (j >= y1) {
+        xacc = 0;
+        vs_scanline_resample_linear_AYUV64 ((guint8 *) LINE (j + 1),
+            src->pixels + (j + 1) * src->stride, src->width, dest->width, &xacc,
+            x_increment);
+        orc_merge_linear_u16 ((guint16 *) (dest->pixels + i * dest->stride),
+            LINE (j), LINE (j + 1), 65536 - x, x, dest->width * 4);
+        //gst_videoscale_orc_resample_merge_bilinear_u64 (dest->pixels +
+        //    i * dest->stride, LINE (j + 1), LINE (j),
+        //    src->pixels + (j + 1) * src->stride, (x >> 8), 0, x_increment,
+        //    dest->width);
+        y1++;
+      } else {
+        orc_merge_linear_u16 ((guint16 *) (dest->pixels + i * dest->stride),
+            LINE (j), LINE (j + 1), 65536 - x, x, dest->width * 4);
+      }
+    }
+
+    acc += y_increment;
+  }
+}

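
vs_image_scale_linear_AYUV64 keeps only two horizontally resampled source rows alive at a time: source row x is cached in half (x & 1) of tmpbuf via the LINE() macro, so the pair of rows j and j + 1 that brackets an output row always lands in different halves. A tiny sketch of that alternation (illustrative only):

    #include <stdio.h>

    /* Illustrative only: source row x is cached in half (x & 1) of a two-row
     * temporary buffer, mirroring the LINE(x) macro in the patch. Consecutive
     * source rows j and j + 1 therefore always occupy different halves. */
    static int
    cache_half (int src_row)
    {
      return src_row & 1;
    }

    int main (void)
    {
      int j;

      for (j = 0; j < 4; j++)
        printf ("source rows %d and %d -> halves %d and %d\n",
            j, j + 1, cache_half (j), cache_half (j + 1));
      return 0;
    }
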
@@ -84,5 +84,15 @@ void vs_image_scale_nearest_Y16 (const VSImage *dest, const VSImage *src,
 void vs_image_scale_linear_Y16 (const VSImage *dest, const VSImage *src,
     uint8_t *tmpbuf);
 
+void vs_image_scale_nearest_AYUV16 (const VSImage *dest, const VSImage *src,
+    uint8_t *tmpbuf);
+void vs_image_scale_linear_AYUV16 (const VSImage *dest, const VSImage *src,
+    uint8_t *tmpbuf);
+
+void vs_image_scale_nearest_AYUV64 (const VSImage * dest, const VSImage * src,
+    uint8_t * tmpbuf8);
+void vs_image_scale_linear_AYUV64 (const VSImage * dest, const VSImage * src,
+    uint8_t * tmpbuf8);
+
 #endif
 
@@ -713,3 +713,69 @@ vs_scanline_merge_linear_RGB555 (uint8_t * dest_u8, uint8_t * src1_u8,
         (RGB555_B (src1[i]) * (65536 - x) + RGB555_B (src2[i]) * x) >> 16);
   }
 }
+
+void
+vs_scanline_resample_nearest_AYUV64 (uint8_t * dest8, uint8_t * src8,
+    int src_width, int n, int *accumulator, int increment)
+{
+  guint16 *dest = (guint16 *) dest8;
+  guint16 *src = (guint16 *) src8;
+  int acc = *accumulator;
+  int i;
+  int j;
+  int x;
+
+  for (i = 0; i < n; i++) {
+    j = acc >> 16;
+    x = acc & 0xffff;
+    dest[i * 4 + 0] = (x < 32768
+        || j + 1 >= src_width) ? src[j * 4 + 0] : src[j * 4 + 4];
+    dest[i * 4 + 1] = (x < 32768
+        || j + 1 >= src_width) ? src[j * 4 + 1] : src[j * 4 + 5];
+    dest[i * 4 + 2] = (x < 32768
+        || j + 1 >= src_width) ? src[j * 4 + 2] : src[j * 4 + 6];
+    dest[i * 4 + 3] = (x < 32768
+        || j + 1 >= src_width) ? src[j * 4 + 3] : src[j * 4 + 7];
+
+    acc += increment;
+  }
+
+  *accumulator = acc;
+}
+
+void
+vs_scanline_resample_linear_AYUV64 (uint8_t * dest8, uint8_t * src8,
+    int src_width, int n, int *accumulator, int increment)
+{
+  guint16 *dest = (guint16 *) dest8;
+  guint16 *src = (guint16 *) src8;
+  int acc = *accumulator;
+  int i;
+  int j;
+  int x;
+
+  for (i = 0; i < n; i++) {
+    j = acc >> 16;
+    x = (acc & 0xffff) >> 1;
+
+    if (j + 1 < src_width) {
+      dest[i * 4 + 0] =
+          (src[j * 3 + 0] * (32768 - x) + src[j * 4 + 4] * x) >> 15;
+      dest[i * 4 + 1] =
+          (src[j * 4 + 1] * (32768 - x) + src[j * 4 + 5] * x) >> 15;
+      dest[i * 4 + 2] =
+          (src[j * 4 + 2] * (32768 - x) + src[j * 4 + 6] * x) >> 15;
+      dest[i * 4 + 3] =
+          (src[j * 4 + 3] * (32768 - x) + src[j * 4 + 7] * x) >> 15;
+    } else {
+      dest[i * 4 + 0] = src[j * 4 + 0];
+      dest[i * 4 + 1] = src[j * 4 + 1];
+      dest[i * 4 + 2] = src[j * 4 + 2];
+      dest[i * 4 + 3] = src[j * 4 + 3];
+    }
+
+    acc += increment;
+  }
+
+  *accumulator = acc;
+}

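
The linear scanline path above blends two neighbouring 16-bit samples with a 15-bit fixed-point weight: the 16-bit fractional position is halved first, so a 16-bit sample times a 15-bit weight still fits in a signed 32-bit intermediate. A minimal sketch of that blend, mirroring the (a * (32768 - x) + b * x) >> 15 form used in the patch (illustrative values):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 15-bit fixed-point blend of two 16-bit samples. frac16 is
     * the 16-bit fractional position (0..65535); it is halved to 0..32767 so
     * sample * weight stays well within 32 bits. */
    static uint16_t
    blend_u16 (uint16_t a, uint16_t b, unsigned frac16)
    {
      unsigned x = frac16 >> 1;                         /* 15-bit weight */
      return (uint16_t) ((a * (32768u - x) + b * x) >> 15);
    }

    int main (void)
    {
      printf ("%d\n", blend_u16 (0, 65535, 0x8000));    /* halfway: 32767 */
      printf ("%d\n", blend_u16 (1000, 3000, 0x4000));  /* quarter: 1500 */
      return 0;
    }
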
@@ -70,5 +70,10 @@ void vs_scanline_resample_nearest_Y16 (uint8_t *dest, uint8_t *src, int n, int s
 void vs_scanline_resample_linear_Y16 (uint8_t *dest, uint8_t *src, int n, int src_width, int *accumulator, int increment);
 void vs_scanline_merge_linear_Y16 (uint8_t *dest, uint8_t *src1, uint8_t *src2, int n, int x);
 
+void vs_scanline_resample_nearest_AYUV64 (uint8_t * dest, uint8_t * src,
+    int src_width, int n, int *accumulator, int increment);
+void vs_scanline_resample_linear_AYUV64 (uint8_t * dest, uint8_t * src,
+    int src_width, int n, int *accumulator, int increment);
+
 #endif
 