Slightly faster yuv to rgb conversion.

Original commit message from CVS:
Slightly faster yuv to rgb conversion.
Wim Taymans 2000-10-26 19:07:27 +00:00
parent 9bae9d4b91
commit 31cae4b447
3 changed files with 193 additions and 181 deletions
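
For context: the change moves the YUV420P to 16-bit BGR (RGB565) conversion out of the inline-MMX C loop in yuv2rgb.c into a hand-scheduled external assembly routine, yuv2rgb_mmx16.s, and drops -funroll-all-loops from the plugin's CFLAGS. As a rough guide to what that routine computes, here is a minimal scalar sketch of the per-pixel math (function and variable names below are illustrative only, not code from the tree; the coefficients are the usual ITU-R BT.601 factors that the new MMX constants approximate in fixed point, while the removed C loop uses slightly different rounding):

#include <stdint.h>

/* Illustrative scalar YUV420P -> RGB565 conversion; not part of the
 * GStreamer sources.  The MMX/assembly path computes the same result
 * eight pixels at a time with 16-bit fixed-point coefficients. */
static uint8_t clamp8(int v)
{
    return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

static void yuv420p_to_rgb565_scalar(const uint8_t *y_plane,
                                     const uint8_t *u_plane,
                                     const uint8_t *v_plane,
                                     uint16_t *dst, int cols, int rows)
{
    int x, y;
    for (y = 0; y < rows; y++) {
        for (x = 0; x < cols; x++) {
            int Y = y_plane[y * cols + x] - 16;                  /* video-range luma */
            int U = u_plane[(y / 2) * (cols / 2) + x / 2] - 128; /* Cb */
            int V = v_plane[(y / 2) * (cols / 2) + x / 2] - 128; /* Cr */

            /* BT.601 expansion to full-range RGB. */
            int r = clamp8((int)(1.164 * Y + 1.596 * V));
            int g = clamp8((int)(1.164 * Y - 0.391 * U - 0.813 * V));
            int b = clamp8((int)(1.164 * Y + 2.018 * U));

            /* Pack into RGB565: rrrrrggg gggbbbbb. */
            dst[y * cols + x] = (uint16_t)(((r & 0xf8) << 8) |
                                           ((g & 0xfc) << 3) |
                                           (b >> 3));
        }
    }
}

The deleted intrinsics loop in the second file below does the same work per pixel pair; the new assembly file repeats it with its own constant table.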

Makefile.am

@@ -2,14 +2,14 @@ filterdir = $(libdir)/gst
filter_LTLIBRARIES = libgstcolorspace.la
libgstcolorspace_la_SOURCES = gstcolorspace.c rgb2rgb.c yuv2rgb.c
libgstcolorspace_la_SOURCES = gstcolorspace.c rgb2rgb.c yuv2rgb.c yuv2rgb_mmx16.s
libgstcolorspaceincludedir = $(includedir)/gst/libs/gstcolorspace
libgstcolorspaceinclude_HEADERS = gstcolorspace.h
noinst_HEADERS = yuv2rgb.h
CFLAGS += -Wall -O2 -fomit-frame-pointer -funroll-all-loops -finline-functions -ffast-math
CFLAGS += -Wall -O2 -fomit-frame-pointer -finline-functions -ffast-math
INCLUDES = $(GLIB_CFLAGS) $(GTK_CFLAGS) -I$(top_srcdir) -I$(top_srcdir)/include
LDADD = $(GLIB_LIBS) $(GTK_LIBS) $(top_srcdir)/gst/libgst.la

yuv2rgb.c

@@ -34,6 +34,8 @@
#include "yuv2rgb.h"
static void gst_colorspace_yuv420P_to_bgr16_mmx(GstColorSpaceConverter *space, unsigned char *src, unsigned char *dest);
static void gst_colorspace_yuv420P_to_rgb32(GstColorSpaceConverter *space, unsigned char *src, unsigned char *dest);
static void gst_colorspace_yuv420P_to_bgr32(GstColorSpaceConverter *space, unsigned char *src, unsigned char *dest);
static void gst_colorspace_yuv420P_to_bgr32_mmx(GstColorSpaceConverter *space, unsigned char *src, unsigned char *dest);
@@ -67,7 +69,7 @@ static void gst_colorspace_yuv_to_bgr32_mmx(GstColorSpaceYUVTables *tables,
unsigned char *cb,
unsigned char *out,
int cols, int rows);
static void gst_colorspace_yuv_to_bgr16_mmx(GstColorSpaceYUVTables *tables,
extern void gst_colorspace_yuv_to_bgr16_mmx(GstColorSpaceYUVTables *tables,
unsigned char *lum,
unsigned char *cr,
unsigned char *cb,
@@ -213,8 +215,6 @@ static void gst_colorspace_yuv420P_to_rgb16(GstColorSpaceConverter *space, unsig
}
#ifdef HAVE_LIBMMX
static mmx_t MMX16_redmask = (mmx_t)(long long)0xf800f800f800f800LL; //dd 0F800 F800h, 0F800F800h
static mmx_t MMX16_grnmask = (mmx_t)(long long)0x07e007e007e007e0LL; //dd 007E0 07E0h, 007E007E0h
static void gst_colorspace_yuv420P_to_bgr32_mmx(GstColorSpaceConverter *space, unsigned char *src, unsigned char *dest) {
int size;
@@ -663,192 +663,16 @@ gst_colorspace_yuv_to_rgb32(tables, lum, cb, cr, out, rows, cols)
}
#ifdef HAVE_LIBMMX
static mmx_t MMX_10w = (mmx_t)(long long)0x000D000D000D000DLL; //dd 0000D 000Dh, 0000D000Dh
static mmx_t MMX_80w = (mmx_t)(long long)0x0080008000800080LL; //dd 00080 0080h, 000800080h
static mmx_t MMX_00FFw = (mmx_t)(long long)0x00ff00ff00ff00ffLL; //dd 000FF 00FFh, 000FF00FFh
static mmx_t MMX_FF00w = (mmx_t)(long long)0xff00ff00ff00ff00LL; //dd 0FF00 FF00h, 0FF00FF00h
static mmx_t MMX16_Vredcoeff = (mmx_t)(long long)0x0066006600660066LL; //dd 00066 0066h, 000660066h
static mmx_t MMX16_Ublucoeff = (mmx_t)(long long)0x0081008100810081LL; //dd 00081 0081h, 000810081h
static mmx_t MMX16_Ugrncoeff = (mmx_t)(long long)0xffe8ffe8ffe8ffe8LL; //dd 0FFE8 FFE8h, 0FFE8FFE8h
static mmx_t MMX16_Vgrncoeff = (mmx_t)(long long)0xffcdffcdffcdffcdLL; //dd 0FFCD FFCDh, 0FFCDFFCDh
static mmx_t MMX16_Ycoeff = (mmx_t)(long long)0x004a004a004a004aLL; //dd 0004A 004Ah, 0004A004Ah
static mmx_t MMX32_Vredcoeff = (mmx_t)(long long)0x0059005900590059LL;
static mmx_t MMX32_Ubluecoeff = (mmx_t)(long long)0x0072007200720072LL;
static mmx_t MMX32_Ugrncoeff = (mmx_t)(long long)0xffeaffeaffeaffeaLL;
static mmx_t MMX32_Vgrncoeff = (mmx_t)(long long)0xffd2ffd2ffd2ffd2LL;
static void
gst_colorspace_yuv_to_bgr16_mmx(tables, lum, cr, cb, out, rows, cols)
GstColorSpaceYUVTables *tables;
unsigned char *lum;
unsigned char *cr;
unsigned char *cb;
unsigned char *out;
int cols, rows;
{
unsigned short *row1 = (unsigned short *)out; // 16 bit target
int cols8 = cols>>3;
int y, x;
DEBUG("gst_colorspace_yuv_to_bgr16_mmx %p %p %p\n", lum, cr, cb);
for (y=rows>>1; y; y--) {
for (x=cols8; x; x--) {
movd_m2r(*(mmx_t *)cr, mm0); // 4 Cr 0 0 0 0 u3 u2 u1 u0
pxor_r2r(mm7, mm7);
movd_m2r(*(mmx_t *)cb, mm1); // 4 Cb 0 0 0 0 v3 v2 v1 v0
punpcklbw_r2r(mm7, mm0); // 4 W cb 0 u3 0 u2 0 u1 0 u0
punpcklbw_r2r(mm7, mm1); // 4 W cr 0 v3 0 v2 0 v1 0 v0
psubw_m2r(MMX_80w, mm0);
psubw_m2r(MMX_80w, mm1);
movq_r2r(mm0, mm2); // Cb 0 u3 0 u2 0 u1 0 u0
movq_r2r(mm1, mm3); // Cr
pmullw_m2r(MMX16_Ugrncoeff, mm2); // Cb2green 0 R3 0 R2 0 R1 0 R0
movq_m2r(*(mmx_t *)lum, mm6); // L1 l7 L6 L5 L4 L3 L2 L1 L0
pmullw_m2r(MMX16_Ublucoeff, mm0); // Cb2blue
pand_m2r(MMX_00FFw, mm6); // L1 00 L6 00 L4 00 L2 00 L0
pmullw_m2r(MMX16_Vgrncoeff, mm3); // Cr2green
movq_m2r(*(mmx_t *)lum, mm7); // L2
pmullw_m2r(MMX16_Vredcoeff, mm1); // Cr2red
psubw_m2r(MMX_10w, mm6);
psrlw_i2r(8, mm7); // L2 00 L7 00 L5 00 L3 00 L1
pmullw_m2r(MMX16_Ycoeff, mm6); // lum1
psubw_m2r(MMX_10w, mm7);
paddw_r2r(mm3, mm2); // Cb2green + Cr2green == green
pmullw_m2r(MMX16_Ycoeff, mm7); // lum2
movq_r2r(mm6, mm4); // lum1
paddw_r2r(mm0, mm6); // lum1 +blue 00 B6 00 B4 00 B2 00 B0
movq_r2r(mm4, mm5); // lum1
paddw_r2r(mm1, mm4); // lum1 +red 00 R6 00 R4 00 R2 00 R0
paddw_r2r(mm2, mm5); // lum1 +green 00 G6 00 G4 00 G2 00 G0
psraw_i2r(6, mm4); // R1 0 .. 64
movq_r2r(mm7, mm3); // lum2 00 L7 00 L5 00 L3 00 L1
psraw_i2r(6, mm5); // G1 - .. +
paddw_r2r(mm0, mm7); // Lum2 +blue 00 B7 00 B5 00 B3 00 B1
psraw_i2r(6, mm6); // B1 0 .. 64
packuswb_r2r(mm4, mm4); // R1 R1
packuswb_r2r(mm5, mm5); // G1 G1
packuswb_r2r(mm6, mm6); // B1 B1
punpcklbw_r2r(mm4, mm4);
punpcklbw_r2r(mm5, mm5);
pand_m2r(MMX16_redmask, mm4);
psllw_i2r(3, mm5); // GREEN 1
punpcklbw_r2r(mm6, mm6);
pand_m2r(MMX16_grnmask, mm5);
pand_m2r(MMX16_redmask, mm6);
por_r2r(mm5, mm4); //
psrlw_i2r(11, mm6); // BLUE 1
movq_r2r(mm3, mm5); // lum2
paddw_r2r(mm1, mm3); // lum2 +red 00 R7 00 R5 00 R3 00 R1
paddw_r2r(mm2, mm5); // lum2 +green 00 G7 00 G5 00 G3 00 G1
psraw_i2r(6, mm3); // R2
por_r2r(mm6, mm4); // MM4
psraw_i2r(6, mm5); // G2
movq_m2r(*(mmx_t *)(lum+cols), mm6); // L3 load lum2
psraw_i2r(6, mm7);
packuswb_r2r(mm3, mm3);
packuswb_r2r(mm5, mm5);
packuswb_r2r(mm7, mm7);
pand_m2r(MMX_00FFw, mm6); // L3
psubw_m2r(MMX_10w, mm6);
punpcklbw_r2r(mm3, mm3);
// "psubw MMX_10w, %%mm6\n" // L3
punpcklbw_r2r(mm5, mm5);
pmullw_m2r(MMX16_Ycoeff, mm6); // lum3
punpcklbw_r2r(mm7, mm7);
psllw_i2r(3, mm5); // GREEN 2
pand_m2r(MMX16_redmask, mm7);
pand_m2r(MMX16_redmask, mm3);
psrlw_i2r(11, mm7); // BLUE 2
pand_m2r(MMX16_grnmask, mm5);
por_r2r(mm7, mm3);
movq_m2r(*(mmx_t *)(lum+cols), mm7); // L4 load lum2
por_r2r(mm5, mm3); //
psrlw_i2r(8, mm7); // L4
movq_r2r(mm4, mm5);
// "psubw MMX_10w, %%mm7\n" // L4
punpcklwd_r2r(mm3, mm4);
psubw_m2r(MMX_10w, mm7);
pmullw_m2r(MMX16_Ycoeff, mm7); // lum4
punpckhwd_r2r(mm3, mm5);
movq_r2m(mm4, *(row1)); // write row1
movq_r2m(mm5, *(row1+4)); // write row1
movq_r2r(mm6, mm4); // Lum3
paddw_r2r(mm0, mm6); // Lum3 +blue
movq_r2r(mm4, mm5); // Lum3
paddw_r2r(mm1, mm4); // Lum3 +red
paddw_r2r(mm2, mm5); // Lum3 +green
psraw_i2r(6, mm4);
movq_r2r(mm7, mm3); // Lum4
psraw_i2r(6, mm5);
paddw_r2r(mm0, mm7); // Lum4 +blue
psraw_i2r(6, mm6); // Lum3 +blue
movq_r2r(mm3, mm0); // Lum4
packuswb_r2r(mm4, mm4);
paddw_r2r(mm1, mm3); // Lum4 +red
packuswb_r2r(mm5, mm5);
paddw_r2r(mm2, mm0); // Lum4 +green
packuswb_r2r(mm6, mm6);
punpcklbw_r2r(mm4, mm4);
punpcklbw_r2r(mm5, mm5);
punpcklbw_r2r(mm6, mm6);
psllw_i2r(3, mm5); // GREEN 3
pand_m2r(MMX16_redmask, mm4);
psraw_i2r(6, mm3); // psr 6
psraw_i2r(6, mm0);
pand_m2r(MMX16_redmask, mm6); // BLUE
pand_m2r(MMX16_grnmask, mm5);
psrlw_i2r(11, mm6); // BLUE 3
por_r2r(mm5, mm4);
psraw_i2r(6, mm7);
por_r2r(mm6, mm4);
packuswb_r2r(mm3, mm3);
packuswb_r2r(mm0, mm0);
packuswb_r2r(mm7, mm7);
punpcklbw_r2r(mm3, mm3);
punpcklbw_r2r(mm0, mm0);
punpcklbw_r2r(mm7, mm7);
pand_m2r(MMX16_redmask, mm3);
pand_m2r(MMX16_redmask, mm7); // BLUE
psllw_i2r(3, mm0); // GREEN 4
psrlw_i2r(11, mm7);
pand_m2r(MMX16_grnmask, mm0);
por_r2r(mm7, mm3);
por_r2r(mm0, mm3);
movq_r2r(mm4, mm5);
punpcklwd_r2r(mm3, mm4);
punpckhwd_r2r(mm3, mm5);
movq_r2m(mm4, *(row1+cols));
movq_r2m(mm5, *(row1+cols+4));
lum+=8;
cr+=4;
cb+=4;
row1 +=8;
}
lum += cols;
row1 += cols;
}
emms();
}
static void
gst_colorspace_yuv_to_bgr32_mmx(tables, lum, cr, cb, out, rows, cols)
GstColorSpaceYUVTables *tables;

yuv2rgb_mmx16.s (new file)

@@ -0,0 +1,188 @@
.globl mmx_80w
.data
.align 4
.type mmx_80w,@object
.size mmx_80w,8
mmx_80w:
.long 8388736
.long 8388736
.globl mmx_10w
.align 4
.type mmx_10w,@object
.size mmx_10w,8
mmx_10w:
.long 269488144
.long 269488144
.globl mmx_00ffw
.align 4
.type mmx_00ffw,@object
.size mmx_00ffw,8
mmx_00ffw:
.long 16711935
.long 16711935
.globl mmx_Y_coeff
.align 4
.type mmx_Y_coeff,@object
.size mmx_Y_coeff,8
mmx_Y_coeff:
.long 624895295
.long 624895295
.globl mmx_U_green
.align 4
.type mmx_U_green,@object
.size mmx_U_green,8
mmx_U_green:
.long -209849475
.long -209849475
.globl mmx_U_blue
.align 4
.type mmx_U_blue,@object
.size mmx_U_blue,8
mmx_U_blue:
.long 1083392147
.long 1083392147
.globl mmx_V_red
.align 4
.type mmx_V_red,@object
.size mmx_V_red,8
mmx_V_red:
.long 856830738
.long 856830738
.globl mmx_V_green
.align 4
.type mmx_V_green,@object
.size mmx_V_green,8
mmx_V_green:
.long -436410884
.long -436410884
.globl mmx_redmask
.align 4
.type mmx_redmask,@object
.size mmx_redmask,8
mmx_redmask:
.long -117901064
.long -117901064
.globl mmx_grnmask
.align 4
.type mmx_grnmask,@object
.size mmx_grnmask,8
mmx_grnmask:
.long -50529028
.long -50529028
.text
.align 4
.globl gst_colorspace_yuv_to_bgr16_mmx
.type gst_colorspace_yuv_to_bgr16_mmx,@function
gst_colorspace_yuv_to_bgr16_mmx:
subl $8,%esp
pushl %ebp
pushl %edi
pushl %esi
movl 28(%esp),%edi
movl 32(%esp),%ecx
movl 36(%esp),%edx
movl $1,%ebp
movl 48(%esp),%esi
sarl $1,%esi
movl %esi,16(%esp)
pxor %mm4, %mm4 # zero mm4
movl %esi,12(%esp)
sarl $2,12(%esp)
movl 40(%esp),%esi
.p2align 4,,7
.L68:
movd (%ecx), %mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0
movd (%edx), %mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0
movq (%edi), %mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
movl 12(%esp),%eax
.p2align 4,,7
.L74:
punpcklbw %mm4, %mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0
punpcklbw %mm4, %mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0
psubsw mmx_80w, %mm0 # Cb -= 128
psubsw mmx_80w, %mm1 # Cr -= 128
psllw $3, %mm0 # Promote precision
psllw $3, %mm1 # Promote precision
movq %mm0, %mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0
movq %mm1, %mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0
pmulhw mmx_U_green, %mm2 # Mul Cb with green coeff -> Cb green
pmulhw mmx_V_green, %mm3 # Mul Cr with green coeff -> Cr green
pmulhw mmx_U_blue, %mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0
pmulhw mmx_V_red, %mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0
paddsw %mm3, %mm2 # Cb green + Cr green -> Cgreen
psubusb mmx_10w, %mm6 # Y -= 16
movq %mm6, %mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
pand mmx_00ffw, %mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0
psrlw $8, %mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1
psllw $3, %mm6 # Promote precision
psllw $3, %mm7 # Promote precision
pmulhw mmx_Y_coeff, %mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0
pmulhw mmx_Y_coeff, %mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1
movq %mm0, %mm3 # Copy Cblue
movq %mm1, %mm4 # Copy Cred
movq %mm2, %mm5 # Copy Cgreen
paddsw %mm6, %mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0
paddsw %mm7, %mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1
paddsw %mm6, %mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0
paddsw %mm7, %mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1
paddsw %mm6, %mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0
paddsw %mm7, %mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1
packuswb %mm0, %mm0 # B6 B4 B2 B0 | B6 B4 B2 B0
packuswb %mm1, %mm1 # R6 R4 R2 R0 | R6 R4 R2 R0
packuswb %mm2, %mm2 # G6 G4 G2 G0 | G6 G4 G2 G0
packuswb %mm3, %mm3 # B7 B5 B3 B1 | B7 B5 B3 B1
packuswb %mm4, %mm4 # R7 R5 R3 R1 | R7 R5 R3 R1
packuswb %mm5, %mm5 # G7 G5 G3 G1 | G7 G5 G3 G1
punpcklbw %mm3, %mm0 # B7 B6 B5 B4 B3 B2 B1 B0
punpcklbw %mm4, %mm1 # R7 R6 R5 R4 R3 R2 R1 R0
punpcklbw %mm5, %mm2 # G7 G6 G5 G4 G3 G2 G1 G0
pand mmx_redmask, %mm0 # b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0
pand mmx_grnmask, %mm2 # g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0
pand mmx_redmask, %mm1 # r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0
psrlw $3,%mm0 #0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3
pxor %mm4, %mm4 # zero mm4
movq %mm0, %mm5 # Copy B7-B0
movq %mm2, %mm7 # Copy G7-G0
punpcklbw %mm4, %mm2 # 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0
punpcklbw %mm1, %mm0 # r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3
psllw $3,%mm2 # 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0
por %mm2, %mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3
movq 8(%edi), %mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
movq %mm0, (%esi) # store pixel 0-3
punpckhbw %mm4, %mm7 # 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0
punpckhbw %mm1, %mm5 # r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3
psllw $3,%mm7 # 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0
movd 4(%ecx), %mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0
por %mm7, %mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3
movd 4(%edx), %mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0
movq %mm5, 8(%esi) # store pixel 4-7
addl $8,%edi
addl $4,%ecx
addl $4,%edx
addl $16,%esi
decl %eax
jnz .L74
.L72:
xorl $1,%ebp
jne .L76
subl 16(%esp),%ecx
subl 16(%esp),%edx
.L76:
subl $1,44(%esp)
jnz .L68
emms
popl %esi
popl %edi
popl %ebp
addl $8,%esp
ret
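
A note on the .data constants above, which is my reading of the listing rather than anything stated in the commit: each quadword repeats one signed 16-bit coefficient four times, and the inputs are shifted left by 3 (psllw $3) before pmulhw, which keeps the high 16 bits of the 32-bit product, so the effective multiplier is word * 8 / 65536. Worked out, the words recover the familiar BT.601 factors; the small check program below (plain C, illustrative only) just prints those ratios:

#include <stdio.h>

/* 16-bit words taken from the quadwords in the .data section above,
 * e.g. mmx_Y_coeff: .long 624895295 == 0x253F253F -> word 0x253F. */
static const int Y_coeff = 0x253F;            /* 9535           */
static const int U_blue  = 0x4093;            /* 16531          */
static const int V_red   = 0x3312;            /* 13074          */
static const int U_green = 0xF37D - 0x10000;  /* -3203 (signed) */
static const int V_green = 0xE5FC - 0x10000;  /* -6660 (signed) */

int main(void)
{
    /* Effective factor = word * 2^3 / 2^16, matching psllw $3 + pmulhw. */
    printf("Y       %.3f\n", Y_coeff * 8.0 / 65536);  /* ~ 1.164 */
    printf("V_red   %.3f\n", V_red   * 8.0 / 65536);  /* ~ 1.596 */
    printf("U_blue  %.3f\n", U_blue  * 8.0 / 65536);  /* ~ 2.018 */
    printf("U_green %.3f\n", U_green * 8.0 / 65536);  /* ~-0.391 */
    printf("V_green %.3f\n", V_green * 8.0 / 65536);  /* ~-0.813 */
    return 0;
}

Similarly, mmx_redmask (0xF8F8F8F8) and mmx_grnmask (0xFCFCFCFC) keep the top 5 and 6 bits of each byte before the pixels are packed into 16-bit 565 words, and mmx_10w (0x10101010) subtracts the video-range luma offset of 16 with psubusb.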