From 67e5e612be2472f134b3fd18aa09da9dee67281b Mon Sep 17 00:00:00 2001 From: Brian Cameron Date: Mon, 9 Aug 2004 21:21:25 +0000 Subject: [PATCH] Remove GPL'ed mmx32idct.c code and supporting code, since logic in gst-plugins is not supposed to be GPL'ed. This co... Original commit message from CVS: Remove GPL'ed mmx32idct.c code and supporting code, since logic in gst-plugins is not supposed to be GPL'ed. This code provided MMX optimisations, but was never compiled in since configure never set HAVE_LIBMMX anyway. --- gst-libs/gst/idct/Makefile.am | 8 +- gst-libs/gst/idct/dct.h | 6 - gst-libs/gst/idct/idct.c | 15 +- gst-libs/gst/idct/mmx32idct.c | 690 ------------------------------- gst-libs/gst/idct/mmxidct.S | 740 ---------------------------------- gst-libs/gst/idct/sseidct.S | 740 ---------------------------------- 6 files changed, 2 insertions(+), 2197 deletions(-) delete mode 100644 gst-libs/gst/idct/mmx32idct.c delete mode 100644 gst-libs/gst/idct/mmxidct.S delete mode 100644 gst-libs/gst/idct/sseidct.S diff --git a/gst-libs/gst/idct/Makefile.am b/gst-libs/gst/idct/Makefile.am index aea5a1f37e..d8ddf8ddf4 100644 --- a/gst-libs/gst/idct/Makefile.am +++ b/gst-libs/gst/idct/Makefile.am @@ -1,8 +1,3 @@ -if HAVE_LIBMMX -GSTIDCTARCH_SRCS = mmxidct.S mmx32idct.c sseidct.S -else -GSTIDCTARCH_SRCS = -endif librarydir = $(libdir)/gstreamer-@GST_MAJORMINOR@ @@ -12,8 +7,7 @@ libgstidct_la_SOURCES = \ fastintidct.c \ floatidct.c \ idct.c \ - intidct.c \ - $(GSTIDCTARCH_SRCS) + intidct.c libgstidctincludedir = $(includedir)/gstreamer-@GST_MAJORMINOR@/gst/idct libgstidctinclude_HEADERS = idct.h diff --git a/gst-libs/gst/idct/dct.h b/gst-libs/gst/idct/dct.h index efb3ddb36d..ea453cdb3f 100644 --- a/gst-libs/gst/idct/dct.h +++ b/gst-libs/gst/idct/dct.h @@ -21,12 +21,6 @@ extern void gst_idct_int_idct(); extern void gst_idct_init_fast_int_idct (void); extern void gst_idct_fast_int_idct (short *block); -#ifdef HAVE_LIBMMX -extern void gst_idct_mmx_idct (short *block); -extern void gst_idct_mmx32_idct (short *block); -extern void gst_idct_sse_idct (short *block); -#endif /* HAVE_LIBMMX */ - extern void gst_idct_init_float_idct(void); extern void gst_idct_float_idct (short *block); diff --git a/gst-libs/gst/idct/idct.c b/gst-libs/gst/idct/idct.c index 4be150f1fc..4d00b30fbf 100644 --- a/gst-libs/gst/idct/idct.c +++ b/gst-libs/gst/idct/idct.c @@ -35,20 +35,7 @@ gst_idct_new (GstIDCTMethod method) new->need_transpose = FALSE; if (method == GST_IDCT_DEFAULT) { -#ifdef HAVE_LIBMMX - if (gst_cpu_get_flags () & GST_CPU_FLAG_MMX) { - method = GST_IDCT_MMX; - } - /* disabled for now - if (gst_cpu_get_flags() & GST_CPU_FLAG_SSE) { - method = GST_IDCT_SSE; - } - */ - else -#endif /* HAVE_LIBMMX */ - { - method = GST_IDCT_FAST_INT; - } + method = GST_IDCT_FAST_INT; } new->convert_sparse = gst_idct_int_sparse_idct; diff --git a/gst-libs/gst/idct/mmx32idct.c b/gst-libs/gst/idct/mmx32idct.c deleted file mode 100644 index 32f17bea9d..0000000000 --- a/gst-libs/gst/idct/mmx32idct.c +++ /dev/null @@ -1,690 +0,0 @@ -/* - * idctmmx32.cpp - * - * Copyright (C) Alberto Vigata - January 2000 - ultraflask@yahoo.com - * - * This file is part of FlasKMPEG, a free MPEG to MPEG/AVI converter - * - * FlasKMPEG is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. 
- * - * FlasKMPEG is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU Make; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -/* MMX32 iDCT algorithm (IEEE-1180 compliant) :: idct_mmx32() */ -/* */ -/* MPEG2AVI */ -/* -------- */ -/* v0.16B33 initial release */ -/* */ -/* This was one of the harder pieces of work to code. */ -/* Intel's app-note focuses on the numerical issues of the algorithm, but */ -/* assumes the programmer is familiar with IDCT mathematics, leaving the */ -/* form of the complete function up to the programmer's imagination. */ -/* */ -/* ALGORITHM OVERVIEW */ -/* ------------------ */ -/* I played around with the code for quite a few hours. I came up */ -/* with *A* working IDCT algorithm, however I'm not sure whether my routine */ -/* is "the correct one." But rest assured, my code passes all six IEEE */ -/* accuracy tests with plenty of margin. */ -/* */ -/* My IDCT algorithm consists of 4 steps: */ -/* */ -/* 1) IDCT-row transformation (using the IDCT-row function) on all 8 rows */ -/* This yields an intermediate 8x8 matrix. */ -/* */ -/* 2) intermediate matrix transpose (mandatory) */ -/* */ -/* 3) IDCT-row transformation (2nd time) on all 8 rows of the intermediate */ -/* matrix. The output is the final-result, in transposed form. */ -/* */ -/* 4) post-transformation matrix transpose */ -/* (not necessary if the input-data is already transposed, this could */ -/* be done during the MPEG "zig-zag" scan, but since my algorithm */ -/* requires at least one transpose operation, why not re-use the */ -/* transpose-code.) */ -/* */ -/* Although the (1st) and (3rd) steps use the SAME row-transform operation, */ -/* the (3rd) step uses different shift&round constants (explained later.) */ -/* */ -/* Also note that the intermediate transpose (2) would not be neccessary, */ -/* if the subsequent operation were an iDCT-column transformation. Since */ -/* we only have the iDCT-row transform, we transpose the intermediate */ -/* matrix and use the iDCT-row transform a 2nd time. */ -/* */ -/* I had to change some constants/variables for my method to work : */ -/* */ -/* As given by Intel, the #defines for SHIFT_INV_COL and RND_INV_COL are */ -/* wrong. Not surprising since I'm not using a true column-transform */ -/* operation, but the row-transform operation (as mentioned earlier.) */ -/* round_inv_col[], which is given as "4 short" values, should have the */ -/* same dimensions as round_inv_row[]. The corrected variables are */ -/* shown. */ -/* */ -/* Intel's code defines a different table for each each row operation. */ -/* The tables given are 0/4, 1/7, 2/6, and 5/3. My code only uses row#0. */ -/* Using the other rows messes up the overall transform. */ -/* */ -/* IMPLEMENTATION DETAILs */ -/* ---------------------- */ -/* */ -/* I divided the algorithm's work into two subroutines, */ -/* 1) idct_mmx32_rows() - transforms 8 rows, then transpose */ -/* 2) idct_mmx32_cols() - transforms 8 rows, then transpose */ -/* yields final result ("drop-in" direct replacement for INT32 IDCT) */ -/* */ -/* The 2nd function is a clone of the 1st, with changes made only to the */ -/* shift&rounding instructions. 
*/ -/* */ -/* In the 1st function (rows), the shift & round instructions use */ -/* SHIFT_INV_ROW & round_inv_row[] (renamed to r_inv_row[]) */ -/* */ -/* In the 2nd function (cols)-> r_inv_col[], and */ -/* SHIFT_INV_COL & round_inv_col[] (renamed to r_inv_col[]) */ -/* */ -/* Each function contains an integrated transpose-operator, which comes */ -/* AFTER the primary transformation operation. In the future, I'll optimize */ -/* the code to do more of the transpose-work "in-place". Right now, I've */ -/* left the code as two subroutines and a main calling function, so other */ -/* people can read the code more easily. */ -/* */ -/* liaor@umcc.ais.org http://members.tripod.com/~liaor */ -/* */ - -/*;============================================================================= */ -/*; */ -/*; AP-922 http://developer.intel.com/vtune/cbts/strmsimd */ -/*; These examples contain code fragments for first stage iDCT 8x8 */ -/*; (for rows) and first stage DCT 8x8 (for columns) */ -/*; */ -/*;============================================================================= */ -/* -mword typedef qword -qword ptr equ mword ptr */ -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif -#include - -#define BITS_INV_ACC 4 /*; 4 or 5 for IEEE */ - /* 5 yields higher accuracy, but lessens dynamic range on the input matrix */ -#define SHIFT_INV_ROW (16 - BITS_INV_ACC) -#define SHIFT_INV_COL (1 + BITS_INV_ACC +14 ) /* changed from Intel's val) */ -/*#define SHIFT_INV_COL (1 + BITS_INV_ACC ) */ - -#define RND_INV_ROW (1 << (SHIFT_INV_ROW-1)) -#define RND_INV_COL (1 << (SHIFT_INV_COL-1)) -#define RND_INV_CORR (RND_INV_COL - 1) /*; correction -1.0 and round */ -/*#define RND_INV_ROW (1024 * (6 - BITS_INV_ACC)) //; 1 << (SHIFT_INV_ROW-1) */ -/*#define RND_INV_COL (16 * (BITS_INV_ACC - 3)) //; 1 << (SHIFT_INV_COL-1) */ - -/*.data */ -/*Align 16 */ -const static long r_inv_row[2] = { RND_INV_ROW, RND_INV_ROW }; - const static long r_inv_col[2] = { RND_INV_COL, RND_INV_COL }; - const static long r_inv_corr[2] = { RND_INV_CORR, RND_INV_CORR }; - - -/*const static short r_inv_col[4] = */ -/* {RND_INV_COL, RND_INV_COL, RND_INV_COL, RND_INV_COL}; */ -/*const static short r_inv_corr[4] = */ -/* {RND_INV_CORR, RND_INV_CORR, RND_INV_CORR, RND_INV_CORR}; */ - -/* constants for the forward DCT - -/*#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy */ -/*#define SHIFT_FRW_COL BITS_FRW_ACC */ -/*#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17) */ -/*#define RND_FRW_ROW (262144 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_ROW-1) */ - const static __int64 one_corr = 0x0001000100010001; - const static long r_frw_row[2] = { RND_FRW_ROW, RND_FRW_ROW }; - - -/*const static short tg_1_16[4] = {13036, 13036, 13036, 13036 }; //tg * (2<<16) + 0.5 */ -/*const static short tg_2_16[4] = {27146, 27146, 27146, 27146 }; //tg * (2<<16) + 0.5 */ -/*const static short tg_3_16[4] = {-21746, -21746, -21746, -21746 }; //tg * (2<<16) + 0.5 */ -/*const static short cos_4_16[4] = {-19195, -19195, -19195, -19195 }; //cos * (2<<16) + 0.5 */ -/*const static short ocos_4_16[4] = {23170, 23170, 23170, 23170 }; //cos * (2<<15) + 0.5 */ - -/*concatenated table, for forward DCT transformation */ -const static short tg_all_16[] = { 13036, 13036, 13036, 13036, /* tg * (2<<16) + 0.5 */ - 27146, 27146, 27146, 27146, /*tg * (2<<16) + 0.5 */ - -21746, -21746, -21746, -21746, /* tg * (2<<16) + 0.5 */ - -19195, -19195, -19195, -19195, /*cos * (2<<16) + 0.5 */ - 23170, 23170, 23170, 23170 -}; /*cos * (2<<15) + 0.5 */ - - -#define tg_1_16 (tg_all_16 + 0) -#define tg_2_16 (tg_all_16 + 
8) -#define tg_3_16 (tg_all_16 + 16) -#define cos_4_16 (tg_all_16 + 24) -#define ocos_4_16 (tg_all_16 + 32) - */ -/* -;============================================================================= -; -; The first stage iDCT 8x8 - inverse DCTs of rows -; -;----------------------------------------------------------------------------- -; The 8-point inverse DCT direct algorithm -;----------------------------------------------------------------------------- -; -; static const short w[32] = { -; FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16), -; FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16), -; FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16), -; FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16), -; FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16), -; FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16), -; FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16), -; FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) }; -; -; #define DCT_8_INV_ROW(x, y) - -;{ -; int a0, a1, a2, a3, b0, b1, b2, b3; -; -; a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3]; -; a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7]; -; a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11]; -; a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15]; -; b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19]; -; b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23]; -; b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27]; -; b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31]; -; -; y[0] = SHIFT_ROUND ( a0 + b0 ); -; y[1] = SHIFT_ROUND ( a1 + b1 ); -; y[2] = SHIFT_ROUND ( a2 + b2 ); -; y[3] = SHIFT_ROUND ( a3 + b3 ); -; y[4] = SHIFT_ROUND ( a3 - b3 ); -; y[5] = SHIFT_ROUND ( a2 - b2 ); -; y[6] = SHIFT_ROUND ( a1 - b1 ); -; y[7] = SHIFT_ROUND ( a0 - b0 ); -;} -; -;----------------------------------------------------------------------------- -; -; In this implementation the outputs of the iDCT-1D are multiplied -; for rows 0,4 - by cos_4_16, -; for rows 1,7 - by cos_1_16, -; for rows 2,6 - by cos_2_16, -; for rows 3,5 - by cos_3_16 -; and are shifted to the left for better accuracy -; -; For the constants used, -; FIX(float_const) = (short) (float_const * (1<<15) + 0.5) -; -;============================================================================= -;============================================================================= -IF _MMX ; MMX code -;============================================================================= - -/*; Table for rows 0,4 - constants are multiplied by cos_4_16 */ -const short tab_i_04[] = { 16384, 16384, 16384, -16384, /* ; movq-> w06 w04 w02 w00 */ - 21407, 8867, 8867, -21407, /* w07 w05 w03 w01 */ - 16384, -16384, 16384, 16384, /*; w14 w12 w10 w08 */ - -8867, 21407, -21407, -8867, /*; w15 w13 w11 w09 */ - 22725, 12873, 19266, -22725, /*; w22 w20 w18 w16 */ - 19266, 4520, -4520, -12873, /*; w23 w21 w19 w17 */ - 12873, 4520, 4520, 19266, /*; w30 w28 w26 w24 */ - -22725, 19266, -12873, -22725 -}; /*w31 w29 w27 w25 */ - - -/*; Table for rows 1,7 - constants are multiplied by cos_1_16 */ -const short tab_i_17[] = { 22725, 22725, 22725, -22725, /* ; movq-> w06 w04 w02 w00 */ - 29692, 12299, 12299, -29692, /* ; w07 w05 w03 w01 */ - 22725, -22725, 22725, 22725, /*; w14 w12 w10 w08 */ - -12299, 29692, -29692, -12299, /*; w15 w13 w11 w09 */ - 31521, 17855, 26722, -31521, /*; w22 w20 w18 w16 */ - 26722, 6270, -6270, -17855, /*; w23 w21 w19 w17 */ - 17855, 6270, 6270, 26722, /*; w30 
w28 w26 w24 */ - -31521, 26722, -17855, -31521 -}; /* w31 w29 w27 w25 */ - - -/*; Table for rows 2,6 - constants are multiplied by cos_2_16 */ -const short tab_i_26[] = { 21407, 21407, 21407, -21407, /* ; movq-> w06 w04 w02 w00 */ - 27969, 11585, 11585, -27969, /* ; w07 w05 w03 w01 */ - 21407, -21407, 21407, 21407, /* ; w14 w12 w10 w08 */ - -11585, 27969, -27969, -11585, /* ;w15 w13 w11 w09 */ - 29692, 16819, 25172, -29692, /* ;w22 w20 w18 w16 */ - 25172, 5906, -5906, -16819, /* ;w23 w21 w19 w17 */ - 16819, 5906, 5906, 25172, /* ;w30 w28 w26 w24 */ - -29692, 25172, -16819, -29692 -}; /* ;w31 w29 w27 w25 */ - - -/*; Table for rows 3,5 - constants are multiplied by cos_3_16 */ -const short tab_i_35[] = { 19266, 19266, 19266, -19266, /*; movq-> w06 w04 w02 w00 */ - 25172, 10426, 10426, -25172, /*; w07 w05 w03 w01 */ - 19266, -19266, 19266, 19266, /*; w14 w12 w10 w08 */ - -10426, 25172, -25172, -10426, /*; w15 w13 w11 w09 */ - 26722, 15137, 22654, -26722, /*; w22 w20 w18 w16 */ - 22654, 5315, -5315, -15137, /*; w23 w21 w19 w17 */ - 15137, 5315, 5315, 22654, /*; w30 w28 w26 w24 */ - -26722, 22654, -15137, -26722 -}; /*; w31 w29 w27 w25 */ - -*/ -/* CONCATENATED TABLE, rows 0,1,2,3,4,5,6,7 (in order ) */ -/* */ -/* In our implementation, however, we only use row0 ! */ -/* */ -static const short tab_i_01234567[] = { - /*row0, this row is required */ - 16384, 16384, 16384, -16384, /* ; movq-> w06 w04 w02 w00 */ - 21407, 8867, 8867, -21407, /* w07 w05 w03 w01 */ - 16384, -16384, 16384, 16384, /*; w14 w12 w10 w08 */ - -8867, 21407, -21407, -8867, /*; w15 w13 w11 w09 */ - 22725, 12873, 19266, -22725, /*; w22 w20 w18 w16 */ - 19266, 4520, -4520, -12873, /*; w23 w21 w19 w17 */ - 12873, 4520, 4520, 19266, /*; w30 w28 w26 w24 */ - -22725, 19266, -12873, -22725, /*w31 w29 w27 w25 */ - - /* the rest of these rows (1-7), aren't used ! 
*/ - - /*row1 */ - 22725, 22725, 22725, -22725, /* ; movq-> w06 w04 w02 w00 */ - 29692, 12299, 12299, -29692, /* ; w07 w05 w03 w01 */ - 22725, -22725, 22725, 22725, /*; w14 w12 w10 w08 */ - -12299, 29692, -29692, -12299, /*; w15 w13 w11 w09 */ - 31521, 17855, 26722, -31521, /*; w22 w20 w18 w16 */ - 26722, 6270, -6270, -17855, /*; w23 w21 w19 w17 */ - 17855, 6270, 6270, 26722, /*; w30 w28 w26 w24 */ - -31521, 26722, -17855, -31521, /* w31 w29 w27 w25 */ - - /*row2 */ - 21407, 21407, 21407, -21407, /* ; movq-> w06 w04 w02 w00 */ - 27969, 11585, 11585, -27969, /* ; w07 w05 w03 w01 */ - 21407, -21407, 21407, 21407, /* ; w14 w12 w10 w08 */ - -11585, 27969, -27969, -11585, /* ;w15 w13 w11 w09 */ - 29692, 16819, 25172, -29692, /* ;w22 w20 w18 w16 */ - 25172, 5906, -5906, -16819, /* ;w23 w21 w19 w17 */ - 16819, 5906, 5906, 25172, /* ;w30 w28 w26 w24 */ - -29692, 25172, -16819, -29692, /* ;w31 w29 w27 w25 */ - - /*row3 */ - 19266, 19266, 19266, -19266, /*; movq-> w06 w04 w02 w00 */ - 25172, 10426, 10426, -25172, /*; w07 w05 w03 w01 */ - 19266, -19266, 19266, 19266, /*; w14 w12 w10 w08 */ - -10426, 25172, -25172, -10426, /*; w15 w13 w11 w09 */ - 26722, 15137, 22654, -26722, /*; w22 w20 w18 w16 */ - 22654, 5315, -5315, -15137, /*; w23 w21 w19 w17 */ - 15137, 5315, 5315, 22654, /*; w30 w28 w26 w24 */ - -26722, 22654, -15137, -26722, /*; w31 w29 w27 w25 */ - - /*row4 */ - 16384, 16384, 16384, -16384, /* ; movq-> w06 w04 w02 w00 */ - 21407, 8867, 8867, -21407, /* w07 w05 w03 w01 */ - 16384, -16384, 16384, 16384, /*; w14 w12 w10 w08 */ - -8867, 21407, -21407, -8867, /*; w15 w13 w11 w09 */ - 22725, 12873, 19266, -22725, /*; w22 w20 w18 w16 */ - 19266, 4520, -4520, -12873, /*; w23 w21 w19 w17 */ - 12873, 4520, 4520, 19266, /*; w30 w28 w26 w24 */ - -22725, 19266, -12873, -22725, /*w31 w29 w27 w25 */ - - /*row5 */ - 19266, 19266, 19266, -19266, /*; movq-> w06 w04 w02 w00 */ - 25172, 10426, 10426, -25172, /*; w07 w05 w03 w01 */ - 19266, -19266, 19266, 19266, /*; w14 w12 w10 w08 */ - -10426, 25172, -25172, -10426, /*; w15 w13 w11 w09 */ - 26722, 15137, 22654, -26722, /*; w22 w20 w18 w16 */ - 22654, 5315, -5315, -15137, /*; w23 w21 w19 w17 */ - 15137, 5315, 5315, 22654, /*; w30 w28 w26 w24 */ - -26722, 22654, -15137, -26722, /*; w31 w29 w27 w25 */ - - /*row6 */ - 21407, 21407, 21407, -21407, /* ; movq-> w06 w04 w02 w00 */ - 27969, 11585, 11585, -27969, /* ; w07 w05 w03 w01 */ - 21407, -21407, 21407, 21407, /* ; w14 w12 w10 w08 */ - -11585, 27969, -27969, -11585, /* ;w15 w13 w11 w09 */ - 29692, 16819, 25172, -29692, /* ;w22 w20 w18 w16 */ - 25172, 5906, -5906, -16819, /* ;w23 w21 w19 w17 */ - 16819, 5906, 5906, 25172, /* ;w30 w28 w26 w24 */ - -29692, 25172, -16819, -29692, /* ;w31 w29 w27 w25 */ - - /*row7 */ - 22725, 22725, 22725, -22725, /* ; movq-> w06 w04 w02 w00 */ - 29692, 12299, 12299, -29692, /* ; w07 w05 w03 w01 */ - 22725, -22725, 22725, 22725, /*; w14 w12 w10 w08 */ - -12299, 29692, -29692, -12299, /*; w15 w13 w11 w09 */ - 31521, 17855, 26722, -31521, /*; w22 w20 w18 w16 */ - 26722, 6270, -6270, -17855, /*; w23 w21 w19 w17 */ - 17855, 6270, 6270, 26722, /*; w30 w28 w26 w24 */ - -31521, 26722, -17855, -31521 -}; /* w31 w29 w27 w25 */ - - -#define INP eax /* pointer to (short *blk) */ -#define OUT ecx /* pointer to output (temporary store space qwTemp[]) */ -#define TABLE ebx /* pointer to tab_i_01234567[] */ -#define round_inv_row edx -#define round_inv_col edx - -#define ROW_STRIDE 8 /* for 8x8 matrix transposer */ - -/* private variables and functions */ - -/*temporary storage space, 8x8 of shorts */ 
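
For reference, a scalar sketch of what the deleted MMX routines compute, reconstructed from the DCT_8_INV_ROW pseudocode and the four-step overview quoted above. This is illustrative only, not code from the deleted file: w is assumed to hold the 32 weights in the plain w[0..31] layout of the pseudocode (not the interleaved MMX table layout), and the shift/round constants are the ones #defined earlier in this file.

static void
idct_row_ref (const short *x, short *y, const short *w, int shift, int rnd)
{
  /* even part from x[0,2,4,6], odd part from x[1,3,5,7], exactly as in
   * the DCT_8_INV_ROW pseudocode above */
  int a[4], b[4], i;

  for (i = 0; i < 4; i++) {
    a[i] = x[0] * w[4 * i + 0] + x[2] * w[4 * i + 1]
         + x[4] * w[4 * i + 2] + x[6] * w[4 * i + 3];
    b[i] = x[1] * w[16 + 4 * i + 0] + x[3] * w[16 + 4 * i + 1]
         + x[5] * w[16 + 4 * i + 2] + x[7] * w[16 + 4 * i + 3];
  }
  for (i = 0; i < 4; i++) {
    y[i]     = (short) ((a[i] + b[i] + rnd) >> shift);   /* y0..y3 */
    y[7 - i] = (short) ((a[i] - b[i] + rnd) >> shift);   /* y7..y4 */
  }
}

static void
transpose_8x8_ref (short *m)
{
  int r, c;

  for (r = 0; r < 8; r++) {
    for (c = r + 1; c < 8; c++) {
      short t = m[r * 8 + c];

      m[r * 8 + c] = m[c * 8 + r];
      m[c * 8 + r] = t;
    }
  }
}

static void
idct_ref (short *blk, const short *w)
{
  short tmp[64];
  int i;

  for (i = 0; i < 8; i++)    /* 1) iDCT-row transform on all 8 rows */
    idct_row_ref (blk + 8 * i, tmp + 8 * i, w, SHIFT_INV_ROW, RND_INV_ROW);
  transpose_8x8_ref (tmp);   /* 2) mandatory intermediate transpose */
  for (i = 0; i < 8; i++)    /* 3) row transform again, "column" rounding */
    idct_row_ref (tmp + 8 * i, blk + 8 * i, w, SHIFT_INV_COL, RND_INV_COL);
  transpose_8x8_ref (blk);   /* 4) post-transformation transpose */
}
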
- __inline static void idct_mmx32_rows (short *blk); /* transform rows */ -__inline static void idct_mmx32_cols (short *blk); /* transform "columns" */ - - /* the "column" transform actually transforms rows, it is */ - /* identical to the row-transform except for the ROUNDING */ - /* and SHIFTING coefficients. */ - static void -idct_mmx32_rows (short *blk) -{ /* transform all 8 rows of 8x8 iDCT block */ - int x; - short qwTemp[64]; - short *out = &qwTemp[0]; - short *inptr = blk; - - - /* this subroutine performs two operations */ - /* 1) iDCT row transform */ - /* for( i = 0; i < 8; ++ i) */ - /* DCT_8_INV_ROW_1( blk[i*8], qwTemp[i] ); */ - /* */ - /* 2) transpose the matrix (which was stored in qwTemp[]) */ - /* qwTemp[] -> [8x8 matrix transpose] -> blk[] */ - for (x = 0; x < 8; x++) { /* transform one row per iteration */ - movq_m2r (*(inptr), mm0); /* 0 ; x3 x2 x1 x0 */ - movq_m2r (*(inptr + 4), mm1); /* 1 ; x7 x6 x5 x4 */ - movq_r2r (mm0, mm2); /* 2 ; x3 x2 x1 x0 */ - movq_m2r (*(tab_i_01234567), mm3); /* 3 ; w06 w04 w02 w00 */ - punpcklwd_r2r (mm1, mm0); /* x5 x1 x4 x0 */ - - /* ---------- */ - movq_r2r (mm0, mm5); /* 5 ; x5 x1 x4 x0 */ - punpckldq_r2r (mm0, mm0); /* x4 x0 x4 x0 */ - movq_m2r (*(tab_i_01234567 + 4), mm4); /* 4 ; w07 w05 w03 w01 */ - punpckhwd_r2r (mm1, mm2); /* 1 ; x7 x3 x6 x2 */ - pmaddwd_r2r (mm0, mm3); /* x4*w06+x0*w04 x4*w02+x0*w00 */ - movq_r2r (mm2, mm6); /* 6 ; x7 x3 x6 x2 */ - movq_m2r (*(tab_i_01234567 + 16), mm1); /* 1 ; w22 w20 w18 w16 */ - punpckldq_r2r (mm2, mm2); /* x6 x2 x6 x2 */ - pmaddwd_r2r (mm2, mm4); /* x6*w07+x2*w05 x6*w03+x2*w01 */ - punpckhdq_r2r (mm5, mm5); /* x5 x1 x5 x1 */ - pmaddwd_m2r (*(tab_i_01234567 + 8), mm0); /* x4*w14+x0*w12 x4*w10+x0*w08 */ - punpckhdq_r2r (mm6, mm6); /* x7 x3 x7 x3 */ - movq_m2r (*(tab_i_01234567 + 20), mm7); /* 7 ; w23 w21 w19 w17 */ - pmaddwd_r2r (mm5, mm1); /* x5*w22+x1*w20 x5*w18+x1*w16 */ - paddd_m2r (*(r_inv_row), mm3); /* +rounder */ - pmaddwd_r2r (mm6, mm7); /* x7*w23+x3*w21 x7*w19+x3*w17 */ - pmaddwd_m2r (*(tab_i_01234567 + 12), mm2); /* x6*w15+x2*w13 x6*w11+x2*w09 */ - paddd_r2r (mm4, mm3); /* 4 ; a1=sum(even1) a0=sum(even0) */ - pmaddwd_m2r (*(tab_i_01234567 + 24), mm5); /* x5*w30+x1*w28 x5*w26+x1*w24 */ - movq_r2r (mm3, mm4); /* 4 ; a1 a0 */ - pmaddwd_m2r (*(tab_i_01234567 + 28), mm6); /* x7*w31+x3*w29 x7*w27+x3*w25 */ - paddd_r2r (mm7, mm1); /* 7 ; b1=sum(odd1) b0=sum(odd0) */ - paddd_m2r (*(r_inv_row), mm0); /* +rounder */ - psubd_r2r (mm1, mm3); /* a1-b1 a0-b0 */ - psrad_i2r (SHIFT_INV_ROW, mm3); /* y6=a1-b1 y7=a0-b0 */ - paddd_r2r (mm4, mm1); /* 4 ; a1+b1 a0+b0 */ - paddd_r2r (mm2, mm0); /* 2 ; a3=sum(even3) a2=sum(even2) */ - psrad_i2r (SHIFT_INV_ROW, mm1); /* y1=a1+b1 y0=a0+b0 */ - paddd_r2r (mm6, mm5); /* 6 ; b3=sum(odd3) b2=sum(odd2) */ - movq_r2r (mm0, mm4); /* 4 ; a3 a2 */ - paddd_r2r (mm5, mm0); /* a3+b3 a2+b2 */ - psubd_r2r (mm5, mm4); /* 5 ; a3-b3 a2-b2 */ - psrad_i2r (SHIFT_INV_ROW, mm4); /* y4=a3-b3 y5=a2-b2 */ - psrad_i2r (SHIFT_INV_ROW, mm0); /* y3=a3+b3 y2=a2+b2 */ - packssdw_r2r (mm3, mm4); /* 3 ; y6 y7 y4 y5 */ - packssdw_r2r (mm0, mm1); /* 0 ; y3 y2 y1 y0 */ - movq_r2r (mm4, mm7); /* 7 ; y6 y7 y4 y5 */ - psrld_i2r (16, mm4); /* 0 y6 0 y4 */ - movq_r2m (mm1, *(out)); /* 1 ; save y3 y2 y1 y0 */ - pslld_i2r (16, mm7); /* y7 0 y5 0 */ - por_r2r (mm4, mm7); /* 4 ; y7 y6 y5 y4 */ - - /* begin processing row 1 */ - movq_r2m (mm7, *(out + 4)); /* 7 ; save y7 y6 y5 y4 */ - inptr += 8; - out += 8; - } - - /* done with the iDCT row-transformation */ - - /* now we have to transpose the output 
8x8 matrix */ - /* 8x8 (OUT) -> 8x8't' (IN) */ - /* the transposition is implemented as 4 sub-operations. */ - /* 1) transpose upper-left quad */ - /* 2) transpose lower-right quad */ - /* 3) transpose lower-left quad */ - /* 4) transpose upper-right quad */ - - /* mm0 = 1st row [ A B C D ] row1 */ - /* mm1 = 2nd row [ E F G H ] 2 */ - /* mm2 = 3rd row [ I J K L ] 3 */ - /* mm3 = 4th row [ M N O P ] 4 */ - - /* 1) transpose upper-left quad */ - out = &qwTemp[0]; - movq_m2r (*(out + ROW_STRIDE * 0), mm0); - movq_m2r (*(out + ROW_STRIDE * 1), mm1); - movq_r2r (mm0, mm4); /* mm4 = copy of row1[A B C D] */ - movq_m2r (*(out + ROW_STRIDE * 2), mm2); - punpcklwd_r2r (mm1, mm0); /* mm0 = [ 0 4 1 5] */ - movq_m2r (*(out + ROW_STRIDE * 3), mm3); - punpckhwd_r2r (mm1, mm4); /* mm4 = [ 2 6 3 7] */ - movq_r2r (mm2, mm6); - punpcklwd_r2r (mm3, mm2); /* mm2 = [ 8 12 9 13] */ - punpckhwd_r2r (mm3, mm6); /* mm6 = 10 14 11 15] */ - movq_r2r (mm0, mm1); /* mm1 = [ 0 4 1 5] */ - inptr = blk; - punpckldq_r2r (mm2, mm0); /* final result mm0 = row1 [0 4 8 12] */ - movq_r2r (mm4, mm3); /* mm3 = [ 2 6 3 7] */ - punpckhdq_r2r (mm2, mm1); /* mm1 = final result mm1 = row2 [1 5 9 13] */ - movq_r2m (mm0, *(inptr + ROW_STRIDE * 0)); /* store row 1 */ - punpckldq_r2r (mm6, mm4); /* final result mm4 = row3 [2 6 10 14] */ - -/* begin reading next quadrant (lower-right) */ - movq_m2r (*(out + ROW_STRIDE * 4 + 4), mm0); - punpckhdq_r2r (mm6, mm3); /* final result mm3 = row4 [3 7 11 15] */ - movq_r2m (mm4, *(inptr + ROW_STRIDE * 2)); /* store row 3 */ - movq_r2r (mm0, mm4); /* mm4 = copy of row1[A B C D] */ - movq_r2m (mm1, *(inptr + ROW_STRIDE * 1)); /* store row 2 */ - movq_m2r (*(out + ROW_STRIDE * 5 + 4), mm1); - movq_r2m (mm3, *(inptr + ROW_STRIDE * 3)); /* store row 4 */ - punpcklwd_r2r (mm1, mm0); /* mm0 = [ 0 4 1 5] */ - - /* 2) transpose lower-right quadrant */ - -/* movq mm0, qword ptr [OUT + ROW_STRIDE*4 + 8] */ - -/* movq mm1, qword ptr [OUT + ROW_STRIDE*5 + 8] */ -/* movq mm4, mm0; // mm4 = copy of row1[A B C D] */ - movq_m2r (*(out + ROW_STRIDE * 6 + 4), mm2); - -/* punpcklwd mm0, mm1; // mm0 = [ 0 4 1 5] */ - punpckhwd_r2r (mm1, mm4); /* mm4 = [ 2 6 3 7] */ - movq_m2r (*(out + ROW_STRIDE * 7 + 4), mm3); - movq_r2r (mm2, mm6); - punpcklwd_r2r (mm3, mm2); /* mm2 = [ 8 12 9 13] */ - movq_r2r (mm0, mm1); /* mm1 = [ 0 4 1 5] */ - punpckhwd_r2r (mm3, mm6); /* mm6 = 10 14 11 15] */ - movq_r2r (mm4, mm3); /* mm3 = [ 2 6 3 7] */ - punpckldq_r2r (mm2, mm0); /* final result mm0 = row1 [0 4 8 12] */ - punpckhdq_r2r (mm2, mm1); /* mm1 = final result mm1 = row2 [1 5 9 13] */ - ; /* slot */ - movq_r2m (mm0, *(inptr + ROW_STRIDE * 4 + 4)); /* store row 1 */ - punpckldq_r2r (mm6, mm4); /* final result mm4 = row3 [2 6 10 14] */ - movq_m2r (*(out + ROW_STRIDE * 4), mm0); - punpckhdq_r2r (mm6, mm3); /* final result mm3 = row4 [3 7 11 15] */ - movq_r2m (mm4, *(inptr + ROW_STRIDE * 6 + 4)); /* store row 3 */ - movq_r2r (mm0, mm4); /* mm4 = copy of row1[A B C D] */ - movq_r2m (mm1, *(inptr + ROW_STRIDE * 5 + 4)); /* store row 2 */ - ; /* slot */ - movq_m2r (*(out + ROW_STRIDE * 5), mm1); - ; /* slot */ - movq_r2m (mm3, *(inptr + ROW_STRIDE * 7 + 4)); /* store row 4 */ - punpcklwd_r2r (mm1, mm0); /* mm0 = [ 0 4 1 5] */ - - /* 3) transpose lower-left */ -/* movq mm0, qword ptr [OUT + ROW_STRIDE * 4 ] */ - -/* movq mm1, qword ptr [OUT + ROW_STRIDE * 5 ] */ -/* movq mm4, mm0; // mm4 = copy of row1[A B C D] */ - movq_m2r (*(out + ROW_STRIDE * 6), mm2); - -/* punpcklwd mm0, mm1; // mm0 = [ 0 4 1 5] */ - punpckhwd_r2r (mm1, mm4); /* mm4 = 
[ 2 6 3 7] */ - movq_m2r (*(out + ROW_STRIDE * 7), mm3); - movq_r2r (mm2, mm6); - punpcklwd_r2r (mm3, mm2); /* mm2 = [ 8 12 9 13] */ - movq_r2r (mm0, mm1); /* mm1 = [ 0 4 1 5] */ - punpckhwd_r2r (mm3, mm6); /* mm6 = 10 14 11 15] */ - movq_r2r (mm4, mm3); /* mm3 = [ 2 6 3 7] */ - punpckldq_r2r (mm2, mm0); /* final result mm0 = row1 [0 4 8 12] */ - punpckhdq_r2r (mm2, mm1); /* mm1 = final result mm1 = row2 [1 5 9 13] */ - ; /*slot */ - movq_r2m (mm0, *(inptr + ROW_STRIDE * 0 + 4)); /* store row 1 */ - punpckldq_r2r (mm6, mm4); /* final result mm4 = row3 [2 6 10 14] */ - -/* begin reading next quadrant (upper-right) */ - movq_m2r (*(out + ROW_STRIDE * 0 + 4), mm0); - punpckhdq_r2r (mm6, mm3); /* final result mm3 = row4 [3 7 11 15] */ - movq_r2m (mm4, *(inptr + ROW_STRIDE * 2 + 4)); /* store row 3 */ - movq_r2r (mm0, mm4); /* mm4 = copy of row1[A B C D] */ - movq_r2m (mm1, *(inptr + ROW_STRIDE * 1 + 4)); /* store row 2 */ - movq_m2r (*(out + ROW_STRIDE * 1 + 4), mm1); - movq_r2m (mm3, *(inptr + ROW_STRIDE * 3 + 4)); /* store row 4 */ - punpcklwd_r2r (mm1, mm0); /* mm0 = [ 0 4 1 5] */ - - /* 2) transpose lower-right quadrant */ - -/* movq mm0, qword ptr [OUT + ROW_STRIDE*4 + 8] */ - -/* movq mm1, qword ptr [OUT + ROW_STRIDE*5 + 8] */ -/* movq mm4, mm0; // mm4 = copy of row1[A B C D] */ - movq_m2r (*(out + ROW_STRIDE * 2 + 4), mm2); - -/* punpcklwd mm0, mm1; // mm0 = [ 0 4 1 5] */ - punpckhwd_r2r (mm1, mm4); /* mm4 = [ 2 6 3 7] */ - movq_m2r (*(out + ROW_STRIDE * 3 + 4), mm3); - movq_r2r (mm2, mm6); - punpcklwd_r2r (mm3, mm2); /* mm2 = [ 8 12 9 13] */ - movq_r2r (mm0, mm1); /* mm1 = [ 0 4 1 5] */ - punpckhwd_r2r (mm3, mm6); /* mm6 = 10 14 11 15] */ - movq_r2r (mm4, mm3); /* mm3 = [ 2 6 3 7] */ - punpckldq_r2r (mm2, mm0); /* final result mm0 = row1 [0 4 8 12] */ - punpckhdq_r2r (mm2, mm1); /* mm1 = final result mm1 = row2 [1 5 9 13] */ - ; /* slot */ - movq_r2m (mm0, *(inptr + ROW_STRIDE * 4)); /* store row 1 */ - punpckldq_r2r (mm6, mm4); /* final result mm4 = row3 [2 6 10 14] */ - movq_r2m (mm1, *(inptr + ROW_STRIDE * 5)); /* store row 2 */ - punpckhdq_r2r (mm6, mm3); /* final result mm3 = row4 [3 7 11 15] */ - movq_r2m (mm4, *(inptr + ROW_STRIDE * 6)); /* store row 3 */ - ; /* slot */ - movq_r2m (mm3, *(inptr + ROW_STRIDE * 7)); /* store row 4 */ - ; /* slot */ - } - static void -idct_mmx32_cols (short *blk) -{ /* transform all 8 cols of 8x8 iDCT block */ - int x; - short *inptr = blk; - - - /* Despite the function's name, the matrix is transformed */ - /* row by row. This function is identical to idct_mmx32_rows(), */ - /* except for the SHIFT amount and ROUND_INV amount. 
*/ - - /* this subroutine performs two operations */ - /* 1) iDCT row transform */ - /* for( i = 0; i < 8; ++ i) */ - /* DCT_8_INV_ROW_1( blk[i*8], qwTemp[i] ); */ - /* */ - /* 2) transpose the matrix (which was stored in qwTemp[]) */ - /* qwTemp[] -> [8x8 matrix transpose] -> blk[] */ - for (x = 0; x < 8; x++) { /* transform one row per iteration */ - movq_m2r (*(inptr), mm0); /* 0 ; x3 x2 x1 x0 */ - movq_m2r (*(inptr + 4), mm1); /* 1 ; x7 x6 x5 x4 */ - movq_r2r (mm0, mm2); /* 2 ; x3 x2 x1 x0 */ - movq_m2r (*(tab_i_01234567), mm3); /* 3 ; w06 w04 w02 w00 */ - punpcklwd_r2r (mm1, mm0); /* x5 x1 x4 x0 */ - -/* ---------- */ - movq_r2r (mm0, mm5); /* 5 ; x5 x1 x4 x0 */ - punpckldq_r2r (mm0, mm0); /* x4 x0 x4 x0 */ - movq_m2r (*(tab_i_01234567 + 4), mm4); /* 4 ; w07 w05 w03 w01 */ - punpckhwd_r2r (mm1, mm2); /* 1 ; x7 x3 x6 x2 */ - pmaddwd_r2r (mm0, mm3); /* x4*w06+x0*w04 x4*w02+x0*w00 */ - movq_r2r (mm2, mm6); /* 6 ; x7 x3 x6 x2 */ - movq_m2r (*(tab_i_01234567 + 16), mm1); /* 1 ; w22 w20 w18 w16 */ - punpckldq_r2r (mm2, mm2); /* x6 x2 x6 x2 */ - pmaddwd_r2r (mm2, mm4); /* x6*w07+x2*w05 x6*w03+x2*w01 */ - punpckhdq_r2r (mm5, mm5); /* x5 x1 x5 x1 */ - pmaddwd_m2r (*(tab_i_01234567 + 8), mm0); /* x4*w14+x0*w12 x4*w10+x0*w08 */ - punpckhdq_r2r (mm6, mm6); /* x7 x3 x7 x3 */ - movq_m2r (*(tab_i_01234567 + 20), mm7); /* 7 ; w23 w21 w19 w17 */ - pmaddwd_r2r (mm5, mm1); /* x5*w22+x1*w20 x5*w18+x1*w16 */ - paddd_m2r (*(r_inv_col), mm3); /* +rounder */ - pmaddwd_r2r (mm6, mm7); /* x7*w23+x3*w21 x7*w19+x3*w17 */ - pmaddwd_m2r (*(tab_i_01234567 + 12), mm2); /* x6*w15+x2*w13 x6*w11+x2*w09 */ - paddd_r2r (mm4, mm3); /* 4 ; a1=sum(even1) a0=sum(even0) */ - pmaddwd_m2r (*(tab_i_01234567 + 24), mm5); /* x5*w30+x1*w28 x5*w26+x1*w24 */ - movq_r2r (mm3, mm4); /* 4 ; a1 a0 */ - pmaddwd_m2r (*(tab_i_01234567 + 28), mm6); /* x7*w31+x3*w29 x7*w27+x3*w25 */ - paddd_r2r (mm7, mm1); /* 7 ; b1=sum(odd1) b0=sum(odd0) */ - paddd_m2r (*(r_inv_col), mm0); /* +rounder */ - psubd_r2r (mm1, mm3); /* a1-b1 a0-b0 */ - psrad_i2r (SHIFT_INV_COL, mm3); /* y6=a1-b1 y7=a0-b0 */ - paddd_r2r (mm4, mm1); /* 4 ; a1+b1 a0+b0 */ - paddd_r2r (mm2, mm0); /* 2 ; a3=sum(even3) a2=sum(even2) */ - psrad_i2r (SHIFT_INV_COL, mm1); /* y1=a1+b1 y0=a0+b0 */ - paddd_r2r (mm6, mm5); /* 6 ; b3=sum(odd3) b2=sum(odd2) */ - movq_r2r (mm0, mm4); /* 4 ; a3 a2 */ - paddd_r2r (mm5, mm0); /* a3+b3 a2+b2 */ - psubd_r2r (mm5, mm4); /* 5 ; a3-b3 a2-b2 */ - psrad_i2r (SHIFT_INV_COL, mm4); /* y4=a3-b3 y5=a2-b2 */ - psrad_i2r (SHIFT_INV_COL, mm0); /* y3=a3+b3 y2=a2+b2 */ - packssdw_r2r (mm3, mm4); /* 3 ; y6 y7 y4 y5 */ - packssdw_r2r (mm0, mm1); /* 0 ; y3 y2 y1 y0 */ - movq_r2r (mm4, mm7); /* 7 ; y6 y7 y4 y5 */ - psrld_i2r (16, mm4); /* 0 y6 0 y4 */ - movq_r2m (mm1, *(inptr)); /* 1 ; save y3 y2 y1 y0 */ - pslld_i2r (16, mm7); /* y7 0 y5 0 */ - por_r2r (mm4, mm7); /* 4 ; y7 y6 y5 y4 */ - - /* begin processing row 1 */ - movq_r2m (mm7, *(inptr + 4)); /* 7 ; save y7 y6 y5 y4 */ - inptr += 8; - } - - /* done with the iDCT column-transformation */ -} - - -/* */ -/* public interface to MMX32 IDCT 8x8 operation */ -/* */ -void -gst_idct_mmx32_idct (short *blk) -{ - - /* 1) iDCT row transformation */ - idct_mmx32_rows (blk); /* 1) transform iDCT row, and transpose */ - - /* 2) iDCT column transformation */ - idct_mmx32_cols (blk); /* 2) transform iDCT row, and transpose */ - emms (); /* restore processor state */ - /* all done */ -} - - diff --git a/gst-libs/gst/idct/mmxidct.S b/gst-libs/gst/idct/mmxidct.S deleted file mode 100644 index 1b15be5782..0000000000 --- 
a/gst-libs/gst/idct/mmxidct.S +++ /dev/null @@ -1,740 +0,0 @@ -/* - * the input data is tranposed and each 16 bit element in the 8x8 matrix - * is left aligned: - * for example in 11...1110000 format - * If the iDCT is of I macroblock then 0.5 needs to be added to the;DC Component - * (element[0][0] of the matrix) - * - * Notes: - * - the scratchN variables should be put on the stack to avoid - * reentrancy problems - */ - -#ifdef PIC -#define pic_offset(a) a@GOTOFF(%ebx) -#else -#define pic_offset(a) a -#endif - -/* extrn re_matrix */ - -.data - .align 16 - .type preSC,@object -preSC: .short 16384,22725,21407,19266,16384,12873,8867,4520 - .short 22725,31521,29692,26722,22725,17855,12299,6270 - .short 21407,29692,27969,25172,21407,16819,11585,5906 - .short 19266,26722,25172,22654,19266,15137,10426,5315 - .short 16384,22725,21407,19266,16384,12873,8867,4520 - .short 12873,17855,16819,15137,25746,20228,13933,7103 - .short 17734,24598,23170,20853,17734,13933,9597,4892 - .short 18081,25080,23624,21261,18081,14206,9785,4988 - .size preSC,128 - .align 8 - .type x0005000200010001,@object - .size x0005000200010001,8 -x0005000200010001: - .long 0x00010001,0x00050002 - .align 8 - .type x0040000000000000,@object - .size x0040000000000000,8 -x0040000000000000: - .long 0, 0x00400000 - .align 8 - .type x5a825a825a825a82,@object - .size x5a825a825a825a82,8 -x5a825a825a825a82: - .long 0x5a825a82, 0x5a825a82 - .align 8 - .type x539f539f539f539f,@object - .size x539f539f539f539f,8 -x539f539f539f539f: - .long 0x539f539f,0x539f539f - .align 8 - .type x4546454645464546,@object - .size x4546454645464546,8 -x4546454645464546: - .long 0x45464546,0x45464546 - .align 8 - .type x61f861f861f861f8,@object - .size x61f861f861f861f8,8 -x61f861f861f861f8: - .long 0x61f861f8,0x61f861f8 - .type x0004000000000000,@object - .size x0004000000000000,8 -x0004000000000000: - .long 0x00000000,0x00040000 - .type x0000000000000004,@object - .size x0000000000000004,8 -x0000000000000004: - .long 0x00000004,0x00000000 - .align 8 - .type scratch1,@object - .size scratch1,8 -scratch1: - .long 0,0 - .align 8 - .type scratch3,@object - .size scratch3,8 -scratch3: - .long 0,0 - .align 8 - .type scratch5,@object - .size scratch5,8 -scratch5: - .long 0,0 - .align 8 - .type scratch7,@object - .size scratch7,8 -scratch7: - .long 0,0 - .type x0,@object - .size x0,8 -x0: - .long 0,0 - .align 8 -.text - .align 4 -.globl gst_idct_mmx_idct - .type gst_idct_mmx_idct,@function -gst_idct_mmx_idct: - pushl %ebp - movl %esp,%ebp - pushl %ebx - pushl %ecx - pushl %edx - pushl %esi - pushl %edi -#ifdef PIC - call here -here: popl %ebx - addl $_GLOBAL_OFFSET_TABLE_+[.-here],%ebx -#endif - movl 8(%ebp),%esi /* source matrix */ - movq (%esi), %mm0 - paddw pic_offset(x0000000000000004), %mm0 - movq 8(%esi), %mm1 - psllw $4, %mm0 - movq 16(%esi), %mm2 - psllw $4, %mm1 - movq 24(%esi), %mm3 - psllw $4, %mm2 - movq 32(%esi), %mm4 - psllw $4, %mm3 - movq 40(%esi), %mm5 - psllw $4, %mm4 - movq 48(%esi), %mm6 - psllw $4, %mm5 - movq 56(%esi), %mm7 - psllw $4, %mm6 - psllw $4, %mm7 - movq %mm0, (%esi) - movq %mm1, 8(%esi) - movq %mm2,16(%esi) - movq %mm3,24(%esi) - movq %mm4,32(%esi) - movq %mm5,40(%esi) - movq %mm6,48(%esi) - movq %mm7,56(%esi) - movq 64(%esi), %mm0 - movq 72(%esi), %mm1 - psllw $4, %mm0 - movq 80(%esi), %mm2 - psllw $4, %mm1 - movq 88(%esi), %mm3 - psllw $4, %mm2 - movq 96(%esi), %mm4 - psllw $4, %mm3 - movq 104(%esi), %mm5 - psllw $4, %mm4 - movq 112(%esi), %mm6 - psllw $4, %mm5 - movq 120(%esi), %mm7 - psllw $4, %mm6 - psllw $4, %mm7 - movq 
%mm0,64(%esi) - movq %mm1,72(%esi) - movq %mm2,80(%esi) - movq %mm3,88(%esi) - movq %mm4,96(%esi) - movq %mm5,104(%esi) - movq %mm6,112(%esi) - movq %mm7,120(%esi) - leal pic_offset(preSC), %ecx -/* column 0: even part - * use V4, V12, V0, V8 to produce V22..V25 - */ - movq 8*12(%ecx), %mm0 /* maybe the first mul can be done together */ - /* with the dequantization in iHuff module */ - pmulhw 8*12(%esi), %mm0 /* V12 */ - movq 8*4(%ecx), %mm1 - pmulhw 8*4(%esi), %mm1 /* V4 */ - movq (%ecx), %mm3 - psraw $1, %mm0 /* t64=t66 */ - pmulhw (%esi), %mm3 /* V0 */ - movq 8*8(%ecx), %mm5 /* duplicate V4 */ - movq %mm1, %mm2 /* added 11/1/96 */ - pmulhw 8*8(%esi),%mm5 /* V8 */ - psubsw %mm0, %mm1 /* V16 */ - pmulhw pic_offset(x5a825a825a825a82), %mm1 /* 23170 ->V18 */ - paddsw %mm0, %mm2 /* V17 */ - movq %mm2, %mm0 /* duplicate V17 */ - psraw $1, %mm2 /* t75=t82 */ - psraw $2, %mm0 /* t72 */ - movq %mm3, %mm4 /* duplicate V0 */ - paddsw %mm5, %mm3 /* V19 */ - psubsw %mm5, %mm4 /* V20 ;mm5 free */ -/* moved from the block below */ - movq 8*10(%ecx), %mm7 - psraw $1, %mm3 /* t74=t81 */ - movq %mm3, %mm6 /* duplicate t74=t81 */ - psraw $2, %mm4 /* t77=t79 */ - psubsw %mm0, %mm1 /* V21 ; mm0 free */ - paddsw %mm2, %mm3 /* V22 */ - movq %mm1, %mm5 /* duplicate V21 */ - paddsw %mm4, %mm1 /* V23 */ - movq %mm3, 8*4(%esi) /* V22 */ - psubsw %mm5, %mm4 /* V24; mm5 free */ - movq %mm1, 8*12(%esi) /* V23 */ - psubsw %mm2, %mm6 /* V25; mm2 free */ - movq %mm4, (%esi) /* V24 */ -/* keep mm6 alive all along the next block */ - /* movq %mm6, 8*8(%esi) V25 */ -/* column 0: odd part - * use V2, V6, V10, V14 to produce V31, V39, V40, V41 - */ -/* moved above: movq 8*10(%ecx), %mm7 */ - - pmulhw 8*10(%esi), %mm7 /* V10 */ - movq 8*6(%ecx), %mm0 - pmulhw 8*6(%esi), %mm0 /* V6 */ - movq 8*2(%ecx), %mm5 - movq %mm7, %mm3 /* duplicate V10 */ - pmulhw 8*2(%esi), %mm5 /* V2 */ - movq 8*14(%ecx), %mm4 - psubsw %mm0, %mm7 /* V26 */ - pmulhw 8*14(%esi), %mm4 /* V14 */ - paddsw %mm0, %mm3 /* V29 ; free mm0 */ - movq %mm7, %mm1 /* duplicate V26 */ - psraw $1, %mm3 /* t91=t94 */ - pmulhw pic_offset(x539f539f539f539f),%mm7 /* V33 */ - psraw $1, %mm1 /* t96 */ - movq %mm5, %mm0 /* duplicate V2 */ - psraw $2, %mm4 /* t85=t87 */ - paddsw %mm4,%mm5 /* V27 */ - psubsw %mm4, %mm0 /* V28 ; free mm4 */ - movq %mm0, %mm2 /* duplicate V28 */ - psraw $1, %mm5 /* t90=t93 */ - pmulhw pic_offset(x4546454645464546),%mm0 /* V35 */ - psraw $1, %mm2 /* t97 */ - movq %mm5, %mm4 /* duplicate t90=t93 */ - psubsw %mm2, %mm1 /* V32 ; free mm2 */ - pmulhw pic_offset(x61f861f861f861f8),%mm1 /* V36 */ - psllw $1, %mm7 /* t107 */ - paddsw %mm3, %mm5 /* V31 */ - psubsw %mm3, %mm4 /* V30 ; free mm3 */ - pmulhw pic_offset(x5a825a825a825a82),%mm4 /* V34 */ - nop - psubsw %mm1, %mm0 /* V38 */ - psubsw %mm7, %mm1 /* V37 ; free mm7 */ - psllw $1, %mm1 /* t114 */ -/* move from the next block */ - movq %mm6, %mm3 /* duplicate V25 */ -/* move from the next block */ - movq 8*4(%esi), %mm7 /* V22 */ - psllw $1, %mm0 /* t110 */ - psubsw %mm5, %mm0 /* V39 (mm5 needed for next block) */ - psllw $2, %mm4 /* t112 */ -/* moved from the next block */ - movq 8*12(%esi), %mm2 /* V23 */ - psubsw %mm0, %mm4 /* V40 */ - paddsw %mm4, %mm1 /* V41; free mm0 */ -/* moved from the next block */ - psllw $1, %mm2 /* t117=t125 */ -/* column 0: output butterfly */ -/* moved above: - * movq %mm6, %mm3 duplicate V25 - * movq 8*4(%esi), %mm7 V22 - * movq 8*12(%esi), %mm2 V23 - * psllw $1, %mm2 t117=t125 - */ - psubsw %mm1, %mm6 /* tm6 */ - paddsw %mm1, %mm3 /* tm8; free mm1 */ - movq %mm7, 
%mm1 /* duplicate V22 */ - paddsw %mm5, %mm7 /* tm0 */ - movq %mm3, 8*8(%esi) /* tm8; free mm3 */ - psubsw %mm5, %mm1 /* tm14; free mm5 */ - movq %mm6, 8*6(%esi) /* tm6; free mm6 */ - movq %mm2, %mm3 /* duplicate t117=t125 */ - movq (%esi), %mm6 /* V24 */ - paddsw %mm0, %mm2 /* tm2 */ - movq %mm7, (%esi) /* tm0; free mm7 */ - psubsw %mm0, %mm3 /* tm12; free mm0 */ - movq %mm1, 8*14(%esi) /* tm14; free mm1 */ - psllw $1, %mm6 /* t119=t123 */ - movq %mm2, 8*2(%esi) /* tm2; free mm2 */ - movq %mm6, %mm0 /* duplicate t119=t123 */ - movq %mm3, 8*12(%esi) /* tm12; free mm3 */ - paddsw %mm4, %mm6 /* tm4 */ -/* moved from next block */ - movq 8*5(%ecx), %mm1 - psubsw %mm4, %mm0 /* tm10; free mm4 */ -/* moved from next block */ - pmulhw 8*5(%esi), %mm1 /* V5 */ - movq %mm6, 8*4(%esi) /* tm4; free mm6 */ - movq %mm0, 8*10(%esi) /* tm10; free mm0 */ -/* column 1: even part - * use V5, V13, V1, V9 to produce V56..V59 - */ -/* moved to prev block: - * movq 8*5(%ecx), %mm1 - * pmulhw 8*5(%esi), %mm1 V5 - */ - movq 8*13(%ecx), %mm7 - psllw $1, %mm1 /* t128=t130 */ - pmulhw 8*13(%esi), %mm7 /* V13 */ - movq %mm1, %mm2 /* duplicate t128=t130 */ - movq 8(%ecx), %mm3 - pmulhw 8(%esi), %mm3 /* V1 */ - movq 8*9(%ecx), %mm5 - psubsw %mm7, %mm1 /* V50 */ - pmulhw 8*9(%esi), %mm5 /* V9 */ - paddsw %mm7, %mm2 /* V51 */ - pmulhw pic_offset(x5a825a825a825a82), %mm1 /* 23170 ->V52 */ - movq %mm2, %mm6 /* duplicate V51 */ - psraw $1, %mm2 /* t138=t144 */ - movq %mm3, %mm4 /* duplicate V1 */ - psraw $2, %mm6 /* t136 */ - paddsw %mm5, %mm3 /* V53 */ - psubsw %mm5, %mm4 /* V54 ;mm5 free */ - movq %mm3, %mm7 /* duplicate V53 */ -/* moved from next block */ - movq 8*11(%ecx), %mm0 - psraw $1, %mm4 /* t140=t142 */ - psubsw %mm6, %mm1 /* V55 ; mm6 free */ - paddsw %mm2, %mm3 /* V56 */ - movq %mm4, %mm5 /* duplicate t140=t142 */ - paddsw %mm1, %mm4 /* V57 */ - movq %mm3, 8*5(%esi) /* V56 */ - psubsw %mm1, %mm5 /* V58; mm1 free */ - movq %mm4, 8*13(%esi) /* V57 */ - psubsw %mm2, %mm7 /* V59; mm2 free */ - movq %mm5, 8*9(%esi) /* V58 */ -/* keep mm7 alive all along the next block - * movq %mm7, 8(%esi) V59 - * moved above - * movq 8*11(%ecx), %mm0 - */ - pmulhw 8*11(%esi), %mm0 /* V11 */ - movq 8*7(%ecx), %mm6 - pmulhw 8*7(%esi), %mm6 /* V7 */ - movq 8*15(%ecx), %mm4 - movq %mm0, %mm3 /* duplicate V11 */ - pmulhw 8*15(%esi), %mm4 /* V15 */ - movq 8*3(%ecx), %mm5 - psllw $1, %mm6 /* t146=t152 */ - pmulhw 8*3(%esi), %mm5 /* V3 */ - paddsw %mm6, %mm0 /* V63 */ -/* note that V15 computation has a correction step: - * this is a 'magic' constant that rebiases the results to be closer to the - * expected result. 
this magic constant can be refined to reduce the error - * even more by doing the correction step in a later stage when the number - * is actually multiplied by 16 - */ - paddw pic_offset(x0005000200010001), %mm4 - psubsw %mm6, %mm3 /* V60 ; free mm6 */ - psraw $1, %mm0 /* t154=t156 */ - movq %mm3, %mm1 /* duplicate V60 */ - pmulhw pic_offset(x539f539f539f539f), %mm1 /* V67 */ - movq %mm5, %mm6 /* duplicate V3 */ - psraw $2, %mm4 /* t148=t150 */ - paddsw %mm4, %mm5 /* V61 */ - psubsw %mm4, %mm6 /* V62 ; free mm4 */ - movq %mm5, %mm4 /* duplicate V61 */ - psllw $1, %mm1 /* t169 */ - paddsw %mm0, %mm5 /* V65 -> result */ - psubsw %mm0, %mm4 /* V64 ; free mm0 */ - pmulhw pic_offset(x5a825a825a825a82), %mm4 /* V68 */ - psraw $1, %mm3 /* t158 */ - psubsw %mm6, %mm3 /* V66 */ - movq %mm5, %mm2 /* duplicate V65 */ - pmulhw pic_offset(x61f861f861f861f8), %mm3 /* V70 */ - psllw $1, %mm6 /* t165 */ - pmulhw pic_offset(x4546454645464546), %mm6 /* V69 */ - psraw $1, %mm2 /* t172 */ -/* moved from next block */ - movq 8*5(%esi), %mm0 /* V56 */ - psllw $1, %mm4 /* t174 */ -/* moved from next block */ - psraw $1, %mm0 /* t177=t188 */ - nop - psubsw %mm3, %mm6 /* V72 */ - psubsw %mm1, %mm3 /* V71 ; free mm1 */ - psubsw %mm2, %mm6 /* V73 ; free mm2 */ -/* moved from next block */ - psraw $1, %mm5 /* t178=t189 */ - psubsw %mm6, %mm4 /* V74 */ -/* moved from next block */ - movq %mm0, %mm1 /* duplicate t177=t188 */ - paddsw %mm4, %mm3 /* V75 */ -/* moved from next block */ - paddsw %mm5, %mm0 /* tm1 */ -/* location - * 5 - V56 - * 13 - V57 - * 9 - V58 - * X - V59, mm7 - * X - V65, mm5 - * X - V73, mm6 - * X - V74, mm4 - * X - V75, mm3 - * free mm0, mm1 & mm2 - * moved above - * movq 8*5(%esi), %mm0 V56 - * psllw $1, %mm0 t177=t188 ! new !! - * psllw $1, %mm5 t178=t189 ! new !! - * movq %mm0, %mm1 duplicate t177=t188 - * paddsw %mm5, %mm0 tm1 - */ - movq 8*13(%esi), %mm2 /* V57 */ - psubsw %mm5, %mm1 /* tm15; free mm5 */ - movq %mm0, 8(%esi) /* tm1; free mm0 */ - psraw $1, %mm7 /* t182=t184 ! new !! 
*/ -/* save the store as used directly in the transpose - * movq %mm1, 120(%esi) tm15; free mm1 - */ - movq %mm7, %mm5 /* duplicate t182=t184 */ - psubsw %mm3, %mm7 /* tm7 */ - paddsw %mm3, %mm5 /* tm9; free mm3 */ - movq 8*9(%esi), %mm0 /* V58 */ - movq %mm2, %mm3 /* duplicate V57 */ - movq %mm7, 8*7(%esi) /* tm7; free mm7 */ - psubsw %mm6, %mm3 /* tm13 */ - paddsw %mm6, %mm2 /* tm3 ; free mm6 */ -/* moved up from the transpose */ - movq %mm3, %mm7 -/* moved up from the transpose */ - punpcklwd %mm1, %mm3 - movq %mm0, %mm6 /* duplicate V58 */ - movq %mm2, 8*3(%esi) /* tm3; free mm2 */ - paddsw %mm4, %mm0 /* tm5 */ - psubsw %mm4, %mm6 /* tm11; free mm4 */ -/* moved up from the transpose */ - punpckhwd %mm1, %mm7 - movq %mm0, 8*5(%esi) /* tm5; free mm0 */ -/* moved up from the transpose */ - movq %mm5, %mm2 -/* transpose - M4 part - * --------- --------- - * | M1 | M2 | | M1'| M3'| - * --------- --> --------- - * | M3 | M4 | | M2'| M4'| - * --------- --------- - * Two alternatives: use full mmword approach so the following code can be - * scheduled before the transpose is done without stores, or use the faster - * half mmword stores (when possible) - */ - movd %mm3, 8*9+4(%esi) /* MS part of tmt9 */ - punpcklwd %mm6, %mm5 - movd %mm7, 8*13+4(%esi) /* MS part of tmt13 */ - punpckhwd %mm6, %mm2 - movd %mm5, 8*9(%esi) /* LS part of tmt9 */ - punpckhdq %mm3, %mm5 /* free mm3 */ - movd %mm2, 8*13(%esi) /* LS part of tmt13 */ - punpckhdq %mm7, %mm2 /* free mm7 */ -/* moved up from the M3 transpose */ - movq 8*8(%esi), %mm0 -/* moved up from the M3 transpose */ - movq 8*10(%esi), %mm1 -/* moved up from the M3 transpose */ - movq %mm0, %mm3 -/* shuffle the rest of the data, and write it with 2 mmword writes */ - movq %mm5, 8*11(%esi) /* tmt11 */ -/* moved up from the M3 transpose */ - punpcklwd %mm1, %mm0 - movq %mm2, 8*15(%esi) /* tmt15 */ -/* moved up from the M3 transpose */ - punpckhwd %mm1, %mm3 -/* transpose - M3 part - * moved up to previous code section - * movq 8*8(%esi), %mm0 - * movq 8*10(%esi), %mm1 - * movq %mm0, %mm3 - * punpcklwd %mm1, %mm0 - * punpckhwd %mm1, %mm3 - */ - movq 8*12(%esi), %mm6 - movq 8*14(%esi), %mm4 - movq %mm6, %mm2 -/* shuffle the data and write the lower parts of the transposed in 4 dwords */ - punpcklwd %mm4, %mm6 - movq %mm0, %mm1 - punpckhdq %mm6, %mm1 - movq %mm3, %mm7 - punpckhwd %mm4, %mm2 /* free mm4 */ - punpckldq %mm6, %mm0 /* free mm6 */ -/* moved from next block */ - movq 8*13(%esi), %mm4 /* tmt13 */ - punpckldq %mm2, %mm3 - punpckhdq %mm2, %mm7 /* free mm2 */ -/* moved from next block */ - movq %mm3, %mm5 /* duplicate tmt5 */ -/* column 1: even part (after transpose) -* moved above -* movq %mm3, %mm5 duplicate tmt5 -* movq 8*13(%esi), %mm4 tmt13 -*/ - psubsw %mm4, %mm3 /* V134 */ - pmulhw pic_offset(x5a825a825a825a82), %mm3 /* 23170 ->V136 */ - movq 8*9(%esi), %mm6 /* tmt9 */ - paddsw %mm4, %mm5 /* V135 ; mm4 free */ - movq %mm0, %mm4 /* duplicate tmt1 */ - paddsw %mm6, %mm0 /* V137 */ - psubsw %mm6, %mm4 /* V138 ; mm6 free */ - psllw $2, %mm3 /* t290 */ - psubsw %mm5, %mm3 /* V139 */ - movq %mm0, %mm6 /* duplicate V137 */ - paddsw %mm5, %mm0 /* V140 */ - movq %mm4, %mm2 /* duplicate V138 */ - paddsw %mm3, %mm2 /* V141 */ - psubsw %mm3, %mm4 /* V142 ; mm3 free */ - movq %mm0, 8*9(%esi) /* V140 */ - psubsw %mm5, %mm6 /* V143 ; mm5 free */ -/* moved from next block */ - movq 8*11(%esi), %mm0 /* tmt11 */ - movq %mm2, 8*13(%esi) /* V141 */ -/* moved from next block */ - movq %mm0, %mm2 /* duplicate tmt11 */ -/* column 1: odd part (after transpose) */ -/* 
moved up to the prev block - * movq 8*11(%esi), %mm0 tmt11 - * movq %mm0, %mm2 duplicate tmt11 - */ - movq 8*15(%esi), %mm5 /* tmt15 */ - psubsw %mm7, %mm0 /* V144 */ - movq %mm0, %mm3 /* duplicate V144 */ - paddsw %mm7, %mm2 /* V147 ; free mm7 */ - pmulhw pic_offset(x539f539f539f539f), %mm0 /* 21407-> V151 */ - movq %mm1, %mm7 /* duplicate tmt3 */ - paddsw %mm5, %mm7 /* V145 */ - psubsw %mm5, %mm1 /* V146 ; free mm5 */ - psubsw %mm1, %mm3 /* V150 */ - movq %mm7, %mm5 /* duplicate V145 */ - pmulhw pic_offset(x4546454645464546), %mm1 /* 17734-> V153 */ - psubsw %mm2, %mm5 /* V148 */ - pmulhw pic_offset(x61f861f861f861f8), %mm3 /* 25080-> V154 */ - psllw $2, %mm0 /* t311 */ - pmulhw pic_offset(x5a825a825a825a82), %mm5 /* 23170-> V152 */ - paddsw %mm2, %mm7 /* V149 ; free mm2 */ - psllw $1, %mm1 /* t313 */ - nop /* without the nop - freeze here for one clock */ - movq %mm3, %mm2 /* duplicate V154 */ - psubsw %mm0, %mm3 /* V155 ; free mm0 */ - psubsw %mm2, %mm1 /* V156 ; free mm2 */ -/* moved from the next block */ - movq %mm6, %mm2 /* duplicate V143 */ -/* moved from the next block */ - movq 8*13(%esi), %mm0 /* V141 */ - psllw $1, %mm1 /* t315 */ - psubsw %mm7, %mm1 /* V157 (keep V149) */ - psllw $2, %mm5 /* t317 */ - psubsw %mm1, %mm5 /* V158 */ - psllw $1, %mm3 /* t319 */ - paddsw %mm5, %mm3 /* V159 */ -/* column 1: output butterfly (after transform) - * moved to the prev block - * movq %mm6, %mm2 duplicate V143 - * movq 8*13(%esi), %mm0 V141 - */ - psubsw %mm3, %mm2 /* V163 */ - paddsw %mm3, %mm6 /* V164 ; free mm3 */ - movq %mm4, %mm3 /* duplicate V142 */ - psubsw %mm5, %mm4 /* V165 ; free mm5 */ - movq %mm2, pic_offset(scratch7) /* out7 */ - psraw $4, %mm6 - psraw $4, %mm4 - paddsw %mm5, %mm3 /* V162 */ - movq 8*9(%esi), %mm2 /* V140 */ - movq %mm0, %mm5 /* duplicate V141 */ -/* in order not to perculate this line up, - * we read 72(%esi) very near to this location - */ - movq %mm6, 8*9(%esi) /* out9 */ - paddsw %mm1, %mm0 /* V161 */ - movq %mm3, pic_offset(scratch5) /* out5 */ - psubsw %mm1, %mm5 /* V166 ; free mm1 */ - movq %mm4, 8*11(%esi) /* out11 */ - psraw $4, %mm5 - movq %mm0, pic_offset(scratch3) /* out3 */ - movq %mm2, %mm4 /* duplicate V140 */ - movq %mm5, 8*13(%esi) /* out13 */ - paddsw %mm7, %mm2 /* V160 */ -/* moved from the next block */ - movq 8(%esi), %mm0 - psubsw %mm7, %mm4 /* V167 ; free mm7 */ -/* moved from the next block */ - movq 8*3(%esi), %mm7 - psraw $4, %mm4 - movq %mm2, pic_offset(scratch1) /* out1 */ -/* moved from the next block */ - movq %mm0, %mm1 - movq %mm4, 8*15(%esi) /* out15 */ -/* moved from the next block */ - punpcklwd %mm7, %mm0 -/* transpose - M2 parts - * moved up to the prev block - * movq 8(%esi), %mm0 - * movq 8*3(%esi), %mm7 - * movq %mm0, %mm1 - * punpcklwd %mm7, %mm0 - */ - movq 8*5(%esi), %mm5 - punpckhwd %mm7, %mm1 - movq 8*7(%esi), %mm4 - movq %mm5, %mm3 -/* shuffle the data and write the lower parts of the trasposed in 4 dwords */ - movd %mm0, 8*8(%esi) /* LS part of tmt8 */ - punpcklwd %mm4, %mm5 - movd %mm1, 8*12(%esi) /* LS part of tmt12 */ - punpckhwd %mm4, %mm3 - movd %mm5, 8*8+4(%esi) /* MS part of tmt8 */ - punpckhdq %mm5, %mm0 /* tmt10 */ - movd %mm3, 8*12+4(%esi) /* MS part of tmt12 */ - punpckhdq %mm3, %mm1 /* tmt14 */ -/* transpose - M1 parts */ - movq (%esi), %mm7 - movq 8*2(%esi), %mm2 - movq %mm7, %mm6 - movq 8*4(%esi), %mm5 - punpcklwd %mm2, %mm7 - movq 8*6(%esi), %mm4 - punpckhwd %mm2, %mm6 /* free mm2 */ - movq %mm5, %mm3 - punpcklwd %mm4, %mm5 - punpckhwd %mm4, %mm3 /* free mm4 */ - movq %mm7, %mm2 - movq %mm6, %mm4 - 
punpckldq %mm5, %mm7 /* tmt0 */ - punpckhdq %mm5, %mm2 /* tmt2 ; free mm5 */ -/* shuffle the rest of the data, and write it with 2 mmword writes */ - punpckldq %mm3, %mm6 /* tmt4 */ -/* moved from next block */ - movq %mm2, %mm5 /* duplicate tmt2 */ - punpckhdq %mm3, %mm4 /* tmt6 ; free mm3 */ -/* moved from next block */ - movq %mm0, %mm3 /* duplicate tmt10 */ -/* column 0: odd part (after transpose) - *moved up to prev block - * movq %mm0, %mm3 duplicate tmt10 - * movq %mm2, %mm5 duplicate tmt2 - */ - psubsw %mm4, %mm0 /* V110 */ - paddsw %mm4, %mm3 /* V113 ; free mm4 */ - movq %mm0, %mm4 /* duplicate V110 */ - paddsw %mm1, %mm2 /* V111 */ - pmulhw pic_offset(x539f539f539f539f), %mm0 /* 21407-> V117 */ - psubsw %mm1, %mm5 /* V112 ; free mm1 */ - psubsw %mm5, %mm4 /* V116 */ - movq %mm2, %mm1 /* duplicate V111 */ - pmulhw pic_offset(x4546454645464546), %mm5 /* 17734-> V119 */ - psubsw %mm3, %mm2 /* V114 */ - pmulhw pic_offset(x61f861f861f861f8), %mm4 /* 25080-> V120 */ - paddsw %mm3, %mm1 /* V115 ; free mm3 */ - pmulhw pic_offset(x5a825a825a825a82), %mm2 /* 23170-> V118 */ - psllw $2, %mm0 /* t266 */ - movq %mm1, (%esi) /* save V115 */ - psllw $1, %mm5 /* t268 */ - psubsw %mm4, %mm5 /* V122 */ - psubsw %mm0, %mm4 /* V121 ; free mm0 */ - psllw $1, %mm5 /* t270 */ - psubsw %mm1, %mm5 /* V123 ; free mm1 */ - psllw $2, %mm2 /* t272 */ - psubsw %mm5, %mm2 /* V124 (keep V123) */ - psllw $1, %mm4 /* t274 */ - movq %mm5, 8*2(%esi) /* save V123 ; free mm5 */ - paddsw %mm2, %mm4 /* V125 (keep V124) */ -/* column 0: even part (after transpose) */ - movq 8*12(%esi), %mm0 /* tmt12 */ - movq %mm6, %mm3 /* duplicate tmt4 */ - psubsw %mm0, %mm6 /* V100 */ - paddsw %mm0, %mm3 /* V101 ; free mm0 */ - pmulhw pic_offset(x5a825a825a825a82), %mm6 /* 23170 ->V102 */ - movq %mm7, %mm5 /* duplicate tmt0 */ - movq 8*8(%esi), %mm1 /* tmt8 */ - paddsw %mm1, %mm7 /* V103 */ - psubsw %mm1, %mm5 /* V104 ; free mm1 */ - movq %mm7, %mm0 /* duplicate V103 */ - psllw $2, %mm6 /* t245 */ - paddsw %mm3, %mm7 /* V106 */ - movq %mm5, %mm1 /* duplicate V104 */ - psubsw %mm3, %mm6 /* V105 */ - psubsw %mm3, %mm0 /* V109; free mm3 */ - paddsw %mm6, %mm5 /* V107 */ - psubsw %mm6, %mm1 /* V108 ; free mm6 */ -/* column 0: output butterfly (after transform) */ - movq %mm1, %mm3 /* duplicate V108 */ - paddsw %mm2, %mm1 /* out4 */ - psraw $4, %mm1 - psubsw %mm2, %mm3 /* out10 ; free mm2 */ - psraw $4, %mm3 - movq %mm0, %mm6 /* duplicate V109 */ - movq %mm1, 8*4(%esi) /* out4 ; free mm1 */ - psubsw %mm4, %mm0 /* out6 */ - movq %mm3, 8*10(%esi) /* out10 ; free mm3 */ - psraw $4, %mm0 - paddsw %mm4, %mm6 /* out8 ; free mm4 */ - movq %mm7, %mm1 /* duplicate V106 */ - movq %mm0, 8*6(%esi) /* out6 ; free mm0 */ - psraw $4, %mm6 - movq (%esi), %mm4 /* V115 */ - movq %mm6, 8*8(%esi) /* out8 ; free mm6 */ - movq %mm5, %mm2 /* duplicate V107 */ - movq 8*2(%esi), %mm3 /* V123 */ - paddsw %mm4, %mm7 /* out0 */ -/* moved up from next block */ - movq pic_offset(scratch3), %mm0 - psraw $4, %mm7 -/* moved up from next block */ - movq pic_offset(scratch5), %mm6 - psubsw %mm4, %mm1 /* out14 ; free mm4 */ - paddsw %mm3, %mm5 /* out2 */ - psraw $4, %mm1 - movq %mm7, (%esi) /* out0 ; free mm7 */ - psraw $4, %mm5 - movq %mm1, 8*14(%esi) /* out14 ; free mm1 */ - psubsw %mm3, %mm2 /* out12 ; free mm3 */ - movq %mm5, 8*2(%esi) /* out2 ; free mm5 */ - psraw $4, %mm2 -/* moved up to the prev block */ - movq pic_offset(scratch7), %mm4 -/* moved up to the prev block */ - psraw $4, %mm0 - movq %mm2, 8*12(%esi) /* out12 ; free mm2 */ -/* moved up to the prev block */ 
- psraw $4, %mm6
-/* move back the data to its correct place
-* moved up to the prev block
- * movq pic_offset(scratch3), %mm0
- * movq pic_offset(scratch5), %mm6
- * movq pic_offset(scratch7), %mm4
- * psraw $4, %mm0
- * psraw $4, %mm6
-*/
- movq pic_offset(scratch1), %mm1
- psraw $4, %mm4
- movq %mm0, 8*3(%esi) /* out3 */
- psraw $4, %mm1
- movq %mm6, 8*5(%esi) /* out5 */
- movq %mm4, 8*7(%esi) /* out7 */
- movq %mm1, 8(%esi) /* out1 */
- emms
- popl %edi
- popl %esi
- popl %edx
- popl %ecx
- popl %ebx
- movl %ebp,%esp
- popl %ebp
- ret
-.Lfe1:
- .size gst_idct_mmx_idct,.Lfe1-gst_idct_mmx_idct
diff --git a/gst-libs/gst/idct/sseidct.S b/gst-libs/gst/idct/sseidct.S
deleted file mode 100644
index 99cda4f2e6..0000000000
--- a/gst-libs/gst/idct/sseidct.S
+++ /dev/null
@@ -1,740 +0,0 @@
-.data
- .align 4
- .type rounder0,@object
-rounder0:
- .long 65536
- .long 65536
- .size rounder0,8
- .align 4
- .type rounder4,@object
-rounder4:
- .long 1024
- .long 1024
- .size rounder4,8
- .align 4
- .type rounder1,@object
-rounder1:
- .long 3597
- .long 3597
- .size rounder1,8
- .align 4
- .type rounder7,@object
-rounder7:
- .long 512
- .long 512
- .size rounder7,8
- .align 4
- .type rounder2,@object
-rounder2:
- .long 2260
- .long 2260
- .size rounder2,8
- .align 4
- .type rounder6,@object
-rounder6:
- .long 512
- .long 512
- .size rounder6,8
- .align 4
- .type rounder3,@object
-rounder3:
- .long 1203
- .long 1203
- .size rounder3,8
- .align 4
- .type rounder5,@object
-rounder5:
- .long 120
- .long 120
- .size rounder5,8
- .align 2
- .type _T1.46,@object
-_T1.46:
- .value 13036
- .value 13036
- .value 13036
- .value 13036
- .align 2
- .type _T2.47,@object
-_T2.47:
- .value 27146
- .value 27146
- .value 27146
- .value 27146
- .align 2
- .type _T3.48,@object
-_T3.48:
- .value -21746
- .value -21746
- .value -21746
- .value -21746
- .align 2
- .type _C4.49,@object
-_C4.49:
- .value 23170
- .value 23170
- .value 23170
- .value 23170
- .local scratch0.50
- .comm scratch0.50,8,4
- .local scratch1.51
- .comm scratch1.51,8,4
- .align 2
- .type table04.54,@object
-table04.54:
- .value 16384
- .value 21407
- .value -16384
- .value -21407
- .value 16384
- .value 8867
- .value 16384
- .value 8867
- .value 22725
- .value 19266
- .value -22725
- .value -12873
- .value 12873
- .value 4520
- .value 19266
- .value -4520
- .value 16384
- .value -8867
- .value 16384
- .value -8867
- .value -16384
- .value 21407
- .value 16384
- .value -21407
- .value 12873
- .value -22725
- .value 19266
- .value -22725
- .value 4520
- .value 19266
- .value 4520
- .value -12873
- .align 2
- .type table17.55,@object
-table17.55:
- .value 22725
- .value 29692
- .value -22725
- .value -29692
- .value 22725
- .value 12299
- .value 22725
- .value 12299
- .value 31521
- .value 26722
- .value -31521
- .value -17855
- .value 17855
- .value 6270
- .value 26722
- .value -6270
- .value 22725
- .value -12299
- .value 22725
- .value -12299
- .value -22725
- .value 29692
- .value 22725
- .value -29692
- .value 17855
- .value -31521
- .value 26722
- .value -31521
- .value 6270
- .value 26722
- .value 6270
- .value -17855
- .align 2
- .type table26.56,@object
-table26.56:
- .value 21407
- .value 27969
- .value -21407
- .value -27969
- .value 21407
- .value 11585
- .value 21407
- .value 11585
- .value 29692
- .value 25172
- .value -29692
- .value -16819
- .value 16819
- .value 5906
- .value 25172
- .value -5906
- .value 21407
- .value -11585
- .value 21407
- .value -11585
- .value -21407
- .value 27969
- .value 21407
- .value -27969
- .value 16819
- .value -29692
- .value 25172
- .value -29692
- .value 5906
- .value 25172
- .value 5906
- .value -16819
- .align 2
- .type table35.57,@object
-table35.57:
- .value 19266
- .value 25172
- .value -19266
- .value -25172
- .value 19266
- .value 10426
- .value 19266
- .value 10426
- .value 26722
- .value 22654
- .value -26722
- .value -15137
- .value 15137
- .value 5315
- .value 22654
- .value -5315
- .value 19266
- .value -10426
- .value 19266
- .value -10426
- .value -19266
- .value 25172
- .value 19266
- .value -25172
- .value 15137
- .value -26722
- .value 22654
- .value -26722
- .value 5315
- .value 22654
- .value 5315
- .value -15137
-.text
- .align 4
-.globl gst_idct_sse_idct
- .type gst_idct_sse_idct,@function
-gst_idct_sse_idct:
- subl $8,%esp
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %ebx
- call .L51
-.L51:
- popl %ebx
- addl $_GLOBAL_OFFSET_TABLE_+[.-.L51],%ebx
- movl 28(%esp),%edx
- leal table04.54@GOTOFF(%ebx),%eax
- movq (%edx), %mm2
- movq 8(%edx), %mm5
- movq %mm2, %mm0
- movq (%eax), %mm3
- movq %mm5, %mm6
- movq 8(%eax), %mm4
- pmaddwd %mm0, %mm3
- pshufw $78, %mm2, %mm2
- leal rounder0@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- movq 64(%edx), %mm2
- psrad $11, %mm0
- movq 72(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, (%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 8(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder4@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- leal table17.55@GOTOFF(%ebx),%eax
- movq 16(%edx), %mm2
- psrad $11, %mm0
- movq 24(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, 64(%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 72(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder1@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- movq 112(%edx), %mm2
- psrad $11, %mm0
- movq 120(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, 16(%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 24(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder7@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- leal table26.56@GOTOFF(%ebx),%eax
- movq 32(%edx), %mm2
- psrad $11, %mm0
- movq 40(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, 112(%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 120(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder2@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- movq 96(%edx), %mm2
- psrad $11, %mm0
- movq 104(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, 32(%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 40(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder6@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- leal table35.57@GOTOFF(%ebx),%eax
- movq 48(%edx), %mm2
- psrad $11, %mm0
- movq 56(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, 96(%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 104(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder3@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- movq 80(%edx), %mm2
- psrad $11, %mm0
- movq 88(%edx), %mm5
- psrad $11, %mm4
- packssdw %mm0, %mm1
- movq %mm5, %mm6
- packssdw %mm3, %mm4
- movq %mm2, %mm0
- movq %mm1, 48(%edx)
- pshufw $177, %mm4, %mm4
- movq (%eax), %mm3
- movq %mm4, 56(%edx)
- pmaddwd %mm0, %mm3
- movq 8(%eax), %mm4
- pshufw $78, %mm2, %mm2
- leal rounder5@GOTOFF(%ebx),%ecx
- movq 16(%eax), %mm1
- pmaddwd %mm2, %mm4
- pmaddwd 32(%eax), %mm0
- pshufw $78, %mm6, %mm6
- movq 24(%eax), %mm7
- pmaddwd %mm5, %mm1
- paddd (%ecx), %mm3
- pmaddwd %mm6, %mm7
- pmaddwd 40(%eax), %mm2
- paddd %mm4, %mm3
- pmaddwd 48(%eax), %mm5
- movq %mm3, %mm4
- pmaddwd 56(%eax), %mm6
- paddd %mm7, %mm1
- paddd (%ecx), %mm0
- psubd %mm1, %mm3
- psrad $11, %mm3
- paddd %mm4, %mm1
- paddd %mm2, %mm0
- psrad $11, %mm1
- paddd %mm6, %mm5
- movq %mm0, %mm4
- paddd %mm5, %mm0
- psubd %mm5, %mm4
- psrad $11, %mm0
- psrad $11, %mm4
- packssdw %mm0, %mm1
- packssdw %mm3, %mm4
- movq %mm1, 80(%edx)
- pshufw $177, %mm4, %mm4
- movq %mm4, 88(%edx)
- leal _T1.46@GOTOFF(%ebx),%edi
- movq (%edi), %mm0
- movq 16(%edx), %mm1
- movq %mm0, %mm2
- movq 112(%edx), %mm4
- pmulhw %mm1, %mm0
- leal _T3.48@GOTOFF(%ebx),%esi
- movl %esi,16(%esp)
- movq (%esi), %mm5
- pmulhw %mm4, %mm2
- movq 80(%edx), %mm6
- movq %mm5, %mm7
- movq 48(%edx), %mm3
- psubsw %mm4, %mm0
- leal _T2.47@GOTOFF(%ebx),%ecx
- movq (%ecx), %mm4
- pmulhw %mm3, %mm5
- paddsw %mm2, %mm1
- pmulhw %mm6, %mm7
- movq %mm4, %mm2
- paddsw %mm3, %mm5
- pmulhw 32(%edx), %mm4
- paddsw %mm6, %mm7
- psubsw %mm6, %mm5
- paddsw %mm3, %mm7
- movq 96(%edx), %mm3
- movq %mm0, %mm6
- pmulhw %mm3, %mm2
- psubsw %mm5, %mm0
- psubsw %mm3, %mm4
- paddsw %mm6, %mm5
- leal scratch0.50@GOTOFF(%ebx),%esi
- movl %esi,20(%esp)
- movq %mm0, scratch0.50@GOTOFF(%ebx)
- movq %mm1, %mm6
- paddsw 32(%edx), %mm2
- paddsw %mm7, %mm6
- psubsw %mm7, %mm1
- movq %mm1, %mm7
- movq (%edx), %mm3
- paddsw %mm5, %mm1
- leal _C4.49@GOTOFF(%ebx),%eax
- movq (%eax), %mm0
- psubsw %mm5, %mm7
- leal scratch1.51@GOTOFF(%ebx),%ebp
- movq %mm6, scratch1.51@GOTOFF(%ebx)
- pmulhw %mm0, %mm1
- movq %mm4, %mm6
- pmulhw %mm0, %mm7
- movq 64(%edx), %mm5
- movq %mm3, %mm0
- psubsw %mm5, %mm3
- paddsw %mm5, %mm0
- paddsw %mm3, %mm4
- movq %mm0, %mm5
- psubsw %mm6, %mm3
- paddsw %mm2, %mm5
- paddsw %mm1, %mm1
- psubsw %mm2, %mm0
- paddsw %mm7, %mm7
- movq %mm3, %mm2
- movq %mm4, %mm6
- paddsw %mm7, %mm3
- psraw $6, %mm3
- paddsw %mm1, %mm4
- psraw $6, %mm4
- psubsw %mm1, %mm6
- movq (%ebp), %mm1
- psubsw %mm7, %mm2
- psraw $6, %mm6
- movq %mm5, %mm7
- movq %mm4, 16(%edx)
- psraw $6, %mm2
- movq %mm3, 32(%edx)
- paddsw %mm1, %mm5
- movq (%esi), %mm4
- psubsw %mm1, %mm7
- psraw $6, %mm5
- movq %mm0, %mm3
- movq %mm2, 80(%edx)
- psubsw %mm4, %mm3
- psraw $6, %mm7
- paddsw %mm0, %mm4
- movq %mm5, (%edx)
- psraw $6, %mm3
- movq %mm6, 96(%edx)
- psraw $6, %mm4
- movq %mm7, 112(%edx)
- movq %mm3, 64(%edx)
- movq %mm4, 48(%edx)
- movq (%edi), %mm0
- movq 24(%edx), %mm1
- movq %mm0, %mm2
- movq 120(%edx), %mm4
- pmulhw %mm1, %mm0
- movl 16(%esp),%esi
- movq (%esi), %mm5
- pmulhw %mm4, %mm2
- movq 88(%edx), %mm6
- movq %mm5, %mm7
- movq 56(%edx), %mm3
- psubsw %mm4, %mm0
- movq (%ecx), %mm4
- pmulhw %mm3, %mm5
- paddsw %mm2, %mm1
- pmulhw %mm6, %mm7
- movq %mm4, %mm2
- paddsw %mm3, %mm5
- pmulhw 40(%edx), %mm4
- paddsw %mm6, %mm7
- psubsw %mm6, %mm5
- paddsw %mm3, %mm7
- movq 104(%edx), %mm3
- movq %mm0, %mm6
- pmulhw %mm3, %mm2
- psubsw %mm5, %mm0
- psubsw %mm3, %mm4
- paddsw %mm6, %mm5
- movq %mm0, scratch0.50@GOTOFF(%ebx)
- movq %mm1, %mm6
- paddsw 40(%edx), %mm2
- paddsw %mm7, %mm6
- psubsw %mm7, %mm1
- movq %mm1, %mm7
- movq 8(%edx), %mm3
- paddsw %mm5, %mm1
- movq (%eax), %mm0
- psubsw %mm5, %mm7
- movq %mm6, scratch1.51@GOTOFF(%ebx)
- pmulhw %mm0, %mm1
- movq %mm4, %mm6
- pmulhw %mm0, %mm7
- movq 72(%edx), %mm5
- movq %mm3, %mm0
- psubsw %mm5, %mm3
- paddsw %mm5, %mm0
- paddsw %mm3, %mm4
- movq %mm0, %mm5
- psubsw %mm6, %mm3
- paddsw %mm2, %mm5
- paddsw %mm1, %mm1
- psubsw %mm2, %mm0
- paddsw %mm7, %mm7
- movq %mm3, %mm2
- movq %mm4, %mm6
- paddsw %mm7, %mm3
- psraw $6, %mm3
- paddsw %mm1, %mm4
- psraw $6, %mm4
- psubsw %mm1, %mm6
- movq (%ebp), %mm1
- psubsw %mm7, %mm2
- psraw $6, %mm6
- movq %mm5, %mm7
- movq %mm4, 24(%edx)
- psraw $6, %mm2
- movq %mm3, 40(%edx)
- paddsw %mm1, %mm5
- movl 20(%esp),%esi
- movq (%esi), %mm4
- psubsw %mm1, %mm7
- psraw $6, %mm5
- movq %mm0, %mm3
- movq %mm2, 88(%edx)
- psubsw %mm4, %mm3
- psraw $6, %mm7
- paddsw %mm0, %mm4
- movq %mm5, 8(%edx)
- psraw $6, %mm3
- movq %mm6, 104(%edx)
- psraw $6, %mm4
- movq %mm7, 120(%edx)
- movq %mm3, 72(%edx)
- movq %mm4, 56(%edx)
- popl %ebx
- popl %esi
- popl %edi
- popl %ebp
- addl $8,%esp
- ret