more fixes

Original commit message from CVS:
more fixes
Thomas Vander Stichele 2001-12-23 20:21:19 +00:00
parent 2c7cc0f6e8
commit 50f9df6c66
13 changed files with 3494 additions and 58 deletions


@@ -96,9 +96,11 @@ HAVE_BROKEN=yes,disabled,
[
AC_MSG_WARN(building broken plugins)
USE_VGA="yes"
USE_XMMS="yes"
dnl AC_MSG_NOTICE(actually there are no broken plugins at the moment)
],[
USE_VGA="no"
USE_XMMS="no"
AC_MSG_NOTICE(not building broken plugins)
])
@@ -294,8 +296,6 @@ GST_PLUGINS_ALL="\
smooth smoothwave spectrum speed stereo stereomono\
synaesthesia udp videoscale volenv volume vumeter wavparse y4m"
GST_PLUGINS_ALL=""
AC_SUBST(GST_PLUGINS_ALL)
GST_PLUGINS_SELECTED=""
@@ -999,63 +999,51 @@ dnl components/bonobo-gstmediaplay/Makefile
dnl someone should fix this test/misc/Makefile
dnl wtay fix this: testsuite/threads/Makefile
dnl testsuite/refcounting/Makefile
dnl libs/Makefile
dnl libs/riff/Makefile
dnl libs/getbits/Makefile
dnl libs/putbits/Makefile
dnl libs/idct/Makefile
dnl libs/audio/Makefile
dnl libs/bytestream/Makefile
dnl libs/control/Makefile
dnl libs/resample/Makefile
dnl stamp.h
dnl echo "$infomessages", infomessages="$infomessages"
PLUGIN_GST_MAKEFILES="\
gst/ac3parse/Makefile \
gst/adder/Makefile \
gst/audioscale/Makefile \
gst/auparse/Makefile \
gst/avi/Makefile \
gst/chart/Makefile \
gst/cutter/Makefile \
gst/deinterlace/Makefile \
gst/flx/Makefile \
gst/intfloat/Makefile \
gst/law/Makefile \
gst/level/Makefile \
gst/median/Makefile \
gst/mpeg1enc/Makefile \
gst/mpeg1sys/Makefile \
gst/mpeg2enc/Makefile \
gst/mpeg2sub/Makefile \
gst/mpegaudio/Makefile \
gst/mpegaudioparse/Makefile \
gst/mpegstream/Makefile \
gst/mpegtypes/Makefile \
gst/passthrough/Makefile \
gst/playondemand/Makefile \
gst/rtjpeg/Makefile \
gst/silence/Makefile \
gst/sine/Makefile \
gst/smooth/Makefile \
gst/smoothwave/Makefile \
gst/spectrum/Makefile \
gst/speed/Makefile \
gst/stereo/Makefile \
gst/stereomono/Makefile \
gst/synaesthesia/Makefile \
gst/udp/Makefile \
gst/videoscale/Makefile \
gst/volenv/Makefile \
gst/volume/Makefile \
gst/vumeter/Makefile \
gst/wavparse/Makefile \
gst/y4m/Makefile"
dnl $PLUGIN_GST_MAKEFILES
AC_OUTPUT(
Makefile
gst/Makefile
gst/ac3parse/Makefile
gst/adder/Makefile
gst/audioscale/Makefile
gst/auparse/Makefile
gst/avi/Makefile
gst/chart/Makefile
gst/cutter/Makefile
gst/deinterlace/Makefile
gst/flx/Makefile
gst/intfloat/Makefile
gst/law/Makefile
gst/level/Makefile
gst/median/Makefile
gst/mpeg1enc/Makefile
gst/mpeg1sys/Makefile
gst/mpeg2enc/Makefile
gst/mpeg2sub/Makefile
gst/mpegaudio/Makefile
gst/mpegaudioparse/Makefile
gst/mpegstream/Makefile
gst/mpegtypes/Makefile
gst/passthrough/Makefile
gst/playondemand/Makefile
gst/rtjpeg/Makefile
gst/silence/Makefile
gst/sine/Makefile
gst/smooth/Makefile
gst/smoothwave/Makefile
gst/spectrum/Makefile
gst/speed/Makefile
gst/stereo/Makefile
gst/stereomono/Makefile
gst/synaesthesia/Makefile
gst/udp/Makefile
gst/videoscale/Makefile
gst/volenv/Makefile
gst/volume/Makefile
gst/vumeter/Makefile
gst/wavparse/Makefile
gst/y4m/Makefile
sys/Makefile
sys/oss/Makefile
sys/qcam/Makefile
@@ -1097,6 +1085,7 @@ ext/xmms/Makefile
gst-libs/Makefile
gst-libs/gst/Makefile
gst-libs/gst/audio/Makefile
gst-libs/gst/idct/Makefile
gst-libs/gst/resample/Makefile
gst-libs/gst/riff/Makefile
gst-plugins.spec


@@ -1,5 +1,3 @@
SUBDIRS = audio resample riff
# riff getbits putbits idct bytestream control resample
SUBDIRS = audio idct resample riff
DIST_SUBDIRS = audio resample riff
# riff getbits putbits idct bytestream control resample
DIST_SUBDIRS = audio idct resample riff

gst-libs/gst/idct/README Normal file (48 lines added)

@@ -0,0 +1,48 @@
This archive contains a quick & dirty implementation of the IEEE Standard
1180-1990 accuracy test for inverse DCT. It is not guaranteed to be
correct ... but if you find any bugs, please let me know (by email to
tgl@cs.cmu.edu).
The test harness consists of the C program ieeetest.c and shell script
doieee. For comparison purposes I have also supplied a copy of jrevdct.c,
the inverse DCT routine from release 4 of the Independent JPEG Group's
free JPEG software. (jrevdct.c is slightly modified from the IJG release
so that it will compile without the IJG include files.) jrevdct.c passes
the 1180 test --- or at least, this program thinks so. jrevdct.out is
the output from a test run.
Note that numerical results may vary somewhat across machines. This appears
to be mostly due to differing results from the cosine function.
INSTALLATION:
Check the Makefile, change CC and CFLAGS if needed. Then say "make".
If your C compiler is non-ANSI, you may need to change includes and/or
function headers.
To test a different IDCT routine, link with that routine instead of
jrevdct.o. You will need to modify dct.h and/or ieeetest.c if your
routine's calling convention is not in-place modification of an array
of 64 "short"s.
USAGE:
The standard test procedure is
doieee ieeetest >outputfile
Expect it to take a while (almost 80 minutes on my old 68030 box).
Each of the six passes will emit a row of 100 dots as it runs.
You can grep the output for the word FAILS if you just want to know
yea or nay.
LEGAL MUMBO-JUMBO:
I hereby release the test harness to the public domain.
Thomas G. Lane, 22 Nov 1993
IMPORTANT: jrevdct.c is NOT public domain, but is copyrighted free software
(not the same thing at all). It is subject to IJG's distribution terms, which
primarily state that if you incorporate it into a program you must acknowledge
IJG's contribution in your program documentation. For more details and the
complete IJG software, see the IJG FTP archive at ftp.uu.net, in directory
/graphics/jpeg.

gst-libs/gst/idct/dct.h Normal file (34 lines added)

@@ -0,0 +1,34 @@
/* define DCT types */
#include "config.h"
/*
* DCTSIZE underlying (1d) transform size
* DCTSIZE2 DCTSIZE squared
*/
#define DCTSIZE (8)
#define DCTSIZE2 (DCTSIZE*DCTSIZE)
#define EIGHT_BIT_SAMPLES /* needed in jrevdct.c */
typedef short DCTELEM; /* must be at least 16 bits */
typedef DCTELEM DCTBLOCK[DCTSIZE2];
typedef long INT32; /* must be at least 32 bits */
extern void gst_idct_int_idct();
extern void gst_idct_init_fast_int_idct (void);
extern void gst_idct_fast_int_idct (short *block);
#ifdef HAVE_LIBMMX
extern void gst_idct_mmx_idct (short *block);
extern void gst_idct_mmx32_idct (short *block);
extern void gst_idct_sse_idct (short *block);
#endif /* HAVE_LIBMMX */
extern void gst_idct_init_float_idct(void);
extern void gst_idct_float_idct (short *block);
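As a quick orientation for the prototypes above, here is a minimal sketch of a caller for the fast integer path (an editor's illustration, not part of the commit; it follows the in-place, 64-short calling convention described in the README, and the function and block names are made up):

#include "dct.h"

static void example_fast_int_idct (void)
{
  DCTBLOCK block = { 0 };            /* 64 coefficients, row-major 8x8 */

  block[0] = 1024;                   /* trivial DC-only input */

  gst_idct_init_fast_int_idct ();    /* builds the clipping table; call once */
  gst_idct_fast_int_idct (block);    /* transforms the 64 shorts in place */
}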

gst-libs/gst/idct/doieee Executable file (15 lines added)

@@ -0,0 +1,15 @@
# perform IEEE 1180 test series
# Typical usage:
# doieee >outfile
# Runs ./ieeetest once per IDCT method (first argument, 1 - 5), with the
# six IEEE 1180 parameter sets for each method.
for i in 1 2 3 4 5;
do
time ./ieeetest $i -256 255 1 10000
time ./ieeetest $i -5 5 1 10000
time ./ieeetest $i -300 300 1 10000
time ./ieeetest $i -256 255 -1 10000
time ./ieeetest $i -5 5 -1 10000
time ./ieeetest $i -300 300 -1 10000
done


@@ -0,0 +1,207 @@
/* idct.c, inverse fast discrete cosine transform */
/* Copyright (C) 1996, MPEG Software Simulation Group. All Rights Reserved. */
/*
* Disclaimer of Warranty
*
* These software programs are available to the user without any license fee or
* royalty on an "as is" basis. The MPEG Software Simulation Group disclaims
* any and all warranties, whether express, implied, or statutory, including any
* implied warranties of merchantability or of fitness for a particular
* purpose. In no event shall the copyright-holder be liable for any
* incidental, punitive, or consequential damages of any kind whatsoever
* arising from the use of these programs.
*
* This disclaimer of warranty extends to the user of these programs and user's
* customers, employees, agents, transferees, successors, and assigns.
*
* The MPEG Software Simulation Group does not represent or warrant that the
* programs furnished hereunder are free of infringement of any third-party
* patents.
*
* Commercial implementations of MPEG-1 and MPEG-2 video, including shareware,
* are subject to royalty fees to patent holders. Many of these patents are
* general enough such that they are unavoidable regardless of implementation
* design.
*
*/
/**********************************************************/
/* inverse two dimensional DCT, Chen-Wang algorithm */
/* (cf. IEEE ASSP-32, pp. 803-816, Aug. 1984) */
/* 32-bit integer arithmetic (8 bit coefficients) */
/* 11 mults, 29 adds per DCT */
/* sE, 18.8.91 */
/**********************************************************/
/* coefficients extended to 12 bit for IEEE1180-1990 */
/* compliance sE, 2.1.94 */
/**********************************************************/
/* this code assumes >> to be a two's-complement arithmetic */
/* right shift: (-2)>>1 == -1 , (-3)>>1 == -2 */
#define W1 2841 /* 2048*sqrt(2)*cos(1*pi/16) */
#define W2 2676 /* 2048*sqrt(2)*cos(2*pi/16) */
#define W3 2408 /* 2048*sqrt(2)*cos(3*pi/16) */
#define W5 1609 /* 2048*sqrt(2)*cos(5*pi/16) */
#define W6 1108 /* 2048*sqrt(2)*cos(6*pi/16) */
#define W7 565 /* 2048*sqrt(2)*cos(7*pi/16) */
#include "dct.h"
/* private data */
static short iclip[1024]; /* clipping table */
static short *iclp;
/* private prototypes */
static void idctrow (short *blk);
static void idctcol (short *blk);
/* row (horizontal) IDCT
*
* dst[k] = sum_{l=0..7} c[l] * src[l] * cos( (pi/8) * ( k + 1/2 ) * l )
*
* where: c[0]    = 128
*        c[1..7] = 128*sqrt(2)
*/
static void idctrow(blk)
short *blk;
{
int x0, x1, x2, x3, x4, x5, x6, x7, x8;
/* shortcut */
if (!((x1 = blk[4]<<11) | (x2 = blk[6]) | (x3 = blk[2]) |
(x4 = blk[1]) | (x5 = blk[7]) | (x6 = blk[5]) | (x7 = blk[3])))
{
blk[0]=blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=blk[0]<<3;
return;
}
x0 = (blk[0]<<11) + 128; /* for proper rounding in the fourth stage */
/* first stage */
x8 = W7*(x4+x5);
x4 = x8 + (W1-W7)*x4;
x5 = x8 - (W1+W7)*x5;
x8 = W3*(x6+x7);
x6 = x8 - (W3-W5)*x6;
x7 = x8 - (W3+W5)*x7;
/* second stage */
x8 = x0 + x1;
x0 -= x1;
x1 = W6*(x3+x2);
x2 = x1 - (W2+W6)*x2;
x3 = x1 + (W2-W6)*x3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
/* third stage */
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181*(x4+x5)+128)>>8;
x4 = (181*(x4-x5)+128)>>8;
/* fourth stage */
blk[0] = (x7+x1)>>8;
blk[1] = (x3+x2)>>8;
blk[2] = (x0+x4)>>8;
blk[3] = (x8+x6)>>8;
blk[4] = (x8-x6)>>8;
blk[5] = (x0-x4)>>8;
blk[6] = (x3-x2)>>8;
blk[7] = (x7-x1)>>8;
}
/* column (vertical) IDCT
*
* dst[8*k] = sum_{l=0..7} c[l] * src[8*l] * cos( (pi/8) * ( k + 1/2 ) * l )
*
* where: c[0]    = 1/1024
*        c[1..7] = (1/1024)*sqrt(2)
*/
static void idctcol(blk)
short *blk;
{
int x0, x1, x2, x3, x4, x5, x6, x7, x8;
/* shortcut */
if (!((x1 = (blk[8*4]<<8)) | (x2 = blk[8*6]) | (x3 = blk[8*2]) |
(x4 = blk[8*1]) | (x5 = blk[8*7]) | (x6 = blk[8*5]) | (x7 = blk[8*3])))
{
blk[8*0]=blk[8*1]=blk[8*2]=blk[8*3]=blk[8*4]=blk[8*5]=blk[8*6]=blk[8*7]=
iclp[(blk[8*0]+32)>>6];
return;
}
x0 = (blk[8*0]<<8) + 8192;
/* first stage */
x8 = W7*(x4+x5) + 4;
x4 = (x8+(W1-W7)*x4)>>3;
x5 = (x8-(W1+W7)*x5)>>3;
x8 = W3*(x6+x7) + 4;
x6 = (x8-(W3-W5)*x6)>>3;
x7 = (x8-(W3+W5)*x7)>>3;
/* second stage */
x8 = x0 + x1;
x0 -= x1;
x1 = W6*(x3+x2) + 4;
x2 = (x1-(W2+W6)*x2)>>3;
x3 = (x1+(W2-W6)*x3)>>3;
x1 = x4 + x6;
x4 -= x6;
x6 = x5 + x7;
x5 -= x7;
/* third stage */
x7 = x8 + x3;
x8 -= x3;
x3 = x0 + x2;
x0 -= x2;
x2 = (181*(x4+x5)+128)>>8;
x4 = (181*(x4-x5)+128)>>8;
/* fourth stage */
blk[8*0] = iclp[(x7+x1)>>14];
blk[8*1] = iclp[(x3+x2)>>14];
blk[8*2] = iclp[(x0+x4)>>14];
blk[8*3] = iclp[(x8+x6)>>14];
blk[8*4] = iclp[(x8-x6)>>14];
blk[8*5] = iclp[(x0-x4)>>14];
blk[8*6] = iclp[(x3-x2)>>14];
blk[8*7] = iclp[(x7-x1)>>14];
}
/* two dimensional inverse discrete cosine transform */
void gst_idct_fast_int_idct(block)
short *block;
{
int i;
for (i=0; i<8; i++)
idctrow(block+8*i);
for (i=0; i<8; i++)
idctcol(block+i);
}
void gst_idct_init_fast_int_idct()
{
int i;
iclp = iclip+512;
for (i= -512; i<512; i++)
iclp[i] = (i<-256) ? -256 : ((i>255) ? 255 : i);
}


@@ -0,0 +1,102 @@
/* Reference_IDCT.c, Inverse Discrete Cosine Transform, double precision */
/* Copyright (C) 1996, MPEG Software Simulation Group. All Rights Reserved. */
/*
* Disclaimer of Warranty
*
* These software programs are available to the user without any license fee or
* royalty on an "as is" basis. The MPEG Software Simulation Group disclaims
* any and all warranties, whether express, implied, or statutory, including any
* implied warranties of merchantability or of fitness for a particular
* purpose. In no event shall the copyright-holder be liable for any
* incidental, punitive, or consequential damages of any kind whatsoever
* arising from the use of these programs.
*
* This disclaimer of warranty extends to the user of these programs and user's
* customers, employees, agents, transferees, successors, and assigns.
*
* The MPEG Software Simulation Group does not represent or warrant that the
* programs furnished hereunder are free of infringement of any third-party
* patents.
*
* Commercial implementations of MPEG-1 and MPEG-2 video, including shareware,
* are subject to royalty fees to patent holders. Many of these patents are
* general enough such that they are unavoidable regardless of implementation
* design.
*
*/
/* Perform IEEE 1180 reference (64-bit floating point, separable 8x1
* direct matrix multiply) Inverse Discrete Cosine Transform
*/
/* Here we use math.h to generate constants. Compiler results may
vary a little */
#include <math.h>
#ifndef PI
# ifdef M_PI
# define PI M_PI
# else
# define PI 3.14159265358979323846
# endif
#endif
/* private data */
/* cosine transform matrix for 8x1 IDCT */
static double gst_idct_float_c[8][8];
/* initialize DCT coefficient matrix */
void gst_idct_init_float_idct()
{
int freq, time;
double scale;
for (freq=0; freq < 8; freq++)
{
scale = (freq == 0) ? sqrt(0.125) : 0.5;
for (time=0; time<8; time++)
gst_idct_float_c[freq][time] = scale*cos((PI/8.0)*freq*(time + 0.5));
}
}
/* perform IDCT matrix multiply for 8x8 coefficient block */
void gst_idct_float_idct(block)
short *block;
{
int i, j, k, v;
double partial_product;
double tmp[64];
for (i=0; i<8; i++)
for (j=0; j<8; j++)
{
partial_product = 0.0;
for (k=0; k<8; k++)
partial_product+= gst_idct_float_c[k][j]*block[8*i+k];
tmp[8*i+j] = partial_product;
}
/* Transpose operation is integrated into address mapping by switching
loop order of i and j */
for (j=0; j<8; j++)
for (i=0; i<8; i++)
{
partial_product = 0.0;
for (k=0; k<8; k++)
partial_product+= gst_idct_float_c[k][i]*tmp[8*k+j];
v = (int) floor(partial_product+0.5);
block[8*i+j] = (v<-256) ? -256 : ((v>255) ? 255 : v);
}
}
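For reference, the two loop nests above are the usual separable form of the 8x8 inverse DCT; written out (an editor's restatement of the code above, not text from the commit):

$$ f(x,y) = \sum_{u=0}^{7}\sum_{v=0}^{7} C_{u,x}\, C_{v,y}\, F(u,v), \qquad C_{u,t} = s(u)\cos\!\left(\frac{\pi}{8}\, u \left(t + \tfrac{1}{2}\right)\right), \quad s(0)=\sqrt{1/8},\ s(u)=\tfrac{1}{2} \text{ for } u>0 $$

where F(u,v) are the input coefficients; the results are rounded to the nearest integer and clipped to [-256, 255], exactly as gst_idct_float_idct() does above.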

gst-libs/gst/idct/idct.h Normal file (54 lines added)

@@ -0,0 +1,54 @@
/* Gnome-Streamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __GST_IDCT_H__
#define __GST_IDCT_H__
#include <glib.h>
typedef enum {
GST_IDCT_DEFAULT,
GST_IDCT_INT,
GST_IDCT_FAST_INT,
GST_IDCT_FLOAT,
GST_IDCT_MMX,
GST_IDCT_MMX32,
GST_IDCT_SSE,
} GstIDCTMethod;
typedef struct _GstIDCT GstIDCT;
typedef void (*GstIDCTFunction) (gshort *block);
#define GST_IDCT_TRANSPOSE(idct) ((idct)->need_transpose)
struct _GstIDCT {
/* private */
GstIDCTFunction convert;
GstIDCTFunction convert_sparse;
gboolean need_transpose;
};
GstIDCT *gst_idct_new(GstIDCTMethod method);
#define gst_idct_convert(idct, blocks) (idct)->convert((blocks))
#define gst_idct_convert_sparse(idct, blocks) (idct)->convert_sparse((blocks))
void gst_idct_destroy(GstIDCT *idct);
#endif /* __GST_IDCT_H__ */
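A minimal sketch of how a caller is expected to drive this header (again an editor's illustration, not code from the commit; the include path and buffer handling are assumptions):

#include "idct.h"

static void example_run_idct (gshort coeffs[64])
{
  GstIDCT *idct = gst_idct_new (GST_IDCT_FAST_INT);

  if (idct == NULL)
    return;                          /* requested method not available */

  if (GST_IDCT_TRANSPOSE (idct)) {
    /* this implementation wants the 8x8 block pre-transposed */
  }

  gst_idct_convert (idct, coeffs);   /* 64 gshorts transformed in place */
  gst_idct_destroy (idct);
}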


@@ -0,0 +1,335 @@
/*
* ieeetest.c --- test IDCT code against the IEEE Std 1180-1990 spec
*
* Note that this does only one pass of the test.
* Six invocations of ieeetest are needed to complete the entire spec.
* The shell script "doieee" performs the complete test.
*
* Written by Tom Lane (tgl@cs.cmu.edu).
* Released to public domain 11/22/93.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <gst/gst.h>
#include "gstidct.h"
#include "dct.h"
/* prototypes */
void usage (char *msg);
long ieeerand (long L, long H);
void dct_init(void);
void ref_fdct(DCTELEM block[8][8]);
void ref_idct(DCTELEM block[8][8]);
/* error stat accumulators -- assume initialized to 0 */
long sumerrs[DCTSIZE2];
long sumsqerrs[DCTSIZE2];
int maxerr[DCTSIZE2];
char * meets (double val, double limit)
{
return ((fabs(val) <= limit) ? "meets" : "FAILS");
}
int
main(int argc, char **argv)
{
long minpix, maxpix, sign;
long curiter, niters;
int i, j;
double max, total;
int method;
DCTELEM block[DCTSIZE2]; /* random source data */
DCTELEM refcoefs[DCTSIZE2]; /* coefs from reference FDCT */
DCTELEM refout[DCTSIZE2]; /* output from reference IDCT */
DCTELEM testout[DCTSIZE2]; /* output from test IDCT */
GstIDCT *idct;
guint64 tscstart, tscmin = ~0, tscmax = 0;
guint64 tscstop;
/* Argument parsing --- not very bulletproof at all */
if (argc != 6) usage(NULL);
method = atoi(argv[1]);
minpix = atoi(argv[2]);
maxpix = atoi(argv[3]);
sign = atoi(argv[4]);
niters = atol(argv[5]);
gst_library_load("gstidct");
idct = gst_idct_new(method);
if (idct == 0) {
printf("method not available\n\n\n");
return 0;
}
dct_init();
/* Loop once per generated random-data block */
for (curiter = 0; curiter < niters; curiter++) {
/* generate a pseudo-random block of data */
for (i = 0; i < DCTSIZE2; i++)
block[i] = (DCTELEM) (ieeerand(-minpix,maxpix) * sign);
/* perform reference FDCT */
memcpy(refcoefs, block, sizeof(DCTELEM)*DCTSIZE2);
ref_fdct(refcoefs);
/* clip */
for (i = 0; i < DCTSIZE2; i++) {
if (refcoefs[i] < -2048) refcoefs[i] = -2048;
else if (refcoefs[i] > 2047) refcoefs[i] = 2047;
}
/* perform reference IDCT */
memcpy(refout, refcoefs, sizeof(DCTELEM)*DCTSIZE2);
ref_idct(refout);
/* clip */
for (i = 0; i < DCTSIZE2; i++) {
if (refout[i] < -256) refout[i] = -256;
else if (refout[i] > 255) refout[i] = 255;
}
/* perform test IDCT */
if (GST_IDCT_TRANSPOSE(idct)) {
for (j = 0; j < DCTSIZE; j++) {
for (i = 0; i < DCTSIZE; i++) {
testout[i*DCTSIZE+j] = refcoefs[j*DCTSIZE+i];
}
}
}
else {
memcpy(testout, refcoefs, sizeof(DCTELEM)*DCTSIZE2);
}
gst_trace_read_tsc(&tscstart);
gst_idct_convert(idct, testout);
gst_trace_read_tsc(&tscstop);
//printf("time %llu, %llu %lld\n", tscstart, tscstop, tscstop-tscstart);
if (tscstop - tscstart < tscmin) tscmin = tscstop-tscstart;
if (tscstop - tscstart > tscmax) tscmax = tscstop-tscstart;
/* clip */
for (i = 0; i < DCTSIZE2; i++) {
if (testout[i] < -256) testout[i] = -256;
else if (testout[i] > 255) testout[i] = 255;
}
/* accumulate error stats */
for (i = 0; i < DCTSIZE2; i++) {
register int err = testout[i] - refout[i];
sumerrs[i] += err;
sumsqerrs[i] += err * err;
if (err < 0) err = -err;
if (maxerr[i] < err) maxerr[i] = err;
}
if (curiter % 100 == 99) {
fprintf(stderr, ".");
fflush(stderr);
}
}
fprintf(stderr, "\n");
/* print results */
printf("IEEE test conditions: -L = %ld, +H = %ld, sign = %ld, #iters = %ld\n",
minpix, maxpix, sign, niters);
printf("Speed, min time %lld, max %lld\n", tscmin, tscmax);
printf("Peak absolute values of errors:\n");
for (i = 0, j = 0; i < DCTSIZE2; i++) {
if (j < maxerr[i]) j = maxerr[i];
printf("%4d", maxerr[i]);
if ((i%DCTSIZE) == DCTSIZE-1) printf("\n");
}
printf("Worst peak error = %d (%s spec limit 1)\n\n", j,
meets((double) j, 1.0));
printf("Mean square errors:\n");
max = total = 0.0;
for (i = 0; i < DCTSIZE2; i++) {
double err = (double) sumsqerrs[i] / ((double) niters);
total += (double) sumsqerrs[i];
if (max < err) max = err;
printf(" %8.4f", err);
if ((i%DCTSIZE) == DCTSIZE-1) printf("\n");
}
printf("Worst pmse = %.6f (%s spec limit 0.06)\n", max, meets(max, 0.06));
total /= (double) (64*niters);
printf("Overall mse = %.6f (%s spec limit 0.02)\n\n", total,
meets(total, 0.02));
printf("Mean errors:\n");
max = total = 0.0;
for (i = 0; i < DCTSIZE2; i++) {
double err = (double) sumerrs[i] / ((double) niters);
total += (double) sumerrs[i];
printf(" %8.4f", err);
if (err < 0.0) err = -err;
if (max < err) max = err;
if ((i%DCTSIZE) == DCTSIZE-1) printf("\n");
}
printf("Worst mean error = %.6f (%s spec limit 0.015)\n", max,
meets(max, 0.015));
total /= (double) (64*niters);
printf("Overall mean error = %.6f (%s spec limit 0.0015)\n\n", total,
meets(total, 0.0015));
/* test for 0 input giving 0 output */
memset(testout, 0, sizeof(DCTELEM)*DCTSIZE2);
gst_idct_convert(idct, testout);
for (i = 0, j=0; i < DCTSIZE2; i++) {
if (testout[i]) {
printf("Position %d of IDCT(0) = %d (FAILS)\n", i, testout[i]);
j++;
}
}
printf("%d elements of IDCT(0) were not zero\n\n\n", j);
exit(0);
return 0;
}
void usage (char *msg)
{
if (msg != NULL)
fprintf(stderr, "\nerror: %s\n", msg);
fprintf(stderr, "\n");
fprintf(stderr, "usage: ieeetest minpix maxpix sign niters\n");
fprintf(stderr, "\n");
fprintf(stderr, " test = 1 - 5\n");
fprintf(stderr, " minpix = -L value per IEEE spec\n");
fprintf(stderr, " maxpix = H value per IEEE spec\n");
fprintf(stderr, " sign = +1 for normal, -1 to run negated test\n");
fprintf(stderr, " niters = # iterations (10000 for full test)\n");
fprintf(stderr, "\n");
exit(1);
}
/* Pseudo-random generator specified by IEEE 1180 */
long ieeerand (long L, long H)
{
static long randx = 1;
static double z = (double) 0x7fffffff;
long i,j;
double x;
randx = (randx * 1103515245) + 12345;
i = randx & 0x7ffffffe;
x = ((double) i) / z;
x *= (L+H+1);
j = x;
return j-L;
}
/* Reference double-precision FDCT and IDCT */
/* The cosine lookup table */
/* coslu[a][b] = C(b)/2 * cos[(2a+1)b*pi/16] */
double coslu[8][8];
/* Routine to initialise the cosine lookup table */
void dct_init(void)
{
int a,b;
double tmp;
for(a=0;a<8;a++)
for(b=0;b<8;b++) {
tmp = cos((double)((a+a+1)*b) * (3.14159265358979323846 / 16.0));
if(b==0)
tmp /= sqrt(2.0);
coslu[a][b] = tmp * 0.5;
}
}
void ref_fdct (DCTELEM block[8][8])
{
int x,y,u,v;
double tmp, tmp2;
double res[8][8];
for (v=0; v<8; v++) {
for (u=0; u<8; u++) {
tmp = 0.0;
for (y=0; y<8; y++) {
tmp2 = 0.0;
for (x=0; x<8; x++) {
tmp2 += (double) block[y][x] * coslu[x][u];
}
tmp += coslu[y][v] * tmp2;
}
res[v][u] = tmp;
}
}
for (v=0; v<8; v++) {
for (u=0; u<8; u++) {
tmp = res[v][u];
if (tmp < 0.0) {
x = - ((int) (0.5 - tmp));
} else {
x = (int) (tmp + 0.5);
}
block[v][u] = (DCTELEM) x;
}
}
}
void ref_idct (DCTELEM block[8][8])
{
int x,y,u,v;
double tmp, tmp2;
double res[8][8];
for (y=0; y<8; y++) {
for (x=0; x<8; x++) {
tmp = 0.0;
for (v=0; v<8; v++) {
tmp2 = 0.0;
for (u=0; u<8; u++) {
tmp2 += (double) block[v][u] * coslu[x][u];
}
tmp += coslu[y][v] * tmp2;
}
res[y][x] = tmp;
}
}
for (v=0; v<8; v++) {
for (u=0; u<8; u++) {
tmp = res[v][u];
if (tmp < 0.0) {
x = - ((int) (0.5 - tmp));
} else {
x = (int) (tmp + 0.5);
}
block[v][u] = (DCTELEM) x;
}
}
}

gst-libs/gst/idct/intidct.c Normal file (391 lines added)

@@ -0,0 +1,391 @@
/*
* jrevdct.c
*
* Copyright (C) 1991, 1992, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file contains the basic inverse-DCT transformation subroutine.
*
* This implementation is based on an algorithm described in
* C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
* Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
* Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
* The primary algorithm described there uses 11 multiplies and 29 adds.
* We use their alternate method with 12 multiplies and 32 adds.
* The advantage of this method is that no data path contains more than one
* multiplication; this allows a very simple and accurate implementation in
* scaled fixed-point arithmetic, with a minimal number of shifts.
*/
#include "dct.h"
/* We assume that right shift corresponds to signed division by 2 with
* rounding towards minus infinity. This is correct for typical "arithmetic
* shift" instructions that shift in copies of the sign bit. But some
* C compilers implement >> with an unsigned shift. For these machines you
* must define RIGHT_SHIFT_IS_UNSIGNED.
* RIGHT_SHIFT provides a proper signed right shift of an INT32 quantity.
* It is only applied with constant shift counts. SHIFT_TEMPS must be
* included in the variables of any routine using RIGHT_SHIFT.
*/
#ifdef RIGHT_SHIFT_IS_UNSIGNED
#define SHIFT_TEMPS INT32 shift_temp;
#define RIGHT_SHIFT(x,shft) \
((shift_temp = (x)) < 0 ? \
(shift_temp >> (shft)) | ((~((INT32) 0)) << (32-(shft))) : \
(shift_temp >> (shft)))
#else
#define SHIFT_TEMPS
#define RIGHT_SHIFT(x,shft) ((x) >> (shft))
#endif
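/* Editor's illustration, not part of the original file: with a 32-bit INT32 and
* a logical (unsigned) >>, RIGHT_SHIFT(-3, 1) expands to
* (0xFFFFFFFD >> 1) | (~0 << 31) = 0x7FFFFFFE | 0x80000000 = 0xFFFFFFFE = -2,
* the same value an arithmetic right shift gives, which is what the DESCALE()
* rounding below relies on.
*/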
/*
* This routine is specialized to the case DCTSIZE = 8.
*/
#if DCTSIZE != 8
Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */
#endif
/*
* A 2-D IDCT can be done by 1-D IDCT on each row followed by 1-D IDCT
* on each column. Direct algorithms are also available, but they are
* much more complex and seem not to be any faster when reduced to code.
*
* The poop on this scaling stuff is as follows:
*
* Each 1-D IDCT step produces outputs which are a factor of sqrt(N)
* larger than the true IDCT outputs. The final outputs are therefore
* a factor of N larger than desired; since N=8 this can be cured by
* a simple right shift at the end of the algorithm. The advantage of
* this arrangement is that we save two multiplications per 1-D IDCT,
* because the y0 and y4 inputs need not be divided by sqrt(N).
*
* We have to do addition and subtraction of the integer inputs, which
* is no problem, and multiplication by fractional constants, which is
* a problem to do in integer arithmetic. We multiply all the constants
* by CONST_SCALE and convert them to integer constants (thus retaining
* CONST_BITS bits of precision in the constants). After doing a
* multiplication we have to divide the product by CONST_SCALE, with proper
* rounding, to produce the correct output. This division can be done
* cheaply as a right shift of CONST_BITS bits. We postpone shifting
* as long as possible so that partial sums can be added together with
* full fractional precision.
*
* The outputs of the first pass are scaled up by PASS1_BITS bits so that
* they are represented to better-than-integral precision. These outputs
* require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
* with the recommended scaling. (To scale up 12-bit sample data further, an
* intermediate INT32 array would be needed.)
*
* To avoid overflow of the 32-bit intermediate results in pass 2, we must
* have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
* shows that the values given below are the most effective.
*/
#ifdef EIGHT_BIT_SAMPLES
#define CONST_BITS 13
#define PASS1_BITS 2
#else
#define CONST_BITS 13
#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
#endif
#define ONE ((INT32) 1)
#define CONST_SCALE (ONE << CONST_BITS)
/* Convert a positive real constant to an integer scaled by CONST_SCALE. */
#define FIX(x) ((INT32) ((x) * CONST_SCALE + 0.5))
/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
* causing a lot of useless floating-point operations at run time.
* To get around this we use the following pre-calculated constants.
* If you change CONST_BITS you may want to add appropriate values.
* (With a reasonable C compiler, you can just rely on the FIX() macro...)
*/
#if CONST_BITS == 13
#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */
#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */
#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */
#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */
#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */
#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */
#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */
#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */
#else
#define FIX_0_298631336 FIX(0.298631336)
#define FIX_0_390180644 FIX(0.390180644)
#define FIX_0_541196100 FIX(0.541196100)
#define FIX_0_765366865 FIX(0.765366865)
#define FIX_0_899976223 FIX(0.899976223)
#define FIX_1_175875602 FIX(1.175875602)
#define FIX_1_501321110 FIX(1.501321110)
#define FIX_1_847759065 FIX(1.847759065)
#define FIX_1_961570560 FIX(1.961570560)
#define FIX_2_053119869 FIX(2.053119869)
#define FIX_2_562915447 FIX(2.562915447)
#define FIX_3_072711026 FIX(3.072711026)
#endif
/* Descale and correctly round an INT32 value that's scaled by N bits.
* We assume RIGHT_SHIFT rounds towards minus infinity, so adding
* the fudge factor is correct for either sign of X.
*/
#define DESCALE(x,n) RIGHT_SHIFT((x) + (ONE << ((n)-1)), n)
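/* Editor's illustration, not part of the original file: with CONST_BITS == 13,
* FIX(0.541196100) = (INT32) (0.541196100 * 8192 + 0.5) = 4433, i.e. exactly the
* pre-computed FIX_0_541196100 above; DESCALE() then undoes the scaling with
* correct rounding, e.g. DESCALE(4433 * 100, 13) = (443300 + 4096) >> 13 = 54,
* which is 0.5412 * 100 rounded to the nearest integer.
*/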
/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
* For 8-bit samples with the recommended scaling, all the variable
* and constant values involved are no more than 16 bits wide, so a
* 16x16->32 bit multiply can be used instead of a full 32x32 multiply;
* this provides a useful speedup on many machines.
* There is no way to specify a 16x16->32 multiply in portable C, but
* some C compilers will do the right thing if you provide the correct
* combination of casts.
* NB: for 12-bit samples, a full 32-bit multiplication will be needed.
*/
#ifdef EIGHT_BIT_SAMPLES
#ifdef SHORTxSHORT_32 /* may work if 'int' is 32 bits */
#define MULTIPLY(var,const) (((INT16) (var)) * ((INT16) (const)))
#endif
#ifdef SHORTxLCONST_32 /* known to work with Microsoft C 6.0 */
#define MULTIPLY(var,const) (((INT16) (var)) * ((INT32) (const)))
#endif
#endif
#ifndef MULTIPLY /* default definition */
#define MULTIPLY(var,const) ((var) * (const))
#endif
/*
* Perform the inverse DCT on one block of coefficients.
*/
void
gst_idct_int_idct (DCTBLOCK data)
{
INT32 tmp0, tmp1, tmp2, tmp3;
INT32 tmp10, tmp11, tmp12, tmp13;
INT32 z1, z2, z3, z4, z5;
register DCTELEM *dataptr;
int rowctr;
SHIFT_TEMPS
/* Pass 1: process rows. */
/* Note results are scaled up by sqrt(8) compared to a true IDCT; */
/* furthermore, we scale the results by 2**PASS1_BITS. */
dataptr = data;
for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
/* Due to quantization, we will usually find that many of the input
* coefficients are zero, especially the AC terms. We can exploit this
* by short-circuiting the IDCT calculation for any row in which all
* the AC terms are zero. In that case each output is equal to the
* DC coefficient (with scale factor as needed).
* With typical images and quantization tables, half or more of the
* row DCT calculations can be simplified this way.
*/
if ((dataptr[1] | dataptr[2] | dataptr[3] | dataptr[4] |
dataptr[5] | dataptr[6] | dataptr[7]) == 0) {
/* AC terms all zero */
DCTELEM dcval = (DCTELEM) (dataptr[0] << PASS1_BITS);
dataptr[0] = dcval;
dataptr[1] = dcval;
dataptr[2] = dcval;
dataptr[3] = dcval;
dataptr[4] = dcval;
dataptr[5] = dcval;
dataptr[6] = dcval;
dataptr[7] = dcval;
dataptr += DCTSIZE; /* advance pointer to next row */
continue;
}
/* Even part: reverse the even part of the forward DCT. */
/* The rotator is sqrt(2)*c(-6). */
z2 = (INT32) dataptr[2];
z3 = (INT32) dataptr[6];
z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
tmp0 = ((INT32) dataptr[0] + (INT32) dataptr[4]) << CONST_BITS;
tmp1 = ((INT32) dataptr[0] - (INT32) dataptr[4]) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
/* Odd part per figure 8; the matrix is unitary and hence its
* transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
*/
tmp0 = (INT32) dataptr[7];
tmp1 = (INT32) dataptr[5];
tmp2 = (INT32) dataptr[3];
tmp3 = (INT32) dataptr[1];
z1 = tmp0 + tmp3;
z2 = tmp1 + tmp2;
z3 = tmp0 + tmp2;
z4 = tmp1 + tmp3;
z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
z3 += z5;
z4 += z5;
tmp0 += z1 + z3;
tmp1 += z2 + z4;
tmp2 += z2 + z3;
tmp3 += z1 + z4;
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
dataptr[0] = (DCTELEM) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
dataptr[7] = (DCTELEM) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
dataptr[1] = (DCTELEM) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
dataptr[6] = (DCTELEM) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
dataptr[2] = (DCTELEM) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
dataptr[5] = (DCTELEM) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
dataptr[3] = (DCTELEM) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
dataptr[4] = (DCTELEM) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS);
dataptr += DCTSIZE; /* advance pointer to next row */
}
/* Pass 2: process columns. */
/* Note that we must descale the results by a factor of 8 == 2**3, */
/* and also undo the PASS1_BITS scaling. */
dataptr = data;
for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
/* Columns of zeroes can be exploited in the same way as we did with rows.
* However, the row calculation has created many nonzero AC terms, so the
* simplification applies less often (typically 5% to 10% of the time).
* On machines with very fast multiplication, it's possible that the
* test takes more time than it's worth. In that case this section
* may be commented out.
*/
#ifndef NO_ZERO_COLUMN_TEST
if ((dataptr[DCTSIZE*1] | dataptr[DCTSIZE*2] | dataptr[DCTSIZE*3] |
dataptr[DCTSIZE*4] | dataptr[DCTSIZE*5] | dataptr[DCTSIZE*6] |
dataptr[DCTSIZE*7]) == 0) {
/* AC terms all zero */
DCTELEM dcval = (DCTELEM) DESCALE((INT32) dataptr[0], PASS1_BITS+3);
dataptr[DCTSIZE*0] = dcval;
dataptr[DCTSIZE*1] = dcval;
dataptr[DCTSIZE*2] = dcval;
dataptr[DCTSIZE*3] = dcval;
dataptr[DCTSIZE*4] = dcval;
dataptr[DCTSIZE*5] = dcval;
dataptr[DCTSIZE*6] = dcval;
dataptr[DCTSIZE*7] = dcval;
dataptr++; /* advance pointer to next column */
continue;
}
#endif
/* Even part: reverse the even part of the forward DCT. */
/* The rotator is sqrt(2)*c(-6). */
z2 = (INT32) dataptr[DCTSIZE*2];
z3 = (INT32) dataptr[DCTSIZE*6];
z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
tmp0 = ((INT32) dataptr[DCTSIZE*0] + (INT32) dataptr[DCTSIZE*4]) << CONST_BITS;
tmp1 = ((INT32) dataptr[DCTSIZE*0] - (INT32) dataptr[DCTSIZE*4]) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
/* Odd part per figure 8; the matrix is unitary and hence its
* transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
*/
tmp0 = (INT32) dataptr[DCTSIZE*7];
tmp1 = (INT32) dataptr[DCTSIZE*5];
tmp2 = (INT32) dataptr[DCTSIZE*3];
tmp3 = (INT32) dataptr[DCTSIZE*1];
z1 = tmp0 + tmp3;
z2 = tmp1 + tmp2;
z3 = tmp0 + tmp2;
z4 = tmp1 + tmp3;
z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
z3 += z5;
z4 += z5;
tmp0 += z1 + z3;
tmp1 += z2 + z4;
tmp2 += z2 + z3;
tmp3 += z1 + z4;
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp3,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp10 - tmp3,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp11 + tmp2,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(tmp11 - tmp2,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(tmp12 + tmp1,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12 - tmp1,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp13 + tmp0,
CONST_BITS+PASS1_BITS+3);
dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp13 - tmp0,
CONST_BITS+PASS1_BITS+3);
dataptr++; /* advance pointer to next column */
}
}


@@ -0,0 +1,783 @@
/*
* idctmmx32.cpp
*
* Copyright (C) Alberto Vigata - January 2000 - ultraflask@yahoo.com
*
* This file is part of FlasKMPEG, a free MPEG to MPEG/AVI converter
*
* FlasKMPEG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* FlasKMPEG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU Make; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
// MMX32 iDCT algorithm (IEEE-1180 compliant) :: idct_mmx32()
//
// MPEG2AVI
// --------
// v0.16B33 initial release
//
// This was one of the harder pieces of work to code.
// Intel's app-note focuses on the numerical issues of the algorithm, but
// assumes the programmer is familiar with IDCT mathematics, leaving the
// form of the complete function up to the programmer's imagination.
//
// ALGORITHM OVERVIEW
// ------------------
// I played around with the code for quite a few hours. I came up
// with *A* working IDCT algorithm, however I'm not sure whether my routine
// is "the correct one." But rest assured, my code passes all six IEEE
// accuracy tests with plenty of margin.
//
// My IDCT algorithm consists of 4 steps:
//
// 1) IDCT-row transformation (using the IDCT-row function) on all 8 rows
// This yields an intermediate 8x8 matrix.
//
// 2) intermediate matrix transpose (mandatory)
//
// 3) IDCT-row transformation (2nd time) on all 8 rows of the intermediate
// matrix. The output is the final-result, in transposed form.
//
// 4) post-transformation matrix transpose
// (not necessary if the input-data is already transposed, this could
// be done during the MPEG "zig-zag" scan, but since my algorithm
// requires at least one transpose operation, why not re-use the
// transpose-code.)
//
// Although the (1st) and (3rd) steps use the SAME row-transform operation,
// the (3rd) step uses different shift&round constants (explained later.)
//
// Also note that the intermediate transpose (2) would not be necessary,
// if the subsequent operation were a iDCT-column transformation. Since
// we only have the iDCT-row transform, we transpose the intermediate
// matrix and use the iDCT-row transform a 2nd time.
//
// I had to change some constants/variables for my method to work :
//
// As given by Intel, the #defines for SHIFT_INV_COL and RND_INV_COL are
// wrong. Not surprising since I'm not using a true column-transform
// operation, but the row-transform operation (as mentioned earlier.)
// round_inv_col[], which is given as "4 short" values, should have the
// same dimensions as round_inv_row[]. The corrected variables are
// shown.
//
// Intel's code defines a different table for each row operation.
// The tables given are 0/4, 1/7, 2/6, and 5/3. My code only uses row#0.
// Using the other rows messes up the overall transform.
//
// IMPLEMENTATION DETAILs
// ----------------------
//
// I divided the algorithm's work into two subroutines,
// 1) idct_mmx32_rows() - transforms 8 rows, then transpose
// 2) idct_mmx32_cols() - transforms 8 rows, then transpose
// yields final result ("drop-in" direct replacement for INT32 IDCT)
//
// The 2nd function is a clone of the 1st, with changes made only to the
// shift&rounding instructions.
//
// In the 1st function (rows), the shift & round instructions use
// SHIFT_INV_ROW & round_inv_row[] (renamed to r_inv_row[])
//
// In the 2nd function (cols)-> r_inv_col[], and
// SHIFT_INV_COL & round_inv_col[] (renamed to r_inv_col[])
//
// Each function contains an integrated transpose-operator, which comes
// AFTER the primary transformation operation. In the future, I'll optimize
// the code to do more of the transpose-work "in-place". Right now, I've
// left the code as two subroutines and a main calling function, so other
// people can read the code more easily.
//
// liaor@umcc.ais.org http://members.tripod.com/~liaor
//
//;=============================================================================
//;
//; AP-922 http://developer.intel.com/vtune/cbts/strmsimd
//; These examples contain code fragments for first stage iDCT 8x8
//; (for rows) and first stage DCT 8x8 (for columns)
//;
//;=============================================================================
/*
mword typedef qword
qword ptr equ mword ptr */
#include <mmx.h>
#define BITS_INV_ACC 4 //; 4 or 5 for IEEE
// 5 yields higher accuracy, but lessens dynamic range on the input matrix
#define SHIFT_INV_ROW (16 - BITS_INV_ACC)
#define SHIFT_INV_COL (1 + BITS_INV_ACC +14 ) // changed from Intel's val)
//#define SHIFT_INV_COL (1 + BITS_INV_ACC )
#define RND_INV_ROW (1 << (SHIFT_INV_ROW-1))
#define RND_INV_COL (1 << (SHIFT_INV_COL-1))
#define RND_INV_CORR (RND_INV_COL - 1) //; correction -1.0 and round
//#define RND_INV_ROW (1024 * (6 - BITS_INV_ACC)) //; 1 << (SHIFT_INV_ROW-1)
//#define RND_INV_COL (16 * (BITS_INV_ACC - 3)) //; 1 << (SHIFT_INV_COL-1)
//.data
//Align 16
const static long r_inv_row[2] = { RND_INV_ROW, RND_INV_ROW};
const static long r_inv_col[2] = {RND_INV_COL, RND_INV_COL};
const static long r_inv_corr[2] = {RND_INV_CORR, RND_INV_CORR };
//const static short r_inv_col[4] =
// {RND_INV_COL, RND_INV_COL, RND_INV_COL, RND_INV_COL};
//const static short r_inv_corr[4] =
// {RND_INV_CORR, RND_INV_CORR, RND_INV_CORR, RND_INV_CORR};
/* constants for the forward DCT
//#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy
//#define SHIFT_FRW_COL BITS_FRW_ACC
//#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
//#define RND_FRW_ROW (262144 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_ROW-1)
const static __int64 one_corr = 0x0001000100010001;
const static long r_frw_row[2] = {RND_FRW_ROW, RND_FRW_ROW };
//const static short tg_1_16[4] = {13036, 13036, 13036, 13036 }; //tg * (2<<16) + 0.5
//const static short tg_2_16[4] = {27146, 27146, 27146, 27146 }; //tg * (2<<16) + 0.5
//const static short tg_3_16[4] = {-21746, -21746, -21746, -21746 }; //tg * (2<<16) + 0.5
//const static short cos_4_16[4] = {-19195, -19195, -19195, -19195 }; //cos * (2<<16) + 0.5
//const static short ocos_4_16[4] = {23170, 23170, 23170, 23170 }; //cos * (2<<15) + 0.5
//concatenated table, for forward DCT transformation
const static short tg_all_16[] = {
13036, 13036, 13036, 13036, // tg * (2<<16) + 0.5
27146, 27146, 27146, 27146, //tg * (2<<16) + 0.5
-21746, -21746, -21746, -21746, // tg * (2<<16) + 0.5
-19195, -19195, -19195, -19195, //cos * (2<<16) + 0.5
23170, 23170, 23170, 23170 }; //cos * (2<<15) + 0.5
#define tg_1_16 (tg_all_16 + 0)
#define tg_2_16 (tg_all_16 + 8)
#define tg_3_16 (tg_all_16 + 16)
#define cos_4_16 (tg_all_16 + 24)
#define ocos_4_16 (tg_all_16 + 32)
*/
/*
;=============================================================================
;
; The first stage iDCT 8x8 - inverse DCTs of rows
;
;-----------------------------------------------------------------------------
; The 8-point inverse DCT direct algorithm
;-----------------------------------------------------------------------------
;
; static const short w[32] = {
; FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16),
; FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
; FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16),
; FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16),
; FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16),
; FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
; FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16),
; FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) };
;
; #define DCT_8_INV_ROW(x, y)
;{
; int a0, a1, a2, a3, b0, b1, b2, b3;
;
; a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3];
; a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7];
; a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11];
; a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
; b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
; b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
; b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
; b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
;
; y[0] = SHIFT_ROUND ( a0 + b0 );
; y[1] = SHIFT_ROUND ( a1 + b1 );
; y[2] = SHIFT_ROUND ( a2 + b2 );
; y[3] = SHIFT_ROUND ( a3 + b3 );
; y[4] = SHIFT_ROUND ( a3 - b3 );
; y[5] = SHIFT_ROUND ( a2 - b2 );
; y[6] = SHIFT_ROUND ( a1 - b1 );
; y[7] = SHIFT_ROUND ( a0 - b0 );
;}
;
;-----------------------------------------------------------------------------
;
; In this implementation the outputs of the iDCT-1D are multiplied
; for rows 0,4 - by cos_4_16,
; for rows 1,7 - by cos_1_16,
; for rows 2,6 - by cos_2_16,
; for rows 3,5 - by cos_3_16
; and are shifted to the left for better accuracy
;
; For the constants used,
; FIX(float_const) = (short) (float_const * (1<<15) + 0.5)
;
;=============================================================================
;=============================================================================
IF _MMX ; MMX code
;=============================================================================
//; Table for rows 0,4 - constants are multiplied by cos_4_16
const short tab_i_04[] = {
16384, 16384, 16384, -16384, // ; movq-> w06 w04 w02 w00
21407, 8867, 8867, -21407, // w07 w05 w03 w01
16384, -16384, 16384, 16384, //; w14 w12 w10 w08
-8867, 21407, -21407, -8867, //; w15 w13 w11 w09
22725, 12873, 19266, -22725, //; w22 w20 w18 w16
19266, 4520, -4520, -12873, //; w23 w21 w19 w17
12873, 4520, 4520, 19266, //; w30 w28 w26 w24
-22725, 19266, -12873, -22725 };//w31 w29 w27 w25
//; Table for rows 1,7 - constants are multiplied by cos_1_16
const short tab_i_17[] = {
22725, 22725, 22725, -22725, // ; movq-> w06 w04 w02 w00
29692, 12299, 12299, -29692, // ; w07 w05 w03 w01
22725, -22725, 22725, 22725, //; w14 w12 w10 w08
-12299, 29692, -29692, -12299, //; w15 w13 w11 w09
31521, 17855, 26722, -31521, //; w22 w20 w18 w16
26722, 6270, -6270, -17855, //; w23 w21 w19 w17
17855, 6270, 6270, 26722, //; w30 w28 w26 w24
-31521, 26722, -17855, -31521}; // w31 w29 w27 w25
//; Table for rows 2,6 - constants are multiplied by cos_2_16
const short tab_i_26[] = {
21407, 21407, 21407, -21407, // ; movq-> w06 w04 w02 w00
27969, 11585, 11585, -27969, // ; w07 w05 w03 w01
21407, -21407, 21407, 21407, // ; w14 w12 w10 w08
-11585, 27969, -27969, -11585, // ;w15 w13 w11 w09
29692, 16819, 25172, -29692, // ;w22 w20 w18 w16
25172, 5906, -5906, -16819, // ;w23 w21 w19 w17
16819, 5906, 5906, 25172, // ;w30 w28 w26 w24
-29692, 25172, -16819, -29692}; // ;w31 w29 w27 w25
//; Table for rows 3,5 - constants are multiplied by cos_3_16
const short tab_i_35[] = {
19266, 19266, 19266, -19266, //; movq-> w06 w04 w02 w00
25172, 10426, 10426, -25172, //; w07 w05 w03 w01
19266, -19266, 19266, 19266, //; w14 w12 w10 w08
-10426, 25172, -25172, -10426, //; w15 w13 w11 w09
26722, 15137, 22654, -26722, //; w22 w20 w18 w16
22654, 5315, -5315, -15137, //; w23 w21 w19 w17
15137, 5315, 5315, 22654, //; w30 w28 w26 w24
-26722, 22654, -15137, -26722}; //; w31 w29 w27 w25
*/
// CONCATENATED TABLE, rows 0,1,2,3,4,5,6,7 (in order )
//
// In our implementation, however, we only use row0 !
//
static const short tab_i_01234567[] = {
//row0, this row is required
16384, 16384, 16384, -16384, // ; movq-> w06 w04 w02 w00
21407, 8867, 8867, -21407, // w07 w05 w03 w01
16384, -16384, 16384, 16384, //; w14 w12 w10 w08
-8867, 21407, -21407, -8867, //; w15 w13 w11 w09
22725, 12873, 19266, -22725, //; w22 w20 w18 w16
19266, 4520, -4520, -12873, //; w23 w21 w19 w17
12873, 4520, 4520, 19266, //; w30 w28 w26 w24
-22725, 19266, -12873, -22725, //w31 w29 w27 w25
// the rest of these rows (1-7), aren't used !
//row1
22725, 22725, 22725, -22725, // ; movq-> w06 w04 w02 w00
29692, 12299, 12299, -29692, // ; w07 w05 w03 w01
22725, -22725, 22725, 22725, //; w14 w12 w10 w08
-12299, 29692, -29692, -12299, //; w15 w13 w11 w09
31521, 17855, 26722, -31521, //; w22 w20 w18 w16
26722, 6270, -6270, -17855, //; w23 w21 w19 w17
17855, 6270, 6270, 26722, //; w30 w28 w26 w24
-31521, 26722, -17855, -31521, // w31 w29 w27 w25
//row2
21407, 21407, 21407, -21407, // ; movq-> w06 w04 w02 w00
27969, 11585, 11585, -27969, // ; w07 w05 w03 w01
21407, -21407, 21407, 21407, // ; w14 w12 w10 w08
-11585, 27969, -27969, -11585, // ;w15 w13 w11 w09
29692, 16819, 25172, -29692, // ;w22 w20 w18 w16
25172, 5906, -5906, -16819, // ;w23 w21 w19 w17
16819, 5906, 5906, 25172, // ;w30 w28 w26 w24
-29692, 25172, -16819, -29692, // ;w31 w29 w27 w25
//row3
19266, 19266, 19266, -19266, //; movq-> w06 w04 w02 w00
25172, 10426, 10426, -25172, //; w07 w05 w03 w01
19266, -19266, 19266, 19266, //; w14 w12 w10 w08
-10426, 25172, -25172, -10426, //; w15 w13 w11 w09
26722, 15137, 22654, -26722, //; w22 w20 w18 w16
22654, 5315, -5315, -15137, //; w23 w21 w19 w17
15137, 5315, 5315, 22654, //; w30 w28 w26 w24
-26722, 22654, -15137, -26722, //; w31 w29 w27 w25
//row4
16384, 16384, 16384, -16384, // ; movq-> w06 w04 w02 w00
21407, 8867, 8867, -21407, // w07 w05 w03 w01
16384, -16384, 16384, 16384, //; w14 w12 w10 w08
-8867, 21407, -21407, -8867, //; w15 w13 w11 w09
22725, 12873, 19266, -22725, //; w22 w20 w18 w16
19266, 4520, -4520, -12873, //; w23 w21 w19 w17
12873, 4520, 4520, 19266, //; w30 w28 w26 w24
-22725, 19266, -12873, -22725, //w31 w29 w27 w25
//row5
19266, 19266, 19266, -19266, //; movq-> w06 w04 w02 w00
25172, 10426, 10426, -25172, //; w07 w05 w03 w01
19266, -19266, 19266, 19266, //; w14 w12 w10 w08
-10426, 25172, -25172, -10426, //; w15 w13 w11 w09
26722, 15137, 22654, -26722, //; w22 w20 w18 w16
22654, 5315, -5315, -15137, //; w23 w21 w19 w17
15137, 5315, 5315, 22654, //; w30 w28 w26 w24
-26722, 22654, -15137, -26722, //; w31 w29 w27 w25
//row6
21407, 21407, 21407, -21407, // ; movq-> w06 w04 w02 w00
27969, 11585, 11585, -27969, // ; w07 w05 w03 w01
21407, -21407, 21407, 21407, // ; w14 w12 w10 w08
-11585, 27969, -27969, -11585, // ;w15 w13 w11 w09
29692, 16819, 25172, -29692, // ;w22 w20 w18 w16
25172, 5906, -5906, -16819, // ;w23 w21 w19 w17
16819, 5906, 5906, 25172, // ;w30 w28 w26 w24
-29692, 25172, -16819, -29692, // ;w31 w29 w27 w25
//row7
22725, 22725, 22725, -22725, // ; movq-> w06 w04 w02 w00
29692, 12299, 12299, -29692, // ; w07 w05 w03 w01
22725, -22725, 22725, 22725, //; w14 w12 w10 w08
-12299, 29692, -29692, -12299, //; w15 w13 w11 w09
31521, 17855, 26722, -31521, //; w22 w20 w18 w16
26722, 6270, -6270, -17855, //; w23 w21 w19 w17
17855, 6270, 6270, 26722, //; w30 w28 w26 w24
-31521, 26722, -17855, -31521}; // w31 w29 w27 w25
#define INP eax // pointer to (short *blk)
#define OUT ecx // pointer to output (temporary store space qwTemp[])
#define TABLE ebx // pointer to tab_i_01234567[]
#define round_inv_row edx
#define round_inv_col edx
#define ROW_STRIDE 8 // for 8x8 matrix transposer
// private variables and functions
//temporary storage space, 8x8 of shorts
__inline static void idct_mmx32_rows( short *blk ); // transform rows
__inline static void idct_mmx32_cols( short *blk ); // transform "columns"
// the "column" transform actually transforms rows, it is
// identical to the row-transform except for the ROUNDING
// and SHIFTING coefficients.
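/* Editor's note, not code from this commit: per the algorithm overview above,
* the public entry point declared in dct.h is expected to simply chain the two
* passes, roughly:
*
*   void gst_idct_mmx32_idct (short *block)
*   {
*     idct_mmx32_rows (block);   // pass 1: row IDCT on all 8 rows, then transpose
*     idct_mmx32_cols (block);   // pass 2: same row transform with the column
*                                //         shift/round constants, then transpose back
*   }
*/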
static void
idct_mmx32_rows( short *blk ) // transform all 8 rows of 8x8 iDCT block
{
int x;
short qwTemp[64];
short *out = &qwTemp[0];
short *inptr = blk;
// this subroutine performs two operations
// 1) iDCT row transform
// for( i = 0; i < 8; ++ i)
// DCT_8_INV_ROW_1( blk[i*8], qwTemp[i] );
//
// 2) transpose the matrix (which was stored in qwTemp[])
// qwTemp[] -> [8x8 matrix transpose] -> blk[]
for (x=0; x<8; x++) { // transform one row per iteration
movq_m2r(*(inptr), mm0); // 0 ; x3 x2 x1 x0
movq_m2r(*(inptr+4), mm1); // 1 ; x7 x6 x5 x4
movq_r2r(mm0, mm2); // 2 ; x3 x2 x1 x0
movq_m2r(*(tab_i_01234567), mm3); // 3 ; w06 w04 w02 w00
punpcklwd_r2r(mm1, mm0); // x5 x1 x4 x0
// ----------
movq_r2r(mm0, mm5); // 5 ; x5 x1 x4 x0
punpckldq_r2r(mm0, mm0); // x4 x0 x4 x0
movq_m2r(*(tab_i_01234567+4), mm4); // 4 ; w07 w05 w03 w01
punpckhwd_r2r(mm1, mm2); // 1 ; x7 x3 x6 x2
pmaddwd_r2r(mm0, mm3); // x4*w06+x0*w04 x4*w02+x0*w00
movq_r2r(mm2, mm6); // 6 ; x7 x3 x6 x2
movq_m2r(*(tab_i_01234567+16), mm1);// 1 ; w22 w20 w18 w16
punpckldq_r2r(mm2, mm2); // x6 x2 x6 x2
pmaddwd_r2r(mm2, mm4); // x6*w07+x2*w05 x6*w03+x2*w01
punpckhdq_r2r(mm5, mm5); // x5 x1 x5 x1
pmaddwd_m2r(*(tab_i_01234567+8), mm0);// x4*w14+x0*w12 x4*w10+x0*w08
punpckhdq_r2r(mm6, mm6); // x7 x3 x7 x3
movq_m2r(*(tab_i_01234567+20), mm7);// 7 ; w23 w21 w19 w17
pmaddwd_r2r(mm5, mm1); // x5*w22+x1*w20 x5*w18+x1*w16
paddd_m2r(*(r_inv_row), mm3);// +rounder
pmaddwd_r2r(mm6, mm7); // x7*w23+x3*w21 x7*w19+x3*w17
pmaddwd_m2r(*(tab_i_01234567+12), mm2);// x6*w15+x2*w13 x6*w11+x2*w09
paddd_r2r(mm4, mm3); // 4 ; a1=sum(even1) a0=sum(even0)
pmaddwd_m2r(*(tab_i_01234567+24), mm5);// x5*w30+x1*w28 x5*w26+x1*w24
movq_r2r(mm3, mm4); // 4 ; a1 a0
pmaddwd_m2r(*(tab_i_01234567+28), mm6);// x7*w31+x3*w29 x7*w27+x3*w25
paddd_r2r(mm7, mm1); // 7 ; b1=sum(odd1) b0=sum(odd0)
paddd_m2r(*(r_inv_row), mm0);// +rounder
psubd_r2r(mm1, mm3); // a1-b1 a0-b0
psrad_i2r(SHIFT_INV_ROW, mm3); // y6=a1-b1 y7=a0-b0
paddd_r2r(mm4, mm1); // 4 ; a1+b1 a0+b0
paddd_r2r(mm2, mm0); // 2 ; a3=sum(even3) a2=sum(even2)
psrad_i2r(SHIFT_INV_ROW, mm1); // y1=a1+b1 y0=a0+b0
paddd_r2r(mm6, mm5); // 6 ; b3=sum(odd3) b2=sum(odd2)
movq_r2r(mm0, mm4); // 4 ; a3 a2
paddd_r2r(mm5, mm0); // a3+b3 a2+b2
psubd_r2r(mm5, mm4); // 5 ; a3-b3 a2-b2
psrad_i2r(SHIFT_INV_ROW, mm4); // y4=a3-b3 y5=a2-b2
psrad_i2r(SHIFT_INV_ROW, mm0); // y3=a3+b3 y2=a2+b2
packssdw_r2r(mm3, mm4); // 3 ; y6 y7 y4 y5
packssdw_r2r(mm0, mm1); // 0 ; y3 y2 y1 y0
movq_r2r(mm4, mm7); // 7 ; y6 y7 y4 y5
psrld_i2r(16, mm4); // 0 y6 0 y4
movq_r2m(mm1, *(out)); // 1 ; save y3 y2 y1 y0
pslld_i2r(16, mm7); // y7 0 y5 0
por_r2r(mm4, mm7); // 4 ; y7 y6 y5 y4
// begin processing row 1
movq_r2m(mm7, *(out+4)); // 7 ; save y7 y6 y5 y4
inptr += 8;
out += 8;
}
// done with the iDCT row-transformation
// now we have to transpose the output 8x8 matrix
// 8x8 (OUT) -> 8x8't' (IN)
// the transposition is implemented as 4 sub-operations.
// 1) transpose upper-left quad
// 2) transpose lower-right quad
// 3) transpose lower-left quad
// 4) transpose upper-right quad
// mm0 = 1st row [ A B C D ] row1
// mm1 = 2nd row [ E F G H ] 2
// mm2 = 3rd row [ I J K L ] 3
// mm3 = 4th row [ M N O P ] 4
// 1) transpose upper-left quad
out = &qwTemp[0];
movq_m2r(*(out + ROW_STRIDE * 0), mm0);
movq_m2r(*(out + ROW_STRIDE * 1), mm1);
movq_r2r(mm0, mm4); // mm4 = copy of row1[A B C D]
movq_m2r(*(out + ROW_STRIDE * 2), mm2);
punpcklwd_r2r(mm1, mm0); // mm0 = [ 0 4 1 5]
movq_m2r(*(out + ROW_STRIDE * 3), mm3);
punpckhwd_r2r(mm1, mm4); // mm4 = [ 2 6 3 7]
movq_r2r(mm2, mm6);
punpcklwd_r2r(mm3, mm2); // mm2 = [ 8 12 9 13]
punpckhwd_r2r(mm3, mm6); // mm6 = 10 14 11 15]
movq_r2r(mm0, mm1); // mm1 = [ 0 4 1 5]
inptr = blk;
punpckldq_r2r(mm2, mm0); // final result mm0 = row1 [0 4 8 12]
movq_r2r(mm4, mm3); // mm3 = [ 2 6 3 7]
punpckhdq_r2r(mm2, mm1); // mm1 = final result mm1 = row2 [1 5 9 13]
movq_r2m(mm0, *(inptr + ROW_STRIDE * 0)); // store row 1
punpckldq_r2r(mm6, mm4); // final result mm4 = row3 [2 6 10 14]
// begin reading next quadrant (lower-right)
movq_m2r(*(out + ROW_STRIDE*4 + 4), mm0);
punpckhdq_r2r(mm6, mm3); // final result mm3 = row4 [3 7 11 15]
movq_r2m(mm4, *(inptr + ROW_STRIDE * 2)); // store row 3
movq_r2r(mm0, mm4); // mm4 = copy of row1[A B C D]
movq_r2m(mm1, *(inptr + ROW_STRIDE * 1)); // store row 2
movq_m2r(*(out + ROW_STRIDE*5 + 4), mm1);
movq_r2m(mm3, *(inptr + ROW_STRIDE * 3)); // store row 4
punpcklwd_r2r(mm1, mm0); // mm0 = [ 0 4 1 5]
// 2) transpose lower-right quadrant
// movq mm0, qword ptr [OUT + ROW_STRIDE*4 + 8]
// movq mm1, qword ptr [OUT + ROW_STRIDE*5 + 8]
// movq mm4, mm0; // mm4 = copy of row1[A B C D]
movq_m2r(*(out + ROW_STRIDE*6 + 4), mm2);
// punpcklwd mm0, mm1; // mm0 = [ 0 4 1 5]
punpckhwd_r2r(mm1, mm4); // mm4 = [ 2 6 3 7]
movq_m2r(*(out + ROW_STRIDE*7 + 4), mm3);
movq_r2r(mm2, mm6);
punpcklwd_r2r(mm3, mm2); // mm2 = [ 8 12 9 13]
movq_r2r(mm0, mm1); // mm1 = [ 0 4 1 5]
punpckhwd_r2r(mm3, mm6); // mm6 = [10 14 11 15]
movq_r2r(mm4, mm3); // mm3 = [ 2 6 3 7]
punpckldq_r2r(mm2, mm0); // final result mm0 = row1 [0 4 8 12]
punpckhdq_r2r(mm2, mm1); // mm1 = final result mm1 = row2 [1 5 9 13]
; // slot
movq_r2m(mm0, *(inptr + ROW_STRIDE*4 + 4)); // store row 1
punpckldq_r2r(mm6, mm4); // final result mm4 = row3 [2 6 10 14]
movq_m2r(*(out + ROW_STRIDE * 4 ), mm0);
punpckhdq_r2r(mm6, mm3); // final result mm3 = row4 [3 7 11 15]
movq_r2m(mm4, *(inptr + ROW_STRIDE*6 + 4)); // store row 3
movq_r2r(mm0, mm4); // mm4 = copy of row1[A B C D]
movq_r2m(mm1, *(inptr + ROW_STRIDE*5 + 4)); // store row 2
; // slot
movq_m2r(*(out + ROW_STRIDE * 5 ), mm1);
; // slot
movq_r2m(mm3, *(inptr + ROW_STRIDE*7 + 4)); // store row 4
punpcklwd_r2r(mm1, mm0); // mm0 = [ 0 4 1 5]
// 3) transpose lower-left
// movq mm0, qword ptr [OUT + ROW_STRIDE * 4 ]
// movq mm1, qword ptr [OUT + ROW_STRIDE * 5 ]
// movq mm4, mm0; // mm4 = copy of row1[A B C D]
movq_m2r(*(out + ROW_STRIDE * 6 ), mm2);
// punpcklwd mm0, mm1; // mm0 = [ 0 4 1 5]
punpckhwd_r2r(mm1, mm4); // mm4 = [ 2 6 3 7]
movq_m2r(*(out + ROW_STRIDE * 7 ), mm3);
movq_r2r(mm2, mm6);
punpcklwd_r2r(mm3, mm2); // mm2 = [ 8 12 9 13]
movq_r2r(mm0, mm1); // mm1 = [ 0 4 1 5]
punpckhwd_r2r(mm3, mm6); // mm6 = [10 14 11 15]
movq_r2r(mm4, mm3); // mm3 = [ 2 6 3 7]
punpckldq_r2r(mm2, mm0); // final result mm0 = row1 [0 4 8 12]
punpckhdq_r2r(mm2, mm1); // mm1 = final result mm1 = row2 [1 5 9 13]
;//slot
movq_r2m(mm0, *(inptr + ROW_STRIDE * 0 + 4 )); // store row 1
punpckldq_r2r(mm6, mm4); // final result mm4 = row3 [2 6 10 14]
// begin reading next quadrant (upper-right)
movq_m2r(*(out + ROW_STRIDE*0 + 4), mm0);
punpckhdq_r2r(mm6, mm3); // final result mm3 = row4 [3 7 11 15]
movq_r2m(mm4, *(inptr + ROW_STRIDE * 2 + 4)); // store row 3
movq_r2r(mm0, mm4); // mm4 = copy of row1[A B C D]
movq_r2m(mm1, *(inptr + ROW_STRIDE * 1 + 4)); // store row 2
movq_m2r(*(out + ROW_STRIDE*1 + 4), mm1);
movq_r2m(mm3, *(inptr + ROW_STRIDE * 3 + 4)); // store row 4
punpcklwd_r2r(mm1, mm0); // mm0 = [ 0 4 1 5]
// 4) transpose upper-right quadrant
// movq mm0, qword ptr [OUT + ROW_STRIDE*0 + 8]
// movq mm1, qword ptr [OUT + ROW_STRIDE*1 + 8]
// movq mm4, mm0; // mm4 = copy of row1[A B C D]
movq_m2r(*(out + ROW_STRIDE*2 + 4), mm2);
// punpcklwd mm0, mm1; // mm0 = [ 0 4 1 5]
punpckhwd_r2r(mm1, mm4); // mm4 = [ 2 6 3 7]
movq_m2r(*(out + ROW_STRIDE*3 + 4), mm3);
movq_r2r(mm2, mm6);
punpcklwd_r2r(mm3, mm2); // mm2 = [ 8 12 9 13]
movq_r2r(mm0, mm1); // mm1 = [ 0 4 1 5]
punpckhwd_r2r(mm3, mm6); // mm6 = [10 14 11 15]
movq_r2r(mm4, mm3); // mm3 = [ 2 6 3 7]
punpckldq_r2r(mm2, mm0); // final result mm0 = row1 [0 4 8 12]
punpckhdq_r2r(mm2, mm1); // mm1 = final result mm1 = row2 [1 5 9 13]
; // slot
movq_r2m(mm0, *(inptr + ROW_STRIDE*4)); // store row 1
punpckldq_r2r(mm6, mm4); // final result mm4 = row3 [2 6 10 14]
movq_r2m(mm1, *(inptr + ROW_STRIDE*5)); // store row 2
punpckhdq_r2r(mm6, mm3); // final result mm3 = row4 [3 7 11 15]
movq_r2m(mm4, *(inptr + ROW_STRIDE*6)); // store row 3
; // slot
movq_r2m(mm3, *(inptr + ROW_STRIDE*7)); // store row 4
; // slot
}
static void
idct_mmx32_cols( short *blk ) // transform all 8 cols of 8x8 iDCT block
{
int x;
short *inptr = blk;
// Despite the function's name, the matrix is transformed
// row by row. This function is identical to idct_mmx32_rows(),
// except for the SHIFT amount and ROUND_INV amount.
// this subroutine performs a single operation:
// 1) iDCT row transform on the already-transposed data, in place:
// for( i = 0; i < 8; ++ i)
// DCT_8_INV_ROW_1( blk[i*8], blk[i*8] );
//
// (unlike idct_mmx32_rows(), no qwTemp[] scratch buffer is used and
// no further transpose step follows)
for (x=0; x<8; x++) { // transform one row per iteration
movq_m2r(*(inptr), mm0); // 0 ; x3 x2 x1 x0
movq_m2r(*(inptr+4), mm1); // 1 ; x7 x6 x5 x4
movq_r2r(mm0, mm2); // 2 ; x3 x2 x1 x0
movq_m2r(*(tab_i_01234567), mm3); // 3 ; w06 w04 w02 w00
punpcklwd_r2r(mm1, mm0); // x5 x1 x4 x0
// ----------
movq_r2r(mm0, mm5); // 5 ; x5 x1 x4 x0
punpckldq_r2r(mm0, mm0); // x4 x0 x4 x0
movq_m2r(*(tab_i_01234567+4), mm4); // 4 ; w07 w05 w03 w01
punpckhwd_r2r(mm1, mm2); // 1 ; x7 x3 x6 x2
pmaddwd_r2r(mm0, mm3); // x4*w06+x0*w04 x4*w02+x0*w00
movq_r2r(mm2, mm6); // 6 ; x7 x3 x6 x2
movq_m2r(*(tab_i_01234567+16), mm1);// 1 ; w22 w20 w18 w16
punpckldq_r2r(mm2, mm2); // x6 x2 x6 x2
pmaddwd_r2r(mm2, mm4); // x6*w07+x2*w05 x6*w03+x2*w01
punpckhdq_r2r(mm5, mm5); // x5 x1 x5 x1
pmaddwd_m2r(*(tab_i_01234567+8), mm0);// x4*w14+x0*w12 x4*w10+x0*w08
punpckhdq_r2r(mm6, mm6); // x7 x3 x7 x3
movq_m2r(*(tab_i_01234567+20), mm7);// 7 ; w23 w21 w19 w17
pmaddwd_r2r(mm5, mm1); // x5*w22+x1*w20 x5*w18+x1*w16
paddd_m2r(*(r_inv_col), mm3);// +rounder
pmaddwd_r2r(mm6, mm7); // x7*w23+x3*w21 x7*w19+x3*w17
pmaddwd_m2r(*(tab_i_01234567+12), mm2);// x6*w15+x2*w13 x6*w11+x2*w09
paddd_r2r(mm4, mm3); // 4 ; a1=sum(even1) a0=sum(even0)
pmaddwd_m2r(*(tab_i_01234567+24), mm5);// x5*w30+x1*w28 x5*w26+x1*w24
movq_r2r(mm3, mm4); // 4 ; a1 a0
pmaddwd_m2r(*(tab_i_01234567+28), mm6);// x7*w31+x3*w29 x7*w27+x3*w25
paddd_r2r(mm7, mm1); // 7 ; b1=sum(odd1) b0=sum(odd0)
paddd_m2r(*(r_inv_col), mm0);// +rounder
psubd_r2r(mm1, mm3); // a1-b1 a0-b0
psrad_i2r(SHIFT_INV_COL, mm3); // y6=a1-b1 y7=a0-b0
paddd_r2r(mm4, mm1); // 4 ; a1+b1 a0+b0
paddd_r2r(mm2, mm0); // 2 ; a3=sum(even3) a2=sum(even2)
psrad_i2r(SHIFT_INV_COL, mm1); // y1=a1+b1 y0=a0+b0
paddd_r2r(mm6, mm5); // 6 ; b3=sum(odd3) b2=sum(odd2)
movq_r2r(mm0, mm4); // 4 ; a3 a2
paddd_r2r(mm5, mm0); // a3+b3 a2+b2
psubd_r2r(mm5, mm4); // 5 ; a3-b3 a2-b2
psrad_i2r(SHIFT_INV_COL, mm4); // y4=a3-b3 y5=a2-b2
psrad_i2r(SHIFT_INV_COL, mm0); // y3=a3+b3 y2=a2+b2
packssdw_r2r(mm3, mm4); // 3 ; y6 y7 y4 y5
packssdw_r2r(mm0, mm1); // 0 ; y3 y2 y1 y0
movq_r2r(mm4, mm7); // 7 ; y6 y7 y4 y5
psrld_i2r(16, mm4); // 0 y6 0 y4
movq_r2m(mm1, *(inptr)); // 1 ; save y3 y2 y1 y0
pslld_i2r(16, mm7); // y7 0 y5 0
por_r2r(mm4, mm7); // 4 ; y7 y6 y5 y4
// begin processing row 1
movq_r2m(mm7, *(inptr+4)); // 7 ; save y7 y6 y5 y4
inptr += 8;
}
// done with the iDCT column-transformation
}
//
// public interface to MMX32 IDCT 8x8 operation
//
void
gst_idct_mmx32_idct( short *blk )
{
// 1) iDCT row transformation
idct_mmx32_rows( blk ); // 1) transform iDCT row, and transpose
// 2) iDCT column transformation
idct_mmx32_cols( blk ); // 2) transform iDCT columns (the rows of the transposed data)
emms(); // restore processor state
// all done
}
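A minimal usage sketch for the routine above (hedged: only gst_idct_mmx32_idct comes from this file; the buffer name and the dequantization step are illustrative):

short block[64]; /* 8x8 block, row-major, signed 16-bit coefficients */
/* ... fill block[] with dequantized DCT coefficients ... */
gst_idct_mmx32_idct(block); /* in-place 2-D inverse DCT: rows, then columns */
/* block[] now holds the reconstructed spatial-domain samples */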

740
gst-libs/gst/idct/mmxidct.S Normal file
View file

@@ -0,0 +1,740 @@
/*
 * the input data is transposed and each 16-bit element in the 8x8 matrix
 * is left aligned:
 * for example in 11...1110000 format
 * If the iDCT is of an I macroblock then 0.5 needs to be added to the
 * DC component (element[0][0] of the matrix)
*
* Notes:
* - the scratchN variables should be put on the stack to avoid
* reentrancy problems
*/
#ifdef PIC
#define pic_offset(a) a@GOTOFF(%ebx)
#else
#define pic_offset(a) a
#endif
/* extrn re_matrix */
.data
.align 16
.type preSC,@object
preSC: .short 16384,22725,21407,19266,16384,12873,8867,4520
.short 22725,31521,29692,26722,22725,17855,12299,6270
.short 21407,29692,27969,25172,21407,16819,11585,5906
.short 19266,26722,25172,22654,19266,15137,10426,5315
.short 16384,22725,21407,19266,16384,12873,8867,4520
.short 12873,17855,16819,15137,25746,20228,13933,7103
.short 17734,24598,23170,20853,17734,13933,9597,4892
.short 18081,25080,23624,21261,18081,14206,9785,4988
.size preSC,128
.align 8
.type x0005000200010001,@object
.size x0005000200010001,8
x0005000200010001:
.long 0x00010001,0x00050002
.align 8
.type x0040000000000000,@object
.size x0040000000000000,8
x0040000000000000:
.long 0, 0x00400000
.align 8
.type x5a825a825a825a82,@object
.size x5a825a825a825a82,8
x5a825a825a825a82:
.long 0x5a825a82, 0x5a825a82
.align 8
.type x539f539f539f539f,@object
.size x539f539f539f539f,8
x539f539f539f539f:
.long 0x539f539f,0x539f539f
.align 8
.type x4546454645464546,@object
.size x4546454645464546,8
x4546454645464546:
.long 0x45464546,0x45464546
.align 8
.type x61f861f861f861f8,@object
.size x61f861f861f861f8,8
x61f861f861f861f8:
.long 0x61f861f8,0x61f861f8
.type x0004000000000000,@object
.size x0004000000000000,8
x0004000000000000:
.long 0x00000000,0x00040000
.type x0000000000000004,@object
.size x0000000000000004,8
x0000000000000004:
.long 0x00000004,0x00000000
.align 8
.type scratch1,@object
.size scratch1,8
scratch1:
.long 0,0
.align 8
.type scratch3,@object
.size scratch3,8
scratch3:
.long 0,0
.align 8
.type scratch5,@object
.size scratch5,8
scratch5:
.long 0,0
.align 8
.type scratch7,@object
.size scratch7,8
scratch7:
.long 0,0
.type x0,@object
.size x0,8
x0:
.long 0,0
.align 8
.text
.align 4
.globl gst_idct_mmx_idct
.type gst_idct_mmx_idct,@function
gst_idct_mmx_idct:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
#ifdef PIC
call here
here: popl %ebx
addl $_GLOBAL_OFFSET_TABLE_+[.-here],%ebx
#endif
movl 8(%ebp),%esi /* source matrix */
movq (%esi), %mm0
paddw pic_offset(x0000000000000004), %mm0
movq 8(%esi), %mm1
psllw $4, %mm0
movq 16(%esi), %mm2
psllw $4, %mm1
movq 24(%esi), %mm3
psllw $4, %mm2
movq 32(%esi), %mm4
psllw $4, %mm3
movq 40(%esi), %mm5
psllw $4, %mm4
movq 48(%esi), %mm6
psllw $4, %mm5
movq 56(%esi), %mm7
psllw $4, %mm6
psllw $4, %mm7
movq %mm0, (%esi)
movq %mm1, 8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
movq 64(%esi), %mm0
movq 72(%esi), %mm1
psllw $4, %mm0
movq 80(%esi), %mm2
psllw $4, %mm1
movq 88(%esi), %mm3
psllw $4, %mm2
movq 96(%esi), %mm4
psllw $4, %mm3
movq 104(%esi), %mm5
psllw $4, %mm4
movq 112(%esi), %mm6
psllw $4, %mm5
movq 120(%esi), %mm7
psllw $4, %mm6
psllw $4, %mm7
movq %mm0,64(%esi)
movq %mm1,72(%esi)
movq %mm2,80(%esi)
movq %mm3,88(%esi)
movq %mm4,96(%esi)
movq %mm5,104(%esi)
movq %mm6,112(%esi)
movq %mm7,120(%esi)
leal pic_offset(preSC), %ecx
/* column 0: even part
* use V4, V12, V0, V8 to produce V22..V25
*/
movq 8*12(%ecx), %mm0 /* maybe the first mul can be done together */
/* with the dequantization in iHuff module */
pmulhw 8*12(%esi), %mm0 /* V12 */
movq 8*4(%ecx), %mm1
pmulhw 8*4(%esi), %mm1 /* V4 */
movq (%ecx), %mm3
psraw $1, %mm0 /* t64=t66 */
pmulhw (%esi), %mm3 /* V0 */
movq 8*8(%ecx), %mm5 /* duplicate V4 */
movq %mm1, %mm2 /* added 11/1/96 */
pmulhw 8*8(%esi),%mm5 /* V8 */
psubsw %mm0, %mm1 /* V16 */
pmulhw pic_offset(x5a825a825a825a82), %mm1 /* 23170 ->V18 */
paddsw %mm0, %mm2 /* V17 */
movq %mm2, %mm0 /* duplicate V17 */
psraw $1, %mm2 /* t75=t82 */
psraw $2, %mm0 /* t72 */
movq %mm3, %mm4 /* duplicate V0 */
paddsw %mm5, %mm3 /* V19 */
psubsw %mm5, %mm4 /* V20 ;mm5 free */
/* moved from the block below */
movq 8*10(%ecx), %mm7
psraw $1, %mm3 /* t74=t81 */
movq %mm3, %mm6 /* duplicate t74=t81 */
psraw $2, %mm4 /* t77=t79 */
psubsw %mm0, %mm1 /* V21 ; mm0 free */
paddsw %mm2, %mm3 /* V22 */
movq %mm1, %mm5 /* duplicate V21 */
paddsw %mm4, %mm1 /* V23 */
movq %mm3, 8*4(%esi) /* V22 */
psubsw %mm5, %mm4 /* V24; mm5 free */
movq %mm1, 8*12(%esi) /* V23 */
psubsw %mm2, %mm6 /* V25; mm2 free */
movq %mm4, (%esi) /* V24 */
/* keep mm6 alive all along the next block */
/* movq %mm6, 8*8(%esi) V25 */
/* column 0: odd part
* use V2, V6, V10, V14 to produce V31, V39, V40, V41
*/
/* moved above: movq 8*10(%ecx), %mm7 */
pmulhw 8*10(%esi), %mm7 /* V10 */
movq 8*6(%ecx), %mm0
pmulhw 8*6(%esi), %mm0 /* V6 */
movq 8*2(%ecx), %mm5
movq %mm7, %mm3 /* duplicate V10 */
pmulhw 8*2(%esi), %mm5 /* V2 */
movq 8*14(%ecx), %mm4
psubsw %mm0, %mm7 /* V26 */
pmulhw 8*14(%esi), %mm4 /* V14 */
paddsw %mm0, %mm3 /* V29 ; free mm0 */
movq %mm7, %mm1 /* duplicate V26 */
psraw $1, %mm3 /* t91=t94 */
pmulhw pic_offset(x539f539f539f539f),%mm7 /* V33 */
psraw $1, %mm1 /* t96 */
movq %mm5, %mm0 /* duplicate V2 */
psraw $2, %mm4 /* t85=t87 */
paddsw %mm4,%mm5 /* V27 */
psubsw %mm4, %mm0 /* V28 ; free mm4 */
movq %mm0, %mm2 /* duplicate V28 */
psraw $1, %mm5 /* t90=t93 */
pmulhw pic_offset(x4546454645464546),%mm0 /* V35 */
psraw $1, %mm2 /* t97 */
movq %mm5, %mm4 /* duplicate t90=t93 */
psubsw %mm2, %mm1 /* V32 ; free mm2 */
pmulhw pic_offset(x61f861f861f861f8),%mm1 /* V36 */
psllw $1, %mm7 /* t107 */
paddsw %mm3, %mm5 /* V31 */
psubsw %mm3, %mm4 /* V30 ; free mm3 */
pmulhw pic_offset(x5a825a825a825a82),%mm4 /* V34 */
nop
psubsw %mm1, %mm0 /* V38 */
psubsw %mm7, %mm1 /* V37 ; free mm7 */
psllw $1, %mm1 /* t114 */
/* move from the next block */
movq %mm6, %mm3 /* duplicate V25 */
/* move from the next block */
movq 8*4(%esi), %mm7 /* V22 */
psllw $1, %mm0 /* t110 */
psubsw %mm5, %mm0 /* V39 (mm5 needed for next block) */
psllw $2, %mm4 /* t112 */
/* moved from the next block */
movq 8*12(%esi), %mm2 /* V23 */
psubsw %mm0, %mm4 /* V40 */
paddsw %mm4, %mm1 /* V41; free mm0 */
/* moved from the next block */
psllw $1, %mm2 /* t117=t125 */
/* column 0: output butterfly */
/* moved above:
* movq %mm6, %mm3 duplicate V25
* movq 8*4(%esi), %mm7 V22
* movq 8*12(%esi), %mm2 V23
* psllw $1, %mm2 t117=t125
*/
psubsw %mm1, %mm6 /* tm6 */
paddsw %mm1, %mm3 /* tm8; free mm1 */
movq %mm7, %mm1 /* duplicate V22 */
paddsw %mm5, %mm7 /* tm0 */
movq %mm3, 8*8(%esi) /* tm8; free mm3 */
psubsw %mm5, %mm1 /* tm14; free mm5 */
movq %mm6, 8*6(%esi) /* tm6; free mm6 */
movq %mm2, %mm3 /* duplicate t117=t125 */
movq (%esi), %mm6 /* V24 */
paddsw %mm0, %mm2 /* tm2 */
movq %mm7, (%esi) /* tm0; free mm7 */
psubsw %mm0, %mm3 /* tm12; free mm0 */
movq %mm1, 8*14(%esi) /* tm14; free mm1 */
psllw $1, %mm6 /* t119=t123 */
movq %mm2, 8*2(%esi) /* tm2; free mm2 */
movq %mm6, %mm0 /* duplicate t119=t123 */
movq %mm3, 8*12(%esi) /* tm12; free mm3 */
paddsw %mm4, %mm6 /* tm4 */
/* moved from next block */
movq 8*5(%ecx), %mm1
psubsw %mm4, %mm0 /* tm10; free mm4 */
/* moved from next block */
pmulhw 8*5(%esi), %mm1 /* V5 */
movq %mm6, 8*4(%esi) /* tm4; free mm6 */
movq %mm0, 8*10(%esi) /* tm10; free mm0 */
/* column 1: even part
* use V5, V13, V1, V9 to produce V56..V59
*/
/* moved to prev block:
* movq 8*5(%ecx), %mm1
* pmulhw 8*5(%esi), %mm1 V5
*/
movq 8*13(%ecx), %mm7
psllw $1, %mm1 /* t128=t130 */
pmulhw 8*13(%esi), %mm7 /* V13 */
movq %mm1, %mm2 /* duplicate t128=t130 */
movq 8(%ecx), %mm3
pmulhw 8(%esi), %mm3 /* V1 */
movq 8*9(%ecx), %mm5
psubsw %mm7, %mm1 /* V50 */
pmulhw 8*9(%esi), %mm5 /* V9 */
paddsw %mm7, %mm2 /* V51 */
pmulhw pic_offset(x5a825a825a825a82), %mm1 /* 23170 ->V52 */
movq %mm2, %mm6 /* duplicate V51 */
psraw $1, %mm2 /* t138=t144 */
movq %mm3, %mm4 /* duplicate V1 */
psraw $2, %mm6 /* t136 */
paddsw %mm5, %mm3 /* V53 */
psubsw %mm5, %mm4 /* V54 ;mm5 free */
movq %mm3, %mm7 /* duplicate V53 */
/* moved from next block */
movq 8*11(%ecx), %mm0
psraw $1, %mm4 /* t140=t142 */
psubsw %mm6, %mm1 /* V55 ; mm6 free */
paddsw %mm2, %mm3 /* V56 */
movq %mm4, %mm5 /* duplicate t140=t142 */
paddsw %mm1, %mm4 /* V57 */
movq %mm3, 8*5(%esi) /* V56 */
psubsw %mm1, %mm5 /* V58; mm1 free */
movq %mm4, 8*13(%esi) /* V57 */
psubsw %mm2, %mm7 /* V59; mm2 free */
movq %mm5, 8*9(%esi) /* V58 */
/* keep mm7 alive all along the next block
* movq %mm7, 8(%esi) V59
* moved above
* movq 8*11(%ecx), %mm0
*/
pmulhw 8*11(%esi), %mm0 /* V11 */
movq 8*7(%ecx), %mm6
pmulhw 8*7(%esi), %mm6 /* V7 */
movq 8*15(%ecx), %mm4
movq %mm0, %mm3 /* duplicate V11 */
pmulhw 8*15(%esi), %mm4 /* V15 */
movq 8*3(%ecx), %mm5
psllw $1, %mm6 /* t146=t152 */
pmulhw 8*3(%esi), %mm5 /* V3 */
paddsw %mm6, %mm0 /* V63 */
/* note that V15 computation has a correction step:
* this is a 'magic' constant that rebiases the results to be closer to the
* expected result. this magic constant can be refined to reduce the error
* even more by doing the correction step in a later stage when the number
* is actually multiplied by 16
*/
paddw pic_offset(x0005000200010001), %mm4
psubsw %mm6, %mm3 /* V60 ; free mm6 */
psraw $1, %mm0 /* t154=t156 */
movq %mm3, %mm1 /* duplicate V60 */
pmulhw pic_offset(x539f539f539f539f), %mm1 /* V67 */
movq %mm5, %mm6 /* duplicate V3 */
psraw $2, %mm4 /* t148=t150 */
paddsw %mm4, %mm5 /* V61 */
psubsw %mm4, %mm6 /* V62 ; free mm4 */
movq %mm5, %mm4 /* duplicate V61 */
psllw $1, %mm1 /* t169 */
paddsw %mm0, %mm5 /* V65 -> result */
psubsw %mm0, %mm4 /* V64 ; free mm0 */
pmulhw pic_offset(x5a825a825a825a82), %mm4 /* V68 */
psraw $1, %mm3 /* t158 */
psubsw %mm6, %mm3 /* V66 */
movq %mm5, %mm2 /* duplicate V65 */
pmulhw pic_offset(x61f861f861f861f8), %mm3 /* V70 */
psllw $1, %mm6 /* t165 */
pmulhw pic_offset(x4546454645464546), %mm6 /* V69 */
psraw $1, %mm2 /* t172 */
/* moved from next block */
movq 8*5(%esi), %mm0 /* V56 */
psllw $1, %mm4 /* t174 */
/* moved from next block */
psraw $1, %mm0 /* t177=t188 */
nop
psubsw %mm3, %mm6 /* V72 */
psubsw %mm1, %mm3 /* V71 ; free mm1 */
psubsw %mm2, %mm6 /* V73 ; free mm2 */
/* moved from next block */
psraw $1, %mm5 /* t178=t189 */
psubsw %mm6, %mm4 /* V74 */
/* moved from next block */
movq %mm0, %mm1 /* duplicate t177=t188 */
paddsw %mm4, %mm3 /* V75 */
/* moved from next block */
paddsw %mm5, %mm0 /* tm1 */
/* location
* 5 - V56
* 13 - V57
* 9 - V58
* X - V59, mm7
* X - V65, mm5
* X - V73, mm6
* X - V74, mm4
* X - V75, mm3
* free mm0, mm1 & mm2
* moved above
* movq 8*5(%esi), %mm0 V56
* psllw $1, %mm0 t177=t188 ! new !!
* psllw $1, %mm5 t178=t189 ! new !!
* movq %mm0, %mm1 duplicate t177=t188
* paddsw %mm5, %mm0 tm1
*/
movq 8*13(%esi), %mm2 /* V57 */
psubsw %mm5, %mm1 /* tm15; free mm5 */
movq %mm0, 8(%esi) /* tm1; free mm0 */
psraw $1, %mm7 /* t182=t184 ! new !! */
/* save the store as used directly in the transpose
* movq %mm1, 120(%esi) tm15; free mm1
*/
movq %mm7, %mm5 /* duplicate t182=t184 */
psubsw %mm3, %mm7 /* tm7 */
paddsw %mm3, %mm5 /* tm9; free mm3 */
movq 8*9(%esi), %mm0 /* V58 */
movq %mm2, %mm3 /* duplicate V57 */
movq %mm7, 8*7(%esi) /* tm7; free mm7 */
psubsw %mm6, %mm3 /* tm13 */
paddsw %mm6, %mm2 /* tm3 ; free mm6 */
/* moved up from the transpose */
movq %mm3, %mm7
/* moved up from the transpose */
punpcklwd %mm1, %mm3
movq %mm0, %mm6 /* duplicate V58 */
movq %mm2, 8*3(%esi) /* tm3; free mm2 */
paddsw %mm4, %mm0 /* tm5 */
psubsw %mm4, %mm6 /* tm11; free mm4 */
/* moved up from the transpose */
punpckhwd %mm1, %mm7
movq %mm0, 8*5(%esi) /* tm5; free mm0 */
/* moved up from the transpose */
movq %mm5, %mm2
/* transpose - M4 part
* --------- ---------
* | M1 | M2 | | M1'| M3'|
* --------- --> ---------
* | M3 | M4 | | M2'| M4'|
* --------- ---------
* Two alternatives: use full mmword approach so the following code can be
* scheduled before the transpose is done without stores, or use the faster
* half mmword stores (when possible)
*/
movd %mm3, 8*9+4(%esi) /* MS part of tmt9 */
punpcklwd %mm6, %mm5
movd %mm7, 8*13+4(%esi) /* MS part of tmt13 */
punpckhwd %mm6, %mm2
movd %mm5, 8*9(%esi) /* LS part of tmt9 */
punpckhdq %mm3, %mm5 /* free mm3 */
movd %mm2, 8*13(%esi) /* LS part of tmt13 */
punpckhdq %mm7, %mm2 /* free mm7 */
/* moved up from the M3 transpose */
movq 8*8(%esi), %mm0
/* moved up from the M3 transpose */
movq 8*10(%esi), %mm1
/* moved up from the M3 transpose */
movq %mm0, %mm3
/* shuffle the rest of the data, and write it with 2 mmword writes */
movq %mm5, 8*11(%esi) /* tmt11 */
/* moved up from the M3 transpose */
punpcklwd %mm1, %mm0
movq %mm2, 8*15(%esi) /* tmt15 */
/* moved up from the M3 transpose */
punpckhwd %mm1, %mm3
/* transpose - M3 part
* moved up to previous code section
* movq 8*8(%esi), %mm0
* movq 8*10(%esi), %mm1
* movq %mm0, %mm3
* punpcklwd %mm1, %mm0
* punpckhwd %mm1, %mm3
*/
movq 8*12(%esi), %mm6
movq 8*14(%esi), %mm4
movq %mm6, %mm2
/* shuffle the data and write the lower parts of the transposed in 4 dwords */
punpcklwd %mm4, %mm6
movq %mm0, %mm1
punpckhdq %mm6, %mm1
movq %mm3, %mm7
punpckhwd %mm4, %mm2 /* free mm4 */
punpckldq %mm6, %mm0 /* free mm6 */
/* moved from next block */
movq 8*13(%esi), %mm4 /* tmt13 */
punpckldq %mm2, %mm3
punpckhdq %mm2, %mm7 /* free mm2 */
/* moved from next block */
movq %mm3, %mm5 /* duplicate tmt5 */
/* column 1: even part (after transpose)
* moved above
* movq %mm3, %mm5 duplicate tmt5
* movq 8*13(%esi), %mm4 tmt13
*/
psubsw %mm4, %mm3 /* V134 */
pmulhw pic_offset(x5a825a825a825a82), %mm3 /* 23170 ->V136 */
movq 8*9(%esi), %mm6 /* tmt9 */
paddsw %mm4, %mm5 /* V135 ; mm4 free */
movq %mm0, %mm4 /* duplicate tmt1 */
paddsw %mm6, %mm0 /* V137 */
psubsw %mm6, %mm4 /* V138 ; mm6 free */
psllw $2, %mm3 /* t290 */
psubsw %mm5, %mm3 /* V139 */
movq %mm0, %mm6 /* duplicate V137 */
paddsw %mm5, %mm0 /* V140 */
movq %mm4, %mm2 /* duplicate V138 */
paddsw %mm3, %mm2 /* V141 */
psubsw %mm3, %mm4 /* V142 ; mm3 free */
movq %mm0, 8*9(%esi) /* V140 */
psubsw %mm5, %mm6 /* V143 ; mm5 free */
/* moved from next block */
movq 8*11(%esi), %mm0 /* tmt11 */
movq %mm2, 8*13(%esi) /* V141 */
/* moved from next block */
movq %mm0, %mm2 /* duplicate tmt11 */
/* column 1: odd part (after transpose) */
/* moved up to the prev block
* movq 8*11(%esi), %mm0 tmt11
* movq %mm0, %mm2 duplicate tmt11
*/
movq 8*15(%esi), %mm5 /* tmt15 */
psubsw %mm7, %mm0 /* V144 */
movq %mm0, %mm3 /* duplicate V144 */
paddsw %mm7, %mm2 /* V147 ; free mm7 */
pmulhw pic_offset(x539f539f539f539f), %mm0 /* 21407-> V151 */
movq %mm1, %mm7 /* duplicate tmt3 */
paddsw %mm5, %mm7 /* V145 */
psubsw %mm5, %mm1 /* V146 ; free mm5 */
psubsw %mm1, %mm3 /* V150 */
movq %mm7, %mm5 /* duplicate V145 */
pmulhw pic_offset(x4546454645464546), %mm1 /* 17734-> V153 */
psubsw %mm2, %mm5 /* V148 */
pmulhw pic_offset(x61f861f861f861f8), %mm3 /* 25080-> V154 */
psllw $2, %mm0 /* t311 */
pmulhw pic_offset(x5a825a825a825a82), %mm5 /* 23170-> V152 */
paddsw %mm2, %mm7 /* V149 ; free mm2 */
psllw $1, %mm1 /* t313 */
nop /* without the nop - freeze here for one clock */
movq %mm3, %mm2 /* duplicate V154 */
psubsw %mm0, %mm3 /* V155 ; free mm0 */
psubsw %mm2, %mm1 /* V156 ; free mm2 */
/* moved from the next block */
movq %mm6, %mm2 /* duplicate V143 */
/* moved from the next block */
movq 8*13(%esi), %mm0 /* V141 */
psllw $1, %mm1 /* t315 */
psubsw %mm7, %mm1 /* V157 (keep V149) */
psllw $2, %mm5 /* t317 */
psubsw %mm1, %mm5 /* V158 */
psllw $1, %mm3 /* t319 */
paddsw %mm5, %mm3 /* V159 */
/* column 1: output butterfly (after transform)
* moved to the prev block
* movq %mm6, %mm2 duplicate V143
* movq 8*13(%esi), %mm0 V141
*/
psubsw %mm3, %mm2 /* V163 */
paddsw %mm3, %mm6 /* V164 ; free mm3 */
movq %mm4, %mm3 /* duplicate V142 */
psubsw %mm5, %mm4 /* V165 ; free mm5 */
movq %mm2, pic_offset(scratch7) /* out7 */
psraw $4, %mm6
psraw $4, %mm4
paddsw %mm5, %mm3 /* V162 */
movq 8*9(%esi), %mm2 /* V140 */
movq %mm0, %mm5 /* duplicate V141 */
/* in order not to percolate this line up,
* we read 72(%esi) very near to this location
*/
movq %mm6, 8*9(%esi) /* out9 */
paddsw %mm1, %mm0 /* V161 */
movq %mm3, pic_offset(scratch5) /* out5 */
psubsw %mm1, %mm5 /* V166 ; free mm1 */
movq %mm4, 8*11(%esi) /* out11 */
psraw $4, %mm5
movq %mm0, pic_offset(scratch3) /* out3 */
movq %mm2, %mm4 /* duplicate V140 */
movq %mm5, 8*13(%esi) /* out13 */
paddsw %mm7, %mm2 /* V160 */
/* moved from the next block */
movq 8(%esi), %mm0
psubsw %mm7, %mm4 /* V167 ; free mm7 */
/* moved from the next block */
movq 8*3(%esi), %mm7
psraw $4, %mm4
movq %mm2, pic_offset(scratch1) /* out1 */
/* moved from the next block */
movq %mm0, %mm1
movq %mm4, 8*15(%esi) /* out15 */
/* moved from the next block */
punpcklwd %mm7, %mm0
/* transpose - M2 parts
* moved up to the prev block
* movq 8(%esi), %mm0
* movq 8*3(%esi), %mm7
* movq %mm0, %mm1
* punpcklwd %mm7, %mm0
*/
movq 8*5(%esi), %mm5
punpckhwd %mm7, %mm1
movq 8*7(%esi), %mm4
movq %mm5, %mm3
/* shuffle the data and write the lower parts of the transposed in 4 dwords */
movd %mm0, 8*8(%esi) /* LS part of tmt8 */
punpcklwd %mm4, %mm5
movd %mm1, 8*12(%esi) /* LS part of tmt12 */
punpckhwd %mm4, %mm3
movd %mm5, 8*8+4(%esi) /* MS part of tmt8 */
punpckhdq %mm5, %mm0 /* tmt10 */
movd %mm3, 8*12+4(%esi) /* MS part of tmt12 */
punpckhdq %mm3, %mm1 /* tmt14 */
/* transpose - M1 parts */
movq (%esi), %mm7
movq 8*2(%esi), %mm2
movq %mm7, %mm6
movq 8*4(%esi), %mm5
punpcklwd %mm2, %mm7
movq 8*6(%esi), %mm4
punpckhwd %mm2, %mm6 /* free mm2 */
movq %mm5, %mm3
punpcklwd %mm4, %mm5
punpckhwd %mm4, %mm3 /* free mm4 */
movq %mm7, %mm2
movq %mm6, %mm4
punpckldq %mm5, %mm7 /* tmt0 */
punpckhdq %mm5, %mm2 /* tmt2 ; free mm5 */
/* shuffle the rest of the data, and write it with 2 mmword writes */
punpckldq %mm3, %mm6 /* tmt4 */
/* moved from next block */
movq %mm2, %mm5 /* duplicate tmt2 */
punpckhdq %mm3, %mm4 /* tmt6 ; free mm3 */
/* moved from next block */
movq %mm0, %mm3 /* duplicate tmt10 */
/* column 0: odd part (after transpose)
*moved up to prev block
* movq %mm0, %mm3 duplicate tmt10
* movq %mm2, %mm5 duplicate tmt2
*/
psubsw %mm4, %mm0 /* V110 */
paddsw %mm4, %mm3 /* V113 ; free mm4 */
movq %mm0, %mm4 /* duplicate V110 */
paddsw %mm1, %mm2 /* V111 */
pmulhw pic_offset(x539f539f539f539f), %mm0 /* 21407-> V117 */
psubsw %mm1, %mm5 /* V112 ; free mm1 */
psubsw %mm5, %mm4 /* V116 */
movq %mm2, %mm1 /* duplicate V111 */
pmulhw pic_offset(x4546454645464546), %mm5 /* 17734-> V119 */
psubsw %mm3, %mm2 /* V114 */
pmulhw pic_offset(x61f861f861f861f8), %mm4 /* 25080-> V120 */
paddsw %mm3, %mm1 /* V115 ; free mm3 */
pmulhw pic_offset(x5a825a825a825a82), %mm2 /* 23170-> V118 */
psllw $2, %mm0 /* t266 */
movq %mm1, (%esi) /* save V115 */
psllw $1, %mm5 /* t268 */
psubsw %mm4, %mm5 /* V122 */
psubsw %mm0, %mm4 /* V121 ; free mm0 */
psllw $1, %mm5 /* t270 */
psubsw %mm1, %mm5 /* V123 ; free mm1 */
psllw $2, %mm2 /* t272 */
psubsw %mm5, %mm2 /* V124 (keep V123) */
psllw $1, %mm4 /* t274 */
movq %mm5, 8*2(%esi) /* save V123 ; free mm5 */
paddsw %mm2, %mm4 /* V125 (keep V124) */
/* column 0: even part (after transpose) */
movq 8*12(%esi), %mm0 /* tmt12 */
movq %mm6, %mm3 /* duplicate tmt4 */
psubsw %mm0, %mm6 /* V100 */
paddsw %mm0, %mm3 /* V101 ; free mm0 */
pmulhw pic_offset(x5a825a825a825a82), %mm6 /* 23170 ->V102 */
movq %mm7, %mm5 /* duplicate tmt0 */
movq 8*8(%esi), %mm1 /* tmt8 */
paddsw %mm1, %mm7 /* V103 */
psubsw %mm1, %mm5 /* V104 ; free mm1 */
movq %mm7, %mm0 /* duplicate V103 */
psllw $2, %mm6 /* t245 */
paddsw %mm3, %mm7 /* V106 */
movq %mm5, %mm1 /* duplicate V104 */
psubsw %mm3, %mm6 /* V105 */
psubsw %mm3, %mm0 /* V109; free mm3 */
paddsw %mm6, %mm5 /* V107 */
psubsw %mm6, %mm1 /* V108 ; free mm6 */
/* column 0: output butterfly (after transform) */
movq %mm1, %mm3 /* duplicate V108 */
paddsw %mm2, %mm1 /* out4 */
psraw $4, %mm1
psubsw %mm2, %mm3 /* out10 ; free mm2 */
psraw $4, %mm3
movq %mm0, %mm6 /* duplicate V109 */
movq %mm1, 8*4(%esi) /* out4 ; free mm1 */
psubsw %mm4, %mm0 /* out6 */
movq %mm3, 8*10(%esi) /* out10 ; free mm3 */
psraw $4, %mm0
paddsw %mm4, %mm6 /* out8 ; free mm4 */
movq %mm7, %mm1 /* duplicate V106 */
movq %mm0, 8*6(%esi) /* out6 ; free mm0 */
psraw $4, %mm6
movq (%esi), %mm4 /* V115 */
movq %mm6, 8*8(%esi) /* out8 ; free mm6 */
movq %mm5, %mm2 /* duplicate V107 */
movq 8*2(%esi), %mm3 /* V123 */
paddsw %mm4, %mm7 /* out0 */
/* moved up from next block */
movq pic_offset(scratch3), %mm0
psraw $4, %mm7
/* moved up from next block */
movq pic_offset(scratch5), %mm6
psubsw %mm4, %mm1 /* out14 ; free mm4 */
paddsw %mm3, %mm5 /* out2 */
psraw $4, %mm1
movq %mm7, (%esi) /* out0 ; free mm7 */
psraw $4, %mm5
movq %mm1, 8*14(%esi) /* out14 ; free mm1 */
psubsw %mm3, %mm2 /* out12 ; free mm3 */
movq %mm5, 8*2(%esi) /* out2 ; free mm5 */
psraw $4, %mm2
/* moved up to the prev block */
movq pic_offset(scratch7), %mm4
/* moved up to the prev block */
psraw $4, %mm0
movq %mm2, 8*12(%esi) /* out12 ; free mm2 */
/* moved up to the prev block */
psraw $4, %mm6
/* move back the data to its correct place
* moved up to the prev block
* movq pic_offset(scratch3), %mm0
* movq pic_offset(scratch5), %mm6
* movq pic_offset(scratch7), %mm4
* psraw $4, %mm0
* psraw $4, %mm6
*/
movq pic_offset(scratch1), %mm1
psraw $4, %mm4
movq %mm0, 8*3(%esi) /* out3 */
psraw $4, %mm1
movq %mm6, 8*5(%esi) /* out5 */
movq %mm4, 8*7(%esi) /* out7 */
movq %mm1, 8(%esi) /* out1 */
emms
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
movl %ebp,%esp
popl %ebp
ret
.Lfe1:
.size gst_idct_mmx_idct,.Lfe1-gst_idct_mmx_idct
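The assembly entry point above is called on an in-place 8x8 block of shorts; a hedged C-side sketch (the prototype spelling is an assumption, only the symbol name comes from this file). Because the scratch1/3/5/7 slots live in .data rather than on the stack (see the note at the top of the file), concurrent calls from different threads are not safe:

extern void gst_idct_mmx_idct (short *block); /* assumed prototype: 64 signed 16-bit values, in place */

/* inside the decoding loop: */
short coeffs[64];
/* ... dequantize into coeffs[] ... */
gst_idct_mmx_idct (coeffs);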

740
gst-libs/gst/idct/sseidct.S Normal file
View file

@@ -0,0 +1,740 @@
.data
.align 4
.type rounder0,@object
rounder0:
.long 65536
.long 65536
.size rounder0,8
.align 4
.type rounder4,@object
rounder4:
.long 1024
.long 1024
.size rounder4,8
.align 4
.type rounder1,@object
rounder1:
.long 3597
.long 3597
.size rounder1,8
.align 4
.type rounder7,@object
rounder7:
.long 512
.long 512
.size rounder7,8
.align 4
.type rounder2,@object
rounder2:
.long 2260
.long 2260
.size rounder2,8
.align 4
.type rounder6,@object
rounder6:
.long 512
.long 512
.size rounder6,8
.align 4
.type rounder3,@object
rounder3:
.long 1203
.long 1203
.size rounder3,8
.align 4
.type rounder5,@object
rounder5:
.long 120
.long 120
.size rounder5,8
.align 2
.type _T1.46,@object
_T1.46:
.value 13036
.value 13036
.value 13036
.value 13036
.align 2
.type _T2.47,@object
_T2.47:
.value 27146
.value 27146
.value 27146
.value 27146
.align 2
.type _T3.48,@object
_T3.48:
.value -21746
.value -21746
.value -21746
.value -21746
.align 2
.type _C4.49,@object
_C4.49:
.value 23170
.value 23170
.value 23170
.value 23170
.local scratch0.50
.comm scratch0.50,8,4
.local scratch1.51
.comm scratch1.51,8,4
.align 2
.type table04.54,@object
table04.54:
.value 16384
.value 21407
.value -16384
.value -21407
.value 16384
.value 8867
.value 16384
.value 8867
.value 22725
.value 19266
.value -22725
.value -12873
.value 12873
.value 4520
.value 19266
.value -4520
.value 16384
.value -8867
.value 16384
.value -8867
.value -16384
.value 21407
.value 16384
.value -21407
.value 12873
.value -22725
.value 19266
.value -22725
.value 4520
.value 19266
.value 4520
.value -12873
.align 2
.type table17.55,@object
table17.55:
.value 22725
.value 29692
.value -22725
.value -29692
.value 22725
.value 12299
.value 22725
.value 12299
.value 31521
.value 26722
.value -31521
.value -17855
.value 17855
.value 6270
.value 26722
.value -6270
.value 22725
.value -12299
.value 22725
.value -12299
.value -22725
.value 29692
.value 22725
.value -29692
.value 17855
.value -31521
.value 26722
.value -31521
.value 6270
.value 26722
.value 6270
.value -17855
.align 2
.type table26.56,@object
table26.56:
.value 21407
.value 27969
.value -21407
.value -27969
.value 21407
.value 11585
.value 21407
.value 11585
.value 29692
.value 25172
.value -29692
.value -16819
.value 16819
.value 5906
.value 25172
.value -5906
.value 21407
.value -11585
.value 21407
.value -11585
.value -21407
.value 27969
.value 21407
.value -27969
.value 16819
.value -29692
.value 25172
.value -29692
.value 5906
.value 25172
.value 5906
.value -16819
.align 2
.type table35.57,@object
table35.57:
.value 19266
.value 25172
.value -19266
.value -25172
.value 19266
.value 10426
.value 19266
.value 10426
.value 26722
.value 22654
.value -26722
.value -15137
.value 15137
.value 5315
.value 22654
.value -5315
.value 19266
.value -10426
.value 19266
.value -10426
.value -19266
.value 25172
.value 19266
.value -25172
.value 15137
.value -26722
.value 22654
.value -26722
.value 5315
.value 22654
.value 5315
.value -15137
.text
.align 4
.globl gst_idct_sse_idct
.type gst_idct_sse_idct,@function
gst_idct_sse_idct:
subl $8,%esp
pushl %ebp
pushl %edi
pushl %esi
pushl %ebx
call .L51
.L51:
popl %ebx
addl $_GLOBAL_OFFSET_TABLE_+[.-.L51],%ebx
movl 28(%esp),%edx
leal table04.54@GOTOFF(%ebx),%eax
movq (%edx), %mm2
movq 8(%edx), %mm5
movq %mm2, %mm0
movq (%eax), %mm3
movq %mm5, %mm6
movq 8(%eax), %mm4
pmaddwd %mm0, %mm3
pshufw $78, %mm2, %mm2
leal rounder0@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
movq 64(%edx), %mm2
psrad $11, %mm0
movq 72(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, (%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 8(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder4@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
leal table17.55@GOTOFF(%ebx),%eax
movq 16(%edx), %mm2
psrad $11, %mm0
movq 24(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, 64(%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 72(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder1@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
movq 112(%edx), %mm2
psrad $11, %mm0
movq 120(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, 16(%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 24(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder7@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
leal table26.56@GOTOFF(%ebx),%eax
movq 32(%edx), %mm2
psrad $11, %mm0
movq 40(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, 112(%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 120(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder2@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
movq 96(%edx), %mm2
psrad $11, %mm0
movq 104(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, 32(%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 40(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder6@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
leal table35.57@GOTOFF(%ebx),%eax
movq 48(%edx), %mm2
psrad $11, %mm0
movq 56(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, 96(%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 104(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder3@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
movq 80(%edx), %mm2
psrad $11, %mm0
movq 88(%edx), %mm5
psrad $11, %mm4
packssdw %mm0, %mm1
movq %mm5, %mm6
packssdw %mm3, %mm4
movq %mm2, %mm0
movq %mm1, 48(%edx)
pshufw $177, %mm4, %mm4
movq (%eax), %mm3
movq %mm4, 56(%edx)
pmaddwd %mm0, %mm3
movq 8(%eax), %mm4
pshufw $78, %mm2, %mm2
leal rounder5@GOTOFF(%ebx),%ecx
movq 16(%eax), %mm1
pmaddwd %mm2, %mm4
pmaddwd 32(%eax), %mm0
pshufw $78, %mm6, %mm6
movq 24(%eax), %mm7
pmaddwd %mm5, %mm1
paddd (%ecx), %mm3
pmaddwd %mm6, %mm7
pmaddwd 40(%eax), %mm2
paddd %mm4, %mm3
pmaddwd 48(%eax), %mm5
movq %mm3, %mm4
pmaddwd 56(%eax), %mm6
paddd %mm7, %mm1
paddd (%ecx), %mm0
psubd %mm1, %mm3
psrad $11, %mm3
paddd %mm4, %mm1
paddd %mm2, %mm0
psrad $11, %mm1
paddd %mm6, %mm5
movq %mm0, %mm4
paddd %mm5, %mm0
psubd %mm5, %mm4
psrad $11, %mm0
psrad $11, %mm4
packssdw %mm0, %mm1
packssdw %mm3, %mm4
movq %mm1, 80(%edx)
pshufw $177, %mm4, %mm4
movq %mm4, 88(%edx)
leal _T1.46@GOTOFF(%ebx),%edi
movq (%edi), %mm0
movq 16(%edx), %mm1
movq %mm0, %mm2
movq 112(%edx), %mm4
pmulhw %mm1, %mm0
leal _T3.48@GOTOFF(%ebx),%esi
movl %esi,16(%esp)
movq (%esi), %mm5
pmulhw %mm4, %mm2
movq 80(%edx), %mm6
movq %mm5, %mm7
movq 48(%edx), %mm3
psubsw %mm4, %mm0
leal _T2.47@GOTOFF(%ebx),%ecx
movq (%ecx), %mm4
pmulhw %mm3, %mm5
paddsw %mm2, %mm1
pmulhw %mm6, %mm7
movq %mm4, %mm2
paddsw %mm3, %mm5
pmulhw 32(%edx), %mm4
paddsw %mm6, %mm7
psubsw %mm6, %mm5
paddsw %mm3, %mm7
movq 96(%edx), %mm3
movq %mm0, %mm6
pmulhw %mm3, %mm2
psubsw %mm5, %mm0
psubsw %mm3, %mm4
paddsw %mm6, %mm5
leal scratch0.50@GOTOFF(%ebx),%esi
movl %esi,20(%esp)
movq %mm0, scratch0.50@GOTOFF(%ebx)
movq %mm1, %mm6
paddsw 32(%edx), %mm2
paddsw %mm7, %mm6
psubsw %mm7, %mm1
movq %mm1, %mm7
movq (%edx), %mm3
paddsw %mm5, %mm1
leal _C4.49@GOTOFF(%ebx),%eax
movq (%eax), %mm0
psubsw %mm5, %mm7
leal scratch1.51@GOTOFF(%ebx),%ebp
movq %mm6, scratch1.51@GOTOFF(%ebx)
pmulhw %mm0, %mm1
movq %mm4, %mm6
pmulhw %mm0, %mm7
movq 64(%edx), %mm5
movq %mm3, %mm0
psubsw %mm5, %mm3
paddsw %mm5, %mm0
paddsw %mm3, %mm4
movq %mm0, %mm5
psubsw %mm6, %mm3
paddsw %mm2, %mm5
paddsw %mm1, %mm1
psubsw %mm2, %mm0
paddsw %mm7, %mm7
movq %mm3, %mm2
movq %mm4, %mm6
paddsw %mm7, %mm3
psraw $6, %mm3
paddsw %mm1, %mm4
psraw $6, %mm4
psubsw %mm1, %mm6
movq (%ebp), %mm1
psubsw %mm7, %mm2
psraw $6, %mm6
movq %mm5, %mm7
movq %mm4, 16(%edx)
psraw $6, %mm2
movq %mm3, 32(%edx)
paddsw %mm1, %mm5
movq (%esi), %mm4
psubsw %mm1, %mm7
psraw $6, %mm5
movq %mm0, %mm3
movq %mm2, 80(%edx)
psubsw %mm4, %mm3
psraw $6, %mm7
paddsw %mm0, %mm4
movq %mm5, (%edx)
psraw $6, %mm3
movq %mm6, 96(%edx)
psraw $6, %mm4
movq %mm7, 112(%edx)
movq %mm3, 64(%edx)
movq %mm4, 48(%edx)
movq (%edi), %mm0
movq 24(%edx), %mm1
movq %mm0, %mm2
movq 120(%edx), %mm4
pmulhw %mm1, %mm0
movl 16(%esp),%esi
movq (%esi), %mm5
pmulhw %mm4, %mm2
movq 88(%edx), %mm6
movq %mm5, %mm7
movq 56(%edx), %mm3
psubsw %mm4, %mm0
movq (%ecx), %mm4
pmulhw %mm3, %mm5
paddsw %mm2, %mm1
pmulhw %mm6, %mm7
movq %mm4, %mm2
paddsw %mm3, %mm5
pmulhw 40(%edx), %mm4
paddsw %mm6, %mm7
psubsw %mm6, %mm5
paddsw %mm3, %mm7
movq 104(%edx), %mm3
movq %mm0, %mm6
pmulhw %mm3, %mm2
psubsw %mm5, %mm0
psubsw %mm3, %mm4
paddsw %mm6, %mm5
movq %mm0, scratch0.50@GOTOFF(%ebx)
movq %mm1, %mm6
paddsw 40(%edx), %mm2
paddsw %mm7, %mm6
psubsw %mm7, %mm1
movq %mm1, %mm7
movq 8(%edx), %mm3
paddsw %mm5, %mm1
movq (%eax), %mm0
psubsw %mm5, %mm7
movq %mm6, scratch1.51@GOTOFF(%ebx)
pmulhw %mm0, %mm1
movq %mm4, %mm6
pmulhw %mm0, %mm7
movq 72(%edx), %mm5
movq %mm3, %mm0
psubsw %mm5, %mm3
paddsw %mm5, %mm0
paddsw %mm3, %mm4
movq %mm0, %mm5
psubsw %mm6, %mm3
paddsw %mm2, %mm5
paddsw %mm1, %mm1
psubsw %mm2, %mm0
paddsw %mm7, %mm7
movq %mm3, %mm2
movq %mm4, %mm6
paddsw %mm7, %mm3
psraw $6, %mm3
paddsw %mm1, %mm4
psraw $6, %mm4
psubsw %mm1, %mm6
movq (%ebp), %mm1
psubsw %mm7, %mm2
psraw $6, %mm6
movq %mm5, %mm7
movq %mm4, 24(%edx)
psraw $6, %mm2
movq %mm3, 40(%edx)
paddsw %mm1, %mm5
movl 20(%esp),%esi
movq (%esi), %mm4
psubsw %mm1, %mm7
psraw $6, %mm5
movq %mm0, %mm3
movq %mm2, 88(%edx)
psubsw %mm4, %mm3
psraw $6, %mm7
paddsw %mm0, %mm4
movq %mm5, 8(%edx)
psraw $6, %mm3
movq %mm6, 104(%edx)
psraw $6, %mm4
movq %mm7, 120(%edx)
movq %mm3, 72(%edx)
movq %mm4, 56(%edx)
popl %ebx
popl %esi
popl %edi
popl %ebp
addl $8,%esp
ret
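Each of the eight unrolled row blocks above has the same shape: pmaddwd the row against one of the four coefficient tables (table04/table17/table26/table35), add the per-row rounder, arithmetic-shift right by 11 and pack back to 16 bits; the two column blocks at the end finish with a shift of 6. A hedged scalar sketch of one row pass (W, x, y and rounder are illustrative names; the real tables are interleaved for pmaddwd rather than stored as a plain 8x8 matrix):

int k, j, acc;
short y[8];
/* one 1-D row pass: y[k] = (sum over j of W[k][j] * x[j] + rounder) >> 11 */
for (k = 0; k < 8; k++) {
  acc = rounder;
  for (j = 0; j < 8; j++)
    acc += W[k][j] * x[j]; /* 16x16 -> 32-bit products, as pmaddwd accumulates */
  y[k] = (short) (acc >> 11); /* psrad $11 followed by packssdw */
}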