adding ffmpegcolorspace element, thanks ronald

Original commit message from CVS:
adding ffmpegcolorspace element, thanks ronald
This commit is contained in:
Thomas Vander Stichele 2004-03-15 16:27:29 +00:00
parent 2622996a82
commit f369c03c32
16 changed files with 6222 additions and 0 deletions

View file

@ -1,3 +1,54 @@
2004-03-15 Thomas Vander Stichele <thomas at apestaart dot org>
* configure.ac:
adding ffmpegcolorspace element
* gst/ffmpegcolorspace/Makefile.am:
* gst/ffmpegcolorspace/avcodec.h:
* gst/ffmpegcolorspace/common.h:
* gst/ffmpegcolorspace/dsputil.c: (dsputil_static_init):
* gst/ffmpegcolorspace/dsputil.h:
* gst/ffmpegcolorspace/gstffmpeg.c: (plugin_init):
* gst/ffmpegcolorspace/gstffmpegcodecmap.c:
(gst_ffmpeg_pixfmt_to_caps), (gst_ffmpeg_pix_fmt_to_caps),
(gst_ffmpeg_caps_to_pix_fmt):
* gst/ffmpegcolorspace/gstffmpegcodecmap.h:
* gst/ffmpegcolorspace/gstffmpegcolorspace.c:
(gst_ffmpegcolorspace_caps_remove_format_info),
(gst_ffmpegcolorspace_getcaps), (gst_ffmpegcolorspace_pad_link),
(gst_ffmpegcolorspace_get_type), (gst_ffmpegcolorspace_base_init),
(gst_ffmpegcolorspace_class_init), (gst_ffmpegcolorspace_init),
(gst_ffmpegcolorspace_chain), (gst_ffmpegcolorspace_change_state),
(gst_ffmpegcolorspace_set_property),
(gst_ffmpegcolorspace_get_property),
(gst_ffmpegcolorspace_register):
* gst/ffmpegcolorspace/imgconvert.c:
(avcodec_get_chroma_sub_sample), (avcodec_get_pix_fmt_name),
(avcodec_get_pix_fmt), (avpicture_fill), (avpicture_layout),
(avpicture_get_size), (avcodec_get_pix_fmt_loss),
(avg_bits_per_pixel), (avcodec_find_best_pix_fmt1),
(avcodec_find_best_pix_fmt), (img_copy_plane), (img_copy),
(yuv422_to_yuv420p), (yuv422_to_yuv422p), (yuv422p_to_yuv422),
(C_JPEG_TO_CCIR), (img_convert_init), (img_apply_table),
(shrink41), (shrink21), (shrink12), (shrink22), (shrink44),
(grow21_line), (grow41_line), (grow21), (grow22), (grow41),
(grow44), (conv411), (gif_clut_index), (build_rgb_palette),
(bitcopy_n), (mono_to_gray), (monowhite_to_gray),
(monoblack_to_gray), (gray_to_mono), (gray_to_monowhite),
(gray_to_monoblack), (avpicture_alloc), (avpicture_free),
(is_yuv_planar), (img_convert), (get_alpha_info_pal8),
(img_get_alpha_info), (deinterlace_line),
(deinterlace_line_inplace), (deinterlace_bottom_field),
(deinterlace_bottom_field_inplace), (avpicture_deinterlace):
* gst/ffmpegcolorspace/imgconvert_template.h:
* gst/ffmpegcolorspace/mem.c: (av_malloc), (av_realloc), (av_free):
* gst/ffmpegcolorspace/mmx.h:
* gst/ffmpegcolorspace/utils.c: (avcodec_init):
adding ffmpegcolorspace element supplied by Ronald after cleaning
up and pulling in the right bits of upstream source.
I'm sure a better C/compiler wizard could do some cleaning up (for
example use GLIB's malloc stuff), but as a first pass this
works very well
2004-03-15 Thomas Vander Stichele <thomas at apestaart dot org>
* ext/alsa/gstalsa.h:

View file

@ -87,6 +87,11 @@ AC_HEADER_STDC([])
AC_C_INLINE
AX_CREATE_STDINT_H
dnl Check for malloc.h
AC_CHECK_HEADER(malloc.h,[
AC_DEFINE(HAVE_MALLOC_H, 1, [whether malloc.h available])
])
dnl Check for a way to display the function name in debug output
GST_CHECK_FUNCTION()
@ -348,6 +353,7 @@ GST_PLUGINS_ALL="\
deinterlace \
effectv \
festival \
ffmpegcolorspace \
filter \
flx \
goom \
@ -1597,6 +1603,7 @@ gst/debug/Makefile
gst/deinterlace/Makefile
gst/effectv/Makefile
gst/festival/Makefile
gst/ffmpegcolorspace/Makefile
gst/filter/Makefile
gst/flx/Makefile
gst/goom/Makefile

View file

@ -0,0 +1,19 @@
plugin_LTLIBRARIES = libgstffmpegcolorspace.la
libgstffmpegcolorspace_la_SOURCES = \
gstffmpeg.c \
gstffmpegcolorspace.c \
gstffmpegcodecmap.c \
dsputil.c \
mem.c \
utils.c \
imgconvert.c
libgstffmpegcolorspace_la_CFLAGS = $(GST_CFLAGS)
libgstffmpegcolorspace_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS)
noinst_HEADERS = \
gstffmpegcodecmap.h \
imgconvert_template.h \
mmx.h \
avcodec.h

View file

@ -0,0 +1,164 @@
#ifndef AVCODEC_H
#define AVCODEC_H
/**
* @file avcodec.h
* external api header.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "common.h"
#include <sys/types.h> /* size_t */
#define FFMPEG_VERSION_INT 0x000408
#define FFMPEG_VERSION "0.4.8"
#define LIBAVCODEC_BUILD 4707
#define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
#define LIBAVCODEC_VERSION FFMPEG_VERSION
#define AV_STRINGIFY(s) AV_TOSTRING(s)
#define AV_TOSTRING(s) #s
#define LIBAVCODEC_IDENT "FFmpeg" LIBAVCODEC_VERSION "b" AV_STRINGIFY(LIBAVCODEC_BUILD)
#define AV_NOPTS_VALUE int64_t_C(0x8000000000000000)
#define AV_TIME_BASE 1000000
enum CodecType {
CODEC_TYPE_UNKNOWN = -1,
CODEC_TYPE_VIDEO,
CODEC_TYPE_AUDIO,
CODEC_TYPE_DATA,
};
/**
* Pixel format. Notes:
*
* PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
* color is put together as:
* (A << 24) | (R << 16) | (G << 8) | B
* This is stored as BGRA on little endian CPU architectures and ARGB on
* big endian CPUs.
*
* When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized
* image data is stored in AVFrame.data[0]. The palette is transported in
* AVFrame.data[1] and, is 1024 bytes long (256 4-byte entries) and is
* formatted the same as in PIX_FMT_RGBA32 described above (i.e., it is
* also endian-specific). Note also that the individual RGB palette
* components stored in AVFrame.data[1] should be in the range 0..255.
* This is important as many custom PAL8 video codecs that were designed
* to run on the IBM VGA graphics adapter use 6-bit palette components.
*/
enum PixelFormat {
PIX_FMT_YUV420P, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
PIX_FMT_YUV422,
PIX_FMT_RGB24, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
PIX_FMT_BGR24, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
PIX_FMT_YUV422P, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
PIX_FMT_YUV444P, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
PIX_FMT_RGBA32, ///< Packed pixel, 4 bytes per pixel, BGRABGRA..., stored in cpu endianness
PIX_FMT_YUV410P, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
PIX_FMT_YUV411P, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
PIX_FMT_RGB565, ///< always stored in cpu endianness
PIX_FMT_RGB555, ///< always stored in cpu endianness, most significant bit to 1
PIX_FMT_GRAY8,
PIX_FMT_MONOWHITE, ///< 0 is white
PIX_FMT_MONOBLACK, ///< 0 is black
PIX_FMT_PAL8, ///< 8 bit with RGBA palette
PIX_FMT_YUVJ420P, ///< Planar YUV 4:2:0 full scale (jpeg)
PIX_FMT_YUVJ422P, ///< Planar YUV 4:2:2 full scale (jpeg)
PIX_FMT_YUVJ444P, ///< Planar YUV 4:4:4 full scale (jpeg)
PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing(xvmc_render.h)
PIX_FMT_XVMC_MPEG2_IDCT,
PIX_FMT_NB,
};
/**
* four components are given, that's all.
* the last component is alpha
*/
typedef struct AVPicture {
uint8_t *data[4];
int linesize[4]; ///< number of bytes per line
} AVPicture;
/**
* Allocate memory for a picture. Call avpicture_free to free it.
*
* @param picture the picture to be filled in.
* @param pix_fmt the format of the picture.
* @param width the width of the picture.
* @param height the height of the picture.
* @return 0 if successful, -1 if not.
*/
int avpicture_alloc(AVPicture *picture, int pix_fmt, int width, int height);
/* Free a picture previously allocated by avpicture_alloc. */
void avpicture_free(AVPicture *picture);
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
int pix_fmt, int width, int height);
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
unsigned char *dest, int dest_size);
int avpicture_get_size(int pix_fmt, int width, int height);
void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift);
const char *avcodec_get_pix_fmt_name(int pix_fmt);
enum PixelFormat avcodec_get_pix_fmt(const char* name);
#define FF_LOSS_RESOLUTION 0x0001 /* loss due to resolution change */
#define FF_LOSS_DEPTH 0x0002 /* loss due to color depth change */
#define FF_LOSS_COLORSPACE 0x0004 /* loss due to color space conversion */
#define FF_LOSS_ALPHA 0x0008 /* loss of alpha bits */
#define FF_LOSS_COLORQUANT 0x0010 /* loss due to color quantization */
#define FF_LOSS_CHROMA 0x0020 /* loss of chroma (e.g. rgb to gray conversion) */
int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
int has_alpha);
int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
int has_alpha, int *loss_ptr);
#define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */
#define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */
int img_get_alpha_info(const AVPicture *src,
int pix_fmt, int width, int height);
/* convert among pixel formats */
int img_convert(AVPicture *dst, int dst_pix_fmt,
const AVPicture *src, int pix_fmt,
int width, int height);
/* deinterlace a picture */
int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
int pix_fmt, int width, int height);
void avcodec_init(void);
/* memory */
void *av_malloc(unsigned int size);
void *av_mallocz(unsigned int size);
void *av_realloc(void *ptr, unsigned int size);
void av_free(void *ptr);
char *av_strdup(const char *s);
void av_freep(void *ptr);
void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size);
/* for static data only */
/* call av_free_static to release all staticaly allocated tables */
void av_free_static(void);
void *__av_mallocz_static(void** location, unsigned int size);
#define av_mallocz_static(p, s) __av_mallocz_static((void **)(p), s)
/* add by bero : in adx.c */
int is_adx(const unsigned char *buf,size_t bufsize);
void img_copy(AVPicture *dst, const AVPicture *src,
int pix_fmt, int width, int height);
#ifdef __cplusplus
}
#endif
#endif /* AVCODEC_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,49 @@
/*
* DSP utils
* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*/
/**
* @file dsputil.c
* DSP utils
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "avcodec.h"
#include "dsputil.h"
uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];
/* init static data */
void
dsputil_static_init (void)
{
int i;
for (i = 0; i < 256; i++)
cropTbl[i + MAX_NEG_CROP] = i;
for (i = 0; i < MAX_NEG_CROP; i++) {
cropTbl[i] = 0;
cropTbl[i + MAX_NEG_CROP + 256] = 255;
}
}

View file

@ -0,0 +1,576 @@
/*
* DSP utils
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/**
* @file dsputil.h
* DSP utils.
* note, many functions in here may use MMX which trashes the FPU state, it is
* absolutely necessary to call emms_c() between dsp & float/double code
*/
#ifndef DSPUTIL_H
#define DSPUTIL_H
#include "common.h"
#include "avcodec.h"
//#define DEBUG
/* dct code */
typedef short DCTELEM;
void fdct_ifast (DCTELEM *data);
void fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow (DCTELEM *data);
void ff_fdct248_islow (DCTELEM *data);
void j_rev_dct (DCTELEM *data);
void ff_fdct_mmx(DCTELEM *block);
void ff_fdct_mmx2(DCTELEM *block);
void ff_fdct_sse2(DCTELEM *block);
/* encoding scans */
extern const uint8_t ff_alternate_horizontal_scan[64];
extern const uint8_t ff_alternate_vertical_scan[64];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag248_direct[64];
/* pixel operations */
#define MAX_NEG_CROP 384
/* temporary */
extern uint32_t squareTbl[512];
extern uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];
/* VP3 DSP functions */
void vp3_dsp_init_c(void);
void vp3_idct_put_c(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
void vp3_idct_add_c(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
void vp3_dsp_init_mmx(void);
void vp3_idct_put_mmx(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
void vp3_idct_add_mmx(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
/* minimum alignment rules ;)
if u notice errors in the align stuff, need more alignment for some asm code for some cpu
or need to use a function with less aligned data then send a mail to the ffmpeg-dev list, ...
!warning these alignments might not match reallity, (missing attribute((align)) stuff somewhere possible)
i (michael) didnt check them, these are just the alignents which i think could be reached easily ...
!future video codecs might need functions with less strict alignment
*/
/*
void get_pixels_c(DCTELEM *block, const uint8_t *pixels, int line_size);
void diff_pixels_c(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride);
void put_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size);
void add_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size);
void clear_blocks_c(DCTELEM *blocks);
*/
/* add and put pixel (decoding) */
// blocksizes for op_pixels_func are 8x4,8x8 16x8 16x16
//h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4
typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h);
typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h);
typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
#define DEF_OLD_QPEL(name)\
void ff_put_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
void ff_put_no_rnd_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
void ff_avg_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
DEF_OLD_QPEL(qpel16_mc11_old_c)
DEF_OLD_QPEL(qpel16_mc31_old_c)
DEF_OLD_QPEL(qpel16_mc12_old_c)
DEF_OLD_QPEL(qpel16_mc32_old_c)
DEF_OLD_QPEL(qpel16_mc13_old_c)
DEF_OLD_QPEL(qpel16_mc33_old_c)
DEF_OLD_QPEL(qpel8_mc11_old_c)
DEF_OLD_QPEL(qpel8_mc31_old_c)
DEF_OLD_QPEL(qpel8_mc12_old_c)
DEF_OLD_QPEL(qpel8_mc32_old_c)
DEF_OLD_QPEL(qpel8_mc13_old_c)
DEF_OLD_QPEL(qpel8_mc33_old_c)
#define CALL_2X_PIXELS(a, b, n)\
static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
b(block , pixels , line_size, h);\
b(block+n, pixels+n, line_size, h);\
}
/* motion estimation */
// h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2
// allthough currently h<4 is not used as functions with width <8 are not used and neither implemented
typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/;
/**
* DSPContext.
*/
typedef struct DSPContext {
/* pixel ops : interface with DCT */
void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size);
void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride);
void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
void (*add_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
/**
* translational global motion compensation.
*/
void (*gmc1)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x16, int y16, int rounder);
/**
* global motion compensation.
*/
void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
int (*pix_sum)(uint8_t * pix, int line_size);
int (*pix_norm1)(uint8_t * pix, int line_size);
// 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4
me_cmp_func sad[5]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[5];
me_cmp_func hadamard8_diff[5];
me_cmp_func dct_sad[5];
me_cmp_func quant_psnr[5];
me_cmp_func bit[5];
me_cmp_func rd[5];
me_cmp_func vsad[5];
me_cmp_func vsse[5];
me_cmp_func me_pre_cmp[5];
me_cmp_func me_cmp[5];
me_cmp_func me_sub_cmp[5];
me_cmp_func mb_cmp[5];
me_cmp_func ildct_cmp[5]; //only width 16 used
/**
* Halfpel motion compensation with rounding (a+b+1)>>1.
* this is an array[4][4] of motion compensation funcions for 4
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination where the result is stored
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func put_pixels_tab[4][4];
/**
* Halfpel motion compensation with rounding (a+b+1)>>1.
* This is an array[4][4] of motion compensation functions for 4
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination into which the result is averaged (a+b+1)>>1
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func avg_pixels_tab[4][4];
/**
* Halfpel motion compensation with no rounding (a+b)>>1.
* this is an array[2][4] of motion compensation funcions for 2
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination where the result is stored
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func put_no_rnd_pixels_tab[2][4];
/**
* Halfpel motion compensation with no rounding (a+b)>>1.
* this is an array[2][4] of motion compensation funcions for 2
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination into which the result is averaged (a+b)>>1
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func avg_no_rnd_pixels_tab[2][4];
void (*put_no_rnd_pixels_l2[2])(uint8_t *block/*align width (8 or 16)*/, const uint8_t *a/*align 1*/, const uint8_t *b/*align 1*/, int line_size, int h);
/**
* Thirdpel motion compensation with rounding (a+b+1)>>1.
* this is an array[12] of motion compensation funcions for the 9 thirdpel positions<br>
* *pixels_tab[ xthirdpel + 4*ythirdpel ]
* @param block destination where the result is stored
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
tpel_mc_func put_tpel_pixels_tab[11]; //FIXME individual func ptr per width?
tpel_mc_func avg_tpel_pixels_tab[11]; //FIXME individual func ptr per width?
qpel_mc_func put_qpel_pixels_tab[2][16];
qpel_mc_func avg_qpel_pixels_tab[2][16];
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
qpel_mc_func put_mspel_pixels_tab[8];
/**
* h264 Chram MC
*/
h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
qpel_mc_func put_h264_qpel_pixels_tab[3][16];
qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
me_cmp_func pix_abs[2][4];
/* huffyuv specific */
void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
void (*diff_bytes)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 1*/,int w);
/**
* subtract huffyuv's variant of median prediction
* note, this might read from src1[-1], src2[-1]
*/
void (*sub_hfyu_median_prediction)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top);
void (*bswap_buf)(uint32_t *dst, uint32_t *src, int w);
void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);
/* (I)DCT */
void (*fdct)(DCTELEM *block/* align 16*/);
void (*fdct248)(DCTELEM *block/* align 16*/);
/* IDCT really*/
void (*idct)(DCTELEM *block/* align 16*/);
/**
* block -> idct -> clip to unsigned 8 bit -> dest.
* (-1392, 0, 0, ...) -> idct -> (-174, -174, ...) -> put -> (0, 0, ...)
* @param line_size size in bytes of a horizotal line of dest
*/
void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
/**
* block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
* @param line_size size in bytes of a horizotal line of dest
*/
void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
/**
* idct input permutation.
* several optimized IDCTs need a permutated input (relative to the normal order of the reference
* IDCT)
* this permutation must be performed before the idct_put/add, note, normally this can be merged
* with the zigzag/alternate scan<br>
* an example to avoid confusion:
* - (->decode coeffs -> zigzag reorder -> dequant -> reference idct ->...)
* - (x -> referece dct -> reference idct -> x)
* - (x -> referece dct -> simple_mmx_perm = idct_permutation -> simple_idct_mmx -> x)
* - (->decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant -> simple_idct_mmx ->...)
*/
uint8_t idct_permutation[64];
int idct_permutation_type;
#define FF_NO_IDCT_PERM 1
#define FF_LIBMPEG2_IDCT_PERM 2
#define FF_SIMPLE_IDCT_PERM 3
#define FF_TRANSPOSE_IDCT_PERM 4
int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale);
void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
#define BASIS_SHIFT 16
#define RECON_SHIFT 6
/**
* This function handles any initialization for the VP3 DSP functions.
*/
void (*vp3_dsp_init)(void);
/**
* This function is responsible for taking a block of zigzag'd,
* quantized DCT coefficients, reconstructing the original block of
* samples, and placing it into the output.
* @param input_data 64 zigzag'd, quantized DCT coefficients
* @param dequant_matrix 64 zigzag'd quantizer coefficients
* @param coeff_count index of the last coefficient
* @param dest the final output location where the transformed samples
* are to be placed
* @param stride the width in 8-bit samples of a line on this plane
*/
void (*vp3_idct_put)(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
/**
* This function is responsible for taking a block of zigzag'd,
* quantized DCT coefficients, reconstructing the original block of
* samples, and adding the transformed samples to an existing block of
* samples in the output.
* @param input_data 64 zigzag'd, quantized DCT coefficients
* @param dequant_matrix 64 zigzag'd quantizer coefficients
* @param coeff_count index of the last coefficient
* @param dest the final output location where the transformed samples
* are to be placed
* @param stride the width in 8-bit samples of a line on this plane
*/
void (*vp3_idct_add)(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
} DSPContext;
void dsputil_static_init(void);
//void dsputil_init(DSPContext* p, AVCodecContext *avctx);
/**
* permute block according to permuatation.
* @param last last non zero element in scantable order
*/
void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);
void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);
#define BYTE_VEC32(c) ((c)*0x01010101UL)
static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
{
return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}
static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
{
return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}
/**
* Empty mmx state.
* this must be called between any dsp function and float/double code.
* for example sin(); dsp->idct_put(); emms_c(); cos()
*/
#define emms_c()
/* should be defined by architectures supporting
one or more MultiMedia extension */
int mm_support(void);
#if defined(HAVE_MMX)
#undef emms_c
#define MM_MMX 0x0001 /* standard MMX */
#define MM_3DNOW 0x0004 /* AMD 3DNOW */
#define MM_MMXEXT 0x0002 /* SSE integer functions or AMD MMX ext */
#define MM_SSE 0x0008 /* SSE functions */
#define MM_SSE2 0x0010 /* PIV SSE2 functions */
extern int mm_flags;
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
static inline void emms(void)
{
__asm __volatile ("emms;":::"memory");
}
#define emms_c() \
{\
if (mm_flags & MM_MMX)\
emms();\
}
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_ARMV4L)
/* This is to use 4 bytes read to the IDCT pointers for some 'zero'
line ptimizations */
#define __align8 __attribute__ ((aligned (4)))
void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx);
#elif defined(HAVE_MLIB)
/* SPARC/VIS IDCT needs 8-byte aligned DCT blocks */
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_ALPHA)
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_POWERPC)
#define MM_ALTIVEC 0x0001 /* standard AltiVec */
extern int mm_flags;
#if defined(HAVE_ALTIVEC) && !defined(CONFIG_DARWIN)
#define pixel altivec_pixel
#include <altivec.h>
#undef pixel
#endif
#define __align8 __attribute__ ((aligned (16)))
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
#elif defined(HAVE_MMI)
#define __align8 __attribute__ ((aligned (16)))
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_SH4)
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
#else
#define __align8
#endif
#ifdef __GNUC__
struct unaligned_64 { uint64_t l; } __attribute__((packed));
struct unaligned_32 { uint32_t l; } __attribute__((packed));
struct unaligned_16 { uint16_t l; } __attribute__((packed));
#define LD16(a) (((const struct unaligned_16 *) (a))->l)
#define LD32(a) (((const struct unaligned_32 *) (a))->l)
#define LD64(a) (((const struct unaligned_64 *) (a))->l)
#define ST32(a, b) (((struct unaligned_32 *) (a))->l) = (b)
#else /* __GNUC__ */
#define LD16(a) (*((uint16_t*)(a)))
#define LD32(a) (*((uint32_t*)(a)))
#define LD64(a) (*((uint64_t*)(a)))
#define ST32(a, b) *((uint32_t*)(a)) = (b)
#endif /* !__GNUC__ */
/* PSNR */
//void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3],
// int orig_linesize[3], int coded_linesize,
// AVCodecContext *avctx);
/* FFT computation */
/* NOTE: soon integer code will be added, so you must use the
FFTSample type */
typedef float FFTSample;
typedef struct FFTComplex {
FFTSample re, im;
} FFTComplex;
typedef struct FFTContext {
int nbits;
int inverse;
uint16_t *revtab;
FFTComplex *exptab;
FFTComplex *exptab1; /* only used by SSE code */
void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
} FFTContext;
int fft_init(FFTContext *s, int nbits, int inverse);
void fft_permute(FFTContext *s, FFTComplex *z);
void fft_calc_c(FFTContext *s, FFTComplex *z);
void fft_calc_sse(FFTContext *s, FFTComplex *z);
void fft_calc_altivec(FFTContext *s, FFTComplex *z);
static inline void fft_calc(FFTContext *s, FFTComplex *z)
{
s->fft_calc(s, z);
}
void fft_end(FFTContext *s);
/* MDCT computation */
typedef struct MDCTContext {
int n; /* size of MDCT (i.e. number of input data * 2) */
int nbits; /* n = 2^nbits */
/* pre/post rotation tables */
FFTSample *tcos;
FFTSample *tsin;
FFTContext fft;
} MDCTContext;
int ff_mdct_init(MDCTContext *s, int nbits, int inverse);
void ff_imdct_calc(MDCTContext *s, FFTSample *output,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_calc(MDCTContext *s, FFTSample *out,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_end(MDCTContext *s);
#define WARPER8_16(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
return name8(s, dst , src , stride, h)\
+name8(s, dst+8 , src+8 , stride, h);\
}
#define WARPER8_16_SQ(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
int score=0;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
if(h==16){\
dst += 8*stride;\
src += 8*stride;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
}\
return score;\
}
#ifndef HAVE_LRINTF
/* XXX: add ISOC specific test to avoid specific BSD testing. */
/* better than nothing implementation. */
/* btw, rintf() is existing on fbsd too -- alex */
static inline long int lrintf(float x)
{
#ifdef CONFIG_WIN32
/* XXX: incorrect, but make it compile */
return (int)(x);
#else
return (int)(rint(x));
#endif
}
#endif
#endif

View file

@ -0,0 +1,46 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/* First, include the header file for the plugin, to bring in the
* object definition and other useful things.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gst/gst.h>
#include <avcodec.h>
extern gboolean gst_ffmpegcolorspace_register (GstPlugin * plugin);
static gboolean
plugin_init (GstPlugin * plugin)
{
gst_ffmpegcolorspace_register (plugin);
/* Now we can return the pointer to the newly created Plugin object. */
return TRUE;
}
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
GST_VERSION_MINOR,
"ffmpegcolorspace",
"colorspace conversion copied from FFMpeg",
plugin_init,
FFMPEG_VERSION, "LGPL", "FFMpeg", "http://ffmpeg.sourceforge.net/")

View file

@ -0,0 +1,261 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
* This file:
* Copyright (c) 2002-2003 Ronald Bultje <rbultje@ronald.bitfreak.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gst/gst.h>
#include <avcodec.h>
#include <string.h>
#include "gstffmpegcodecmap.h"
/* this macro makes a caps width fixed or unfixed width/height
* properties depending on whether we've got a context.
*
* See below for why we use this.
*/
#define GST_FF_VID_CAPS_NEW(mimetype, props...) \
gst_caps_new_simple (mimetype, \
"width", GST_TYPE_INT_RANGE, 16, 4096, \
"height", GST_TYPE_INT_RANGE, 16, 4096, \
"framerate", GST_TYPE_DOUBLE_RANGE, 0., G_MAXDOUBLE, \
##props, NULL)
/* Convert a FFMPEG Pixel Format and optional AVCodecContext
* to a GstCaps. If the context is ommitted, no fixed values
* for video/audio size will be included in the GstCaps
*
* See below for usefullness
*/
static GstCaps *
gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt)
{
GstCaps *caps = NULL;
int bpp = 0, depth = 0, endianness = 0;
gulong g_mask = 0, r_mask = 0, b_mask = 0;
guint32 fmt = 0;
switch (pix_fmt) {
case PIX_FMT_YUV420P:
fmt = GST_MAKE_FOURCC ('I', '4', '2', '0');
break;
case PIX_FMT_YUV422:
fmt = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
break;
case PIX_FMT_RGB24:
bpp = depth = 24;
endianness = G_BIG_ENDIAN;
r_mask = 0xff0000;
g_mask = 0x00ff00;
b_mask = 0x0000ff;
break;
case PIX_FMT_BGR24:
bpp = depth = 24;
endianness = G_BIG_ENDIAN;
r_mask = 0x0000ff;
g_mask = 0x00ff00;
b_mask = 0xff0000;
break;
case PIX_FMT_YUV422P:
fmt = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
break;
case PIX_FMT_YUV444P:
/* .. */
break;
case PIX_FMT_RGBA32:
bpp = 32;
depth = 24;
endianness = G_BIG_ENDIAN;
#if (G_BYTE_ORDER == G_BIG_ENDIAN)
r_mask = 0x00ff0000;
g_mask = 0x0000ff00;
b_mask = 0x000000ff;
#else
r_mask = 0x0000ff00;
g_mask = 0x00ff0000;
b_mask = 0xff000000;
#endif
break;
case PIX_FMT_YUV410P:
fmt = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
break;
case PIX_FMT_YUV411P:
fmt = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
break;
case PIX_FMT_RGB565:
bpp = depth = 16;
endianness = G_BYTE_ORDER;
r_mask = 0xf800;
g_mask = 0x07e0;
b_mask = 0x001f;
break;
case PIX_FMT_RGB555:
bpp = 16;
depth = 15;
endianness = G_BYTE_ORDER;
r_mask = 0x7c00;
g_mask = 0x03e0;
b_mask = 0x001f;
break;
default:
/* give up ... */
break;
}
if (bpp != 0) {
caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
"bpp", G_TYPE_INT, bpp,
"depth", G_TYPE_INT, depth,
"red_mask", G_TYPE_INT, r_mask,
"green_mask", G_TYPE_INT, g_mask,
"blue_mask", G_TYPE_INT, b_mask,
"endianness", G_TYPE_INT, endianness, NULL);
} else if (fmt) {
caps = GST_FF_VID_CAPS_NEW ("video/x-raw-yuv",
"format", GST_TYPE_FOURCC, fmt, NULL);
}
if (caps != NULL) {
char *str = gst_caps_to_string (caps);
GST_DEBUG ("caps for pix_fmt=%d: %s", pix_fmt, str);
g_free (str);
} else {
GST_WARNING ("No caps found for pix_fmt=%d", pix_fmt);
}
return caps;
}
/* Convert a FFMPEG codec Type and optional AVCodecContext
* to a GstCaps. If the context is ommitted, no fixed values
* for video/audio size will be included in the GstCaps
*
* CodecType is primarily meant for uncompressed data GstCaps!
*/
GstCaps *
gst_ffmpeg_pix_fmt_to_caps (void)
{
GstCaps *caps, *temp;
enum PixelFormat i;
caps = gst_caps_new_empty ();
for (i = 0; i < PIX_FMT_NB; i++) {
temp = gst_ffmpeg_pixfmt_to_caps (i);
if (temp != NULL) {
gst_caps_append (caps, temp);
}
}
return caps;
}
/* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
* and other video properties in a AVCodecContext.
*
* For usefullness, see below
*/
enum PixelFormat
gst_ffmpeg_caps_to_pix_fmt (const GstCaps * caps,
int *width, int *height, double *framerate)
{
GstStructure *structure;
enum PixelFormat pix_fmt = PIX_FMT_NB;
g_return_val_if_fail (gst_caps_get_size (caps) == 1, PIX_FMT_NB);
structure = gst_caps_get_structure (caps, 0);
gst_structure_get_int (structure, "width", width);
gst_structure_get_int (structure, "height", height);
gst_structure_get_double (structure, "framerate", framerate);
if (strcmp (gst_structure_get_name (structure), "video/x-raw-yuv") == 0) {
guint32 fourcc;
if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
switch (fourcc) {
case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
pix_fmt = PIX_FMT_YUV422;
break;
case GST_MAKE_FOURCC ('I', '4', '2', '0'):
pix_fmt = PIX_FMT_YUV420P;
break;
case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
pix_fmt = PIX_FMT_YUV411P;
break;
case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
pix_fmt = PIX_FMT_YUV422P;
break;
case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
pix_fmt = PIX_FMT_YUV410P;
break;
#if 0
case FIXME:
pix_fmt = PIX_FMT_YUV444P;
break;
#endif
}
}
} else if (strcmp (gst_structure_get_name (structure),
"video/x-raw-rgb") == 0) {
gint bpp = 0, rmask = 0, endianness = 0;
if (gst_structure_get_int (structure, "bpp", &bpp) &&
gst_structure_get_int (structure, "endianness", &endianness) &&
gst_structure_get_int (structure, "red_mask", &rmask)) {
switch (bpp) {
case 32:
#if (G_BYTE_ORDER == G_BIG_ENDIAN)
if (rmask == 0x00ff0000)
#else
if (rmask == 0x0000ff00)
#endif
pix_fmt = PIX_FMT_RGBA32;
break;
case 24:
if (rmask == 0x0000FF)
pix_fmt = PIX_FMT_BGR24;
else
pix_fmt = PIX_FMT_RGB24;
break;
case 16:
if (endianness == G_BYTE_ORDER)
pix_fmt = PIX_FMT_RGB565;
break;
case 15:
if (endianness == G_BYTE_ORDER)
pix_fmt = PIX_FMT_RGB555;
break;
default:
/* nothing */
break;
}
}
}
return pix_fmt;
}

View file

@ -0,0 +1,38 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __GST_FFMPEG_CODECMAP_H__
#define __GST_FFMPEG_CODECMAP_H__
#include <avcodec.h>
#include <gst/gst.h>
/* Template caps */
GstCaps *
gst_ffmpeg_pix_fmt_to_caps (void);
/* Disect a GstCaps */
enum PixelFormat
gst_ffmpeg_caps_to_pix_fmt (const GstCaps *caps,
int *width, int *height,
double *fps);
#endif /* __GST_FFMPEG_CODECMAP_H__ */

View file

@ -0,0 +1,409 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
* This file:
* Copyright (C) 2003 Ronald Bultje <rbultje@ronald.bitfreak.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <gst/gst.h>
#include <avcodec.h>
#include "gstffmpegcodecmap.h"
#define GST_TYPE_FFMPEGCOLORSPACE \
(gst_ffmpegcolorspace_get_type())
#define GST_FFMPEGCOLORSPACE(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGCOLORSPACE,GstFFMpegColorspace))
#define GST_FFMPEGCOLORSPACE_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGCOLORSPACE,GstFFMpegColorspace))
#define GST_IS_FFMPEGCOLORSPACE(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGCOLORSPACE))
#define GST_IS_FFMPEGCOLORSPACE_CLASS(obj) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGCOLORSPACE))
typedef struct _GstFFMpegColorspace GstFFMpegColorspace;
typedef struct _GstFFMpegColorspaceClass GstFFMpegColorspaceClass;
struct _GstFFMpegColorspace
{
GstElement element;
GstPad *sinkpad, *srcpad;
gint width, height;
gfloat fps;
enum PixelFormat from_pixfmt, to_pixfmt;
AVPicture from_frame, to_frame;
GstCaps *sinkcaps;
};
struct _GstFFMpegColorspaceClass
{
GstElementClass parent_class;
};
/* elementfactory information */
static GstElementDetails ffmpegcolorspace_details = {
"FFMPEG Colorspace converter",
"Filter/Converter/Video",
"Converts video from one colorspace to another",
"Ronald Bultje <rbultje@ronald.bitfreak.net>",
};
/* Stereo signals and args */
enum
{
/* FILL ME */
LAST_SIGNAL
};
enum
{
ARG_0,
};
static GType gst_ffmpegcolorspace_get_type (void);
static void gst_ffmpegcolorspace_base_init (GstFFMpegColorspaceClass * klass);
static void gst_ffmpegcolorspace_class_init (GstFFMpegColorspaceClass * klass);
static void gst_ffmpegcolorspace_init (GstFFMpegColorspace * space);
static void gst_ffmpegcolorspace_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec);
static void gst_ffmpegcolorspace_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec);
static GstPadLinkReturn
gst_ffmpegcolorspace_pad_link (GstPad * pad, const GstCaps * caps);
static void gst_ffmpegcolorspace_chain (GstPad * pad, GstData * data);
static GstElementStateReturn
gst_ffmpegcolorspace_change_state (GstElement * element);
static GstPadTemplate *srctempl, *sinktempl;
static GstElementClass *parent_class = NULL;
/*static guint gst_ffmpegcolorspace_signals[LAST_SIGNAL] = { 0 }; */
static GstCaps *
gst_ffmpegcolorspace_caps_remove_format_info (GstCaps * caps)
{
int i;
GstStructure *structure;
GstCaps *rgbcaps;
for (i = 0; i < gst_caps_get_size (caps); i++) {
structure = gst_caps_get_structure (caps, i);
gst_structure_set_name (structure, "video/x-raw-yuv");
gst_structure_remove_field (structure, "format");
gst_structure_remove_field (structure, "endianness");
gst_structure_remove_field (structure, "depth");
gst_structure_remove_field (structure, "bpp");
gst_structure_remove_field (structure, "red_mask");
gst_structure_remove_field (structure, "green_mask");
gst_structure_remove_field (structure, "blue_mask");
}
rgbcaps = gst_caps_simplify (caps);
gst_caps_free (caps);
caps = gst_caps_copy (rgbcaps);
for (i = 0; i < gst_caps_get_size (rgbcaps); i++) {
structure = gst_caps_get_structure (rgbcaps, i);
gst_structure_set_name (structure, "video/x-raw-rgb");
}
gst_caps_append (caps, rgbcaps);
return caps;
}
static GstCaps *
gst_ffmpegcolorspace_getcaps (GstPad * pad)
{
GstFFMpegColorspace *space;
GstCaps *othercaps;
GstCaps *caps;
GstPad *otherpad;
space = GST_FFMPEGCOLORSPACE (gst_pad_get_parent (pad));
otherpad = (pad == space->srcpad) ? space->sinkpad : space->srcpad;
othercaps = gst_pad_get_allowed_caps (otherpad);
othercaps = gst_ffmpegcolorspace_caps_remove_format_info (othercaps);
caps = gst_caps_intersect (othercaps, gst_pad_get_pad_template_caps (pad));
gst_caps_free (othercaps);
return caps;
}
static GstPadLinkReturn
gst_ffmpegcolorspace_pad_link (GstPad * pad, const GstCaps * caps)
{
GstFFMpegColorspace *space;
const GstCaps *othercaps;
GstPad *otherpad;
GstPadLinkReturn ret;
int height, width;
double framerate;
enum PixelFormat pix_fmt;
space = GST_FFMPEGCOLORSPACE (gst_pad_get_parent (pad));
otherpad = (pad == space->srcpad) ? space->sinkpad : space->srcpad;
/* FIXME attempt and/or check for passthru */
/* loop over all possibilities and select the first one we can convert and
* is accepted by the peer */
pix_fmt = gst_ffmpeg_caps_to_pix_fmt (caps, &width, &height, &framerate);
if (pix_fmt == PIX_FMT_NB) {
/* we disable ourself here */
if (pad == space->srcpad) {
space->to_pixfmt = PIX_FMT_NB;
} else {
space->from_pixfmt = PIX_FMT_NB;
}
return GST_PAD_LINK_REFUSED;
}
/* set the size on the otherpad */
othercaps = gst_pad_get_negotiated_caps (otherpad);
if (othercaps) {
GstCaps *caps = gst_caps_copy (othercaps);
gst_caps_set_simple (caps,
"width", G_TYPE_INT, width,
"height", G_TYPE_INT, height,
"framerate", G_TYPE_DOUBLE, framerate, NULL);
ret = gst_pad_try_set_caps (otherpad, caps);
if (GST_PAD_LINK_FAILED (ret)) {
return ret;
}
}
if (pad == space->srcpad) {
space->to_pixfmt = pix_fmt;
} else {
space->from_pixfmt = pix_fmt;
}
space->width = width;
space->height = height;
return GST_PAD_LINK_OK;
}
static GType
gst_ffmpegcolorspace_get_type (void)
{
static GType ffmpegcolorspace_type = 0;
if (!ffmpegcolorspace_type) {
static const GTypeInfo ffmpegcolorspace_info = {
sizeof (GstFFMpegColorspaceClass),
(GBaseInitFunc) gst_ffmpegcolorspace_base_init,
NULL,
(GClassInitFunc) gst_ffmpegcolorspace_class_init,
NULL,
NULL,
sizeof (GstFFMpegColorspace),
0,
(GInstanceInitFunc) gst_ffmpegcolorspace_init,
};
ffmpegcolorspace_type = g_type_register_static (GST_TYPE_ELEMENT,
"GstFFMpegColorspace", &ffmpegcolorspace_info, 0);
}
return ffmpegcolorspace_type;
}
static void
gst_ffmpegcolorspace_base_init (GstFFMpegColorspaceClass * klass)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
gst_element_class_add_pad_template (element_class, srctempl);
gst_element_class_add_pad_template (element_class, sinktempl);
gst_element_class_set_details (element_class, &ffmpegcolorspace_details);
}
static void
gst_ffmpegcolorspace_class_init (GstFFMpegColorspaceClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
gobject_class = (GObjectClass *) klass;
gstelement_class = (GstElementClass *) klass;
parent_class = g_type_class_ref (GST_TYPE_ELEMENT);
gobject_class->set_property = gst_ffmpegcolorspace_set_property;
gobject_class->get_property = gst_ffmpegcolorspace_get_property;
gstelement_class->change_state = gst_ffmpegcolorspace_change_state;
}
static void
gst_ffmpegcolorspace_init (GstFFMpegColorspace * space)
{
space->sinkpad = gst_pad_new_from_template (sinktempl, "sink");
gst_pad_set_link_function (space->sinkpad, gst_ffmpegcolorspace_pad_link);
gst_pad_set_getcaps_function (space->sinkpad, gst_ffmpegcolorspace_getcaps);
gst_pad_set_chain_function (space->sinkpad, gst_ffmpegcolorspace_chain);
gst_element_add_pad (GST_ELEMENT (space), space->sinkpad);
space->srcpad = gst_pad_new_from_template (srctempl, "src");
gst_element_add_pad (GST_ELEMENT (space), space->srcpad);
gst_pad_set_link_function (space->srcpad, gst_ffmpegcolorspace_pad_link);
gst_pad_set_getcaps_function (space->srcpad, gst_ffmpegcolorspace_getcaps);
space->from_pixfmt = space->to_pixfmt = PIX_FMT_NB;
}
static void
gst_ffmpegcolorspace_chain (GstPad * pad, GstData * data)
{
GstBuffer *inbuf = GST_BUFFER (data);
GstFFMpegColorspace *space;
GstBuffer *outbuf = NULL;
g_return_if_fail (pad != NULL);
g_return_if_fail (GST_IS_PAD (pad));
g_return_if_fail (inbuf != NULL);
space = GST_FFMPEGCOLORSPACE (gst_pad_get_parent (pad));
g_return_if_fail (space != NULL);
g_return_if_fail (GST_IS_FFMPEGCOLORSPACE (space));
if (space->from_pixfmt == PIX_FMT_NB || space->to_pixfmt == PIX_FMT_NB) {
g_critical ("attempting to convert unknown formats");
gst_buffer_unref (inbuf);
return;
}
if (space->from_pixfmt == space->to_pixfmt) {
outbuf = inbuf;
} else {
/* use bufferpool here */
guint size = avpicture_get_size (space->to_pixfmt,
space->width,
space->height);
outbuf = gst_pad_alloc_buffer (space->srcpad, GST_BUFFER_OFFSET_NONE, size);
/* convert */
avpicture_fill ((AVPicture *) & space->from_frame, GST_BUFFER_DATA (inbuf),
space->from_pixfmt, space->width, space->height);
avpicture_fill ((AVPicture *) & space->to_frame, GST_BUFFER_DATA (outbuf),
space->to_pixfmt, space->width, space->height);
img_convert ((AVPicture *) & space->to_frame, space->to_pixfmt,
(AVPicture *) & space->from_frame, space->from_pixfmt,
space->width, space->height);
GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (inbuf);
gst_buffer_unref (inbuf);
}
gst_pad_push (space->srcpad, GST_DATA (outbuf));
}
static GstElementStateReturn
gst_ffmpegcolorspace_change_state (GstElement * element)
{
GstFFMpegColorspace *space;
space = GST_FFMPEGCOLORSPACE (element);
switch (GST_STATE_TRANSITION (element)) {
case GST_STATE_PAUSED_TO_READY:
break;
}
if (parent_class->change_state)
return parent_class->change_state (element);
return GST_STATE_SUCCESS;
}
static void
gst_ffmpegcolorspace_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec)
{
GstFFMpegColorspace *space;
/* it's not null if we got it, but it might not be ours */
g_return_if_fail (GST_IS_FFMPEGCOLORSPACE (object));
space = GST_FFMPEGCOLORSPACE (object);
switch (prop_id) {
default:
break;
}
}
static void
gst_ffmpegcolorspace_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec)
{
GstFFMpegColorspace *space;
/* it's not null if we got it, but it might not be ours */
g_return_if_fail (GST_IS_FFMPEGCOLORSPACE (object));
space = GST_FFMPEGCOLORSPACE (object);
switch (prop_id) {
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
gboolean
gst_ffmpegcolorspace_register (GstPlugin * plugin)
{
GstCaps *caps;
/* template caps */
caps = gst_ffmpeg_pix_fmt_to_caps ();
/* build templates */
srctempl = gst_pad_template_new ("src",
GST_PAD_SRC, GST_PAD_ALWAYS, gst_caps_copy (caps));
sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, caps);
avcodec_init ();
return gst_element_register (plugin, "ffmpegcolorspace",
GST_RANK_NONE, GST_TYPE_FFMPEGCOLORSPACE);
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,857 @@
/*
* Templates for image convertion routines
* Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef RGB_OUT
#define RGB_OUT(d, r, g, b) RGBA_OUT(d, r, g, b, 0xff)
#endif
static void glue(yuv420p_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *y1_ptr, *y2_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1, *d2;
int w, y, cb, cr, r_add, g_add, b_add, width2;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
y1_ptr = src->data[0];
cb_ptr = src->data[1];
cr_ptr = src->data[2];
width2 = (width + 1) >> 1;
for(;height >= 2; height -= 2) {
d1 = d;
d2 = d + dst->linesize[0];
y2_ptr = y1_ptr + src->linesize[0];
for(w = width; w >= 2; w -= 2) {
YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]);
/* output 4 pixels */
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[1]);
RGB_OUT(d1 + BPP, r, g, b);
YUV_TO_RGB2_CCIR(r, g, b, y2_ptr[0]);
RGB_OUT(d2, r, g, b);
YUV_TO_RGB2_CCIR(r, g, b, y2_ptr[1]);
RGB_OUT(d2 + BPP, r, g, b);
d1 += 2 * BPP;
d2 += 2 * BPP;
y1_ptr += 2;
y2_ptr += 2;
cb_ptr++;
cr_ptr++;
}
/* handle odd width */
if (w) {
YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]);
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
YUV_TO_RGB2_CCIR(r, g, b, y2_ptr[0]);
RGB_OUT(d2, r, g, b);
d1 += BPP;
d2 += BPP;
y1_ptr++;
y2_ptr++;
cb_ptr++;
cr_ptr++;
}
d += 2 * dst->linesize[0];
y1_ptr += 2 * src->linesize[0] - width;
cb_ptr += src->linesize[1] - width2;
cr_ptr += src->linesize[2] - width2;
}
/* handle odd height */
if (height) {
d1 = d;
for(w = width; w >= 2; w -= 2) {
YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]);
/* output 2 pixels */
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[1]);
RGB_OUT(d1 + BPP, r, g, b);
d1 += 2 * BPP;
y1_ptr += 2;
cb_ptr++;
cr_ptr++;
}
/* handle width */
if (w) {
YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]);
/* output 2 pixels */
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
d1 += BPP;
y1_ptr++;
cb_ptr++;
cr_ptr++;
}
}
}
static void glue(yuvj420p_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *y1_ptr, *y2_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1, *d2;
int w, y, cb, cr, r_add, g_add, b_add, width2;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
y1_ptr = src->data[0];
cb_ptr = src->data[1];
cr_ptr = src->data[2];
width2 = (width + 1) >> 1;
for(;height >= 2; height -= 2) {
d1 = d;
d2 = d + dst->linesize[0];
y2_ptr = y1_ptr + src->linesize[0];
for(w = width; w >= 2; w -= 2) {
YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]);
/* output 4 pixels */
YUV_TO_RGB2(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
YUV_TO_RGB2(r, g, b, y1_ptr[1]);
RGB_OUT(d1 + BPP, r, g, b);
YUV_TO_RGB2(r, g, b, y2_ptr[0]);
RGB_OUT(d2, r, g, b);
YUV_TO_RGB2(r, g, b, y2_ptr[1]);
RGB_OUT(d2 + BPP, r, g, b);
d1 += 2 * BPP;
d2 += 2 * BPP;
y1_ptr += 2;
y2_ptr += 2;
cb_ptr++;
cr_ptr++;
}
/* handle odd width */
if (w) {
YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]);
YUV_TO_RGB2(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
YUV_TO_RGB2(r, g, b, y2_ptr[0]);
RGB_OUT(d2, r, g, b);
d1 += BPP;
d2 += BPP;
y1_ptr++;
y2_ptr++;
cb_ptr++;
cr_ptr++;
}
d += 2 * dst->linesize[0];
y1_ptr += 2 * src->linesize[0] - width;
cb_ptr += src->linesize[1] - width2;
cr_ptr += src->linesize[2] - width2;
}
/* handle odd height */
if (height) {
d1 = d;
for(w = width; w >= 2; w -= 2) {
YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]);
/* output 2 pixels */
YUV_TO_RGB2(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
YUV_TO_RGB2(r, g, b, y1_ptr[1]);
RGB_OUT(d1 + BPP, r, g, b);
d1 += 2 * BPP;
y1_ptr += 2;
cb_ptr++;
cr_ptr++;
}
/* handle width */
if (w) {
YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]);
/* output 2 pixels */
YUV_TO_RGB2(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
d1 += BPP;
y1_ptr++;
cb_ptr++;
cr_ptr++;
}
}
}
static void glue(RGB_NAME, _to_yuv420p)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
int wrap, wrap3, width2;
int r, g, b, r1, g1, b1, w;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
lum = dst->data[0];
cb = dst->data[1];
cr = dst->data[2];
width2 = (width + 1) >> 1;
wrap = dst->linesize[0];
wrap3 = src->linesize[0];
p = src->data[0];
for(;height>=2;height -= 2) {
for(w = width; w >= 2; w -= 2) {
RGB_IN(r, g, b, p);
r1 = r;
g1 = g;
b1 = b;
lum[0] = RGB_TO_Y_CCIR(r, g, b);
RGB_IN(r, g, b, p + BPP);
r1 += r;
g1 += g;
b1 += b;
lum[1] = RGB_TO_Y_CCIR(r, g, b);
p += wrap3;
lum += wrap;
RGB_IN(r, g, b, p);
r1 += r;
g1 += g;
b1 += b;
lum[0] = RGB_TO_Y_CCIR(r, g, b);
RGB_IN(r, g, b, p + BPP);
r1 += r;
g1 += g;
b1 += b;
lum[1] = RGB_TO_Y_CCIR(r, g, b);
cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 2);
cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 2);
cb++;
cr++;
p += -wrap3 + 2 * BPP;
lum += -wrap + 2;
}
if (w) {
RGB_IN(r, g, b, p);
r1 = r;
g1 = g;
b1 = b;
lum[0] = RGB_TO_Y_CCIR(r, g, b);
p += wrap3;
lum += wrap;
RGB_IN(r, g, b, p);
r1 += r;
g1 += g;
b1 += b;
lum[0] = RGB_TO_Y_CCIR(r, g, b);
cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 1);
cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 1);
cb++;
cr++;
p += -wrap3 + BPP;
lum += -wrap + 1;
}
p += wrap3 + (wrap3 - width * BPP);
lum += wrap + (wrap - width);
cb += dst->linesize[1] - width2;
cr += dst->linesize[2] - width2;
}
/* handle odd height */
if (height) {
for(w = width; w >= 2; w -= 2) {
RGB_IN(r, g, b, p);
r1 = r;
g1 = g;
b1 = b;
lum[0] = RGB_TO_Y_CCIR(r, g, b);
RGB_IN(r, g, b, p + BPP);
r1 += r;
g1 += g;
b1 += b;
lum[1] = RGB_TO_Y_CCIR(r, g, b);
cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 1);
cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 1);
cb++;
cr++;
p += 2 * BPP;
lum += 2;
}
if (w) {
RGB_IN(r, g, b, p);
lum[0] = RGB_TO_Y_CCIR(r, g, b);
cb[0] = RGB_TO_U_CCIR(r, g, b, 0);
cr[0] = RGB_TO_V_CCIR(r, g, b, 0);
}
}
}
static void glue(RGB_NAME, _to_gray)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const unsigned char *p;
unsigned char *q;
int r, g, b, dst_wrap, src_wrap;
int x, y;
p = src->data[0];
src_wrap = src->linesize[0] - BPP * width;
q = dst->data[0];
dst_wrap = dst->linesize[0] - width;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
RGB_IN(r, g, b, p);
q[0] = RGB_TO_Y(r, g, b);
q++;
p += BPP;
}
p += src_wrap;
q += dst_wrap;
}
}
static void glue(gray_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const unsigned char *p;
unsigned char *q;
int r, dst_wrap, src_wrap;
int x, y;
p = src->data[0];
src_wrap = src->linesize[0] - width;
q = dst->data[0];
dst_wrap = dst->linesize[0] - BPP * width;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
r = p[0];
RGB_OUT(q, r, r, r);
q += BPP;
p ++;
}
p += src_wrap;
q += dst_wrap;
}
}
static void glue(pal8_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const unsigned char *p;
unsigned char *q;
int r, g, b, dst_wrap, src_wrap;
int x, y;
uint32_t v;
const uint32_t *palette;
p = src->data[0];
src_wrap = src->linesize[0] - width;
palette = (uint32_t *)src->data[1];
q = dst->data[0];
dst_wrap = dst->linesize[0] - BPP * width;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
v = palette[p[0]];
r = (v >> 16) & 0xff;
g = (v >> 8) & 0xff;
b = (v) & 0xff;
#ifdef RGBA_OUT
{
int a;
a = (v >> 24) & 0xff;
RGBA_OUT(q, r, g, b, a);
}
#else
RGB_OUT(q, r, g, b);
#endif
q += BPP;
p ++;
}
p += src_wrap;
q += dst_wrap;
}
}
#if !defined(FMT_RGBA32) && defined(RGBA_OUT)
/* alpha support */
static void glue(rgba32_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *s;
uint8_t *d;
int src_wrap, dst_wrap, j, y;
unsigned int v, r, g, b, a;
s = src->data[0];
src_wrap = src->linesize[0] - width * 4;
d = dst->data[0];
dst_wrap = dst->linesize[0] - width * BPP;
for(y=0;y<height;y++) {
for(j = 0;j < width; j++) {
v = ((const uint32_t *)(s))[0];
a = (v >> 24) & 0xff;
r = (v >> 16) & 0xff;
g = (v >> 8) & 0xff;
b = v & 0xff;
RGBA_OUT(d, r, g, b, a);
s += 4;
d += BPP;
}
s += src_wrap;
d += dst_wrap;
}
}
static void glue(RGB_NAME, _to_rgba32)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *s;
uint8_t *d;
int src_wrap, dst_wrap, j, y;
unsigned int r, g, b, a;
s = src->data[0];
src_wrap = src->linesize[0] - width * BPP;
d = dst->data[0];
dst_wrap = dst->linesize[0] - width * 4;
for(y=0;y<height;y++) {
for(j = 0;j < width; j++) {
RGBA_IN(r, g, b, a, s);
((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;
d += 4;
s += BPP;
}
s += src_wrap;
d += dst_wrap;
}
}
#endif /* !defined(FMT_RGBA32) && defined(RGBA_IN) */
#ifndef FMT_RGB24
static void glue(rgb24_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *s;
uint8_t *d;
int src_wrap, dst_wrap, j, y;
unsigned int r, g, b;
s = src->data[0];
src_wrap = src->linesize[0] - width * 3;
d = dst->data[0];
dst_wrap = dst->linesize[0] - width * BPP;
for(y=0;y<height;y++) {
for(j = 0;j < width; j++) {
r = s[0];
g = s[1];
b = s[2];
RGB_OUT(d, r, g, b);
s += 3;
d += BPP;
}
s += src_wrap;
d += dst_wrap;
}
}
static void glue(RGB_NAME, _to_rgb24)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *s;
uint8_t *d;
int src_wrap, dst_wrap, j, y;
unsigned int r, g , b;
s = src->data[0];
src_wrap = src->linesize[0] - width * BPP;
d = dst->data[0];
dst_wrap = dst->linesize[0] - width * 3;
for(y=0;y<height;y++) {
for(j = 0;j < width; j++) {
RGB_IN(r, g, b, s)
d[0] = r;
d[1] = g;
d[2] = b;
d += 3;
s += BPP;
}
s += src_wrap;
d += dst_wrap;
}
}
#endif /* !FMT_RGB24 */
#ifdef FMT_RGB24
static void yuv444p_to_rgb24(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *y1_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1;
int w, y, cb, cr, r_add, g_add, b_add;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
y1_ptr = src->data[0];
cb_ptr = src->data[1];
cr_ptr = src->data[2];
for(;height > 0; height --) {
d1 = d;
for(w = width; w > 0; w--) {
YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]);
YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
d1 += BPP;
y1_ptr++;
cb_ptr++;
cr_ptr++;
}
d += dst->linesize[0];
y1_ptr += src->linesize[0] - width;
cb_ptr += src->linesize[1] - width;
cr_ptr += src->linesize[2] - width;
}
}
static void yuvj444p_to_rgb24(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const uint8_t *y1_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1;
int w, y, cb, cr, r_add, g_add, b_add;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
y1_ptr = src->data[0];
cb_ptr = src->data[1];
cr_ptr = src->data[2];
for(;height > 0; height --) {
d1 = d;
for(w = width; w > 0; w--) {
YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]);
YUV_TO_RGB2(r, g, b, y1_ptr[0]);
RGB_OUT(d1, r, g, b);
d1 += BPP;
y1_ptr++;
cb_ptr++;
cr_ptr++;
}
d += dst->linesize[0];
y1_ptr += src->linesize[0] - width;
cb_ptr += src->linesize[1] - width;
cr_ptr += src->linesize[2] - width;
}
}
static void rgb24_to_yuv444p(AVPicture *dst, const AVPicture *src,
int width, int height)
{
int src_wrap, x, y;
int r, g, b;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
lum = dst->data[0];
cb = dst->data[1];
cr = dst->data[2];
src_wrap = src->linesize[0] - width * BPP;
p = src->data[0];
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
RGB_IN(r, g, b, p);
lum[0] = RGB_TO_Y_CCIR(r, g, b);
cb[0] = RGB_TO_U_CCIR(r, g, b, 0);
cr[0] = RGB_TO_V_CCIR(r, g, b, 0);
p += BPP;
cb++;
cr++;
lum++;
}
p += src_wrap;
lum += dst->linesize[0] - width;
cb += dst->linesize[1] - width;
cr += dst->linesize[2] - width;
}
}
static void rgb24_to_yuvj420p(AVPicture *dst, const AVPicture *src,
int width, int height)
{
int wrap, wrap3, width2;
int r, g, b, r1, g1, b1, w;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
lum = dst->data[0];
cb = dst->data[1];
cr = dst->data[2];
width2 = (width + 1) >> 1;
wrap = dst->linesize[0];
wrap3 = src->linesize[0];
p = src->data[0];
for(;height>=2;height -= 2) {
for(w = width; w >= 2; w -= 2) {
RGB_IN(r, g, b, p);
r1 = r;
g1 = g;
b1 = b;
lum[0] = RGB_TO_Y(r, g, b);
RGB_IN(r, g, b, p + BPP);
r1 += r;
g1 += g;
b1 += b;
lum[1] = RGB_TO_Y(r, g, b);
p += wrap3;
lum += wrap;
RGB_IN(r, g, b, p);
r1 += r;
g1 += g;
b1 += b;
lum[0] = RGB_TO_Y(r, g, b);
RGB_IN(r, g, b, p + BPP);
r1 += r;
g1 += g;
b1 += b;
lum[1] = RGB_TO_Y(r, g, b);
cb[0] = RGB_TO_U(r1, g1, b1, 2);
cr[0] = RGB_TO_V(r1, g1, b1, 2);
cb++;
cr++;
p += -wrap3 + 2 * BPP;
lum += -wrap + 2;
}
if (w) {
RGB_IN(r, g, b, p);
r1 = r;
g1 = g;
b1 = b;
lum[0] = RGB_TO_Y(r, g, b);
p += wrap3;
lum += wrap;
RGB_IN(r, g, b, p);
r1 += r;
g1 += g;
b1 += b;
lum[0] = RGB_TO_Y(r, g, b);
cb[0] = RGB_TO_U(r1, g1, b1, 1);
cr[0] = RGB_TO_V(r1, g1, b1, 1);
cb++;
cr++;
p += -wrap3 + BPP;
lum += -wrap + 1;
}
p += wrap3 + (wrap3 - width * BPP);
lum += wrap + (wrap - width);
cb += dst->linesize[1] - width2;
cr += dst->linesize[2] - width2;
}
/* handle odd height */
if (height) {
for(w = width; w >= 2; w -= 2) {
RGB_IN(r, g, b, p);
r1 = r;
g1 = g;
b1 = b;
lum[0] = RGB_TO_Y(r, g, b);
RGB_IN(r, g, b, p + BPP);
r1 += r;
g1 += g;
b1 += b;
lum[1] = RGB_TO_Y(r, g, b);
cb[0] = RGB_TO_U(r1, g1, b1, 1);
cr[0] = RGB_TO_V(r1, g1, b1, 1);
cb++;
cr++;
p += 2 * BPP;
lum += 2;
}
if (w) {
RGB_IN(r, g, b, p);
lum[0] = RGB_TO_Y(r, g, b);
cb[0] = RGB_TO_U(r, g, b, 0);
cr[0] = RGB_TO_V(r, g, b, 0);
}
}
}
static void rgb24_to_yuvj444p(AVPicture *dst, const AVPicture *src,
int width, int height)
{
int src_wrap, x, y;
int r, g, b;
uint8_t *lum, *cb, *cr;
const uint8_t *p;
lum = dst->data[0];
cb = dst->data[1];
cr = dst->data[2];
src_wrap = src->linesize[0] - width * BPP;
p = src->data[0];
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
RGB_IN(r, g, b, p);
lum[0] = RGB_TO_Y(r, g, b);
cb[0] = RGB_TO_U(r, g, b, 0);
cr[0] = RGB_TO_V(r, g, b, 0);
p += BPP;
cb++;
cr++;
lum++;
}
p += src_wrap;
lum += dst->linesize[0] - width;
cb += dst->linesize[1] - width;
cr += dst->linesize[2] - width;
}
}
#endif /* FMT_RGB24 */
#if defined(FMT_RGB24) || defined(FMT_RGBA32)
static void glue(RGB_NAME, _to_pal8)(AVPicture *dst, const AVPicture *src,
int width, int height)
{
const unsigned char *p;
unsigned char *q;
int dst_wrap, src_wrap;
int x, y, has_alpha;
unsigned int r, g, b;
p = src->data[0];
src_wrap = src->linesize[0] - BPP * width;
q = dst->data[0];
dst_wrap = dst->linesize[0] - width;
has_alpha = 0;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
#ifdef RGBA_IN
{
unsigned int a;
RGBA_IN(r, g, b, a, p);
/* crude approximation for alpha ! */
if (a < 0x80) {
has_alpha = 1;
q[0] = TRANSP_INDEX;
} else {
q[0] = gif_clut_index(r, g, b);
}
}
#else
RGB_IN(r, g, b, p);
q[0] = gif_clut_index(r, g, b);
#endif
q++;
p += BPP;
}
p += src_wrap;
q += dst_wrap;
}
build_rgb_palette(dst->data[1], has_alpha);
}
#endif /* defined(FMT_RGB24) || defined(FMT_RGBA32) */
#ifdef RGBA_IN
static int glue(get_alpha_info_, RGB_NAME)(const AVPicture *src,
int width, int height)
{
const unsigned char *p;
int src_wrap, ret, x, y;
unsigned int r, g, b, a;
p = src->data[0];
src_wrap = src->linesize[0] - BPP * width;
ret = 0;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
RGBA_IN(r, g, b, a, p);
if (a == 0x00) {
ret |= FF_ALPHA_TRANSP;
} else if (a != 0xff) {
ret |= FF_ALPHA_SEMI_TRANSP;
}
p += BPP;
}
p += src_wrap;
}
return ret;
}
#endif /* RGBA_IN */
#undef RGB_IN
#undef RGBA_IN
#undef RGB_OUT
#undef RGBA_OUT
#undef BPP
#undef RGB_NAME
#undef FMT_RGB24
#undef FMT_RGBA32

106
gst/ffmpegcolorspace/mem.c Normal file
View file

@ -0,0 +1,106 @@
/*
* default memory allocator for libavcodec
* Copyright (c) 2002 Fabrice Bellard.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/**
* @file mem.c
* default memory allocator for libavcodec.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "avcodec.h"
/* here we can use OS dependant allocation functions */
#undef malloc
#undef free
#undef realloc
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
/* you can redefine av_malloc and av_free in your project to use your
memory allocator. You do not need to suppress this file because the
linker will do it automatically */
/**
* Memory allocation of size byte with alignment suitable for all
* memory accesses (including vectors if available on the
* CPU). av_malloc(0) must return a non NULL pointer.
*/
void *
av_malloc (unsigned int size)
{
void *ptr;
#if defined (HAVE_MEMALIGN)
ptr = memalign (16, size);
/* Why 64?
Indeed, we should align it:
on 4 for 386
on 16 for 486
on 32 for 586, PPro - k6-III
on 64 for K7 (maybe for P3 too).
Because L1 and L2 caches are aligned on those values.
But I don't want to code such logic here!
*/
/* Why 16?
because some cpus need alignment, for example SSE2 on P4, & most RISC cpus
it will just trigger an exception and the unaligned load will be done in the
exception handler or it will just segfault (SSE2 on P4)
Why not larger? because i didnt see a difference in benchmarks ...
*/
/* benchmarks with p3
memalign(64)+1 3071,3051,3032
memalign(64)+2 3051,3032,3041
memalign(64)+4 2911,2896,2915
memalign(64)+8 2545,2554,2550
memalign(64)+16 2543,2572,2563
memalign(64)+32 2546,2545,2571
memalign(64)+64 2570,2533,2558
btw, malloc seems to do 8 byte alignment by default here
*/
#else
ptr = malloc (size);
#endif
return ptr;
}
/**
* av_realloc semantics (same as glibc): if ptr is NULL and size > 0,
* identical to malloc(size). If size is zero, it is identical to
* free(ptr) and NULL is returned.
*/
void *
av_realloc (void *ptr, unsigned int size)
{
return realloc (ptr, size);
}
/* NOTE: ptr = NULL is explicetly allowed */
void
av_free (void *ptr)
{
/* XXX: this test should not be needed on most libcs */
if (ptr)
free (ptr);
}

243
gst/ffmpegcolorspace/mmx.h Normal file
View file

@ -0,0 +1,243 @@
/*
* mmx.h
* Copyright (C) 1997-2001 H. Dietz and R. Fisher
*/
#ifndef AVCODEC_I386MMX_H
#define AVCODEC_I386MMX_H
/*
* The type of an value that fits in an MMX register (note that long
* long constant values MUST be suffixed by LL and unsigned long long
* values by ULL, lest they be truncated by the compiler)
*/
typedef union {
long long q; /* Quadword (64-bit) value */
unsigned long long uq; /* Unsigned Quadword */
int d[2]; /* 2 Doubleword (32-bit) values */
unsigned int ud[2]; /* 2 Unsigned Doubleword */
short w[4]; /* 4 Word (16-bit) values */
unsigned short uw[4]; /* 4 Unsigned Word */
char b[8]; /* 8 Byte (8-bit) values */
unsigned char ub[8]; /* 8 Unsigned Byte */
float s[2]; /* Single-precision (32-bit) value */
} mmx_t; /* On an 8-byte (64-bit) boundary */
#define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
: "i" (imm) )
#define mmx_m2r(op,mem,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
: "m" (mem))
#define mmx_r2m(op,reg,mem) \
__asm__ __volatile__ (#op " %%" #reg ", %0" \
: "=m" (mem) \
: /* nothing */ )
#define mmx_r2r(op,regs,regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd)
#define emms() __asm__ __volatile__ ("emms")
#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
#define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd)
#define movq_m2r(var,reg) mmx_m2r (movq, var, reg)
#define movq_r2m(reg,var) mmx_r2m (movq, reg, var)
#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd)
#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg)
#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd)
#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg)
#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd)
#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg)
#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd)
#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg)
#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd)
#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg)
#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd)
#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg)
#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd)
#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg)
#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd)
#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg)
#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd)
#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg)
#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd)
#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg)
#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd)
#define pand_m2r(var,reg) mmx_m2r (pand, var, reg)
#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd)
#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg)
#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd)
#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg)
#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd)
#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg)
#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd)
#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg)
#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd)
#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg)
#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd)
#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg)
#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd)
#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg)
#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd)
#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg)
#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd)
#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg)
#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd)
#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg)
#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd)
#define por_m2r(var,reg) mmx_m2r (por, var, reg)
#define por_r2r(regs,regd) mmx_r2r (por, regs, regd)
#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg)
#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg)
#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd)
#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg)
#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg)
#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd)
#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg)
#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg)
#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd)
#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg)
#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg)
#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd)
#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg)
#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg)
#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd)
#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg)
#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg)
#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd)
#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg)
#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg)
#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd)
#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg)
#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg)
#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd)
#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg)
#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd)
#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg)
#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd)
#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg)
#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd)
#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg)
#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd)
#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg)
#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd)
#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg)
#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd)
#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg)
#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd)
#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg)
#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd)
#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg)
#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd)
#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg)
#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd)
#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg)
#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd)
#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg)
#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd)
#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg)
#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd)
#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
/* 3DNOW extensions */
#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
/* AMD MMX extensions - also available in intel SSE */
#define mmx_m2ri(op,mem,reg,imm) \
__asm__ __volatile__ (#op " %1, %0, %%" #reg \
: /* nothing */ \
: "X" (mem), "X" (imm))
#define mmx_r2ri(op,regs,regd,imm) \
__asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
: /* nothing */ \
: "X" (imm) )
#define mmx_fetch(mem,hint) \
__asm__ __volatile__ ("prefetch" #hint " %0" \
: /* nothing */ \
: "X" (mem))
#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg)
#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd)
#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg)
#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd)
#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm)
#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm)
#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg)
#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd)
#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg)
#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd)
#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg)
#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd)
#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg)
#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)
#define pmovmskb(mmreg,reg) \
__asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg)
#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)
#define prefetcht0(mem) mmx_fetch (mem, t0)
#define prefetcht1(mem) mmx_fetch (mem, t1)
#define prefetcht2(mem) mmx_fetch (mem, t2)
#define prefetchnta(mem) mmx_fetch (mem, nta)
#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg)
#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd)
#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
#define sfence() __asm__ __volatile__ ("sfence\n\t")
#endif /* AVCODEC_I386MMX_H */

View file

@ -0,0 +1,45 @@
/*
* utils for libavcodec
* Copyright (c) 2001 Fabrice Bellard.
* Copyright (c) 2003 Michel Bardiaux for the av_log API
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/**
* @file utils.c
* utils.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "avcodec.h"
#include "dsputil.h"
/* must be called before any other functions */
void
avcodec_init (void)
{
static int inited = 0;
if (inited != 0)
return;
inited = 1;
dsputil_static_init ();
}