gst/ffmpegcolorspace/: Sync back from gst-ffmpeg. Deprecates ffcolorspace. Adds palette handling plus update from ff...

Original commit message from CVS:
* gst/ffmpegcolorspace/Makefile.am:
* gst/ffmpegcolorspace/avcodec.h:
* gst/ffmpegcolorspace/common.h:
* gst/ffmpegcolorspace/dsputil.c: (dsputil_static_init):
* gst/ffmpegcolorspace/dsputil.h:
* gst/ffmpegcolorspace/gstffmpeg.c: (plugin_init):
* gst/ffmpegcolorspace/gstffmpegcodecmap.c:
(gst_ffmpeg_get_palette), (gst_ffmpeg_set_palette),
(gst_ffmpeg_pixfmt_to_caps), (gst_ffmpeg_smpfmt_to_caps),
(gst_ffmpegcsp_codectype_to_caps), (gst_ffmpeg_caps_to_smpfmt),
(gst_ffmpeg_caps_to_pixfmt), (gst_ffmpegcsp_caps_with_codectype),
(gst_ffmpegcsp_avpicture_fill):
* gst/ffmpegcolorspace/gstffmpegcodecmap.h:
* gst/ffmpegcolorspace/gstffmpegcolorspace.c:
(gst_ffmpegcsp_caps_remove_format_info), (gst_ffmpegcsp_getcaps),
(gst_ffmpegcsp_pad_link), (gst_ffmpegcsp_get_type),
(gst_ffmpegcsp_base_init), (gst_ffmpegcsp_class_init),
(gst_ffmpegcsp_init), (gst_ffmpegcsp_chain),
(gst_ffmpegcsp_change_state), (gst_ffmpegcsp_set_property),
(gst_ffmpegcsp_get_property), (gst_ffmpegcolorspace_register):
* gst/ffmpegcolorspace/imgconvert.c:
(avcodec_get_chroma_sub_sample), (avcodec_get_pix_fmt_name),
(avcodec_get_pix_fmt), (avpicture_fill), (avpicture_layout),
(avpicture_get_size), (avcodec_get_pix_fmt_loss),
(avg_bits_per_pixel), (avcodec_find_best_pix_fmt1),
(avcodec_find_best_pix_fmt), (img_copy_plane), (img_copy),
(yuv422_to_yuv420p), (uyvy422_to_yuv420p), (uyvy422_to_yuv422p),
(yuv422_to_yuv422p), (yuv422p_to_yuv422), (yuv422p_to_uyvy422),
(uyvy411_to_yuv411p), (yuv420p_to_yuv422), (C_JPEG_TO_CCIR),
(img_convert_init), (img_apply_table), (shrink41), (shrink21),
(shrink12), (shrink22), (shrink44), (grow21_line), (grow41_line),
(grow21), (grow22), (grow41), (grow44), (conv411),
(gif_clut_index), (build_rgb_palette), (bitcopy_n), (mono_to_gray),
(monowhite_to_gray), (monoblack_to_gray), (gray_to_mono),
(gray_to_monowhite), (gray_to_monoblack), (avpicture_alloc),
(avpicture_free), (is_yuv_planar), (img_convert),
(get_alpha_info_pal8), (img_get_alpha_info), (deinterlace_line),
(deinterlace_line_inplace), (deinterlace_bottom_field),
(deinterlace_bottom_field_inplace), (avpicture_deinterlace):
* gst/ffmpegcolorspace/imgconvert_template.h:
* gst/ffmpegcolorspace/mem.c: (av_malloc), (av_realloc), (av_free):
* gst/ffmpegcolorspace/mmx.h:
* gst/ffmpegcolorspace/utils.c: (av_mallocz), (av_strdup),
(av_fast_realloc), (av_mallocz_static), (av_free_static),
(av_freep), (avcodec_get_context_defaults),
(avcodec_alloc_context), (avcodec_init):
Sync back from gst-ffmpeg. Deprecates ffcolorspace. Adds palette
handling plus update from ffmpeg CVS. Large clean-up.
This commit is contained in:
Ronald S. Bultje 2004-10-31 14:33:18 +00:00
parent df916dcfc9
commit 50edf4b216
15 changed files with 1594 additions and 2610 deletions

View file

@ -1,3 +1,54 @@
2004-10-31 Ronald S. Bultje <rbultje@ronald.bitfreak.net>
* gst/ffmpegcolorspace/Makefile.am:
* gst/ffmpegcolorspace/avcodec.h:
* gst/ffmpegcolorspace/common.h:
* gst/ffmpegcolorspace/dsputil.c: (dsputil_static_init):
* gst/ffmpegcolorspace/dsputil.h:
* gst/ffmpegcolorspace/gstffmpeg.c: (plugin_init):
* gst/ffmpegcolorspace/gstffmpegcodecmap.c:
(gst_ffmpeg_get_palette), (gst_ffmpeg_set_palette),
(gst_ffmpeg_pixfmt_to_caps), (gst_ffmpeg_smpfmt_to_caps),
(gst_ffmpegcsp_codectype_to_caps), (gst_ffmpeg_caps_to_smpfmt),
(gst_ffmpeg_caps_to_pixfmt), (gst_ffmpegcsp_caps_with_codectype),
(gst_ffmpegcsp_avpicture_fill):
* gst/ffmpegcolorspace/gstffmpegcodecmap.h:
* gst/ffmpegcolorspace/gstffmpegcolorspace.c:
(gst_ffmpegcsp_caps_remove_format_info), (gst_ffmpegcsp_getcaps),
(gst_ffmpegcsp_pad_link), (gst_ffmpegcsp_get_type),
(gst_ffmpegcsp_base_init), (gst_ffmpegcsp_class_init),
(gst_ffmpegcsp_init), (gst_ffmpegcsp_chain),
(gst_ffmpegcsp_change_state), (gst_ffmpegcsp_set_property),
(gst_ffmpegcsp_get_property), (gst_ffmpegcolorspace_register):
* gst/ffmpegcolorspace/imgconvert.c:
(avcodec_get_chroma_sub_sample), (avcodec_get_pix_fmt_name),
(avcodec_get_pix_fmt), (avpicture_fill), (avpicture_layout),
(avpicture_get_size), (avcodec_get_pix_fmt_loss),
(avg_bits_per_pixel), (avcodec_find_best_pix_fmt1),
(avcodec_find_best_pix_fmt), (img_copy_plane), (img_copy),
(yuv422_to_yuv420p), (uyvy422_to_yuv420p), (uyvy422_to_yuv422p),
(yuv422_to_yuv422p), (yuv422p_to_yuv422), (yuv422p_to_uyvy422),
(uyvy411_to_yuv411p), (yuv420p_to_yuv422), (C_JPEG_TO_CCIR),
(img_convert_init), (img_apply_table), (shrink41), (shrink21),
(shrink12), (shrink22), (shrink44), (grow21_line), (grow41_line),
(grow21), (grow22), (grow41), (grow44), (conv411),
(gif_clut_index), (build_rgb_palette), (bitcopy_n), (mono_to_gray),
(monowhite_to_gray), (monoblack_to_gray), (gray_to_mono),
(gray_to_monowhite), (gray_to_monoblack), (avpicture_alloc),
(avpicture_free), (is_yuv_planar), (img_convert),
(get_alpha_info_pal8), (img_get_alpha_info), (deinterlace_line),
(deinterlace_line_inplace), (deinterlace_bottom_field),
(deinterlace_bottom_field_inplace), (avpicture_deinterlace):
* gst/ffmpegcolorspace/imgconvert_template.h:
* gst/ffmpegcolorspace/mem.c: (av_malloc), (av_realloc), (av_free):
* gst/ffmpegcolorspace/mmx.h:
* gst/ffmpegcolorspace/utils.c: (av_mallocz), (av_strdup),
(av_fast_realloc), (av_mallocz_static), (av_free_static),
(av_freep), (avcodec_get_context_defaults),
(avcodec_alloc_context), (avcodec_init):
Sync back from gst-ffmpeg. Deprecates ffcolorspace. Adds palette
handling plus update from ffmpeg CVS. Large clean-up.
2004-10-31 Ronald S. Bultje <rbultje@ronald.bitfreak.net>
* gst/playback/Makefile.am:

View file

@ -15,7 +15,5 @@ libgstffmpegcolorspace_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS)
noinst_HEADERS = \
gstffmpegcodecmap.h \
imgconvert_template.h \
common.h \
dsputil.h \
mmx.h \
avcodec.h

View file

@ -11,12 +11,13 @@
extern "C" {
#endif
#include "common.h"
#include <stdint.h>
#include <sys/types.h> /* size_t */
#define FFMPEG_VERSION_INT 0x000408
#define FFMPEG_VERSION "0.4.8"
#define LIBAVCODEC_BUILD 4707
#define FFMPEG_VERSION_INT 0x000409
#define FFMPEG_VERSION "0.4.9-pre1"
#define LIBAVCODEC_BUILD 4728
#define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
#define LIBAVCODEC_VERSION FFMPEG_VERSION
@ -25,14 +26,11 @@ extern "C" {
#define AV_TOSTRING(s) #s
#define LIBAVCODEC_IDENT "FFmpeg" LIBAVCODEC_VERSION "b" AV_STRINGIFY(LIBAVCODEC_BUILD)
#define AV_NOPTS_VALUE int64_t_C(0x8000000000000000)
#define AV_TIME_BASE 1000000
enum CodecType {
CODEC_TYPE_UNKNOWN = -1,
CODEC_TYPE_VIDEO,
CODEC_TYPE_AUDIO,
CODEC_TYPE_DATA
CODEC_TYPE_DATA,
};
/**
@ -55,7 +53,7 @@ enum CodecType {
*/
enum PixelFormat {
PIX_FMT_YUV420P, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
PIX_FMT_YUV422,
PIX_FMT_YUV422, ///< Packed pixel, Y0 Cb Y1 Cr
PIX_FMT_RGB24, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
PIX_FMT_BGR24, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
PIX_FMT_YUV422P, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
@ -74,9 +72,74 @@ enum PixelFormat {
PIX_FMT_YUVJ444P, ///< Planar YUV 4:4:4 full scale (jpeg)
PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing(xvmc_render.h)
PIX_FMT_XVMC_MPEG2_IDCT,
PIX_FMT_NB
PIX_FMT_UYVY422, ///< Packed pixel, Cb Y0 Cr Y1
PIX_FMT_UYVY411, ///< Packed pixel, Cb Y0 Y1 Cr Y2 Y3
PIX_FMT_NB,
};
/* currently unused, may be used if 24/32 bits samples ever supported */
enum SampleFormat {
SAMPLE_FMT_S16 = 0, ///< signed 16 bits
};
#define DEFAULT_FRAME_RATE_BASE 1001000
/**
* main external api structure.
*/
typedef struct AVCodecContext {
/* video only */
/**
* frames per sec multiplied by frame_rate_base.
* for variable fps this is the precission, so if the timestamps
* can be specified in msec precssion then this is 1000*frame_rate_base
* - encoding: MUST be set by user
* - decoding: set by lavc. 0 or the frame_rate if available
*/
int frame_rate;
/**
* picture width / height.
* - encoding: MUST be set by user.
* - decoding: set by lavc.
* Note, for compatibility its possible to set this instead of
* coded_width/height before decoding
*/
int width, height;
/**
* pixel format, see PIX_FMT_xxx.
* - encoding: FIXME: used by ffmpeg to decide whether an pix_fmt
* conversion is in order. This only works for
* codecs with one supported pix_fmt, we should
* do something for a generic case as well.
* - decoding: set by lavc.
*/
enum PixelFormat pix_fmt;
/* audio only */
int sample_rate; ///< samples per sec
int channels;
int sample_fmt; ///< sample format, currenly unused
/**
* Palette control structure
* - encoding: ??? (no palette-enabled encoder yet)
* - decoding: set by user.
*/
struct AVPaletteControl *palctrl;
/**
* frame_rate_base.
* for variable fps this is 1
* - encoding: set by user.
* - decoding: set by lavc.
* @todo move this after frame_rate
*/
int frame_rate_base;
} AVCodecContext;
/**
* four components are given, that's all.
* the last component is alpha
@ -87,26 +150,31 @@ typedef struct AVPicture {
} AVPicture;
/**
* Allocate memory for a picture. Call avpicture_free to free it.
*
* @param picture the picture to be filled in.
* @param pix_fmt the format of the picture.
* @param width the width of the picture.
* @param height the height of the picture.
* @return 0 if successful, -1 if not.
* AVPaletteControl
* This structure defines a method for communicating palette changes
between a demuxer and a decoder.
*/
int avpicture_alloc(AVPicture *picture, int pix_fmt, int width, int height);
#define AVPALETTE_SIZE 1024
#define AVPALETTE_COUNT 256
typedef struct AVPaletteControl {
/* Free a picture previously allocated by avpicture_alloc. */
void avpicture_free(AVPicture *picture);
/* demuxer sets this to 1 to indicate the palette has changed;
* decoder resets to 0 */
int palette_changed;
/* 4-byte ARGB palette entries, stored in native byte order; note that
* the individual palette components should be on a 8-bit scale; if
* the palette data comes from a IBM VGA native format, the component
* data is probably 6 bits in size and needs to be scaled */
unsigned int palette[AVPALETTE_COUNT];
} AVPaletteControl;
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
int pix_fmt, int width, int height);
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
unsigned char *dest, int dest_size);
int avpicture_get_size(int pix_fmt, int width, int height);
void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift);
const char *avcodec_get_pix_fmt_name(int pix_fmt);
void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
enum PixelFormat avcodec_get_pix_fmt(const char* name);
#define FF_LOSS_RESOLUTION 0x0001 /* loss due to resolution change */
@ -131,12 +199,11 @@ int img_convert(AVPicture *dst, int dst_pix_fmt,
const AVPicture *src, int pix_fmt,
int width, int height);
/* deinterlace a picture */
int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
int pix_fmt, int width, int height);
void avcodec_init(void);
void avcodec_get_context_defaults(AVCodecContext *s);
AVCodecContext *avcodec_alloc_context(void);
/* memory */
void *av_malloc(unsigned int size);
void *av_mallocz(unsigned int size);
@ -148,14 +215,21 @@ void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size);
/* for static data only */
/* call av_free_static to release all staticaly allocated tables */
void av_free_static(void);
void *__av_mallocz_static(void** location, unsigned int size);
#define av_mallocz_static(p, s) __av_mallocz_static((void **)(p), s)
void *av_mallocz_static(unsigned int size);
/* add by bero : in adx.c */
int is_adx(const unsigned char *buf,size_t bufsize);
void img_copy(AVPicture *dst, const AVPicture *src,
int pix_fmt, int width, int height);
/* endian macros */
#if !defined(BE_16) || !defined(BE_32) || !defined(LE_16) || !defined(LE_32)
#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \
(((uint8_t*)(x))[1] << 16) | \
(((uint8_t*)(x))[2] << 8) | \
((uint8_t*)(x))[3])
#define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
#define LE_32(x) ((((uint8_t*)(x))[3] << 24) | \
(((uint8_t*)(x))[2] << 16) | \
(((uint8_t*)(x))[1] << 8) | \
((uint8_t*)(x))[0])
#endif
#ifdef __cplusplus
}

File diff suppressed because it is too large Load diff

View file

@ -25,14 +25,10 @@
* DSP utils
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "avcodec.h"
#include "dsputil.h"
uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];
uint8_t cropTbl[256 + 2 * MAX_NEG_CROP] = { 0, };
/* init static data */
void

View file

@ -28,549 +28,12 @@
#ifndef DSPUTIL_H
#define DSPUTIL_H
#include "common.h"
#include "avcodec.h"
#include <math.h>
//#define DEBUG
/* dct code */
typedef short DCTELEM;
void fdct_ifast (DCTELEM *data);
void fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow (DCTELEM *data);
void ff_fdct248_islow (DCTELEM *data);
void j_rev_dct (DCTELEM *data);
void ff_fdct_mmx(DCTELEM *block);
void ff_fdct_mmx2(DCTELEM *block);
void ff_fdct_sse2(DCTELEM *block);
/* encoding scans */
extern const uint8_t ff_alternate_horizontal_scan[64];
extern const uint8_t ff_alternate_vertical_scan[64];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag248_direct[64];
/* pixel operations */
#define MAX_NEG_CROP 384
#define MAX_NEG_CROP 1024
/* temporary */
extern uint32_t squareTbl[512];
extern uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];
/* VP3 DSP functions */
void vp3_dsp_init_c(void);
void vp3_idct_put_c(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
void vp3_idct_add_c(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
void vp3_dsp_init_mmx(void);
void vp3_idct_put_mmx(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
void vp3_idct_add_mmx(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
/* minimum alignment rules ;)
if u notice errors in the align stuff, need more alignment for some asm code for some cpu
or need to use a function with less aligned data then send a mail to the ffmpeg-dev list, ...
!warning these alignments might not match reallity, (missing attribute((align)) stuff somewhere possible)
i (michael) didnt check them, these are just the alignents which i think could be reached easily ...
!future video codecs might need functions with less strict alignment
*/
/*
void get_pixels_c(DCTELEM *block, const uint8_t *pixels, int line_size);
void diff_pixels_c(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride);
void put_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size);
void add_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size);
void clear_blocks_c(DCTELEM *blocks);
*/
/* add and put pixel (decoding) */
// blocksizes for op_pixels_func are 8x4,8x8 16x8 16x16
//h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4
typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h);
typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h);
typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
#define DEF_OLD_QPEL(name)\
void ff_put_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
void ff_put_no_rnd_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\
void ff_avg_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);
DEF_OLD_QPEL(qpel16_mc11_old_c)
DEF_OLD_QPEL(qpel16_mc31_old_c)
DEF_OLD_QPEL(qpel16_mc12_old_c)
DEF_OLD_QPEL(qpel16_mc32_old_c)
DEF_OLD_QPEL(qpel16_mc13_old_c)
DEF_OLD_QPEL(qpel16_mc33_old_c)
DEF_OLD_QPEL(qpel8_mc11_old_c)
DEF_OLD_QPEL(qpel8_mc31_old_c)
DEF_OLD_QPEL(qpel8_mc12_old_c)
DEF_OLD_QPEL(qpel8_mc32_old_c)
DEF_OLD_QPEL(qpel8_mc13_old_c)
DEF_OLD_QPEL(qpel8_mc33_old_c)
#define CALL_2X_PIXELS(a, b, n)\
static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
b(block , pixels , line_size, h);\
b(block+n, pixels+n, line_size, h);\
}
/* motion estimation */
// h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2
// allthough currently h<4 is not used as functions with width <8 are not used and neither implemented
typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/;
/**
* DSPContext.
*/
typedef struct DSPContext {
/* pixel ops : interface with DCT */
void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size);
void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride);
void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
void (*add_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size);
/**
* translational global motion compensation.
*/
void (*gmc1)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x16, int y16, int rounder);
/**
* global motion compensation.
*/
void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
int (*pix_sum)(uint8_t * pix, int line_size);
int (*pix_norm1)(uint8_t * pix, int line_size);
// 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4
me_cmp_func sad[5]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[5];
me_cmp_func hadamard8_diff[5];
me_cmp_func dct_sad[5];
me_cmp_func quant_psnr[5];
me_cmp_func bit[5];
me_cmp_func rd[5];
me_cmp_func vsad[5];
me_cmp_func vsse[5];
me_cmp_func me_pre_cmp[5];
me_cmp_func me_cmp[5];
me_cmp_func me_sub_cmp[5];
me_cmp_func mb_cmp[5];
me_cmp_func ildct_cmp[5]; //only width 16 used
/**
* Halfpel motion compensation with rounding (a+b+1)>>1.
* this is an array[4][4] of motion compensation funcions for 4
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination where the result is stored
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func put_pixels_tab[4][4];
/**
* Halfpel motion compensation with rounding (a+b+1)>>1.
* This is an array[4][4] of motion compensation functions for 4
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination into which the result is averaged (a+b+1)>>1
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func avg_pixels_tab[4][4];
/**
* Halfpel motion compensation with no rounding (a+b)>>1.
* this is an array[2][4] of motion compensation funcions for 2
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination where the result is stored
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func put_no_rnd_pixels_tab[2][4];
/**
* Halfpel motion compensation with no rounding (a+b)>>1.
* this is an array[2][4] of motion compensation funcions for 2
* horizontal blocksizes (8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination into which the result is averaged (a+b)>>1
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
op_pixels_func avg_no_rnd_pixels_tab[2][4];
void (*put_no_rnd_pixels_l2[2])(uint8_t *block/*align width (8 or 16)*/, const uint8_t *a/*align 1*/, const uint8_t *b/*align 1*/, int line_size, int h);
/**
* Thirdpel motion compensation with rounding (a+b+1)>>1.
* this is an array[12] of motion compensation funcions for the 9 thirdpel positions<br>
* *pixels_tab[ xthirdpel + 4*ythirdpel ]
* @param block destination where the result is stored
* @param pixels source
* @param line_size number of bytes in a horizontal line of block
* @param h height
*/
tpel_mc_func put_tpel_pixels_tab[11]; //FIXME individual func ptr per width?
tpel_mc_func avg_tpel_pixels_tab[11]; //FIXME individual func ptr per width?
qpel_mc_func put_qpel_pixels_tab[2][16];
qpel_mc_func avg_qpel_pixels_tab[2][16];
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
qpel_mc_func put_mspel_pixels_tab[8];
/**
* h264 Chram MC
*/
h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
qpel_mc_func put_h264_qpel_pixels_tab[3][16];
qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
me_cmp_func pix_abs[2][4];
/* huffyuv specific */
void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
void (*diff_bytes)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 1*/,int w);
/**
* subtract huffyuv's variant of median prediction
* note, this might read from src1[-1], src2[-1]
*/
void (*sub_hfyu_median_prediction)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top);
void (*bswap_buf)(uint32_t *dst, uint32_t *src, int w);
void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);
/* (I)DCT */
void (*fdct)(DCTELEM *block/* align 16*/);
void (*fdct248)(DCTELEM *block/* align 16*/);
/* IDCT really*/
void (*idct)(DCTELEM *block/* align 16*/);
/**
* block -> idct -> clip to unsigned 8 bit -> dest.
* (-1392, 0, 0, ...) -> idct -> (-174, -174, ...) -> put -> (0, 0, ...)
* @param line_size size in bytes of a horizotal line of dest
*/
void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
/**
* block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
* @param line_size size in bytes of a horizotal line of dest
*/
void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
/**
* idct input permutation.
* several optimized IDCTs need a permutated input (relative to the normal order of the reference
* IDCT)
* this permutation must be performed before the idct_put/add, note, normally this can be merged
* with the zigzag/alternate scan<br>
* an example to avoid confusion:
* - (->decode coeffs -> zigzag reorder -> dequant -> reference idct ->...)
* - (x -> referece dct -> reference idct -> x)
* - (x -> referece dct -> simple_mmx_perm = idct_permutation -> simple_idct_mmx -> x)
* - (->decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant -> simple_idct_mmx ->...)
*/
uint8_t idct_permutation[64];
int idct_permutation_type;
#define FF_NO_IDCT_PERM 1
#define FF_LIBMPEG2_IDCT_PERM 2
#define FF_SIMPLE_IDCT_PERM 3
#define FF_TRANSPOSE_IDCT_PERM 4
int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale);
void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
#define BASIS_SHIFT 16
#define RECON_SHIFT 6
/**
* This function handles any initialization for the VP3 DSP functions.
*/
void (*vp3_dsp_init)(void);
/**
* This function is responsible for taking a block of zigzag'd,
* quantized DCT coefficients, reconstructing the original block of
* samples, and placing it into the output.
* @param input_data 64 zigzag'd, quantized DCT coefficients
* @param dequant_matrix 64 zigzag'd quantizer coefficients
* @param coeff_count index of the last coefficient
* @param dest the final output location where the transformed samples
* are to be placed
* @param stride the width in 8-bit samples of a line on this plane
*/
void (*vp3_idct_put)(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
/**
* This function is responsible for taking a block of zigzag'd,
* quantized DCT coefficients, reconstructing the original block of
* samples, and adding the transformed samples to an existing block of
* samples in the output.
* @param input_data 64 zigzag'd, quantized DCT coefficients
* @param dequant_matrix 64 zigzag'd quantizer coefficients
* @param coeff_count index of the last coefficient
* @param dest the final output location where the transformed samples
* are to be placed
* @param stride the width in 8-bit samples of a line on this plane
*/
void (*vp3_idct_add)(int16_t *input_data, int16_t *dequant_matrix,
int coeff_count, uint8_t *dest, int stride);
} DSPContext;
void dsputil_static_init(void);
//void dsputil_init(DSPContext* p, AVCodecContext *avctx);
/**
* permute block according to permuatation.
* @param last last non zero element in scantable order
*/
void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);
void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);
#define BYTE_VEC32(c) ((c)*0x01010101UL)
static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
{
return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}
static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
{
return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}
/**
* Empty mmx state.
* this must be called between any dsp function and float/double code.
* for example sin(); dsp->idct_put(); emms_c(); cos()
*/
#define emms_c()
/* should be defined by architectures supporting
one or more MultiMedia extension */
int mm_support(void);
#if defined(HAVE_MMX)
#undef emms_c
#define MM_MMX 0x0001 /* standard MMX */
#define MM_3DNOW 0x0004 /* AMD 3DNOW */
#define MM_MMXEXT 0x0002 /* SSE integer functions or AMD MMX ext */
#define MM_SSE 0x0008 /* SSE functions */
#define MM_SSE2 0x0010 /* PIV SSE2 functions */
extern int mm_flags;
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
static inline void emms(void)
{
__asm __volatile ("emms;":::"memory");
}
#define emms_c() \
{\
if (mm_flags & MM_MMX)\
emms();\
}
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_ARMV4L)
/* This is to use 4 bytes read to the IDCT pointers for some 'zero'
line ptimizations */
#define __align8 __attribute__ ((aligned (4)))
void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx);
#elif defined(HAVE_MLIB)
/* SPARC/VIS IDCT needs 8-byte aligned DCT blocks */
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_ALPHA)
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_POWERPC)
#define MM_ALTIVEC 0x0001 /* standard AltiVec */
extern int mm_flags;
#if defined(HAVE_ALTIVEC) && !defined(CONFIG_DARWIN)
#define pixel altivec_pixel
#include <altivec.h>
#undef pixel
#endif
#define __align8 __attribute__ ((aligned (16)))
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
#elif defined(HAVE_MMI)
#define __align8 __attribute__ ((aligned (16)))
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
#elif defined(ARCH_SH4)
#define __align8 __attribute__ ((aligned (8)))
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
#else
#define __align8
#endif
#ifdef __GNUC__
struct unaligned_64 { uint64_t l; } __attribute__((packed));
struct unaligned_32 { uint32_t l; } __attribute__((packed));
struct unaligned_16 { uint16_t l; } __attribute__((packed));
#define LD16(a) (((const struct unaligned_16 *) (a))->l)
#define LD32(a) (((const struct unaligned_32 *) (a))->l)
#define LD64(a) (((const struct unaligned_64 *) (a))->l)
#define ST32(a, b) (((struct unaligned_32 *) (a))->l) = (b)
#else /* __GNUC__ */
#define LD16(a) (*((uint16_t*)(a)))
#define LD32(a) (*((uint32_t*)(a)))
#define LD64(a) (*((uint64_t*)(a)))
#define ST32(a, b) *((uint32_t*)(a)) = (b)
#endif /* !__GNUC__ */
/* PSNR */
//void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3],
// int orig_linesize[3], int coded_linesize,
// AVCodecContext *avctx);
/* FFT computation */
/* NOTE: soon integer code will be added, so you must use the
FFTSample type */
typedef float FFTSample;
typedef struct FFTComplex {
FFTSample re, im;
} FFTComplex;
typedef struct FFTContext {
int nbits;
int inverse;
uint16_t *revtab;
FFTComplex *exptab;
FFTComplex *exptab1; /* only used by SSE code */
void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
} FFTContext;
int fft_init(FFTContext *s, int nbits, int inverse);
void fft_permute(FFTContext *s, FFTComplex *z);
void fft_calc_c(FFTContext *s, FFTComplex *z);
void fft_calc_sse(FFTContext *s, FFTComplex *z);
void fft_calc_altivec(FFTContext *s, FFTComplex *z);
static inline void fft_calc(FFTContext *s, FFTComplex *z)
{
s->fft_calc(s, z);
}
void fft_end(FFTContext *s);
/* MDCT computation */
typedef struct MDCTContext {
int n; /* size of MDCT (i.e. number of input data * 2) */
int nbits; /* n = 2^nbits */
/* pre/post rotation tables */
FFTSample *tcos;
FFTSample *tsin;
FFTContext fft;
} MDCTContext;
int ff_mdct_init(MDCTContext *s, int nbits, int inverse);
void ff_imdct_calc(MDCTContext *s, FFTSample *output,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_calc(MDCTContext *s, FFTSample *out,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_end(MDCTContext *s);
#define WARPER8_16(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
return name8(s, dst , src , stride, h)\
+name8(s, dst+8 , src+8 , stride, h);\
}
#define WARPER8_16_SQ(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
int score=0;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
if(h==16){\
dst += 8*stride;\
src += 8*stride;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
}\
return score;\
}
#ifndef HAVE_LRINTF
/* XXX: add ISOC specific test to avoid specific BSD testing. */
/* better than nothing implementation. */
/* btw, rintf() is existing on fbsd too -- alex */
static inline long int lrintf(float x)
{
#ifdef CONFIG_WIN32
/* XXX: incorrect, but make it compile */
return (int)(x);
#else
return (int)(rint(x));
#endif
}
#endif
#endif

View file

@ -32,6 +32,7 @@ extern gboolean gst_ffmpegcolorspace_register (GstPlugin * plugin);
static gboolean
plugin_init (GstPlugin * plugin)
{
avcodec_init ();
gst_ffmpegcolorspace_register (plugin);
/* Now we can return the pointer to the newly created Plugin object. */

View file

@ -1,7 +1,7 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
* This file:
* Copyright (c) 2002-2003 Ronald Bultje <rbultje@ronald.bitfreak.net>
* Copyright (c) 2002-2004 Ronald Bultje <rbultje@ronald.bitfreak.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@ -28,29 +28,79 @@
#include "gstffmpegcodecmap.h"
/*
* Read a palette from a caps.
*/
static void
gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
{
GstStructure *str = gst_caps_get_structure (caps, 0);
const GValue *palette_v;
const GstBuffer *palette;
/* do we have a palette? */
if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
palette = g_value_get_boxed (palette_v);
if (GST_BUFFER_SIZE (palette) >= 256 * 4) {
if (context->palctrl)
av_free (context->palctrl);
context->palctrl = av_malloc (sizeof (AVPaletteControl));
context->palctrl->palette_changed = 1;
memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
AVPALETTE_SIZE);
}
}
}
/* Write the palette from context->palctrl, if any, into @caps as a
 * "palette_data" buffer field (the inverse of gst_ffmpeg_get_palette). */
static void
gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
{
if (context->palctrl) {
GstBuffer *palette = gst_buffer_new_and_alloc (256 * 4);
/* copies AVPALETTE_SIZE bytes -- assumed equal to the 256 * 4 bytes
 * allocated above; confirm, otherwise part of the buffer stays
 * uninitialized */
memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
AVPALETTE_SIZE);
/* NOTE(review): the boxed value is copied into the caps, and our own
 * ref on @palette does not appear to be released here -- verify for a
 * possible buffer leak */
gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
}
}
/* this macro makes a caps width fixed or unfixed width/height
* properties depending on whether we've got a context.
*
* See below for why we use this.
*
* We should actually do this stuff at the end, like in riff-media.c,
* but I'm too lazy today. Maybe later.
*/
#ifdef G_HAVE_ISO_VARARGS
#define GST_FF_VID_CAPS_NEW(mimetype, ...) \
(context != NULL) ? \
gst_caps_new_simple (mimetype, \
"width", G_TYPE_INT, context->width, \
"height", G_TYPE_INT, context->height, \
"framerate", G_TYPE_DOUBLE, 1. * context->frame_rate / \
context->frame_rate_base, \
__VA_ARGS__, NULL) \
: \
gst_caps_new_simple (mimetype, \
"width", GST_TYPE_INT_RANGE, 16, 4096, \
"height", GST_TYPE_INT_RANGE, 16, 4096, \
"framerate", GST_TYPE_DOUBLE_RANGE, 0., G_MAXDOUBLE, \
__VA_ARGS__)
#elif defined(G_HAVE_GNUC_VARARGS)
#define GST_FF_VID_CAPS_NEW(mimetype, props...) \
gst_caps_new_simple (mimetype, \
"width", GST_TYPE_INT_RANGE, 16, 4096, \
"height", GST_TYPE_INT_RANGE, 16, 4096, \
"framerate", GST_TYPE_DOUBLE_RANGE, 0., G_MAXDOUBLE, \
##props, NULL)
#else
#error FIXME
#endif
__VA_ARGS__, NULL)
/* same for audio - now with channels/sample rate
*/
#define GST_FF_AUD_CAPS_NEW(mimetype, ...) \
(context != NULL) ? \
gst_caps_new_simple (mimetype, \
"rate", G_TYPE_INT, context->sample_rate, \
"channels", G_TYPE_INT, context->channels, \
__VA_ARGS__, NULL) \
: \
gst_caps_new_simple (mimetype, \
__VA_ARGS__, NULL)
/* Convert a FFMPEG Pixel Format and optional AVCodecContext
* to a GstCaps. If the context is ommitted, no fixed values
@ -60,7 +110,7 @@
*/
static GstCaps *
gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt)
gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context)
{
GstCaps *caps = NULL;
@ -130,19 +180,33 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt)
g_mask = 0x03e0;
b_mask = 0x001f;
break;
case PIX_FMT_PAL8:
bpp = depth = 8;
endianness = G_BYTE_ORDER;
break;
default:
/* give up ... */
break;
}
if (bpp != 0) {
caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
"bpp", G_TYPE_INT, bpp,
"depth", G_TYPE_INT, depth,
"red_mask", G_TYPE_INT, r_mask,
"green_mask", G_TYPE_INT, g_mask,
"blue_mask", G_TYPE_INT, b_mask,
"endianness", G_TYPE_INT, endianness, NULL);
if (r_mask != 0) {
caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
"bpp", G_TYPE_INT, bpp,
"depth", G_TYPE_INT, depth,
"red_mask", G_TYPE_INT, r_mask,
"green_mask", G_TYPE_INT, g_mask,
"blue_mask", G_TYPE_INT, b_mask,
"endianness", G_TYPE_INT, endianness, NULL);
} else {
caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
"bpp", G_TYPE_INT, bpp,
"depth", G_TYPE_INT, depth,
"endianness", G_TYPE_INT, endianness, NULL);
if (context) {
gst_ffmpeg_set_palette (caps, context);
}
}
} else if (fmt) {
caps = GST_FF_VID_CAPS_NEW ("video/x-raw-yuv",
"format", GST_TYPE_FOURCC, fmt, NULL);
@ -160,6 +224,52 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt)
return caps;
}
/* Convert a FFMPEG Sample Format and optional AVCodecContext
 * to a GstCaps. If the context is ommitted, no fixed values
 * for video/audio size will be included in the GstCaps
 *
 * See below for usefullness
 */
static GstCaps *
gst_ffmpeg_smpfmt_to_caps (enum SampleFormat sample_fmt,
    AVCodecContext * context)
{
  GstCaps *caps = NULL;
  gint width = 0;
  gboolean is_signed = FALSE;

  /* the only raw sample format we can express is signed 16-bit,
   * native endianness */
  if (sample_fmt == SAMPLE_FMT_S16) {
    width = 16;
    is_signed = TRUE;
  }

  if (width != 0) {
    caps = GST_FF_AUD_CAPS_NEW ("audio/x-raw-int",
        "signed", G_TYPE_BOOLEAN, is_signed,
        "endianness", G_TYPE_INT, G_BYTE_ORDER,
        "width", G_TYPE_INT, width, "depth", G_TYPE_INT, width, NULL);
  }

  if (caps == NULL) {
    GST_WARNING ("No caps found for sample_fmt=%d", sample_fmt);
  } else {
    char *str = gst_caps_to_string (caps);

    GST_DEBUG ("caps for sample_fmt=%d: %s", sample_fmt, str);
    g_free (str);
  }

  return caps;
}
/* Convert a FFMPEG codec Type and optional AVCodecContext
* to a GstCaps. If the context is ommitted, no fixed values
* for video/audio size will be included in the GstCaps
@ -168,60 +278,141 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt)
*/
GstCaps *
gst_ffmpeg_pix_fmt_to_caps (void)
gst_ffmpegcsp_codectype_to_caps (enum CodecType codec_type,
AVCodecContext * context)
{
GstCaps *caps, *temp;
enum PixelFormat i;
GstCaps *caps;
caps = gst_caps_new_empty ();
for (i = 0; i < PIX_FMT_NB; i++) {
temp = gst_ffmpeg_pixfmt_to_caps (i);
if (temp != NULL) {
gst_caps_append (caps, temp);
}
switch (codec_type) {
case CODEC_TYPE_VIDEO:
if (context) {
caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt,
context->width == -1 ? NULL : context);
} else {
GstCaps *temp;
enum PixelFormat i;
caps = gst_caps_new_empty ();
for (i = 0; i < PIX_FMT_NB; i++) {
temp = gst_ffmpeg_pixfmt_to_caps (i, NULL);
if (temp != NULL) {
gst_caps_append (caps, temp);
}
}
}
break;
case CODEC_TYPE_AUDIO:
if (context) {
caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context);
} else {
GstCaps *temp;
enum SampleFormat i;
caps = gst_caps_new_empty ();
for (i = 0; i <= SAMPLE_FMT_S16; i++) {
temp = gst_ffmpeg_smpfmt_to_caps (i, NULL);
if (temp != NULL) {
gst_caps_append (caps, temp);
}
}
}
break;
default:
/* .. */
caps = NULL;
break;
}
return caps;
}
/* Convert a GstCaps (audio/raw) to a FFMPEG SampleFmt
 * and other audio properties in a AVCodecContext.
 *
 * Always fills in channels and sample rate; when @raw is TRUE the
 * sample format is derived from the width/depth/signed/endianness
 * fields as well.
 *
 * For usefullness, see below
 */
static void
gst_ffmpeg_caps_to_smpfmt (const GstCaps * caps,
    AVCodecContext * context, gboolean raw)
{
  GstStructure *structure;
  gint depth = 0, width = 0, endianness = 0;
  gboolean signedness = FALSE;

  g_return_if_fail (gst_caps_get_size (caps) == 1);
  structure = gst_caps_get_structure (caps, 0);

  gst_structure_get_int (structure, "channels", &context->channels);
  gst_structure_get_int (structure, "rate", &context->sample_rate);

  if (!raw)
    return;

  /* "signed" is a G_TYPE_BOOLEAN field in audio/x-raw-int caps, so it
   * must be read with gst_structure_get_boolean (); reading it with
   * _get_int () always fails and the sample format would never be set */
  if (gst_structure_get_int (structure, "width", &width) &&
      gst_structure_get_int (structure, "depth", &depth) &&
      gst_structure_get_boolean (structure, "signed", &signedness) &&
      gst_structure_get_int (structure, "endianness", &endianness)) {
    /* only native-endian signed 16-bit maps onto an ffmpeg sample format */
    if (width == 16 && depth == 16 &&
        endianness == G_BYTE_ORDER && signedness == TRUE) {
      context->sample_fmt = SAMPLE_FMT_S16;
    }
  }
}
/* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
* and other video properties in a AVCodecContext.
*
* For usefullness, see below
*/
enum PixelFormat
gst_ffmpeg_caps_to_pix_fmt (const GstCaps * caps)
static void
gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
AVCodecContext * context, gboolean raw)
{
GstStructure *structure;
enum PixelFormat pix_fmt = PIX_FMT_NB;
gdouble fps;
g_return_val_if_fail (gst_caps_get_size (caps) == 1, PIX_FMT_NB);
g_return_if_fail (gst_caps_get_size (caps) == 1);
structure = gst_caps_get_structure (caps, 0);
gst_structure_get_int (structure, "width", &context->width);
gst_structure_get_int (structure, "height", &context->height);
if (gst_structure_get_double (structure, "framerate", &fps)) {
context->frame_rate = fps * DEFAULT_FRAME_RATE_BASE;
context->frame_rate_base = DEFAULT_FRAME_RATE_BASE;
}
if (!raw)
return;
if (strcmp (gst_structure_get_name (structure), "video/x-raw-yuv") == 0) {
guint32 fourcc;
if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
switch (fourcc) {
case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
pix_fmt = PIX_FMT_YUV422;
context->pix_fmt = PIX_FMT_YUV422;
break;
case GST_MAKE_FOURCC ('I', '4', '2', '0'):
pix_fmt = PIX_FMT_YUV420P;
context->pix_fmt = PIX_FMT_YUV420P;
break;
case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
pix_fmt = PIX_FMT_YUV411P;
context->pix_fmt = PIX_FMT_YUV411P;
break;
case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
pix_fmt = PIX_FMT_YUV422P;
context->pix_fmt = PIX_FMT_YUV422P;
break;
case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
pix_fmt = PIX_FMT_YUV410P;
context->pix_fmt = PIX_FMT_YUV410P;
break;
#if 0
case FIXME:
pix_fmt = PIX_FMT_YUV444P;
context->pix_fmt = PIX_FMT_YUV444P;
break;
#endif
}
@ -231,37 +422,351 @@ gst_ffmpeg_caps_to_pix_fmt (const GstCaps * caps)
gint bpp = 0, rmask = 0, endianness = 0;
if (gst_structure_get_int (structure, "bpp", &bpp) &&
gst_structure_get_int (structure, "endianness", &endianness) &&
gst_structure_get_int (structure, "red_mask", &rmask)) {
switch (bpp) {
case 32:
gst_structure_get_int (structure, "endianness", &endianness)) {
if (gst_structure_get_int (structure, "red_mask", &rmask)) {
switch (bpp) {
case 32:
#if (G_BYTE_ORDER == G_BIG_ENDIAN)
if (rmask == 0x00ff0000)
if (rmask == 0x00ff0000)
#else
if (rmask == 0x0000ff00)
if (rmask == 0x0000ff00)
#endif
pix_fmt = PIX_FMT_RGBA32;
break;
case 24:
if (rmask == 0x0000FF)
pix_fmt = PIX_FMT_BGR24;
else
pix_fmt = PIX_FMT_RGB24;
break;
case 16:
if (endianness == G_BYTE_ORDER)
pix_fmt = PIX_FMT_RGB565;
break;
case 15:
if (endianness == G_BYTE_ORDER)
pix_fmt = PIX_FMT_RGB555;
break;
default:
/* nothing */
break;
context->pix_fmt = PIX_FMT_RGBA32;
break;
case 24:
if (rmask == 0x0000FF)
context->pix_fmt = PIX_FMT_BGR24;
else
context->pix_fmt = PIX_FMT_RGB24;
break;
case 16:
if (endianness == G_BYTE_ORDER)
context->pix_fmt = PIX_FMT_RGB565;
break;
case 15:
if (endianness == G_BYTE_ORDER)
context->pix_fmt = PIX_FMT_RGB555;
break;
default:
/* nothing */
break;
}
} else {
if (bpp == 8) {
context->pix_fmt = PIX_FMT_PAL8;
gst_ffmpeg_get_palette (caps, context);
}
}
}
}
return pix_fmt;
}
/* Convert a GstCaps and a FFMPEG codec Type to a
 * AVCodecContext. If the context is ommitted, no fixed values
 * for video/audio size will be included in the context
 *
 * CodecType is primarily meant for uncompressed data GstCaps!
 */
void
gst_ffmpegcsp_caps_with_codectype (enum CodecType type,
    const GstCaps * caps, AVCodecContext * context)
{
  /* nothing to fill in without a context */
  if (context == NULL)
    return;

  if (type == CODEC_TYPE_VIDEO) {
    gst_ffmpeg_caps_to_pixfmt (caps, context, TRUE);
  } else if (type == CODEC_TYPE_AUDIO) {
    gst_ffmpeg_caps_to_smpfmt (caps, context, TRUE);
  }
  /* any other codec type is left untouched */
}
/*
* Fill in pointers to memory in a AVPicture, where
* everything is aligned by 4 (as required by X).
* This is mostly a copy from imgconvert.c with some
* small changes.
*/
/* color space of a pixel format */
#define FF_COLOR_RGB 0 /* RGB color space */
#define FF_COLOR_GRAY 1 /* gray color space */
#define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
/* storage layout of a pixel format */
#define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
#define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
#define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
/* static description of one pixel format; see pix_fmt_info[] below */
typedef struct PixFmtInfo
{
const char *name;
uint8_t nb_channels; /* number of channels (including alpha) */
uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
uint8_t is_alpha:1; /* true if alpha can be specified */
uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
uint8_t depth; /* bit depth of the color components */
} PixFmtInfo;
/* this table gives more information about formats,
 * indexed by PixelFormat; entries not listed are zero-initialized */
static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
/* YUV formats */
[PIX_FMT_YUV420P] = {
.name = "yuv420p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 1,.y_chroma_shift = 1,
},
[PIX_FMT_YUV422P] = {
.name = "yuv422p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 1,.y_chroma_shift = 0,
},
[PIX_FMT_YUV444P] = {
.name = "yuv444p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
/* packed YUY2: all channels interleaved in one plane */
[PIX_FMT_YUV422] = {
.name = "yuv422",
.nb_channels = 1,
.color_type = FF_COLOR_YUV,
.pixel_type = FF_PIXEL_PACKED,
.depth = 8,
.x_chroma_shift = 1,.y_chroma_shift = 0,
},
[PIX_FMT_YUV410P] = {
.name = "yuv410p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 2,.y_chroma_shift = 2,
},
[PIX_FMT_YUV411P] = {
.name = "yuv411p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 2,.y_chroma_shift = 0,
},
/* JPEG YUV (full 0-255 range) */
[PIX_FMT_YUVJ420P] = {
.name = "yuvj420p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV_JPEG,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 1,.y_chroma_shift = 1,
},
[PIX_FMT_YUVJ422P] = {
.name = "yuvj422p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV_JPEG,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 1,.y_chroma_shift = 0,
},
[PIX_FMT_YUVJ444P] = {
.name = "yuvj444p",
.nb_channels = 3,
.color_type = FF_COLOR_YUV_JPEG,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
/* RGB formats -- .depth is bits per color component, not per pixel */
[PIX_FMT_RGB24] = {
.name = "rgb24",
.nb_channels = 3,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PACKED,
.depth = 8,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
[PIX_FMT_BGR24] = {
.name = "bgr24",
.nb_channels = 3,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PACKED,
.depth = 8,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
[PIX_FMT_RGBA32] = {
.name = "rgba32",
.nb_channels = 4,.is_alpha = 1,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PACKED,
.depth = 8,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
[PIX_FMT_RGB565] = {
.name = "rgb565",
.nb_channels = 3,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PACKED,
.depth = 5,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
/* NOTE(review): nb_channels = 4 / is_alpha = 1 presumably models the
 * spare high bit of RGB555 as transparency, matching ffmpeg's
 * imgconvert.c of the time -- confirm against upstream */
[PIX_FMT_RGB555] = {
.name = "rgb555",
.nb_channels = 4,.is_alpha = 1,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PACKED,
.depth = 5,
.x_chroma_shift = 0,.y_chroma_shift = 0,
},
/* gray / mono formats */
[PIX_FMT_GRAY8] = {
.name = "gray",
.nb_channels = 1,
.color_type = FF_COLOR_GRAY,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 8,
},
[PIX_FMT_MONOWHITE] = {
.name = "monow",
.nb_channels = 1,
.color_type = FF_COLOR_GRAY,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 1,
},
[PIX_FMT_MONOBLACK] = {
.name = "monob",
.nb_channels = 1,
.color_type = FF_COLOR_GRAY,
.pixel_type = FF_PIXEL_PLANAR,
.depth = 1,
},
/* paletted formats -- indexes into a 256-entry RGBA palette */
[PIX_FMT_PAL8] = {
.name = "pal8",
.nb_channels = 4,.is_alpha = 1,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PALETTE,
.depth = 8,
},
};
#define ROUND_UP_4(x) (((x) + 3) & ~3)

/*
 * Fill in the plane pointers and line strides of @picture so that an
 * image of @pix_fmt / @width x @height lives inside the buffer at @ptr,
 * with every stride rounded up to a multiple of 4 (as required by X).
 *
 * Returns the total number of bytes used at @ptr, or -1 for an
 * unsupported pixel format (in which case all data pointers are NULL).
 */
int
gst_ffmpegcsp_avpicture_fill (AVPicture * picture,
    uint8_t * ptr, enum PixelFormat pix_fmt, int width, int height)
{
  int size, w2, h2, size2;
  int stride, stride2;
  PixFmtInfo *pinfo;

  /* only dereferenced in the planar case below */
  pinfo = &pix_fmt_info[pix_fmt];

  switch (pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
      /* planar YUV: one luma plane plus two subsampled chroma planes */
      stride = ROUND_UP_4 (width);
      size = stride * height;
      w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
      stride2 = ROUND_UP_4 (w2);
      h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
      size2 = stride2 * h2;
      picture->data[0] = ptr;
      picture->data[1] = picture->data[0] + size;
      picture->data[2] = picture->data[1] + size2;
      picture->linesize[0] = stride;
      picture->linesize[1] = stride2;
      picture->linesize[2] = stride2;
      return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
      /* packed 3 bytes/pixel */
      stride = ROUND_UP_4 (width * 3);
      size = stride * height;
      picture->data[0] = ptr;
      picture->data[1] = NULL;
      picture->data[2] = NULL;
      picture->linesize[0] = stride;
      return size;
    case PIX_FMT_RGBA32:
      /* packed 4 bytes/pixel; width * 4 is already a multiple of 4 */
      stride = width * 4;
      size = stride * height;
      picture->data[0] = ptr;
      picture->data[1] = NULL;
      picture->data[2] = NULL;
      picture->linesize[0] = stride;
      return size;
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
      /* packed 2 bytes/pixel */
      stride = ROUND_UP_4 (width * 2);
      size = stride * height;
      picture->data[0] = ptr;
      picture->data[1] = NULL;
      picture->data[2] = NULL;
      picture->linesize[0] = stride;
      return size;
    case PIX_FMT_GRAY8:
      stride = ROUND_UP_4 (width);
      size = stride * height;
      picture->data[0] = ptr;
      picture->data[1] = NULL;
      picture->data[2] = NULL;
      picture->linesize[0] = stride;
      return size;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
      /* 1 bit/pixel, packed 8 pixels per byte */
      stride = ROUND_UP_4 ((width + 7) >> 3);
      size = stride * height;
      picture->data[0] = ptr;
      picture->data[1] = NULL;
      picture->data[2] = NULL;
      picture->linesize[0] = stride;
      return size;
    case PIX_FMT_PAL8:
      /* already forced to be with stride, so same result as other function */
      stride = ROUND_UP_4 (width);
      size = stride * height;
      picture->data[0] = ptr;
      picture->data[1] = ptr + size; /* palette is stored here as 256 32 bit words */
      picture->data[2] = NULL;
      picture->linesize[0] = stride;
      picture->linesize[1] = 4;
      return size + 256 * 4;
    default:
      picture->data[0] = NULL;
      picture->data[1] = NULL;
      picture->data[2] = NULL;
      picture->data[3] = NULL;
      return -1;
  }
  /* every case above returns; the original trailing "return 0;" was
   * unreachable and has been dropped */
}

View file

@ -23,14 +23,35 @@
#include <avcodec.h>
#include <gst/gst.h>
/* Template caps */
/*
* _codectype_to_caps () gets the GstCaps that belongs to
* a certain CodecType for a pad with uncompressed data.
*/
GstCaps *
gst_ffmpeg_pix_fmt_to_caps (void);
gst_ffmpegcsp_codectype_to_caps (enum CodecType codec_type,
AVCodecContext *context);
/* Disect a GstCaps */
/*
* caps_with_codectype () transforms a GstCaps that belongs to
* a pad for uncompressed data to a filled-in context.
*/
enum PixelFormat
gst_ffmpeg_caps_to_pix_fmt (const GstCaps *caps);
void
gst_ffmpegcsp_caps_with_codectype (enum CodecType type,
const GstCaps *caps,
AVCodecContext *context);
/*
* Fill in pointers in an AVPicture, aligned by 4 (required by X).
*/
int
gst_ffmpegcsp_avpicture_fill (AVPicture * picture,
uint8_t * ptr,
enum PixelFormat pix_fmt,
int width,
int height);
#endif /* __GST_FFMPEG_CODECMAP_H__ */

View file

@ -31,21 +31,21 @@
GST_DEBUG_CATEGORY (ffmpegcolorspace_debug);
#define GST_CAT_DEFAULT ffmpegcolorspace_debug
#define GST_TYPE_FFMPEGCOLORSPACE \
(gst_ffmpegcolorspace_get_type())
#define GST_FFMPEGCOLORSPACE(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGCOLORSPACE,GstFFMpegColorspace))
#define GST_FFMPEGCOLORSPACE_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGCOLORSPACE,GstFFMpegColorspace))
#define GST_IS_FFMPEGCOLORSPACE(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGCOLORSPACE))
#define GST_IS_FFMPEGCOLORSPACE_CLASS(obj) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGCOLORSPACE))
#define GST_TYPE_FFMPEGCSP \
(gst_ffmpegcsp_get_type())
#define GST_FFMPEGCSP(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGCSP,GstFFMpegCsp))
#define GST_FFMPEGCSP_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGCSP,GstFFMpegCsp))
#define GST_IS_FFMPEGCSP(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGCSP))
#define GST_IS_FFMPEGCSP_CLASS(obj) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGCSP))
typedef struct _GstFFMpegColorspace GstFFMpegColorspace;
typedef struct _GstFFMpegColorspaceClass GstFFMpegColorspaceClass;
typedef struct _GstFFMpegCsp GstFFMpegCsp;
typedef struct _GstFFMpegCspClass GstFFMpegCspClass;
struct _GstFFMpegColorspace
struct _GstFFMpegCsp
{
GstElement element;
@ -55,17 +55,18 @@ struct _GstFFMpegColorspace
gfloat fps;
enum PixelFormat from_pixfmt, to_pixfmt;
AVPicture from_frame, to_frame;
AVPaletteControl *palette;
GstCaps *sinkcaps;
};
struct _GstFFMpegColorspaceClass
struct _GstFFMpegCspClass
{
GstElementClass parent_class;
};
/* elementfactory information */
static GstElementDetails ffmpegcolorspace_details = {
"FFMPEG-based colorspace converter in gst-plugins",
static GstElementDetails ffmpegcsp_details = {
"FFMPEG Colorspace converter",
"Filter/Converter/Video",
"Converts video from one colorspace to another",
"Ronald Bultje <rbultje@ronald.bitfreak.net>",
@ -81,35 +82,34 @@ enum
enum
{
ARG_0
ARG_0,
};
static GType gst_ffmpegcolorspace_get_type (void);
static GType gst_ffmpegcsp_get_type (void);
static void gst_ffmpegcolorspace_base_init (GstFFMpegColorspaceClass * klass);
static void gst_ffmpegcolorspace_class_init (GstFFMpegColorspaceClass * klass);
static void gst_ffmpegcolorspace_init (GstFFMpegColorspace * space);
static void gst_ffmpegcsp_base_init (GstFFMpegCspClass * klass);
static void gst_ffmpegcsp_class_init (GstFFMpegCspClass * klass);
static void gst_ffmpegcsp_init (GstFFMpegCsp * space);
static void gst_ffmpegcolorspace_set_property (GObject * object,
static void gst_ffmpegcsp_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec);
static void gst_ffmpegcolorspace_get_property (GObject * object,
static void gst_ffmpegcsp_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec);
static GstPadLinkReturn
gst_ffmpegcolorspace_pad_link (GstPad * pad, const GstCaps * caps);
gst_ffmpegcsp_pad_link (GstPad * pad, const GstCaps * caps);
static void gst_ffmpegcolorspace_chain (GstPad * pad, GstData * data);
static GstElementStateReturn
gst_ffmpegcolorspace_change_state (GstElement * element);
static void gst_ffmpegcsp_chain (GstPad * pad, GstData * data);
static GstElementStateReturn gst_ffmpegcsp_change_state (GstElement * element);
static GstPadTemplate *srctempl, *sinktempl;
static GstElementClass *parent_class = NULL;
/*static guint gst_ffmpegcolorspace_signals[LAST_SIGNAL] = { 0 }; */
/*static guint gst_ffmpegcsp_signals[LAST_SIGNAL] = { 0 }; */
static GstCaps *
gst_ffmpegcolorspace_caps_remove_format_info (GstCaps * caps)
gst_ffmpegcsp_caps_remove_format_info (GstCaps * caps)
{
int i;
GstStructure *structure;
@ -143,20 +143,20 @@ gst_ffmpegcolorspace_caps_remove_format_info (GstCaps * caps)
}
static GstCaps *
gst_ffmpegcolorspace_getcaps (GstPad * pad)
gst_ffmpegcsp_getcaps (GstPad * pad)
{
GstFFMpegColorspace *space;
GstCaps *othercaps = NULL;
GstFFMpegCsp *space;
GstCaps *othercaps;
GstCaps *caps;
GstPad *otherpad;
space = GST_FFMPEGCOLORSPACE (gst_pad_get_parent (pad));
space = GST_FFMPEGCSP (gst_pad_get_parent (pad));
otherpad = (pad == space->srcpad) ? space->sinkpad : space->srcpad;
othercaps = gst_pad_get_allowed_caps (otherpad);
othercaps = gst_ffmpegcolorspace_caps_remove_format_info (othercaps);
othercaps = gst_ffmpegcsp_caps_remove_format_info (othercaps);
caps = gst_caps_intersect (othercaps, gst_pad_get_pad_template_caps (pad));
gst_caps_free (othercaps);
@ -165,41 +165,44 @@ gst_ffmpegcolorspace_getcaps (GstPad * pad)
}
static GstPadLinkReturn
gst_ffmpegcolorspace_pad_link (GstPad * pad, const GstCaps * caps)
gst_ffmpegcsp_pad_link (GstPad * pad, const GstCaps * caps)
{
GstFFMpegColorspace *space;
GstStructure *structure;
AVCodecContext *ctx;
GstFFMpegCsp *space;
const GstCaps *othercaps;
GstPad *otherpad;
GstPadLinkReturn ret;
enum PixelFormat pix_fmt;
int height, width;
double framerate;
const GValue *par = NULL;
space = GST_FFMPEGCOLORSPACE (gst_pad_get_parent (pad));
space = GST_FFMPEGCSP (gst_pad_get_parent (pad));
GST_DEBUG_OBJECT (space, "pad_link on %s:%s with caps %" GST_PTR_FORMAT,
GST_DEBUG_PAD_NAME (pad), caps);
otherpad = (pad == space->srcpad) ? space->sinkpad : space->srcpad;
structure = gst_caps_get_structure (caps, 0);
gst_structure_get_int (structure, "width", &width);
gst_structure_get_int (structure, "height", &height);
gst_structure_get_double (structure, "framerate", &framerate);
par = gst_structure_get_value (structure, "pixel-aspect-ratio");
if (par) {
GST_DEBUG_OBJECT (space, "setting PAR %d/%d",
gst_value_get_fraction_numerator (par),
gst_value_get_fraction_denominator (par));
}
otherpad = (pad == space->srcpad) ? space->sinkpad : space->srcpad;
/* FIXME attempt and/or check for passthru */
/* loop over all possibilities and select the first one we can convert and
* is accepted by the peer */
pix_fmt = gst_ffmpeg_caps_to_pix_fmt (caps);
if (pix_fmt == PIX_FMT_NB) {
ctx = avcodec_alloc_context ();
ctx->width = width;
ctx->height = height;
ctx->pix_fmt = PIX_FMT_NB;
gst_ffmpegcsp_caps_with_codectype (CODEC_TYPE_VIDEO, caps, ctx);
if (ctx->pix_fmt == PIX_FMT_NB) {
av_free (ctx);
/* we disable ourself here */
if (pad == space->srcpad) {
space->to_pixfmt = PIX_FMT_NB;
@ -213,32 +216,35 @@ gst_ffmpegcolorspace_pad_link (GstPad * pad, const GstCaps * caps)
/* set the size on the otherpad */
othercaps = gst_pad_get_negotiated_caps (otherpad);
if (othercaps) {
GstCaps *newothercaps = gst_caps_copy (othercaps);
GstCaps *caps = gst_caps_copy (othercaps);
gst_caps_set_simple (newothercaps,
gst_caps_set_simple (caps,
"width", G_TYPE_INT, width,
"height", G_TYPE_INT, height,
"framerate", G_TYPE_DOUBLE, framerate, NULL);
if (par) {
GST_DEBUG_OBJECT (space, "setting PAR %d/%d",
gst_value_get_fraction_numerator (par),
gst_value_get_fraction_denominator (par));
gst_caps_set_simple (newothercaps,
gst_caps_set_simple (caps,
"pixel-aspect-ratio", GST_TYPE_FRACTION,
gst_value_get_fraction_numerator (par),
gst_value_get_fraction_denominator (par), NULL);
}
ret = gst_pad_try_set_caps (otherpad, newothercaps);
ret = gst_pad_try_set_caps (otherpad, caps);
if (GST_PAD_LINK_FAILED (ret)) {
return ret;
}
}
if (pad == space->srcpad) {
space->to_pixfmt = pix_fmt;
space->to_pixfmt = ctx->pix_fmt;
} else {
space->from_pixfmt = pix_fmt;
space->from_pixfmt = ctx->pix_fmt;
/* palette */
if (space->palette)
av_free (space->palette);
space->palette = ctx->palctrl;
}
av_free (ctx);
space->width = width;
space->height = height;
@ -247,42 +253,42 @@ gst_ffmpegcolorspace_pad_link (GstPad * pad, const GstCaps * caps)
}
static GType
gst_ffmpegcolorspace_get_type (void)
gst_ffmpegcsp_get_type (void)
{
static GType ffmpegcolorspace_type = 0;
static GType ffmpegcsp_type = 0;
if (!ffmpegcolorspace_type) {
static const GTypeInfo ffmpegcolorspace_info = {
sizeof (GstFFMpegColorspaceClass),
(GBaseInitFunc) gst_ffmpegcolorspace_base_init,
if (!ffmpegcsp_type) {
static const GTypeInfo ffmpegcsp_info = {
sizeof (GstFFMpegCspClass),
(GBaseInitFunc) gst_ffmpegcsp_base_init,
NULL,
(GClassInitFunc) gst_ffmpegcolorspace_class_init,
(GClassInitFunc) gst_ffmpegcsp_class_init,
NULL,
NULL,
sizeof (GstFFMpegColorspace),
sizeof (GstFFMpegCsp),
0,
(GInstanceInitFunc) gst_ffmpegcolorspace_init,
(GInstanceInitFunc) gst_ffmpegcsp_init,
};
ffmpegcolorspace_type = g_type_register_static (GST_TYPE_ELEMENT,
"GstFFMpegColorspace", &ffmpegcolorspace_info, 0);
ffmpegcsp_type = g_type_register_static (GST_TYPE_ELEMENT,
"GstFFMpegColorspace", &ffmpegcsp_info, 0);
}
return ffmpegcolorspace_type;
return ffmpegcsp_type;
}
static void
gst_ffmpegcolorspace_base_init (GstFFMpegColorspaceClass * klass)
gst_ffmpegcsp_base_init (GstFFMpegCspClass * klass)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
gst_element_class_add_pad_template (element_class, srctempl);
gst_element_class_add_pad_template (element_class, sinktempl);
gst_element_class_set_details (element_class, &ffmpegcolorspace_details);
gst_element_class_set_details (element_class, &ffmpegcsp_details);
}
static void
gst_ffmpegcolorspace_class_init (GstFFMpegColorspaceClass * klass)
gst_ffmpegcsp_class_init (GstFFMpegCspClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
@ -292,61 +298,54 @@ gst_ffmpegcolorspace_class_init (GstFFMpegColorspaceClass * klass)
parent_class = g_type_class_ref (GST_TYPE_ELEMENT);
gobject_class->set_property = gst_ffmpegcolorspace_set_property;
gobject_class->get_property = gst_ffmpegcolorspace_get_property;
gobject_class->set_property = gst_ffmpegcsp_set_property;
gobject_class->get_property = gst_ffmpegcsp_get_property;
gstelement_class->change_state = gst_ffmpegcolorspace_change_state;
gstelement_class->change_state = gst_ffmpegcsp_change_state;
GST_DEBUG_CATEGORY_INIT (ffmpegcolorspace_debug, "ffmpegcolorspace", 0,
"FFMPEG-based colorspace converter");
}
static void
gst_ffmpegcolorspace_init (GstFFMpegColorspace * space)
gst_ffmpegcsp_init (GstFFMpegCsp * space)
{
space->sinkpad = gst_pad_new_from_template (sinktempl, "sink");
gst_pad_set_link_function (space->sinkpad, gst_ffmpegcolorspace_pad_link);
gst_pad_set_getcaps_function (space->sinkpad, gst_ffmpegcolorspace_getcaps);
gst_pad_set_chain_function (space->sinkpad, gst_ffmpegcolorspace_chain);
gst_pad_set_link_function (space->sinkpad, gst_ffmpegcsp_pad_link);
gst_pad_set_getcaps_function (space->sinkpad, gst_ffmpegcsp_getcaps);
gst_pad_set_chain_function (space->sinkpad, gst_ffmpegcsp_chain);
gst_element_add_pad (GST_ELEMENT (space), space->sinkpad);
space->srcpad = gst_pad_new_from_template (srctempl, "src");
gst_element_add_pad (GST_ELEMENT (space), space->srcpad);
gst_pad_set_link_function (space->srcpad, gst_ffmpegcolorspace_pad_link);
gst_pad_set_getcaps_function (space->srcpad, gst_ffmpegcolorspace_getcaps);
gst_pad_set_link_function (space->srcpad, gst_ffmpegcsp_pad_link);
gst_pad_set_getcaps_function (space->srcpad, gst_ffmpegcsp_getcaps);
space->from_pixfmt = space->to_pixfmt = PIX_FMT_NB;
space->palette = NULL;
}
static void
gst_ffmpegcolorspace_chain (GstPad * pad, GstData * data)
gst_ffmpegcsp_chain (GstPad * pad, GstData * data)
{
GstBuffer *inbuf = GST_BUFFER (data);
GstFFMpegColorspace *space;
GstFFMpegCsp *space;
GstBuffer *outbuf = NULL;
g_return_if_fail (pad != NULL);
g_return_if_fail (GST_IS_PAD (pad));
g_return_if_fail (inbuf != NULL);
space = GST_FFMPEGCOLORSPACE (gst_pad_get_parent (pad));
space = GST_FFMPEGCSP (gst_pad_get_parent (pad));
g_return_if_fail (space != NULL);
g_return_if_fail (GST_IS_FFMPEGCOLORSPACE (space));
g_return_if_fail (GST_IS_FFMPEGCSP (space));
if (!GST_PAD_IS_USABLE (space->srcpad)) {
gst_buffer_unref (inbuf);
return;
}
if (!gst_pad_is_negotiated (space->srcpad)) {
if (GST_PAD_LINK_FAILED (gst_pad_renegotiate (space->srcpad))) {
GST_ELEMENT_ERROR (space, CORE, NEGOTIATION, (NULL), GST_ERROR_SYSTEM);
gst_buffer_unref (inbuf);
return;
}
}
if (space->from_pixfmt == PIX_FMT_NB || space->to_pixfmt == PIX_FMT_NB) {
GST_ELEMENT_ERROR (space, CORE, NOT_IMPLEMENTED, (NULL),
("attempting to convert colorspaces between unknown formats"));
@ -357,32 +356,23 @@ gst_ffmpegcolorspace_chain (GstPad * pad, GstData * data)
if (space->from_pixfmt == space->to_pixfmt) {
outbuf = inbuf;
} else {
/* use bufferpool here */
AVPicture *from_p, *to_p;
#define ROUND_UP_4(x) (((x) + 3) & ~3)
guint size = avpicture_get_size (space->to_pixfmt,
space->width,
space->height);
GST_LOG_OBJECT (space, "convert from format %d, %dx%d, buffer size %d",
space->from_pixfmt, space->width, space->height,
GST_BUFFER_SIZE (inbuf));
GST_LOG_OBJECT (space, "convert to format %d, %dx%d, buffer size %d",
space->to_pixfmt, space->width, space->height, size);
ROUND_UP_4 (space->width), ROUND_UP_4 (space->height));
outbuf = gst_pad_alloc_buffer (space->srcpad, GST_BUFFER_OFFSET_NONE, size);
/* convert */
#define ROUND_UP_4(x) (((x) + 3) & ~3)
from_p = &(space->from_frame);
avpicture_fill (from_p, GST_BUFFER_DATA (inbuf),
gst_ffmpegcsp_avpicture_fill (&space->from_frame,
GST_BUFFER_DATA (inbuf),
space->from_pixfmt, space->width, space->height);
to_p = &(space->to_frame);
avpicture_fill (to_p, GST_BUFFER_DATA (outbuf),
if (space->palette)
space->from_frame.data[1] = (uint8_t *) space->palette;
gst_ffmpegcsp_avpicture_fill (&space->to_frame,
GST_BUFFER_DATA (outbuf),
space->to_pixfmt, space->width, space->height);
img_convert (to_p, space->to_pixfmt, from_p, space->from_pixfmt,
space->width, space->height);
img_convert (&space->to_frame, space->to_pixfmt,
&space->from_frame, space->from_pixfmt, space->width, space->height);
GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (inbuf);
@ -394,14 +384,17 @@ gst_ffmpegcolorspace_chain (GstPad * pad, GstData * data)
}
static GstElementStateReturn
gst_ffmpegcolorspace_change_state (GstElement * element)
gst_ffmpegcsp_change_state (GstElement * element)
{
GstFFMpegColorspace *space;
GstFFMpegCsp *space;
space = GST_FFMPEGCOLORSPACE (element);
space = GST_FFMPEGCSP (element);
switch (GST_STATE_TRANSITION (element)) {
case GST_STATE_PAUSED_TO_READY:
if (space->palette)
av_free (space->palette);
space->palette = NULL;
break;
}
@ -412,14 +405,14 @@ gst_ffmpegcolorspace_change_state (GstElement * element)
}
static void
gst_ffmpegcolorspace_set_property (GObject * object,
gst_ffmpegcsp_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec)
{
GstFFMpegColorspace *space;
GstFFMpegCsp *space;
/* it's not null if we got it, but it might not be ours */
g_return_if_fail (GST_IS_FFMPEGCOLORSPACE (object));
space = GST_FFMPEGCOLORSPACE (object);
g_return_if_fail (GST_IS_FFMPEGCSP (object));
space = GST_FFMPEGCSP (object);
switch (prop_id) {
default:
@ -428,14 +421,14 @@ gst_ffmpegcolorspace_set_property (GObject * object,
}
static void
gst_ffmpegcolorspace_get_property (GObject * object,
gst_ffmpegcsp_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec)
{
GstFFMpegColorspace *space;
GstFFMpegCsp *space;
/* it's not null if we got it, but it might not be ours */
g_return_if_fail (GST_IS_FFMPEGCOLORSPACE (object));
space = GST_FFMPEGCOLORSPACE (object);
g_return_if_fail (GST_IS_FFMPEGCSP (object));
space = GST_FFMPEGCSP (object);
switch (prop_id) {
default:
@ -450,14 +443,15 @@ gst_ffmpegcolorspace_register (GstPlugin * plugin)
GstCaps *caps;
/* template caps */
caps = gst_ffmpeg_pix_fmt_to_caps ();
caps = gst_ffmpegcsp_codectype_to_caps (CODEC_TYPE_VIDEO, NULL);
/* build templates */
srctempl = gst_pad_template_new ("src",
GST_PAD_SRC, GST_PAD_ALWAYS, gst_caps_copy (caps));
/* the sink template will do palette handling as well... */
sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, caps);
avcodec_init ();
return gst_element_register (plugin, "ffmpegcolorspace",
GST_RANK_PRIMARY, GST_TYPE_FFMPEGCOLORSPACE);
GST_RANK_NONE, GST_TYPE_FFMPEGCSP);
}

File diff suppressed because it is too large Load diff

View file

@ -18,7 +18,7 @@
*/
#ifndef RGB_OUT
#define RGB_OUT(d, r, g, b) RGBA_OUT(d, r, g, b, 0xffU)
#define RGB_OUT(d, r, g, b) RGBA_OUT(d, r, g, b, 0xff)
#endif
static void glue(yuv420p_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,

View file

@ -22,14 +22,19 @@
* default memory allocator for libavcodec.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "avcodec.h"
/* here we can use OS dependent allocation functions */
#undef malloc
#undef free
#undef realloc
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
/* you can redefine av_malloc and av_free in your project to use your
memory allocator. You do not need to suppress this file because the
linker will do it automatically */
@ -44,7 +49,14 @@ av_malloc (unsigned int size)
{
void *ptr;
#if defined (HAVE_MEMALIGN)
#ifdef MEMALIGN_HACK
int diff;
ptr = malloc (size + 16 + 1);
diff = ((-(int) ptr - 1) & 15) + 1;
ptr += diff;
((char *) ptr)[-1] = diff;
#elif defined (HAVE_MEMALIGN)
ptr = memalign (16, size);
/* Why 64?
Indeed, we should align it:
@ -86,7 +98,17 @@ av_malloc (unsigned int size)
/* Resize a block previously obtained from av_malloc()/av_realloc().
 * Returns the (possibly moved) block, or NULL on failure.
 * NOTE(review): under MEMALIGN_HACK the alignment offset stored in
 * ptr[-1] is carried over unchanged, so the grown block keeps the old
 * offset but is NOT re-aligned to 16 bytes. */
void *
av_realloc (void *ptr, unsigned int size)
{
#ifdef MEMALIGN_HACK
/* FIXME: result is not re-aligned to 16 bytes, though alignment is
 * probably not needed for realloc'd blocks */
int diff;
if (!ptr)
return av_malloc (size);
/* recover the alignment offset av_malloc() stashed just before the block */
diff = ((char *) ptr)[-1];
return realloc (ptr - diff, size + diff) + diff;
#else
return realloc (ptr, size);
#endif
}
/* NOTE: ptr = NULL is explicitly allowed */
@ -95,5 +117,9 @@ av_free (void *ptr)
{
/* XXX: this test should not be needed on most libcs */
if (ptr)
#ifdef MEMALIGN_HACK
free (ptr - ((char *) ptr)[-1]);
#else
free (ptr);
#endif
}

View file

@ -1,243 +0,0 @@
/*
* mmx.h
* Copyright (C) 1997-2001 H. Dietz and R. Fisher
*/
#ifndef AVCODEC_I386MMX_H
#define AVCODEC_I386MMX_H
/*
* The type of a value that fits in an MMX register (note that long
* long constant values MUST be suffixed by LL and unsigned long long
* values by ULL, lest they be truncated by the compiler)
*/
typedef union {
long long q; /* Quadword (64-bit) value */
unsigned long long uq; /* Unsigned Quadword */
int d[2]; /* 2 Doubleword (32-bit) values */
unsigned int ud[2]; /* 2 Unsigned Doubleword */
short w[4]; /* 4 Word (16-bit) values */
unsigned short uw[4]; /* 4 Unsigned Word */
char b[8]; /* 8 Byte (8-bit) values */
unsigned char ub[8]; /* 8 Unsigned Byte */
float s[2]; /* Single-precision (32-bit) value */
} mmx_t; /* On an 8-byte (64-bit) boundary */
#define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
: "i" (imm) )
#define mmx_m2r(op,mem,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
: "m" (mem))
#define mmx_r2m(op,reg,mem) \
__asm__ __volatile__ (#op " %%" #reg ", %0" \
: "=m" (mem) \
: /* nothing */ )
#define mmx_r2r(op,regs,regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd)
#define emms() __asm__ __volatile__ ("emms")
#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
#define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd)
#define movq_m2r(var,reg) mmx_m2r (movq, var, reg)
#define movq_r2m(reg,var) mmx_r2m (movq, reg, var)
#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd)
#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg)
#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd)
#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg)
#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd)
#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg)
#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd)
#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg)
#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd)
#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg)
#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd)
#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg)
#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd)
#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg)
#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd)
#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg)
#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd)
#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg)
#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd)
#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg)
#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd)
#define pand_m2r(var,reg) mmx_m2r (pand, var, reg)
#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd)
#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg)
#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd)
#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg)
#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd)
#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg)
#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd)
#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg)
#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd)
#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg)
#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd)
#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg)
#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd)
#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg)
#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd)
#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg)
#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd)
#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg)
#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd)
#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg)
#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd)
#define por_m2r(var,reg) mmx_m2r (por, var, reg)
#define por_r2r(regs,regd) mmx_r2r (por, regs, regd)
#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg)
#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg)
#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd)
#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg)
#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg)
#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd)
#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg)
#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg)
#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd)
#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg)
#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg)
#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd)
#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg)
#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg)
#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd)
#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg)
#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg)
#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd)
#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg)
#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg)
#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd)
#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg)
#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg)
#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd)
#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg)
#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd)
#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg)
#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd)
#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg)
#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd)
#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg)
#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd)
#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg)
#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd)
#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg)
#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd)
#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg)
#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd)
#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg)
#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd)
#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg)
#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd)
#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg)
#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd)
#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg)
#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd)
#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg)
#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd)
#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg)
#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd)
#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
/* 3DNOW extensions */
#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
/* AMD MMX extensions - also available in intel SSE */
#define mmx_m2ri(op,mem,reg,imm) \
__asm__ __volatile__ (#op " %1, %0, %%" #reg \
: /* nothing */ \
: "X" (mem), "X" (imm))
#define mmx_r2ri(op,regs,regd,imm) \
__asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
: /* nothing */ \
: "X" (imm) )
#define mmx_fetch(mem,hint) \
__asm__ __volatile__ ("prefetch" #hint " %0" \
: /* nothing */ \
: "X" (mem))
#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg)
#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd)
#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg)
#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd)
#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm)
#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm)
#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg)
#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd)
#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg)
#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd)
#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg)
#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd)
#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg)
#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)
#define pmovmskb(mmreg,reg) \
__asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg)
#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)
#define prefetcht0(mem) mmx_fetch (mem, t0)
#define prefetcht1(mem) mmx_fetch (mem, t1)
#define prefetcht2(mem) mmx_fetch (mem, t2)
#define prefetchnta(mem) mmx_fetch (mem, nta)
#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg)
#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd)
#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
#define sfence() __asm__ __volatile__ ("sfence\n\t")
#endif /* AVCODEC_I386MMX_H */

View file

@ -24,12 +24,126 @@
* utils.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "avcodec.h"
#include "dsputil.h"
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <limits.h>
/* Allocate a block of the requested size with av_malloc() and clear it
 * to all-zero bytes.  Returns NULL when the underlying allocation
 * fails; release the result with av_free(). */
void *
av_mallocz (unsigned int size)
{
void *mem = av_malloc (size);

if (mem != NULL)
memset (mem, 0, size);
return mem;
}
/**
 * Duplicate the string s into memory obtained from av_malloc().
 * @param s string to copy; NULL is tolerated (original invoked
 *          strlen(NULL), which is undefined behavior)
 * @return newly allocated copy, or NULL if s is NULL or the
 *         allocation fails; release with av_free()
 */
char *
av_strdup (const char *s)
{
char *ptr;
int len;

/* robustness: a NULL source yields NULL instead of crashing */
if (!s)
return NULL;
len = strlen (s) + 1;           /* include the terminating NUL */
ptr = av_malloc (len);
if (!ptr)
return NULL;
memcpy (ptr, s, len);
return ptr;
}
/**
 * realloc which does nothing if the block is large enough.
 * @param ptr existing block (may be NULL)
 * @param size in: current allocated size in bytes; out: new size
 * @param min_size minimum capacity the caller requires
 * @return the (possibly moved) block, or NULL on allocation failure
 * Grows geometrically (~min_size*17/16 + 32) so repeated small
 * enlargements amortize to O(1) reallocations per element.
 */
void *
av_fast_realloc (void *ptr, unsigned int *size, unsigned int min_size)
{
/* '<=' so an exactly-large-enough buffer is not needlessly
 * reallocated (original used '<' and grew on equality) */
if (min_size <= *size)
return ptr;
*size = 17 * min_size / 16 + 32;
return av_realloc (ptr, *size);
}
/* Registry of blocks handed out by av_mallocz_static(), released en
 * masse by av_free_static().  last_static counts entries in use,
 * allocated_static is the registry's byte capacity as tracked by
 * av_fast_realloc().  NOTE(review): access is not thread-safe —
 * confirm all callers run during single-threaded init. */
static unsigned int last_static = 0;
static unsigned int allocated_static = 0;
static void **array_static = NULL;
/**
 * allocation of static arrays - do not use for normal allocation.
 * The returned zeroed block is recorded so av_free_static() can
 * release it later.
 * @return the new block, or NULL if either the block itself or the
 *         registry cannot be allocated
 */
void *
av_mallocz_static (unsigned int size)
{
void *ptr = av_mallocz (size);

if (ptr) {
void **tmp;

/* grow the registry; on failure release the block instead of
 * dereferencing a NULL registry (original crashed on OOM here) */
tmp = av_fast_realloc (array_static, &allocated_static,
sizeof (void *) * (last_static + 1));
if (!tmp) {
av_free (ptr);
return NULL;
}
array_static = tmp;
array_static[last_static++] = ptr;
}
return ptr;
}
/**
* free all static arrays and reset pointers to 0.
*/
void
av_free_static (void)
{
while (last_static) {
av_freep (&array_static[--last_static]);
}
av_freep (&array_static);
}
/**
 * Frees memory and sets the pointer to NULL.
 * @param arg pointer to the pointer which should be freed
 */
void
av_freep (void *arg)
{
void **slot = (void **) arg;

av_free (*slot);
*slot = NULL;
}
/* Reset the context to its default state: every field zeroed, then a
 * nominal frame rate of 25/1 installed. */
void
avcodec_get_context_defaults (AVCodecContext * s)
{
memset (s, 0, sizeof (AVCodecContext));
s->frame_rate = 25;
s->frame_rate_base = 1;
}
/**
 * allocates a AVCodecContext and set it to defaults.
 * Release the result with av_free(): the block comes from av_malloc(),
 * which under MEMALIGN_HACK is NOT a pointer that plain free() accepts
 * (the original comment's "simply calling free()" was wrong there).
 */
AVCodecContext *
avcodec_alloc_context (void)
{
AVCodecContext *avctx = av_malloc (sizeof (AVCodecContext));
/* propagate allocation failure to the caller */
if (avctx == NULL)
return NULL;
avcodec_get_context_defaults (avctx);
return avctx;
}
/* must be called before any other functions */
void