/* Video */
typedef struct vout_thread_t vout_thread_t;
typedef struct vout_sys_t vout_sys_t;
-typedef struct chroma_sys_t chroma_sys_t;
typedef video_format_t video_frame_format_t;
typedef struct picture_t picture_t;
#ifndef _VLC_ES_H
#define _VLC_ES_H 1
+/* FIXME: i'm not too sure about this include but it fixes compilation of
+ * video chromas -- dionoea */
+#include "vlc_common.h"
+
/**
* \file
* This file defines the elementary streams format types
unsigned int i_frame_rate_base; /**< frame rate denominator */
int i_rmask, i_gmask, i_bmask; /**< color masks for RGB chroma */
+ int i_rrshift, i_lrshift;
+ int i_rgshift, i_lgshift;
+ int i_rbshift, i_lbshift;
video_palette_t *p_palette; /**< video palette from demuxer */
};
config_chain_t * p_cfg;
picture_t * ( * pf_video_filter ) ( filter_t *, picture_t * );
+ void ( * pf_video_filter_io ) ( filter_t *, picture_t *, picture_t * ); /* Used by video filters with a preallocated output buffer (ie chroma conversion modules) */
block_t * ( * pf_audio_filter ) ( filter_t *, block_t * );
void ( * pf_video_blend ) ( filter_t *, picture_t *,
picture_t *, picture_t *,
#define VLC_OBJECT_HTTPD_HOST (-30)
#define VLC_OBJECT_INTERACTION (-32)
+#define VLC_OBJECT_CHROMA (-33)
#define VLC_OBJECT_GENERIC (-666)
/*****************************************************************************
* vlc_video.h: common video definitions
*****************************************************************************
- * Copyright (C) 1999 - 2005 the VideoLAN team
+ * Copyright (C) 1999 - 2008 the VideoLAN team
* $Id$
*
* Authors: Vincent Seguin <seguin@via.ecp.fr>
* @{
*/
-/**
- * Chroma conversion function
- *
- * This is the prototype common to all conversion functions.
- * \param p_vout video output thread
- * \param p_source source picture
- * \param p_dest destination picture
- * Picture width and source dimensions must be multiples of 16.
- */
-typedef void (vout_chroma_convert_t)( vout_thread_t *,
- picture_t *, picture_t * );
-
-typedef struct vout_chroma_t
-{
- /** conversion functions */
- vout_chroma_convert_t *pf_convert;
-
- /** Private module-dependent data */
- chroma_sys_t * p_sys; /* private data */
-
- /** Plugin used and shortcuts to access its capabilities */
- module_t * p_module;
-
-} vout_chroma_t;
-
/** Maximum numbers of video filters2 that can be attached to a vout */
#define MAX_VFILTERS 10
picture_heap_t render; /**< rendered pictures */
picture_heap_t output; /**< direct buffers */
bool b_direct; /**< rendered are like direct ? */
- vout_chroma_t chroma; /**< translation tables */
+ filter_t *p_chroma; /**< translation tables */
video_format_t fmt_render; /* render format (from the decoder) */
video_format_t fmt_in; /* input (modified render) format */
* This structure is part of the chroma transformation descriptor, it
* describes the chroma plugin specific properties.
*****************************************************************************/
-struct chroma_sys_t
+struct filter_sys_t
{
int i_src_vlc_chroma;
int i_src_ffmpeg_chroma;
*****************************************************************************/
int OpenChroma( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
int i_ffmpeg_chroma[2], i_vlc_chroma[2], i;
/*
* Check the source chroma first, then the destination chroma
*/
- i_vlc_chroma[0] = p_vout->render.i_chroma;
- i_vlc_chroma[1] = p_vout->output.i_chroma;
+ i_vlc_chroma[0] = p_filter->fmt_in.video.i_chroma;
+ i_vlc_chroma[1] = p_filter->fmt_out.video.i_chroma;
for( i = 0; i < 2; i++ )
{
i_ffmpeg_chroma[i] = GetFfmpegChroma( i_vlc_chroma[i] );
if( i_ffmpeg_chroma[i] < 0 ) return VLC_EGENERIC;
}
- p_vout->chroma.pf_convert = ChromaConversion;
+ p_filter->pf_video_filter_io = ChromaConversion;
- p_vout->chroma.p_sys = malloc( sizeof( chroma_sys_t ) );
- if( p_vout->chroma.p_sys == NULL )
+ p_filter->p_sys = malloc( sizeof( filter_sys_t ) );
+ if( p_filter->p_sys == NULL )
{
return VLC_ENOMEM;
}
- p_vout->chroma.p_sys->i_src_vlc_chroma = p_vout->render.i_chroma;
- p_vout->chroma.p_sys->i_dst_vlc_chroma = p_vout->output.i_chroma;
- p_vout->chroma.p_sys->i_src_ffmpeg_chroma = i_ffmpeg_chroma[0];
- p_vout->chroma.p_sys->i_dst_ffmpeg_chroma = i_ffmpeg_chroma[1];
+    p_filter->p_sys->i_src_vlc_chroma = p_filter->fmt_in.video.i_chroma;
+    p_filter->p_sys->i_dst_vlc_chroma = p_filter->fmt_out.video.i_chroma;
+ p_filter->p_sys->i_src_ffmpeg_chroma = i_ffmpeg_chroma[0];
+ p_filter->p_sys->i_dst_ffmpeg_chroma = i_ffmpeg_chroma[1];
- if( ( p_vout->render.i_height != p_vout->output.i_height ||
- p_vout->render.i_width != p_vout->output.i_width ) &&
- ( p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('I','4','2','0') ||
- p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ))
+ if( ( p_filter->fmt_in.video.i_height != p_filter->fmt_out.video.i_height ||
+ p_filter->fmt_in.video.i_width != p_filter->fmt_out.video.i_width ) &&
+ ( p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('I','4','2','0') ||
+ p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ))
{
- msg_Dbg( p_vout, "preparing to resample picture" );
- p_vout->chroma.p_sys->p_rsc =
- img_resample_init( p_vout->output.i_width, p_vout->output.i_height,
- p_vout->render.i_width, p_vout->render.i_height );
- avpicture_alloc( &p_vout->chroma.p_sys->tmp_pic,
- p_vout->chroma.p_sys->i_dst_ffmpeg_chroma,
- p_vout->render.i_width, p_vout->render.i_height );
+ msg_Dbg( p_filter, "preparing to resample picture" );
+ p_filter->p_sys->p_rsc =
+ img_resample_init( p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
+ p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height );
+ avpicture_alloc( &p_filter->p_sys->tmp_pic,
+ p_filter->p_sys->i_dst_ffmpeg_chroma,
+ p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height );
}
else
{
- msg_Dbg( p_vout, "no resampling" );
- p_vout->chroma.p_sys->p_rsc = NULL;
+ msg_Dbg( p_filter, "no resampling" );
+ p_filter->p_sys->p_rsc = NULL;
}
/* libavcodec needs to be initialized for some chroma conversions */
/*****************************************************************************
* ChromaConversion: actual chroma conversion function
*****************************************************************************/
-static void ChromaConversion( vout_thread_t *p_vout,
+static void ChromaConversion( filter_t *p_filter,
picture_t *p_src, picture_t *p_dest )
{
AVPicture src_pic;
}
/* Special cases */
- if( p_vout->chroma.p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
- p_vout->chroma.p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
+ if( p_filter->p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
+ p_filter->p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
{
/* Invert U and V */
src_pic.data[1] = p_src->p[2].p_pixels;
src_pic.data[2] = p_src->p[1].p_pixels;
}
- if( p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
- p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
+ if( p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
+ p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
{
/* Invert U and V */
dest_pic.data[1] = p_dest->p[2].p_pixels;
dest_pic.data[2] = p_dest->p[1].p_pixels;
}
- if( p_vout->chroma.p_sys->i_src_ffmpeg_chroma == PIX_FMT_RGB24 )
- if( p_vout->render.i_bmask == 0x00ff0000 )
- p_vout->chroma.p_sys->i_src_ffmpeg_chroma = PIX_FMT_BGR24;
+ if( p_filter->p_sys->i_src_ffmpeg_chroma == PIX_FMT_RGB24 )
+ if( p_filter->fmt_in.video.i_bmask == 0x00ff0000 )
+ p_filter->p_sys->i_src_ffmpeg_chroma = PIX_FMT_BGR24;
- if( p_vout->chroma.p_sys->p_rsc )
+ if( p_filter->p_sys->p_rsc )
{
- img_convert( &p_vout->chroma.p_sys->tmp_pic,
- p_vout->chroma.p_sys->i_dst_ffmpeg_chroma,
- &src_pic, p_vout->chroma.p_sys->i_src_ffmpeg_chroma,
- p_vout->render.i_width, p_vout->render.i_height );
- img_resample( p_vout->chroma.p_sys->p_rsc, &dest_pic,
- &p_vout->chroma.p_sys->tmp_pic );
+ img_convert( &p_filter->p_sys->tmp_pic,
+ p_filter->p_sys->i_dst_ffmpeg_chroma,
+ &src_pic, p_filter->p_sys->i_src_ffmpeg_chroma,
+ p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height );
+ img_resample( p_filter->p_sys->p_rsc, &dest_pic,
+ &p_filter->p_sys->tmp_pic );
}
else
{
- img_convert( &dest_pic, p_vout->chroma.p_sys->i_dst_ffmpeg_chroma,
- &src_pic, p_vout->chroma.p_sys->i_src_ffmpeg_chroma,
- p_vout->render.i_width, p_vout->render.i_height );
+ img_convert( &dest_pic, p_filter->p_sys->i_dst_ffmpeg_chroma,
+ &src_pic, p_filter->p_sys->i_src_ffmpeg_chroma,
+ p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height );
}
}
*****************************************************************************/
void CloseChroma( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
- if( p_vout->chroma.p_sys->p_rsc )
+ filter_t *p_filter = (filter_t *)p_this;
+ if( p_filter->p_sys->p_rsc )
{
- img_resample_close( p_vout->chroma.p_sys->p_rsc );
- avpicture_free( &p_vout->chroma.p_sys->tmp_pic );
+ img_resample_close( p_filter->p_sys->p_rsc );
+ avpicture_free( &p_filter->p_sys->tmp_pic );
}
- free( p_vout->chroma.p_sys );
+ free( p_filter->p_sys );
}
-#else
-
-static void ChromaConversion( vout_thread_t *, picture_t *, picture_t * );
-
-/*****************************************************************************
- * chroma_sys_t: chroma method descriptor
- *****************************************************************************
- * This structure is part of the chroma transformation descriptor, it
- * describes the chroma plugin specific properties.
- *****************************************************************************/
-struct chroma_sys_t
-{
- filter_t *p_swscaler;
-};
-
-/*****************************************************************************
- * Video Filter2 functions
- *****************************************************************************/
-struct filter_owner_sys_t
-{
- vout_thread_t *p_vout;
-};
-
-static void PictureRelease( picture_t *p_pic )
-{
- free( p_pic->p_data_orig );
-}
-
-static picture_t *video_new_buffer_filter( filter_t *p_filter )
-{
- picture_t *p_picture = malloc( sizeof(picture_t) );
- if( !p_picture ) return NULL;
- if( vout_AllocatePicture( p_filter, p_picture,
- p_filter->fmt_out.video.i_chroma,
- p_filter->fmt_out.video.i_width,
- p_filter->fmt_out.video.i_height,
- p_filter->fmt_out.video.i_aspect )
- != VLC_SUCCESS )
- {
- free( p_picture );
- return NULL;
- }
-
- p_picture->pf_release = PictureRelease;
-
- return p_picture;
-}
-
-static void video_del_buffer_filter( filter_t *p_filter, picture_t *p_pic )
-{
- VLC_UNUSED(p_filter);
- if( p_pic )
- {
- free( p_pic->p_data_orig );
- free( p_pic );
- }
-}
-
-/*****************************************************************************
- * OpenChroma: allocate a chroma function
- *****************************************************************************
- * This function allocates and initializes a chroma function
- *****************************************************************************/
-int OpenChroma( vlc_object_t *p_this )
-{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
- chroma_sys_t *p_sys = p_vout->chroma.p_sys;
-
- p_vout->chroma.p_sys = p_sys = malloc( sizeof( chroma_sys_t ) );
- if( p_vout->chroma.p_sys == NULL )
- {
- return VLC_ENOMEM;
- }
- p_vout->chroma.pf_convert = ChromaConversion;
-
- p_sys->p_swscaler = vlc_object_create( p_vout, VLC_OBJECT_FILTER );
- vlc_object_attach( p_sys->p_swscaler, p_vout );
-
- p_sys->p_swscaler->pf_vout_buffer_new = video_new_buffer_filter;
- p_sys->p_swscaler->pf_vout_buffer_del = video_del_buffer_filter;
-
- p_sys->p_swscaler->fmt_out.video.i_x_offset =
- p_sys->p_swscaler->fmt_out.video.i_y_offset = 0;
- p_sys->p_swscaler->fmt_in.video = p_vout->fmt_in;
- p_sys->p_swscaler->fmt_out.video = p_vout->fmt_out;
- p_sys->p_swscaler->fmt_out.video.i_aspect = p_vout->render.i_aspect;
- p_sys->p_swscaler->fmt_in.video.i_chroma = p_vout->render.i_chroma;
- p_sys->p_swscaler->fmt_out.video.i_chroma = p_vout->output.i_chroma;
-
- p_sys->p_swscaler->p_module = module_Need( p_sys->p_swscaler,
- "video filter2", 0, 0 );
-
- if( p_sys->p_swscaler->p_module )
- {
- p_sys->p_swscaler->p_owner =
- malloc( sizeof( filter_owner_sys_t ) );
- if( p_sys->p_swscaler->p_owner )
- p_sys->p_swscaler->p_owner->p_vout = p_vout;
- }
-
- if( !p_sys->p_swscaler->p_module || !p_sys->p_swscaler->p_owner )
- {
- vlc_object_detach( p_sys->p_swscaler );
- vlc_object_release( p_sys->p_swscaler );
- free( p_vout->chroma.p_sys );
- return VLC_EGENERIC;
- }
-
- return VLC_SUCCESS;
-}
-
-/*****************************************************************************
- * ChromaConversion: actual chroma conversion function
- *****************************************************************************/
-static void ChromaConversion( vout_thread_t *p_vout,
- picture_t *p_src, picture_t *p_dest )
-{
- chroma_sys_t *p_sys = (chroma_sys_t *) p_vout->chroma.p_sys;
-
- if( p_sys && p_src && p_dest &&
- p_sys->p_swscaler && p_sys->p_swscaler->p_module )
- {
- picture_t *p_pic;
-
- p_sys->p_swscaler->fmt_in.video = p_vout->fmt_in;
- p_sys->p_swscaler->fmt_out.video = p_vout->fmt_out;
-
-#if 0
- msg_Dbg( p_vout, "chroma %4.4s (%d) to %4.4s (%d)",
- (char *)&p_vout->fmt_in.i_chroma, p_src->i_planes,
- (char *)&p_vout->fmt_out.i_chroma, p_dest->i_planes );
#endif
- p_pic = p_sys->p_swscaler->pf_vout_buffer_new( p_sys->p_swscaler );
- if( p_pic )
- {
- picture_t *p_dst_pic;
- vout_CopyPicture( p_vout, p_pic, p_src );
- p_dst_pic = p_sys->p_swscaler->pf_video_filter( p_sys->p_swscaler, p_pic );
- vout_CopyPicture( p_vout, p_dest, p_dst_pic );
- p_dst_pic->pf_release( p_dst_pic );
- }
- }
-}
-
-/*****************************************************************************
- * CloseChroma: free the chroma function
- *****************************************************************************
- * This function frees the previously allocated chroma function
- *****************************************************************************/
-void CloseChroma( vlc_object_t *p_this )
-{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
- chroma_sys_t *p_sys = (chroma_sys_t *)p_vout->chroma.p_sys;
- if( p_sys->p_swscaler && p_sys->p_swscaler->p_module )
- {
- free( p_sys->p_swscaler->p_owner );
- module_Unneed( p_sys->p_swscaler, p_sys->p_swscaler->p_module );
- vlc_object_detach( p_sys->p_swscaler );
- vlc_object_release( p_sys->p_swscaler );
- p_sys->p_swscaler= NULL;
- }
- free( p_vout->chroma.p_sys );
-}
-
-#endif /* !defined(HAVE_LIBSWSCALE_SWSCALE_H) && !defined(HAVE_FFMPEG_SWSCALE_H) */
set_capability( "crop padd", 10 );
set_callbacks( OpenCropPadd, CloseFilter );
set_description( N_("FFmpeg crop padd filter") );
-#endif
/* chroma conversion submodule */
add_submodule();
set_capability( "chroma", 50 );
set_callbacks( OpenChroma, CloseChroma );
set_description( N_("FFmpeg chroma conversion") );
+#endif
/* video filter submodule */
add_submodule();
/*****************************************************************************
* chain.c : chain multiple chroma modules as a last resort solution
*****************************************************************************
- * Copyright (C) 2007 the VideoLAN team
+ * Copyright (C) 2007-2008 the VideoLAN team
* $Id$
*
* Authors: Antoine Cellerier <dionoea at videolan dot org>
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
/*****************************************************************************
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void Destroy ( vlc_object_t * );
-static void Chain ( vout_thread_t *, picture_t *, picture_t * );
+static void Chain ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
#define MAX_CHROMAS 2
-struct chroma_sys_t
+struct filter_sys_t
{
vlc_fourcc_t i_chroma;
- vout_chroma_t chroma1;
- vout_chroma_t chroma2;
+ filter_t *p_chroma1;
+ filter_t *p_chroma2;
- picture_t *p_tmp;
+ picture_t *p_tmp;
};
static const vlc_fourcc_t pi_allowed_chromas[] = {
VLC_FOURCC('I','4','2','0'),
VLC_FOURCC('I','4','2','2'),
+ VLC_FOURCC('R','V','3','2'),
+ VLC_FOURCC('R','V','2','4'),
0
};
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
+#if 0
static int hack = 1;
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
hack++;
if( hack > MAX_CHROMAS )
return VLC_EGENERIC;
}
- chroma_sys_t *p_sys = (chroma_sys_t *)malloc( sizeof( chroma_sys_t ) );
+ filter_sys_t *p_sys = (filter_sys_t *)malloc( sizeof( filter_sys_t ) );
if( !p_sys )
{
hack--;
return VLC_ENOMEM;
}
- memset( p_sys, 0, sizeof( chroma_sys_t ) );
+ memset( p_sys, 0, sizeof( filter_sys_t ) );
int i;
- vlc_fourcc_t i_output_chroma = p_vout->output.i_chroma;
- vlc_fourcc_t i_render_chroma = p_vout->render.i_chroma;
+    vlc_fourcc_t i_output_chroma = p_filter->fmt_out.video.i_chroma;
+    vlc_fourcc_t i_render_chroma = p_filter->fmt_in.video.i_chroma;
for( i = 0; pi_allowed_chromas[i]; i++ )
{
- msg_Warn( p_vout, "Trying %4s as a chroma chain",
+        msg_Warn( p_filter, "Trying %4.4s as a chroma chain",
(const char *)&pi_allowed_chromas[i] );
- p_vout->output.i_chroma = pi_allowed_chromas[i];
- p_vout->chroma.p_module = module_Need( p_vout, "chroma", NULL, 0 );
- p_vout->output.i_chroma = i_output_chroma;
+        p_filter->fmt_out.video.i_chroma = pi_allowed_chromas[i];
+        p_filter->p_sys->p_chroma1->p_module = module_Need( p_filter, "chroma", NULL, 0 );
+        p_filter->fmt_out.video.i_chroma = i_output_chroma;
         if( !p_filter->p_sys->p_chroma1->p_module )
             continue;
free( p_sys );
hack--;
+#endif
return VLC_EGENERIC;
}
static void Destroy( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+#if 0
+ filter_t *p_filter = (filter_t *)p_this;
vout_chroma_t chroma = p_vout->chroma;
-
p_vout->chroma = chroma.p_sys->chroma1;
module_Unneed( p_vout, p_vout->chroma.p_module );
p_vout->chroma = chroma.p_sys->chroma2;
}
free( chroma.p_sys );
chroma.p_sys = NULL;
+#endif
}
/*****************************************************************************
* Chain
*****************************************************************************/
-static void Chain( vout_thread_t *p_vout, picture_t *p_source,
+static void Chain( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
+#if 0
chroma_sys_t *p_sys = p_vout->chroma.p_sys;
if( !p_sys->p_tmp )
p_vout->chroma = p_sys->chroma2;
p_sys->chroma2.pf_convert( p_vout, p_sys->p_tmp, p_dest );
p_vout->chroma = chroma;
+#endif
}
/*****************************************************************************
* grey_yuv.c : grayscale to others conversion module for vlc
*****************************************************************************
- * Copyright (C) 2007 the VideoLAN team
+ * Copyright (C) 2007, 2008 the VideoLAN team
* $Id$
*
* Authors: Sam Hocevar <sam@zoy.org>
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "GREY"
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void GREY_I420 ( vout_thread_t *, picture_t *, picture_t * );
-static void GREY_YUY2 ( vout_thread_t *, picture_t *, picture_t * );
+static void GREY_I420( filter_t *, picture_t *, picture_t * );
+static void GREY_YUY2( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor.
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_out.video.i_width & 1
+ || p_filter->fmt_out.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','8','0','0'):
- p_vout->render.i_chroma = VLC_FOURCC('G','R','E','Y');
+ p_filter->fmt_in.video.i_chroma = VLC_FOURCC('G','R','E','Y');
case VLC_FOURCC('G','R','E','Y'):
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','0'):
- p_vout->chroma.pf_convert = GREY_I420;
+ p_filter->pf_video_filter_io = GREY_I420;
break;
case VLC_FOURCC('Y','U','Y','2'):
- p_vout->chroma.pf_convert = GREY_YUY2;
+ p_filter->pf_video_filter_io = GREY_YUY2;
break;
default:
return -1;
/*****************************************************************************
* GREY_I420: 8-bit grayscale to planar YUV 4:2:0
*****************************************************************************/
-static void GREY_I420( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void GREY_I420( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
uint8_t *p_y = p_dest->Y_PIXELS;
const int i_dest_margin_c = p_dest->p[1].i_pitch
- p_dest->p[1].i_visible_pitch;
- for( i_y = p_vout->render.i_height / 2; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2; i_y-- ; )
{
memset(p_u, 0x80, p_dest->p[1].i_visible_pitch);
p_u += i_dest_margin_c;
p_v += i_dest_margin_c;
}
- for( i_y = p_vout->render.i_height; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8; i_x-- ; )
{
*p_y++ = *p_line++; *p_y++ = *p_line++;
*p_y++ = *p_line++; *p_y++ = *p_line++;
*p_y++ = *p_line++; *p_y++ = *p_line++;
}
- for( i_x = p_vout->render.i_width % 8; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width % 8; i_x-- ; )
{
*p_y++ = *p_line++;
}
/*****************************************************************************
* GREY_YUY2: 8-bit grayscale to packed YUY2
*****************************************************************************/
-static void GREY_YUY2( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void GREY_YUY2( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_in = p_source->p->p_pixels;
uint8_t *p_out = p_dest->p->p_pixels;
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
- for( i_y = p_vout->render.i_height; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8; i_x-- ; )
{
*p_out++ = *p_in++; *p_out++ = 0x80;
*p_out++ = *p_in++; *p_out++ = 0x80;
*p_out++ = *p_in++; *p_out++ = 0x80;
}
- for( i_x = (p_vout->render.i_width % 8) / 2; i_x-- ; )
+ for( i_x = (p_filter->fmt_out.video.i_width % 8) / 2; i_x-- ; )
{
*p_out++ = *p_in++; *p_out++ = 0x80;
*p_out++ = *p_in++; *p_out++ = 0x80;
/*****************************************************************************
* i420_rgb.c : YUV to bitmap RGB conversion module for vlc
*****************************************************************************
- * Copyright (C) 2000, 2001, 2004 the VideoLAN team
+ * Copyright (C) 2000, 2001, 2004, 2008 the VideoLAN team
* $Id$
*
* Authors: Sam Hocevar <sam@zoy.org>
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i420_rgb.h"
/*****************************************************************************
* RGB2PIXEL: assemble RGB components to a pixel value, returns a uint32_t
*****************************************************************************/
-#define RGB2PIXEL( p_vout, i_r, i_g, i_b ) \
- (((((uint32_t)i_r) >> p_vout->output.i_rrshift) \
- << p_vout->output.i_lrshift) \
- | ((((uint32_t)i_g) >> p_vout->output.i_rgshift) \
- << p_vout->output.i_lgshift) \
- | ((((uint32_t)i_b) >> p_vout->output.i_rbshift) \
- << p_vout->output.i_lbshift))
+#define RGB2PIXEL( p_filter, i_r, i_g, i_b ) \
+ (((((uint32_t)i_r) >> p_filter->fmt_out.video.i_rrshift) \
+ << p_filter->fmt_out.video.i_lrshift) \
+ | ((((uint32_t)i_g) >> p_filter->fmt_out.video.i_rgshift) \
+ << p_filter->fmt_out.video.i_lgshift) \
+ | ((((uint32_t)i_b) >> p_filter->fmt_out.video.i_rbshift) \
+ << p_filter->fmt_out.video.i_lbshift))
/*****************************************************************************
* Local and extern prototypes.
#if defined (MODULE_NAME_IS_i420_rgb)
static void SetGammaTable ( int *pi_table, double f_gamma );
-static void SetYUV ( vout_thread_t * );
-static void Set8bppPalette ( vout_thread_t *, uint8_t * );
+static void SetYUV ( filter_t * );
+static void Set8bppPalette ( filter_t *, uint8_t * );
#endif
/*****************************************************************************
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
#if defined (MODULE_NAME_IS_i420_rgb)
size_t i_tables_size;
#endif
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_out.video.i_width & 1
+ || p_filter->fmt_out.video.i_height & 1 )
{
- return -1;
+ return VLC_EGENERIC;
}
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','V','1','2'):
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
#if defined (MODULE_NAME_IS_i420_rgb)
case VLC_FOURCC('R','G','B','2'):
- p_vout->chroma.pf_convert = I420_RGB8;
+ p_filter->pf_video_filter_io = I420_RGB8;
break;
#endif
case VLC_FOURCC('R','V','1','5'):
case VLC_FOURCC('R','V','1','6'):
#if ! defined (MODULE_NAME_IS_i420_rgb)
/* If we don't have support for the bitmasks, bail out */
- if( ( p_vout->output.i_rmask == 0x7c00
- && p_vout->output.i_gmask == 0x03e0
- && p_vout->output.i_bmask == 0x001f ) )
+ if( ( p_filter->fmt_out.video.i_rmask == 0x7c00
+ && p_filter->fmt_out.video.i_gmask == 0x03e0
+ && p_filter->fmt_out.video.i_bmask == 0x001f ) )
{
/* R5G5B6 pixel format */
msg_Dbg(p_this, "RGB pixel format is R5G5B5");
- p_vout->chroma.pf_convert = I420_R5G5B5;
+ p_filter->pf_video_filter_io = I420_R5G5B5;
}
- else if( ( p_vout->output.i_rmask == 0xf800
- && p_vout->output.i_gmask == 0x07e0
- && p_vout->output.i_bmask == 0x001f ) )
+ else if( ( p_filter->fmt_out.video.i_rmask == 0xf800
+ && p_filter->fmt_out.video.i_gmask == 0x07e0
+ && p_filter->fmt_out.video.i_bmask == 0x001f ) )
{
/* R5G6B5 pixel format */
msg_Dbg(p_this, "RGB pixel format is R5G6B5");
- p_vout->chroma.pf_convert = I420_R5G6B5;
+ p_filter->pf_video_filter_io = I420_R5G6B5;
}
else
- return -1;
+ return VLC_EGENERIC;
#else
// generic C chroma converter */
- p_vout->chroma.pf_convert = I420_RGB16;
+ p_filter->pf_video_filter_io = I420_RGB16;
#endif
break;
case VLC_FOURCC('R','V','3','2'):
#if ! defined (MODULE_NAME_IS_i420_rgb)
/* If we don't have support for the bitmasks, bail out */
- if( p_vout->output.i_rmask == 0x00ff0000
- && p_vout->output.i_gmask == 0x0000ff00
- && p_vout->output.i_bmask == 0x000000ff )
+ if( p_filter->fmt_out.video.i_rmask == 0x00ff0000
+ && p_filter->fmt_out.video.i_gmask == 0x0000ff00
+ && p_filter->fmt_out.video.i_bmask == 0x000000ff )
{
/* A8R8G8B8 pixel format */
msg_Dbg(p_this, "RGB pixel format is A8R8G8B8");
- p_vout->chroma.pf_convert = I420_A8R8G8B8;
+ p_filter->pf_video_filter_io = I420_A8R8G8B8;
}
- else if( p_vout->output.i_rmask == 0xff000000
- && p_vout->output.i_gmask == 0x00ff0000
- && p_vout->output.i_bmask == 0x0000ff00 )
+ else if( p_filter->fmt_out.video.i_rmask == 0xff000000
+ && p_filter->fmt_out.video.i_gmask == 0x00ff0000
+ && p_filter->fmt_out.video.i_bmask == 0x0000ff00 )
{
/* R8G8B8A8 pixel format */
msg_Dbg(p_this, "RGB pixel format is R8G8B8A8");
- p_vout->chroma.pf_convert = I420_R8G8B8A8;
+ p_filter->pf_video_filter_io = I420_R8G8B8A8;
}
- else if( p_vout->output.i_rmask == 0x0000ff00
- && p_vout->output.i_gmask == 0x00ff0000
- && p_vout->output.i_bmask == 0xff000000 )
+ else if( p_filter->fmt_out.video.i_rmask == 0x0000ff00
+ && p_filter->fmt_out.video.i_gmask == 0x00ff0000
+ && p_filter->fmt_out.video.i_bmask == 0xff000000 )
{
/* B8G8R8A8 pixel format */
msg_Dbg(p_this, "RGB pixel format is B8G8R8A8");
- p_vout->chroma.pf_convert = I420_B8G8R8A8;
+ p_filter->pf_video_filter_io = I420_B8G8R8A8;
}
- else if( p_vout->output.i_rmask == 0x000000ff
- && p_vout->output.i_gmask == 0x0000ff00
- && p_vout->output.i_bmask == 0x00ff0000 )
+ else if( p_filter->fmt_out.video.i_rmask == 0x000000ff
+ && p_filter->fmt_out.video.i_gmask == 0x0000ff00
+ && p_filter->fmt_out.video.i_bmask == 0x00ff0000 )
{
/* A8B8G8R8 pixel format */
msg_Dbg(p_this, "RGB pixel format is A8B8G8R8");
- p_vout->chroma.pf_convert = I420_A8B8G8R8;
+ p_filter->pf_video_filter_io = I420_A8B8G8R8;
}
else
- return -1;
+ return VLC_EGENERIC;
#else
/* generic C chroma converter */
- p_vout->chroma.pf_convert = I420_RGB32;
+ p_filter->pf_video_filter_io = I420_RGB32;
#endif
break;
default:
- return -1;
+ return VLC_EGENERIC;
}
break;
default:
- return -1;
+ return VLC_EGENERIC;
}
- p_vout->chroma.p_sys = malloc( sizeof( chroma_sys_t ) );
- if( p_vout->chroma.p_sys == NULL )
+ p_filter->p_sys = malloc( sizeof( filter_sys_t ) );
+ if( p_filter->p_sys == NULL )
{
- return -1;
+ return VLC_EGENERIC;
}
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
#if defined (MODULE_NAME_IS_i420_rgb)
case VLC_FOURCC('R','G','B','2'):
- p_vout->chroma.p_sys->p_buffer = malloc( VOUT_MAX_WIDTH );
+ p_filter->p_sys->p_buffer = malloc( VOUT_MAX_WIDTH );
break;
#endif
case VLC_FOURCC('R','V','1','5'):
case VLC_FOURCC('R','V','1','6'):
- p_vout->chroma.p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 2 );
+ p_filter->p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 2 );
break;
case VLC_FOURCC('R','V','2','4'):
case VLC_FOURCC('R','V','3','2'):
- p_vout->chroma.p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 4 );
+ p_filter->p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 4 );
break;
default:
- p_vout->chroma.p_sys->p_buffer = NULL;
+ p_filter->p_sys->p_buffer = NULL;
break;
}
- if( p_vout->chroma.p_sys->p_buffer == NULL )
+ if( p_filter->p_sys->p_buffer == NULL )
{
- free( p_vout->chroma.p_sys );
- return -1;
+ free( p_filter->p_sys );
+ return VLC_EGENERIC;
}
- p_vout->chroma.p_sys->p_offset = malloc( p_vout->output.i_width
- * ( ( p_vout->output.i_chroma
+ p_filter->p_sys->p_offset = malloc( p_filter->fmt_out.video.i_width
+ * ( ( p_filter->fmt_out.video.i_chroma
== VLC_FOURCC('R','G','B','2') ) ? 2 : 1 )
* sizeof( int ) );
- if( p_vout->chroma.p_sys->p_offset == NULL )
+ if( p_filter->p_sys->p_offset == NULL )
{
- free( p_vout->chroma.p_sys->p_buffer );
- free( p_vout->chroma.p_sys );
- return -1;
+ free( p_filter->p_sys->p_buffer );
+ free( p_filter->p_sys );
+ return VLC_EGENERIC;
}
#if defined (MODULE_NAME_IS_i420_rgb)
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('R','G','B','2'):
i_tables_size = sizeof( uint8_t ) * PALETTE_TABLE_SIZE;
break;
}
- p_vout->chroma.p_sys->p_base = malloc( i_tables_size );
- if( p_vout->chroma.p_sys->p_base == NULL )
+ p_filter->p_sys->p_base = malloc( i_tables_size );
+ if( p_filter->p_sys->p_base == NULL )
{
- free( p_vout->chroma.p_sys->p_offset );
- free( p_vout->chroma.p_sys->p_buffer );
- free( p_vout->chroma.p_sys );
+ free( p_filter->p_sys->p_offset );
+ free( p_filter->p_sys->p_buffer );
+ free( p_filter->p_sys );
return -1;
}
- SetYUV( p_vout );
+ SetYUV( p_filter );
#endif
return 0;
*****************************************************************************/
static void Deactivate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
#if defined (MODULE_NAME_IS_i420_rgb)
- free( p_vout->chroma.p_sys->p_base );
+ free( p_filter->p_sys->p_base );
#endif
- free( p_vout->chroma.p_sys->p_offset );
- free( p_vout->chroma.p_sys->p_buffer );
- free( p_vout->chroma.p_sys );
+ free( p_filter->p_sys->p_offset );
+ free( p_filter->p_sys->p_buffer );
+ free( p_filter->p_sys );
}
#if defined (MODULE_NAME_IS_i420_rgb)
/*****************************************************************************
* SetYUV: compute tables and set function pointers
*****************************************************************************/
-static void SetYUV( vout_thread_t *p_vout )
+static void SetYUV( filter_t *p_filter )
{
int pi_gamma[256]; /* gamma table */
volatile int i_index; /* index in tables */
* optimization bug */
/* Build gamma table */
- SetGammaTable( pi_gamma, p_vout->f_gamma );
+ SetGammaTable( pi_gamma, 0 ); //p_filter/*FIXME wasn't used anywhere anyway*/->f_gamma );
/*
* Set pointers and build YUV tables
*/
/* Color: build red, green and blue tables */
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('R','G','B','2'):
- p_vout->chroma.p_sys->p_rgb8 = (uint8_t *)p_vout->chroma.p_sys->p_base;
- Set8bppPalette( p_vout, p_vout->chroma.p_sys->p_rgb8 );
+ p_filter->p_sys->p_rgb8 = (uint8_t *)p_filter->p_sys->p_base;
+ Set8bppPalette( p_filter, p_filter->p_sys->p_rgb8 );
break;
case VLC_FOURCC('R','V','1','5'):
case VLC_FOURCC('R','V','1','6'):
- p_vout->chroma.p_sys->p_rgb16 = (uint16_t *)p_vout->chroma.p_sys->p_base;
+ p_filter->p_sys->p_rgb16 = (uint16_t *)p_filter->p_sys->p_base;
for( i_index = 0; i_index < RED_MARGIN; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb16[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_vout, pi_gamma[0], 0, 0 );
- p_vout->chroma.p_sys->p_rgb16[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, pi_gamma[255], 0, 0 );
+ p_filter->p_sys->p_rgb16[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_filter, pi_gamma[0], 0, 0 );
+ p_filter->p_sys->p_rgb16[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, pi_gamma[255], 0, 0 );
}
for( i_index = 0; i_index < GREEN_MARGIN; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb16[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[0], 0 );
- p_vout->chroma.p_sys->p_rgb16[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[255], 0 );
+ p_filter->p_sys->p_rgb16[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[0], 0 );
+ p_filter->p_sys->p_rgb16[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[255], 0 );
}
for( i_index = 0; i_index < BLUE_MARGIN; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb16[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[0] );
- p_vout->chroma.p_sys->p_rgb16[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[255] );
+ p_filter->p_sys->p_rgb16[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[0] );
+ p_filter->p_sys->p_rgb16[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[255] );
}
for( i_index = 0; i_index < 256; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb16[RED_OFFSET + i_index] = RGB2PIXEL( p_vout, pi_gamma[ i_index ], 0, 0 );
- p_vout->chroma.p_sys->p_rgb16[GREEN_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[ i_index ], 0 );
- p_vout->chroma.p_sys->p_rgb16[BLUE_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[ i_index ] );
+ p_filter->p_sys->p_rgb16[RED_OFFSET + i_index] = RGB2PIXEL( p_filter, pi_gamma[ i_index ], 0, 0 );
+ p_filter->p_sys->p_rgb16[GREEN_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[ i_index ], 0 );
+ p_filter->p_sys->p_rgb16[BLUE_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[ i_index ] );
}
break;
case VLC_FOURCC('R','V','2','4'):
case VLC_FOURCC('R','V','3','2'):
- p_vout->chroma.p_sys->p_rgb32 = (uint32_t *)p_vout->chroma.p_sys->p_base;
+ p_filter->p_sys->p_rgb32 = (uint32_t *)p_filter->p_sys->p_base;
for( i_index = 0; i_index < RED_MARGIN; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb32[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_vout, pi_gamma[0], 0, 0 );
- p_vout->chroma.p_sys->p_rgb32[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, pi_gamma[255], 0, 0 );
+ p_filter->p_sys->p_rgb32[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_filter, pi_gamma[0], 0, 0 );
+ p_filter->p_sys->p_rgb32[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, pi_gamma[255], 0, 0 );
}
for( i_index = 0; i_index < GREEN_MARGIN; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb32[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[0], 0 );
- p_vout->chroma.p_sys->p_rgb32[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[255], 0 );
+ p_filter->p_sys->p_rgb32[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[0], 0 );
+ p_filter->p_sys->p_rgb32[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[255], 0 );
}
for( i_index = 0; i_index < BLUE_MARGIN; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb32[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[0] );
- p_vout->chroma.p_sys->p_rgb32[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[255] );
+ p_filter->p_sys->p_rgb32[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[0] );
+ p_filter->p_sys->p_rgb32[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[255] );
}
for( i_index = 0; i_index < 256; i_index++ )
{
- p_vout->chroma.p_sys->p_rgb32[RED_OFFSET + i_index] = RGB2PIXEL( p_vout, pi_gamma[ i_index ], 0, 0 );
- p_vout->chroma.p_sys->p_rgb32[GREEN_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[ i_index ], 0 );
- p_vout->chroma.p_sys->p_rgb32[BLUE_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[ i_index ] );
+ p_filter->p_sys->p_rgb32[RED_OFFSET + i_index] = RGB2PIXEL( p_filter, pi_gamma[ i_index ], 0, 0 );
+ p_filter->p_sys->p_rgb32[GREEN_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[ i_index ], 0 );
+ p_filter->p_sys->p_rgb32[BLUE_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[ i_index ] );
}
break;
}
}
-static void Set8bppPalette( vout_thread_t *p_vout, uint8_t *p_rgb8 )
+static void Set8bppPalette( filter_t *p_filter, uint8_t *p_rgb8 )
{
#define CLIP( x ) ( ((x < 0) ? 0 : (x > 255) ? 255 : x) << 8 )
int y,u,v;
int r,g,b;
int i = 0, j = 0;
- uint16_t *p_cmap_r=p_vout->chroma.p_sys->p_rgb_r;
- uint16_t *p_cmap_g=p_vout->chroma.p_sys->p_rgb_g;
- uint16_t *p_cmap_b=p_vout->chroma.p_sys->p_rgb_b;
+ uint16_t *p_cmap_r = p_filter->p_sys->p_rgb_r;
+ uint16_t *p_cmap_g = p_filter->p_sys->p_rgb_g;
+ uint16_t *p_cmap_b = p_filter->p_sys->p_rgb_b;
unsigned char p_lookup[PALETTE_TABLE_SIZE];
* fscked up my code */
if( j == 256 )
{
- msg_Err( p_vout, "no colors left in palette" );
+ msg_Err( p_filter, "no colors left in palette" );
break;
}
}
/* The colors have been allocated, we can set the palette */
- p_vout->output.pf_setpalette( p_vout, p_cmap_r, p_cmap_g, p_cmap_b );
+ /* FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME
+ p_filter->fmt_out.video.pf_setpalette( p_filter, p_cmap_r, p_cmap_g, p_cmap_b );*/
#if 0
/* There will eventually be a way to know which colors
#define CMAP_RGB2_SIZE 256
/**
- * chroma_sys_t: chroma method descriptor
+ * filter_sys_t: chroma method descriptor
* This structure is part of the chroma transformation descriptor, it
* describes the yuv2rgb specific properties.
*/
-struct chroma_sys_t
+struct filter_sys_t
{
uint8_t *p_buffer;
int *p_offset;
* Prototypes
*****************************************************************************/
#ifdef MODULE_NAME_IS_i420_rgb
-void I420_RGB8 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_RGB16_dither ( vout_thread_t *, picture_t *, picture_t * );
-void I420_RGB16 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_RGB32 ( vout_thread_t *, picture_t *, picture_t * );
+void I420_RGB8 ( filter_t *, picture_t *, picture_t * );
+void I420_RGB16_dither ( filter_t *, picture_t *, picture_t * );
+void I420_RGB16 ( filter_t *, picture_t *, picture_t * );
+void I420_RGB32 ( filter_t *, picture_t *, picture_t * );
#else // if defined(MODULE_NAME_IS_i420_rgb_mmx)
-void I420_R5G5B5 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_R5G6B5 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_A8R8G8B8 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_R8G8B8A8 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_B8G8R8A8 ( vout_thread_t *, picture_t *, picture_t * );
-void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
+void I420_R5G5B5 ( filter_t *, picture_t *, picture_t * );
+void I420_R5G6B5 ( filter_t *, picture_t *, picture_t * );
+void I420_A8R8G8B8 ( filter_t *, picture_t *, picture_t * );
+void I420_R8G8B8A8 ( filter_t *, picture_t *, picture_t * );
+void I420_B8G8R8A8 ( filter_t *, picture_t *, picture_t * );
+void I420_A8B8G8R8 ( filter_t *, picture_t *, picture_t * );
#endif
/*****************************************************************************
* Rewind buffer and offset, then copy and scale line */ \
p_buffer = p_buffer_start; \
p_offset = p_offset_start; \
- for( i_x = p_vout->output.i_width / 16; i_x--; ) \
+ for( i_x = p_filter->fmt_out.video.i_width / 16; i_x--; ) \
{ \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
} \
- for( i_x = p_vout->output.i_width & 15; i_x--; ) \
+ for( i_x = p_filter->fmt_out.video.i_width & 15; i_x--; ) \
{ \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
} \
{ \
/* Horizontal scaling - we can't use a buffer due to dithering */ \
p_offset = p_offset_start; \
- for( i_x = p_vout->output.i_width / 16; i_x--; ) \
+ for( i_x = p_filter->fmt_out.video.i_width / 16; i_x--; ) \
{ \
CONVERT_4YUV_PIXEL_SCALE( CHROMA ) \
CONVERT_4YUV_PIXEL_SCALE( CHROMA ) \
} \
else \
{ \
- for( i_x = p_vout->render.i_width / 16; i_x--; ) \
+ for( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; ) \
{ \
CONVERT_4YUV_PIXEL( CHROMA ) \
CONVERT_4YUV_PIXEL( CHROMA ) \
switch( i_vscale ) \
{ \
case -1: /* vertical scaling factor is < 1 */ \
- while( (i_scale_count -= p_vout->output.i_height) > 0 ) \
+ while( (i_scale_count -= p_filter->fmt_out.video.i_height) > 0 ) \
{ \
/* Height reduction: skip next source line */ \
- p_y += p_vout->render.i_width; \
+ p_y += p_filter->fmt_in.video.i_width; \
i_y++; \
if( (CHROMA == 420) || (CHROMA == 422) ) \
{ \
} \
else if( CHROMA == 444 ) \
{ \
- p_u += p_vout->render.i_width; \
- p_v += p_vout->render.i_width; \
+ p_u += p_filter->fmt_in.video.i_width; \
+ p_v += p_filter->fmt_in.video.i_width; \
} \
} \
- i_scale_count += p_vout->render.i_height; \
+ i_scale_count += p_filter->fmt_in.video.i_height; \
break; \
case 1: /* vertical scaling factor is > 1 */ \
- while( (i_scale_count -= p_vout->render.i_height) > 0 ) \
+ while( (i_scale_count -= p_filter->fmt_in.video.i_height) > 0 ) \
{ \
/* Height increment: copy previous picture line */ \
- vlc_memcpy( p_pic, p_pic_start, p_vout->output.i_width * BPP ); \
+ vlc_memcpy( p_pic, p_pic_start, p_filter->fmt_out.video.i_width * BPP ); \
p_pic = (void*)((uint8_t*)p_pic + p_dest->p->i_pitch ); \
} \
- i_scale_count += p_vout->output.i_height; \
+ i_scale_count += p_filter->fmt_out.video.i_height; \
break; \
} \
switch( i_vscale ) \
{ \
case -1: /* vertical scaling factor is < 1 */ \
- while( (i_scale_count -= p_vout->output.i_height) > 0 ) \
+ while( (i_scale_count -= p_filter->fmt_out.video.i_height) > 0 ) \
{ \
/* Height reduction: skip next source line */ \
- p_y += p_vout->render.i_width; \
+ p_y += p_filter->fmt_in.video.i_width; \
i_y++; \
if( (CHROMA == 420) || (CHROMA == 422) ) \
{ \
} \
else if( CHROMA == 444 ) \
{ \
- p_u += p_vout->render.i_width; \
- p_v += p_vout->render.i_width; \
+ p_u += p_filter->fmt_in.video.i_width; \
+ p_v += p_filter->fmt_in.video.i_width; \
} \
} \
- i_scale_count += p_vout->render.i_height; \
+ i_scale_count += p_filter->fmt_in.video.i_height; \
break; \
case 1: /* vertical scaling factor is > 1 */ \
- while( (i_scale_count -= p_vout->render.i_height) > 0 ) \
+ while( (i_scale_count -= p_filter->fmt_in.video.i_height) > 0 ) \
{ \
- p_y -= p_vout->render.i_width; \
+ p_y -= p_filter->fmt_in.video.i_width; \
p_u -= i_chroma_width; \
p_v -= i_chroma_width; \
SCALE_WIDTH_DITHER( CHROMA ); \
} \
- i_scale_count += p_vout->output.i_height; \
+ i_scale_count += p_filter->fmt_out.video.i_height; \
break; \
} \
# include "config.h"
#endif
-#include <vlc_common.h>
+#include <vlc/vlc.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i420_rgb.h"
* - input: 2 lines (2 Y lines, 1 U/V line)
* - output: 1 line
*****************************************************************************/
-void I420_RGB16_dither( vout_thread_t *p_vout, picture_t *p_src,
- picture_t *p_dest )
+void I420_RGB16_dither( filter_t *p_filter, picture_t *p_src,
+ picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
int i_uval, i_vval; /* U and V samples */
int i_red, i_green, i_blue; /* U and V modified samples */
- uint16_t * p_yuv = p_vout->chroma.p_sys->p_rgb16;
- uint16_t * p_ybase; /* Y dependent conversion table */
+ uint16_t * p_yuv = p_filter->p_sys->p_rgb16;
+ uint16_t * p_ybase; /* Y dependent conversion table */
/* Conversion buffer pointer */
- uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
+ uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
for(i_x = 0; i_x < 4; i_x++)
{
- dither10[i_x] = dither10[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
- dither11[i_x] = dither11[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
- dither12[i_x] = dither12[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
- dither13[i_x] = dither13[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
+ dither10[i_x] = dither10[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
+ dither11[i_x] = dither11[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
+ dither12[i_x] = dither12[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
+ dither13[i_x] = dither13[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
}
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
i_real_y = i_y & 0x3;
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
int *p_dither = dither10;
CONVERT_YUV_PIXEL_DITHER(2);
#if defined (MODULE_NAME_IS_i420_rgb)
-void I420_RGB16( vout_thread_t *p_vout, picture_t *p_src,
- picture_t *p_dest )
+void I420_RGB16( filter_t *p_filter, picture_t *p_src,
+ picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
int i_uval, i_vval; /* U and V samples */
int i_red, i_green, i_blue; /* U and V modified samples */
- uint16_t * p_yuv = p_vout->chroma.p_sys->p_rgb16;
- uint16_t * p_ybase; /* Y dependent conversion table */
+ uint16_t * p_yuv = p_filter->p_sys->p_rgb16;
+ uint16_t * p_ybase; /* Y dependent conversion table */
/* Conversion buffer pointer */
- uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
+ uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
CONVERT_YUV_PIXEL(2); CONVERT_Y_PIXEL(2);
CONVERT_YUV_PIXEL(2); CONVERT_Y_PIXEL(2);
#else // ! defined (MODULE_NAME_IS_i420_rgb)
-void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
- picture_t *p_dest )
+void I420_R5G5B5( filter_t *p_filter, picture_t *p_src,
+ picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
- uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
+ uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
- if( p_vout->render.i_width & 15 )
+ if( p_filter->fmt_in.video.i_width & 15 )
{
- i_rewind = 16 - ( p_vout->render.i_width & 15 );
+ i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
- for ( i_x = p_vout->render.i_width/16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_16_ALIGNED
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width/16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_16_UNALIGNED
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_16
#endif
}
-void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
- picture_t *p_dest )
+void I420_R5G6B5( filter_t *p_filter, picture_t *p_src,
+ picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
- uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
+ uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
- if( p_vout->render.i_width & 15 )
+ if( p_filter->fmt_in.video.i_width & 15 )
{
- i_rewind = 16 - ( p_vout->render.i_width & 15 );
+ i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
- for ( i_x = p_vout->render.i_width/16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_16_ALIGNED
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width/16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL(
SSE2_INIT_16_UNALIGNED
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_16
#if defined (MODULE_NAME_IS_i420_rgb)
-void I420_RGB32( vout_thread_t *p_vout, picture_t *p_src,
- picture_t *p_dest )
+void I420_RGB32( filter_t *p_filter, picture_t *p_src,
+ picture_t *p_dest )
{
/* We got this one from the old arguments */
uint32_t *p_pic = (uint32_t*)p_dest->p->p_pixels;
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
int i_uval, i_vval; /* U and V samples */
int i_red, i_green, i_blue; /* U and V modified samples */
- uint32_t * p_yuv = p_vout->chroma.p_sys->p_rgb32;
- uint32_t * p_ybase; /* Y dependent conversion table */
+ uint32_t * p_yuv = p_filter->p_sys->p_rgb32;
+ uint32_t * p_ybase; /* Y dependent conversion table */
/* Conversion buffer pointer */
- uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
+ uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
CONVERT_YUV_PIXEL(4); CONVERT_Y_PIXEL(4);
CONVERT_YUV_PIXEL(4); CONVERT_Y_PIXEL(4);
#else // defined (MODULE_NAME_IS_i420_rgb_mmx) || defined (MODULE_NAME_IS_i420_rgb_sse2)
-void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
+void I420_A8R8G8B8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
- uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
+ uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
- if( p_vout->render.i_width & 15 )
+ if( p_filter->fmt_in.video.i_width & 15 )
{
- i_rewind = 16 - ( p_vout->render.i_width & 15 );
+ i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
#endif
}
-void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
+void I420_R8G8B8A8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
- uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
+ uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
- if( p_vout->render.i_width & 15 )
+ if( p_filter->fmt_in.video.i_width & 15 )
{
- i_rewind = 16 - ( p_vout->render.i_width & 15 );
+ i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
#endif
}
-void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
+void I420_B8G8R8A8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
- uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
+ uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
- if( p_vout->render.i_width & 15 )
+ if( p_filter->fmt_in.video.i_width & 15 )
{
- i_rewind = 16 - ( p_vout->render.i_width & 15 );
+ i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
#else
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
#endif
}
-void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
+void I420_A8B8G8R8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
- int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
+ int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
- uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
+ uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
- if( p_vout->render.i_width & 15 )
+ if( p_filter->fmt_in.video.i_width & 15 )
{
- i_rewind = 16 - ( p_vout->render.i_width & 15 );
+ i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 16; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
#else
- if( p_vout->render.i_width & 7 )
+ if( p_filter->fmt_in.video.i_width & 7 )
{
- i_rewind = 8 - ( p_vout->render.i_width & 7 );
+ i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
- for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
- for ( i_x = p_vout->render.i_width / 8; i_x--; )
+ for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
# include "config.h"
#endif
-#include <vlc_common.h>
+#include <vlc/vlc.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i420_rgb.h"
/*****************************************************************************
* I420_RGB8: color YUV 4:2:0 to RGB 8 bpp
*****************************************************************************/
-void I420_RGB8( vout_thread_t *p_vout, picture_t *p_src, picture_t *p_dest )
+void I420_RGB8( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
{
/* We got this one from the old arguments */
uint8_t *p_pic = (uint8_t*)p_dest->p->p_pixels;
unsigned int i_real_y; /* y % 4 */
int i_right_margin;
int i_scale_count; /* scale modulo counter */
- unsigned int i_chroma_width = p_vout->render.i_width / 2;/* chroma width */
+ unsigned int i_chroma_width = p_filter->fmt_in.video.i_width / 2;/* chroma width */
/* Lookup table */
- uint8_t * p_lookup = p_vout->chroma.p_sys->p_base;
+ uint8_t * p_lookup = p_filter->p_sys->p_base;
/* Offset array pointer */
- int * p_offset_start = p_vout->chroma.p_sys->p_offset;
+ int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
static int dither22[4] = { 0x6, 0x16, 0x2, 0x12 };
static int dither23[4] = { 0x1e, 0xe, 0x1a, 0xa };
- SetOffset( p_vout->render.i_width, p_vout->render.i_height,
- p_vout->output.i_width, p_vout->output.i_height,
+ SetOffset( p_filter->fmt_in.video.i_width,
+ p_filter->fmt_in.video.i_height,
+ p_filter->fmt_out.video.i_width,
+ p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
- p_vout->output.i_height : p_vout->render.i_height;
- for( i_y = 0, i_real_y = 0; i_y < p_vout->render.i_height; i_y++ )
+ p_filter->fmt_out.video.i_height :
+ p_filter->fmt_in.video.i_height;
+ for( i_y = 0, i_real_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
/* Do horizontal and vertical scaling */
SCALE_WIDTH_DITHER( 420 );
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "I420,IYUV,YV12"
* Local and extern prototypes.
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void I420_YMGA ( vout_thread_t *, picture_t *, picture_t * );
+static void I420_YMGA ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_in.video.i_width & 1
+ || p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','V','1','2'):
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('Y','M','G','A'):
- p_vout->chroma.pf_convert = I420_YMGA;
+ p_filter->pf_video_filter_io = I420_YMGA;
break;
default:
/*****************************************************************************
* I420_YMGA: planar YUV 4:2:0 to Matrox's planar/packed YUV 4:2:0
*****************************************************************************/
-static void I420_YMGA( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_YMGA( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_uv = p_dest->U_PIXELS;
uint8_t *p_u = p_source->U_PIXELS;
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#if defined (MODULE_NAME_IS_i420_yuy2_altivec) && defined(HAVE_ALTIVEC_H)
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void I420_YUY2 ( vout_thread_t *, picture_t *, picture_t * );
-static void I420_YVYU ( vout_thread_t *, picture_t *, picture_t * );
-static void I420_UYVY ( vout_thread_t *, picture_t *, picture_t * );
+static void I420_YUY2 ( filter_t *, picture_t *, picture_t * );
+static void I420_YVYU ( filter_t *, picture_t *, picture_t * );
+static void I420_UYVY ( filter_t *, picture_t *, picture_t * );
#if !defined (MODULE_NAME_IS_i420_yuy2_altivec)
-static void I420_IUYV ( vout_thread_t *, picture_t *, picture_t * );
-static void I420_cyuv ( vout_thread_t *, picture_t *, picture_t * );
+static void I420_IUYV ( filter_t *, picture_t *, picture_t * );
+static void I420_cyuv ( filter_t *, picture_t *, picture_t * );
#endif
#if defined (MODULE_NAME_IS_i420_yuy2)
-static void I420_Y211 ( vout_thread_t *, picture_t *, picture_t * );
+static void I420_Y211 ( filter_t *, picture_t *, picture_t * );
#endif
#ifdef MODULE_NAME_IS_i420_yuy2_mmx
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_in.video.i_width & 1
+ || p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','V','1','2'):
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
- p_vout->chroma.pf_convert = I420_YUY2;
+ p_filter->pf_video_filter_io = I420_YUY2;
break;
case VLC_FOURCC('Y','V','Y','U'):
- p_vout->chroma.pf_convert = I420_YVYU;
+ p_filter->pf_video_filter_io = I420_YVYU;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
- p_vout->chroma.pf_convert = I420_UYVY;
+ p_filter->pf_video_filter_io = I420_UYVY;
break;
#if !defined (MODULE_NAME_IS_i420_yuy2_altivec)
case VLC_FOURCC('I','U','Y','V'):
- p_vout->chroma.pf_convert = I420_IUYV;
+ p_filter->pf_video_filter_io = I420_IUYV;
break;
case VLC_FOURCC('c','y','u','v'):
- p_vout->chroma.pf_convert = I420_cyuv;
+ p_filter->pf_video_filter_io = I420_cyuv;
break;
#endif
#if defined (MODULE_NAME_IS_i420_yuy2)
case VLC_FOURCC('Y','2','1','1'):
- p_vout->chroma.pf_convert = I420_Y211;
+ p_filter->pf_video_filter_io = I420_Y211;
break;
#endif
/*****************************************************************************
* I420_YUY2: planar YUV 4:2:0 to packed YUYV 4:2:2
*****************************************************************************/
-static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_YUY2( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
vector unsigned char uv_vec;
vector unsigned char y_vec;
- if( !( ( p_vout->render.i_width % 32 ) |
- ( p_vout->render.i_height % 2 ) ) )
+ if( !( ( p_filter->fmt_in.video.i_width % 32 ) |
+ ( p_filter->fmt_in.video.i_height % 2 ) ) )
{
/* Width is a multiple of 32, we take 2 lines at a time */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
VEC_NEXT_LINES( );
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
}
}
}
- else if( !( ( p_vout->render.i_width % 16 ) |
- ( p_vout->render.i_height % 4 ) ) )
+ else if( !( ( p_filter->fmt_in.video.i_width % 16 ) |
+ ( p_filter->fmt_in.video.i_height % 4 ) ) )
{
/* Width is only a multiple of 16, we take 4 lines at a time */
- for( i_y = p_vout->render.i_height / 4 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 4 ; i_y-- ; )
{
/* Line 1 and 2, pixels 0 to ( width - 16 ) */
VEC_NEXT_LINES( );
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+            for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
VEC_MERGE( vec_mergel );
/* Line 3 and 4, pixels 16 to ( width ) */
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y2 += p_source->p[Y_PLANE].i_pitch;
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
- for( i_x = p_vout->render.i_width / 8; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8; i_x-- ; )
{
C_YUV420_YUYV( );
C_YUV420_YUYV( );
C_YUV420_YUYV( );
}
#else
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
MMX_CALL( MMX_YUV420_YUYV );
}
#endif
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YUYV_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
/*****************************************************************************
* I420_YVYU: planar YUV 4:2:0 to packed YVYU 4:2:2
*****************************************************************************/
-static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_YVYU( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
vector unsigned char vu_vec;
vector unsigned char y_vec;
- if( !( ( p_vout->render.i_width % 32 ) |
- ( p_vout->render.i_height % 2 ) ) )
+ if( !( ( p_filter->fmt_in.video.i_width % 32 ) |
+ ( p_filter->fmt_in.video.i_height % 2 ) ) )
{
/* Width is a multiple of 32, we take 2 lines at a time */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
VEC_NEXT_LINES( );
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
}
}
}
- else if( !( ( p_vout->render.i_width % 16 ) |
- ( p_vout->render.i_height % 4 ) ) )
+ else if( !( ( p_filter->fmt_in.video.i_width % 16 ) |
+ ( p_filter->fmt_in.video.i_height % 4 ) ) )
{
/* Width is only a multiple of 16, we take 4 lines at a time */
- for( i_y = p_vout->render.i_height / 4 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 4 ; i_y-- ; )
{
/* Line 1 and 2, pixels 0 to ( width - 16 ) */
VEC_NEXT_LINES( );
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
VEC_MERGE( vec_mergel );
/* Line 3 and 4, pixels 16 to ( width ) */
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_YVYU( );
MMX_CALL( MMX_YUV420_YVYU );
#endif
}
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YVYU_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YVYU_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
/*****************************************************************************
* I420_UYVY: planar YUV 4:2:0 to packed UYVY 4:2:2
*****************************************************************************/
-static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_UYVY( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
vector unsigned char uv_vec;
vector unsigned char y_vec;
- if( !( ( p_vout->render.i_width % 32 ) |
- ( p_vout->render.i_height % 2 ) ) )
+ if( !( ( p_filter->fmt_in.video.i_width % 32 ) |
+ ( p_filter->fmt_in.video.i_height % 2 ) ) )
{
/* Width is a multiple of 32, we take 2 lines at a time */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
VEC_NEXT_LINES( );
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
}
}
}
- else if( !( ( p_vout->render.i_width % 16 ) |
- ( p_vout->render.i_height % 4 ) ) )
+ else if( !( ( p_filter->fmt_in.video.i_width % 16 ) |
+ ( p_filter->fmt_in.video.i_height % 4 ) ) )
{
/* Width is only a multiple of 16, we take 4 lines at a time */
- for( i_y = p_vout->render.i_height / 4 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 4 ; i_y-- ; )
{
/* Line 1 and 2, pixels 0 to ( width - 16 ) */
VEC_NEXT_LINES( );
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
VEC_MERGE( vec_mergel );
/* Line 3 and 4, pixels 16 to ( width ) */
- for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_UYVY( );
MMX_CALL( MMX_YUV420_UYVY );
#endif
}
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x--; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x--; )
{
C_YUV420_UYVY( );
}
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
/*****************************************************************************
* I420_IUYV: planar YUV 4:2:0 to interleaved packed UYVY 4:2:2
*****************************************************************************/
-static void I420_IUYV( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_IUYV( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
VLC_UNUSED(p_source); VLC_UNUSED(p_dest);
/* FIXME: TODO ! */
- msg_Err( p_vout, "I420_IUYV unimplemented, please harass <sam@zoy.org>" );
+ msg_Err( p_filter, "I420_IUYV unimplemented, please harass <sam@zoy.org>" );
}
/*****************************************************************************
* I420_cyuv: planar YUV 4:2:0 to upside-down packed UYVY 4:2:2
*****************************************************************************/
-static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_cyuv( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line1 = p_dest->p->p_pixels +
p_dest->p->i_visible_lines * p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 -= 3 * p_dest->p->i_pitch;
p_line2 -= 3 * p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_UYVY( );
MMX_CALL( MMX_YUV420_UYVY );
#endif
}
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
else
{
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
* I420_Y211: planar YUV 4:2:0 to packed YUYV 2:1:1
*****************************************************************************/
#if defined (MODULE_NAME_IS_i420_yuy2)
-static void I420_Y211( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I420_Y211( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
- for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
C_YUV420_Y211( );
C_YUV420_Y211( );
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "I422,J422"
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void I422_I420( vout_thread_t *, picture_t *, picture_t * );
-static void I422_YV12( vout_thread_t *, picture_t *, picture_t * );
-static void I422_YUVA( vout_thread_t *, picture_t *, picture_t * );
+static void I422_I420( filter_t *, picture_t *, picture_t * );
+static void I422_YV12( filter_t *, picture_t *, picture_t * );
+static void I422_YUVA( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_in.video.i_width & 1
+ || p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('I','4','2','2'):
case VLC_FOURCC('J','4','2','2'):
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
case VLC_FOURCC('J','4','2','0'):
- p_vout->chroma.pf_convert = I422_I420;
+ p_filter->pf_video_filter_io = I422_I420;
break;
case VLC_FOURCC('Y','V','1','2'):
- p_vout->chroma.pf_convert = I422_YV12;
+ p_filter->pf_video_filter_io = I422_YV12;
break;
case VLC_FOURCC('Y','U','V','A'):
- p_vout->chroma.pf_convert = I422_YUVA;
+ p_filter->pf_video_filter_io = I422_YUVA;
break;
default:
/*****************************************************************************
* I422_I420: planar YUV 4:2:2 to planar I420 4:2:0 Y:U:V
*****************************************************************************/
-static void I422_I420( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_I420( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint16_t i_dpy = p_dest->p[Y_PLANE].i_pitch;
uint16_t i_spy = p_source->p[Y_PLANE].i_pitch;
uint16_t i_dpuv = p_dest->p[U_PLANE].i_pitch;
uint16_t i_spuv = p_source->p[U_PLANE].i_pitch;
- uint16_t i_width = p_vout->render.i_width;
- uint16_t i_y = p_vout->render.i_height;
+ uint16_t i_width = p_filter->fmt_in.video.i_width;
+ uint16_t i_y = p_filter->fmt_in.video.i_height;
uint8_t *p_dy = p_dest->Y_PIXELS + (i_y-1)*i_dpy;
uint8_t *p_y = p_source->Y_PIXELS + (i_y-1)*i_spy;
uint8_t *p_du = p_dest->U_PIXELS + (i_y/2-1)*i_dpuv;
/*****************************************************************************
* I422_YV12: planar YUV 4:2:2 to planar YV12 4:2:0 Y:V:U
*****************************************************************************/
-static void I422_YV12( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_YV12( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint16_t i_dpy = p_dest->p[Y_PLANE].i_pitch;
uint16_t i_spy = p_source->p[Y_PLANE].i_pitch;
uint16_t i_dpuv = p_dest->p[U_PLANE].i_pitch;
uint16_t i_spuv = p_source->p[U_PLANE].i_pitch;
- uint16_t i_width = p_vout->render.i_width;
- uint16_t i_y = p_vout->render.i_height;
+ uint16_t i_width = p_filter->fmt_in.video.i_width;
+ uint16_t i_y = p_filter->fmt_in.video.i_height;
uint8_t *p_dy = p_dest->Y_PIXELS + (i_y-1)*i_dpy;
uint8_t *p_y = p_source->Y_PIXELS + (i_y-1)*i_spy;
uint8_t *p_du = p_dest->V_PIXELS + (i_y/2-1)*i_dpuv; /* U and V are swapped */
/*****************************************************************************
* I422_YUVA: planar YUV 4:2:2 to planar YUVA 4:2:0:4 Y:U:V:A
*****************************************************************************/
-static void I422_YUVA( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_YUVA( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
- I422_I420( p_vout, p_source, p_dest );
+ I422_I420( p_filter, p_source, p_dest );
vlc_memset( p_dest->p[A_PLANE].p_pixels, 0xff,
p_dest->p[A_PLANE].i_lines * p_dest->p[A_PLANE].i_pitch );
}
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i422_yuy2.h"
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void I422_YUY2 ( vout_thread_t *, picture_t *, picture_t * );
-static void I422_YVYU ( vout_thread_t *, picture_t *, picture_t * );
-static void I422_UYVY ( vout_thread_t *, picture_t *, picture_t * );
-static void I422_IUYV ( vout_thread_t *, picture_t *, picture_t * );
-static void I422_cyuv ( vout_thread_t *, picture_t *, picture_t * );
+static void I422_YUY2 ( filter_t *, picture_t *, picture_t * );
+static void I422_YVYU ( filter_t *, picture_t *, picture_t * );
+static void I422_UYVY ( filter_t *, picture_t *, picture_t * );
+static void I422_IUYV ( filter_t *, picture_t *, picture_t * );
+static void I422_cyuv ( filter_t *, picture_t *, picture_t * );
#if defined (MODULE_NAME_IS_i422_yuy2)
-static void I422_Y211 ( vout_thread_t *, picture_t *, picture_t * );
-static void I422_Y211 ( vout_thread_t *, picture_t *, picture_t * );
+static void I422_Y211 ( filter_t *, picture_t *, picture_t * );
#endif
/*****************************************************************************
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_in.video.i_width & 1
+ || p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('I','4','2','2'):
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
- p_vout->chroma.pf_convert = I422_YUY2;
+ p_filter->pf_video_filter_io = I422_YUY2;
break;
case VLC_FOURCC('Y','V','Y','U'):
- p_vout->chroma.pf_convert = I422_YVYU;
+ p_filter->pf_video_filter_io = I422_YVYU;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
- p_vout->chroma.pf_convert = I422_UYVY;
+ p_filter->pf_video_filter_io = I422_UYVY;
break;
case VLC_FOURCC('I','U','Y','V'):
- p_vout->chroma.pf_convert = I422_IUYV;
+ p_filter->pf_video_filter_io = I422_IUYV;
break;
case VLC_FOURCC('c','y','u','v'):
- p_vout->chroma.pf_convert = I422_cyuv;
+ p_filter->pf_video_filter_io = I422_cyuv;
break;
#if defined (MODULE_NAME_IS_i422_yuy2)
case VLC_FOURCC('Y','2','1','1'):
- p_vout->chroma.pf_convert = I422_Y211;
+ p_filter->pf_video_filter_io = I422_Y211;
break;
#endif
/*****************************************************************************
* I422_YUY2: planar YUV 4:2:2 to packed YUY2 4:2:2
*****************************************************************************/
-static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_YUY2( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels;
uint8_t *p_y = p_source->Y_PIXELS;
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YUYV_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
}
}
else {
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YUYV_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
}
#else
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
MMX_CALL( MMX_YUV422_YUYV );
#endif
}
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* I422_YVYU: planar YUV 4:2:2 to packed YVYU 4:2:2
*****************************************************************************/
-static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_YVYU( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels;
uint8_t *p_y = p_source->Y_PIXELS;
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YVYU_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
}
}
else {
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YVYU_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
}
#else
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
MMX_CALL( MMX_YUV422_YVYU );
#endif
}
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* I422_UYVY: planar YUV 4:2:2 to packed UYVY 4:2:2
*****************************************************************************/
-static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_UYVY( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels;
uint8_t *p_y = p_source->Y_PIXELS;
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
}
else {
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
#else
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
MMX_CALL( MMX_YUV422_UYVY );
#endif
}
- for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* I422_IUYV: planar YUV 4:2:2 to interleaved packed IUYV 4:2:2
*****************************************************************************/
-static void I422_IUYV( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_IUYV( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
VLC_UNUSED(p_source); VLC_UNUSED(p_dest);
/* FIXME: TODO ! */
- msg_Err( p_vout, "I422_IUYV unimplemented, please harass <sam@zoy.org>" );
+ msg_Err( p_filter, "I422_IUYV unimplemented, please harass <sam@zoy.org>" );
}
/*****************************************************************************
* I422_cyuv: planar YUV 4:2:2 to upside-down packed UYVY 4:2:2
*****************************************************************************/
-static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_cyuv( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels + p_dest->p->i_visible_lines * p_dest->p->i_pitch;
uint8_t *p_y = p_source->Y_PIXELS;
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
p_line -= 2 * p_dest->p->i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_ALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
}
else {
/* use slower SSE2 unaligned fetch and store */
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
p_line -= 2 * p_dest->p->i_pitch;
- for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_UNALIGNED );
}
- for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
#else
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
p_line -= 2 * p_dest->p->i_pitch;
* I422_Y211: planar YUV 4:2:2 to packed YUYV 2:1:1
*****************************************************************************/
#if defined (MODULE_NAME_IS_i422_yuy2)
-static void I422_Y211( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void I422_Y211( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels + p_dest->p->i_visible_lines * p_dest->p->i_pitch;
uint8_t *p_y = p_source->Y_PIXELS;
int i_x, i_y;
- for( i_y = p_vout->render.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
C_YUV422_Y211( p_line, p_y, p_u, p_v );
C_YUV422_Y211( p_line, p_y, p_u, p_v );
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,cyuv"
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void YUY2_I420 ( vout_thread_t *, picture_t *, picture_t * );
-static void YVYU_I420 ( vout_thread_t *, picture_t *, picture_t * );
-static void UYVY_I420 ( vout_thread_t *, picture_t *, picture_t * );
-static void cyuv_I420 ( vout_thread_t *, picture_t *, picture_t * );
+static void YUY2_I420 ( filter_t *, picture_t *, picture_t * );
+static void YVYU_I420 ( filter_t *, picture_t *, picture_t * );
+static void UYVY_I420 ( filter_t *, picture_t *, picture_t * );
+static void cyuv_I420 ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_in.video.i_width & 1
+ || p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','0'):
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
- p_vout->chroma.pf_convert = YUY2_I420;
+ p_filter->pf_video_filter_io = YUY2_I420;
break;
case VLC_FOURCC('Y','V','Y','U'):
- p_vout->chroma.pf_convert = YVYU_I420;
+ p_filter->pf_video_filter_io = YVYU_I420;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
- p_vout->chroma.pf_convert = UYVY_I420;
+ p_filter->pf_video_filter_io = UYVY_I420;
break;
case VLC_FOURCC('c','y','u','v'):
- p_vout->chroma.pf_convert = cyuv_I420;
+ p_filter->pf_video_filter_io = cyuv_I420;
break;
default:
/*****************************************************************************
* YUY2_I420: packed YUY2 4:2:2 to planar YUV 4:2:0
*****************************************************************************/
-static void YUY2_I420( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void YUY2_I420( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
bool b_skip = false;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; p_line++; \
C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v );
C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YUYV_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_u++ = *p_line++; \
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* YVYU_I420: packed YVYU 4:2:2 to planar YUV 4:2:0
*****************************************************************************/
-static void YVYU_I420( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void YVYU_I420( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
bool b_skip = false;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; p_line++; \
C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v );
C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YVYU_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* UYVY_I420: packed UYVY 4:2:2 to planar YUV 4:2:0
*****************************************************************************/
-static void UYVY_I420( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void UYVY_I420( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
bool b_skip = false;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_u++ = *p_line++; p_line++; \
C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v );
C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_UYVY_YUV422( p_line, p_y, p_u, p_v ) \
*p_u++ = *p_line++; *p_y++ = *p_line++; \
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
* cyuv_I420: upside-down packed UYVY 4:2:2 to planar YUV 4:2:0
* FIXME
*****************************************************************************/
-static void cyuv_I420( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void cyuv_I420( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
bool b_skip = false;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; p_line++; \
C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v );
C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_cyuv_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
#include <vlc_common.h>
#include <vlc_plugin.h>
+#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,cyuv"
*****************************************************************************/
static int Activate ( vlc_object_t * );
-static void YUY2_I422 ( vout_thread_t *, picture_t *, picture_t * );
-static void YVYU_I422 ( vout_thread_t *, picture_t *, picture_t * );
-static void UYVY_I422 ( vout_thread_t *, picture_t *, picture_t * );
-static void cyuv_I422 ( vout_thread_t *, picture_t *, picture_t * );
+static void YUY2_I422 ( filter_t *, picture_t *, picture_t * );
+static void YVYU_I422 ( filter_t *, picture_t *, picture_t * );
+static void UYVY_I422 ( filter_t *, picture_t *, picture_t * );
+static void cyuv_I422 ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
- vout_thread_t *p_vout = (vout_thread_t *)p_this;
+ filter_t *p_filter = (filter_t *)p_this;
- if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
+ if( p_filter->fmt_in.video.i_width & 1
+ || p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
- switch( p_vout->output.i_chroma )
+ switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','2'):
- switch( p_vout->render.i_chroma )
+ switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
- p_vout->chroma.pf_convert = YUY2_I422;
+ p_filter->pf_video_filter_io = YUY2_I422;
break;
case VLC_FOURCC('Y','V','Y','U'):
- p_vout->chroma.pf_convert = YVYU_I422;
+ p_filter->pf_video_filter_io = YVYU_I422;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
- p_vout->chroma.pf_convert = UYVY_I422;
+ p_filter->pf_video_filter_io = UYVY_I422;
break;
case VLC_FOURCC('c','y','u','v'):
- p_vout->chroma.pf_convert = cyuv_I422;
+ p_filter->pf_video_filter_io = cyuv_I422;
break;
default:
/*****************************************************************************
* YUY2_I422: packed YUY2 4:2:2 to planar YUV 4:2:2
*****************************************************************************/
-static void YUY2_I422( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void YUY2_I422( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YUYV_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_u++ = *p_line++; \
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* YVYU_I422: packed YVYU 4:2:2 to planar YUV 4:2:2
*****************************************************************************/
-static void YVYU_I422( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void YVYU_I422( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YVYU_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
/*****************************************************************************
* UYVY_I422: packed UYVY 4:2:2 to planar YUV 4:2:2
*****************************************************************************/
-static void UYVY_I422( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void UYVY_I422( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_UYVY_YUV422( p_line, p_y, p_u, p_v ) \
*p_u++ = *p_line++; *p_y++ = *p_line++; \
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
* cyuv_I422: upside-down packed UYVY 4:2:2 to planar YUV 4:2:2
* FIXME
*****************************************************************************/
-static void cyuv_I422( vout_thread_t *p_vout, picture_t *p_source,
- picture_t *p_dest )
+static void cyuv_I422( filter_t *p_filter, picture_t *p_source,
+ picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
- for( i_y = p_vout->output.i_height ; i_y-- ; )
+ for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
- for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
+ for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_cyuv_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
- for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
+ for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
p_vout->b_direct = 0;
/* Choose the best module */
- p_vout->chroma.p_module = module_Need( p_vout, "chroma", NULL, 0 );
-
- if( p_vout->chroma.p_module == NULL )
+ p_vout->p_chroma = vlc_object_create( p_vout, VLC_OBJECT_FILTER );
+ filter_t *p_chroma = p_vout->p_chroma;
+ vlc_object_attach( p_chroma, p_vout );
+ /* TODO: Set the fmt_in and fmt_out stuff here */
+ p_chroma->fmt_in.video = p_vout->fmt_render;
+ p_chroma->fmt_out.video = p_vout->fmt_out;
+
+ /* TODO: put in a function */
+ p_chroma->fmt_out.video.i_rmask = p_vout->output.i_rmask;
+ p_chroma->fmt_out.video.i_gmask = p_vout->output.i_gmask;
+ p_chroma->fmt_out.video.i_bmask = p_vout->output.i_bmask;
+ p_chroma->fmt_out.video.i_rrshift = p_vout->output.i_rrshift;
+ p_chroma->fmt_out.video.i_lrshift = p_vout->output.i_lrshift;
+ p_chroma->fmt_out.video.i_rgshift = p_vout->output.i_rgshift;
+ p_chroma->fmt_out.video.i_lgshift = p_vout->output.i_lgshift;
+ p_chroma->fmt_out.video.i_rbshift = p_vout->output.i_rbshift;
+ p_chroma->fmt_out.video.i_lbshift = p_vout->output.i_lbshift;
+    msg_Dbg( p_vout, "chroma conversion from %4.4s to %4.4s",
+             (char*)&p_chroma->fmt_in.video.i_chroma,
+             (char*)&p_chroma->fmt_out.video.i_chroma );
+ p_chroma->p_module = module_Need( p_chroma, "chroma", NULL, 0 );
+
+ if( p_chroma->p_module == NULL )
{
msg_Err( p_vout, "no chroma module for %4.4s to %4.4s",
(char*)&p_vout->render.i_chroma,
(char*)&p_vout->output.i_chroma );
+ vlc_object_detach( p_vout->p_chroma );
+ p_vout->p_chroma = NULL;
p_vout->pf_end( p_vout );
vlc_mutex_unlock( &p_vout->change_lock );
return VLC_EGENERIC;
}
/* Need to reinitialise the chroma plugin */
- if( p_vout->chroma.p_module )
+ if( p_vout->p_chroma->p_module )
{
- if( p_vout->chroma.p_module->pf_deactivate )
- p_vout->chroma.p_module->pf_deactivate( VLC_OBJECT(p_vout) );
- p_vout->chroma.p_module->pf_activate( VLC_OBJECT(p_vout) );
+ if( p_vout->p_chroma->p_module->pf_deactivate )
+ p_vout->p_chroma->p_module->pf_deactivate( VLC_OBJECT(p_vout->p_chroma) );
+ p_vout->p_chroma->p_module->pf_activate( VLC_OBJECT(p_vout->p_chroma) );
}
}
if( !p_vout->b_direct )
{
-        module_Unneed( p_vout, p_vout->chroma.p_module );
+        module_Unneed( p_vout->p_chroma, p_vout->p_chroma->p_module );
+        /* FIXME(review): p_chroma is neither detached nor released before
+         * the pointer is cleared — possible object leak; also note the
+         * other Unneed site only clears p_module. Confirm intended
+         * ownership/cleanup of the chroma filter_t. */
+        p_vout->p_chroma = NULL;
}
vlc_mutex_lock( &p_vout->picture_lock );
if( !p_vout->b_direct )
{
- module_Unneed( p_vout, p_vout->chroma.p_module );
+ module_Unneed( p_vout->p_chroma, p_vout->p_chroma->p_module );
+ p_vout->p_chroma->p_module = NULL;
}
/* Destroy all remaining pictures */
#include <vlc_common.h>
#include <vlc_vout.h>
#include <vlc_osd.h>
+#include <vlc_filter.h>
#include "vout_pictures.h"
#include <assert.h>
}
/* Convert image to the first direct buffer */
- p_vout->chroma.pf_convert( p_vout, p_pic, p_tmp_pic );
+ p_vout->p_chroma->pf_video_filter_io( p_vout->p_chroma, p_pic, p_tmp_pic );
/* Render subpictures on the first direct buffer */
spu_RenderSubpictures( p_vout->p_spu, &p_vout->fmt_out, p_tmp_pic,
return NULL;
/* Convert image to the first direct buffer */
- p_vout->chroma.pf_convert( p_vout, p_pic, &p_vout->p_picture[0] );
+ p_vout->p_chroma->pf_video_filter_io( p_vout->p_chroma, p_pic, &p_vout->p_picture[0] );
/* Render subpictures on the first direct buffer */
spu_RenderSubpictures( p_vout->p_spu, &p_vout->fmt_out,