X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fmodules%2Fcore%2Ftransition_composite.c;h=fdad2cfec6e1fa140aebb428621ec05108baf045;hb=bf3264b9e340ba5c11cbf59835a8af3db94e0cc2;hp=11fd958569f87d3bac74773acded53a3f14ddace;hpb=2a04e5dceebd174e24be42da8643a815640db27d;p=mlt

diff --git a/src/modules/core/transition_composite.c b/src/modules/core/transition_composite.c
index 11fd9585..fdad2cfe 100644
--- a/src/modules/core/transition_composite.c
+++ b/src/modules/core/transition_composite.c
@@ -3,19 +3,19 @@
  * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
  * Author: Dan Dennedy
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
 *
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
 *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
 #include "transition_composite.h"
@@ -27,7 +27,7 @@
 #include 
 #include 
 
-typedef void ( *composite_line_fn )( uint8_t *dest, uint8_t *src, int width_src, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven );
+typedef void ( *composite_line_fn )( uint8_t *dest, uint8_t *src, int width_src, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness );
 
 /** Geometry struct.
 */
@@ -366,136 +366,82 @@ static void luma_read_yuv422( uint8_t *image, uint16_t **map, int width, int hei
 		*p++ = ( image[ i ] - 16 ) * 299; // 299 = 65535 / 219
 }
 
+static inline int calculate_mix( uint16_t *luma, int j, int soft, int weight, int alpha )
+{
+	return ( ( ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + soft, weight + soft ) ) * alpha ) >> 8;
+}
+
+static inline uint8_t sample_mix( uint8_t dest, uint8_t src, int mix )
+{
+	return ( src * mix + dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+}
 
 /** Composite a source line over a destination line
 */
 
-static void composite_line_yuv( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
 {
 	register int j;
-	register int a;
 	register int mix;
-	int uneven_w = width % 2;
 
 	for ( j = 0; j < width; j ++ )
 	{
-		a = *alpha_b ++;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ );
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
-		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
-	}
-
-	if ( uneven_w )
-	{
-		a = *alpha_b ++;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *src ++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*alpha_a = mix | *alpha_a;
+		*alpha_a = ( mix >> 8 ) | *alpha_a;
 		alpha_a ++;
 	}
 }
 
-static void composite_line_yuv_or( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv_or( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
 {
 	register int j;
-	register int a;
 	register int mix;
-	int uneven_w = width % 2;
 
 	for ( j = 0; j < width; j ++ )
 	{
-		a = *alpha_b ++ | *alpha_a;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ | *alpha_a );
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
+		*alpha_a ++ = mix >> 8;
 	}
-
-	if ( uneven_w )
-	{
-		a = *alpha_b ++ | *alpha_a;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
-		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
-	}
 }
 
-static void composite_line_yuv_and( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv_and( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
 {
 	register int j;
-	register int a;
 	register int mix;
-	int uneven_w = width % 2;
 
 	for ( j = 0; j < width; j ++ )
 	{
-		a = *alpha_b ++ & *alpha_a;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ & *alpha_a );
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*dest = ( *( src ++ ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
+		*alpha_a ++ = mix >> 8;
 	}
-
-	if ( uneven_w )
-	{
-		a = *alpha_b ++ & *alpha_a;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *src ++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
-		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
-	}
 }
 
-static void composite_line_yuv_xor( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv_xor( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
 {
 	register int j;
-	register int a;
 	register int mix;
-	int uneven_w = width % 2;
 
 	for ( j = 0; j < width; j ++ )
 	{
-		a = *alpha_b ++ ^ *alpha_a;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ ^ *alpha_a );
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+		*dest = sample_mix( *dest, *src++, mix );
 		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
+		*alpha_a ++ = mix >> 8;
 	}
-
-	if ( uneven_w )
-	{
-		a = *alpha_b ++ ^ *alpha_a;
-		mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
-		mix = ( mix * a ) >> 8;
-		*dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
-		dest++;
-		*alpha_a = mix | *alpha_a;
-		alpha_a ++;
-	}
 }
 
 /** Composite function.
@@ -506,7 +452,7 @@ static int composite_yuv( uint8_t *p_dest, int width_dest, int height_dest, uint
 	int ret = 0;
 	int i;
 	int x_src = 0, y_src = 0;
-	int32_t weight = ( 1 << 16 ) * ( geometry.item.mix / 100 );
+	int32_t weight = ( ( 1 << 16 ) - 1 ) * ( geometry.item.mix / 100 );
 	int step = ( field > -1 ) ? 2 : 1;
 	int bpp = 2;
 	int stride_src = width_src * bpp;
@@ -515,7 +461,7 @@ static int composite_yuv( uint8_t *p_dest, int width_dest, int height_dest, uint
 	// Adjust to consumer scale
 	int x = rint( 0.5 + geometry.item.x * width_dest / geometry.nw );
 	int y = rint( 0.5 + geometry.item.y * height_dest / geometry.nh );
-	int uneven_x = 2 * ( x % 2 );
+	int uneven_x = ( x % 2 );
 
 	// optimization points - no work to do
 	if ( width_src <= 0 || height_src <= 0 )
@@ -588,13 +534,15 @@ static int composite_yuv( uint8_t *p_dest, int width_dest, int height_dest, uint
 	int alpha_b_stride = stride_src / bpp;
 	int alpha_a_stride = stride_dest / bpp;
 
-	// Incorrect, but keeps noise away?
-	height_src --;
+	p_src += uneven_x * 2;
+	width_src -= 2 * uneven_x;
+	alpha_b += uneven_x;
+	uneven_x = 0;
 
 	// now do the compositing only to cropped extents
 	for ( i = 0; i < height_src; i += step )
 	{
-		line_fn( p_dest, p_src, width_src, alpha_b, alpha_a, weight, p_luma, softness, uneven_x );
+		line_fn( p_dest, p_src, width_src, alpha_b, alpha_a, weight, p_luma, softness );
 
 		p_src += stride_src;
 		p_dest += stride_dest;
@@ -767,8 +715,9 @@ static int get_b_frame_image( mlt_transition this, mlt_frame b_frame, uint8_t **
 	// Get the properties objects
 	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );
 	mlt_properties properties = MLT_TRANSITION_PROPERTIES( this );
+	uint8_t resize_alpha = mlt_properties_get_int( b_props, "resize_alpha" );
 
-	if ( mlt_properties_get_int( properties, "distort" ) == 0 && mlt_properties_get_int( b_props, "distort" ) == 0 && geometry->item.distort == 0 )
+	if ( mlt_properties_get_int( properties, "aligned" ) && mlt_properties_get_int( properties, "distort" ) == 0 && mlt_properties_get_int( b_props, "distort" ) == 0 && geometry->item.distort == 0 )
 	{
 		// Adjust b_frame pixel aspect
 		int normalised_width = geometry->item.w;
@@ -776,19 +725,21 @@ static int get_b_frame_image( mlt_transition this, mlt_frame b_frame, uint8_t **
 	int real_width = get_value( b_props, "real_width", "width" );
 	int real_height = get_value( b_props, "real_height", "height" );
 	double input_ar = mlt_properties_get_double( b_props, "aspect_ratio" );
-	double output_ar = mlt_properties_get_double( b_props, "consumer_aspect_ratio" );
-	int scaled_width = ( input_ar == 0.0 ? output_ar : input_ar ) / output_ar * real_width;
+	double consumer_ar = mlt_properties_get_double( b_props, "consumer_aspect_ratio" );
+	double background_ar = mlt_properties_get_double( b_props, "output_ratio" );
+	double output_ar = background_ar != 0.0 ? background_ar : consumer_ar;
+	int scaled_width = rint( 0.5 + ( input_ar == 0.0 ? output_ar : input_ar ) / output_ar * real_width );
 	int scaled_height = real_height;
 
 	// Now ensure that our images fit in the normalised frame
 	if ( scaled_width > normalised_width )
 	{
-		scaled_height = scaled_height * normalised_width / scaled_width;
+		scaled_height = rint( 0.5 + scaled_height * normalised_width / scaled_width );
 		scaled_width = normalised_width;
 	}
 	if ( scaled_height > normalised_height )
 	{
-		scaled_width = scaled_width * normalised_height / scaled_height;
+		scaled_width = rint( 0.5 + scaled_width * normalised_height / scaled_height );
 		scaled_height = normalised_height;
 	}
 
@@ -796,14 +747,14 @@ static int get_b_frame_image( mlt_transition this, mlt_frame b_frame, uint8_t **
 	// ????: Shouldn't this be the default behaviour?
 	if ( mlt_properties_get_int( properties, "fill" ) && scaled_width > 0 && scaled_height > 0 )
 	{
-		if ( scaled_height < normalised_height && scaled_width * normalised_height / scaled_height < normalised_width )
+		if ( scaled_height < normalised_height && scaled_width * normalised_height / scaled_height <= normalised_width )
 		{
-			scaled_width = scaled_width * normalised_height / scaled_height;
+			scaled_width = rint( 0.5 + scaled_width * normalised_height / scaled_height );
 			scaled_height = normalised_height;
 		}
 		else if ( scaled_width < normalised_width && scaled_height * normalised_width / scaled_width < normalised_height )
 		{
-			scaled_height = scaled_height * normalised_width / scaled_width;
+			scaled_height = rint( 0.5 + scaled_height * normalised_width / scaled_width );
 			scaled_width = normalised_width;
 		}
 	}
@@ -819,18 +770,26 @@ static int get_b_frame_image( mlt_transition this, mlt_frame b_frame, uint8_t **
 	}
 
 	// We want to ensure that we bypass resize now...
-	mlt_properties_set_int( b_props, "distort", 1 );
+	if ( resize_alpha == 0 )
+		mlt_properties_set_int( b_props, "distort", mlt_properties_get_int( properties, "distort" ) );
+
+	// If we're not aligned, we want a non-transparent background
+	if ( mlt_properties_get_int( properties, "aligned" ) == 0 )
+		mlt_properties_set_int( b_props, "resize_alpha", 255 );
 
-	// Take into consideration alignment for optimisation
+	// Take into consideration alignment for optimisation (titles are a special case)
 	if ( !mlt_properties_get_int( properties, "titles" ) )
 		alignment_calculate( geometry );
 
 	// Adjust to consumer scale
-	*width = geometry->sw * *width / geometry->nw;
-	*height = geometry->sh * *height / geometry->nh;
+	*width = rint( 0.5 + geometry->sw * *width / geometry->nw );
+	*height = rint( 0.5 + geometry->sh * *height / geometry->nh );
 
 	ret = mlt_frame_get_image( b_frame, image, &format, width, height, 1 );
 
+	// Set the frame back
+	mlt_properties_set_int( b_props, "resize_alpha", resize_alpha );
+
 	return ret && image != NULL;
 }
 
@@ -895,16 +854,6 @@ static mlt_geometry composite_calculate( mlt_transition this, struct geometry_s
 	return start;
 }
 
-static inline void inline_memcpy( uint8_t *dest, uint8_t *src, int length )
-{
-	uint8_t *end = src + length;
-	while ( src < end )
-	{
-		*dest ++ = *src ++;
-		*dest ++ = *src ++;
-	}
-}
-
 mlt_frame composite_copy_region( mlt_transition this, mlt_frame a_frame, mlt_position frame_position )
 {
 	// Create a frame to return
@@ -1004,7 +953,7 @@ mlt_frame composite_copy_region( mlt_transition this, mlt_frame a_frame, mlt_pos
 
 	while ( h -- )
 	{
-		inline_memcpy( dest, p, w * 2 );
+		memcpy( dest, p, w * 2 );
 		dest += ds;
 		p += ss;
 	}
@@ -1188,9 +1137,9 @@ static int transition_get_image( mlt_frame a_frame, uint8_t **image, mlt_image_f
 
 	if ( mlt_properties_get_int( properties, "titles" ) )
 	{
-		result.item.w = *width * ( result.item.w / result.nw );
+		result.item.w = rint( 0.5 + *width * ( result.item.w / result.nw ) );
 		result.nw = result.item.w;
-		result.item.h = *height * ( result.item.h / result.nh );
+		result.item.h = rint( 0.5 + *height * ( result.item.h / result.nh ) );
 		result.nh = *height;
 		result.sw = width_b;
 		result.sh = height_b;
@@ -1256,12 +1205,11 @@ mlt_transition transition_composite_init( char *arg )
 		// Default factory
 		mlt_properties_set( properties, "factory", "fezzik" );
 
+		// Use alignment (and hence alpha of b frame)
+		mlt_properties_set_int( properties, "aligned", 1 );
+
 		// Inform apps and framework that this is a video only transition
 		mlt_properties_set_int( properties, "_transition_type", 1 );
-		
-#ifdef USE_MMX
-	//mlt_properties_set_int( properties, "_MMX", composite_have_mmx() );
-#endif
 	}
 	return this;
 }
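
The core of this commit is the factoring of the per-pixel blend into calculate_mix() and sample_mix(), with the compositors now writing mix >> 8 back into the a-frame alpha. The sketch below lifts that fixed-point math out of MLT so it can be compiled and probed in isolation. calculate_mix() and sample_mix() are copied from the patch; the smoothstep() ramp is an assumed stand-in for the one defined elsewhere in transition_composite.c, and the file name and main() are purely illustrative.

/* blend_sketch.c -- standalone sketch of the blend factored out above.
 * Build: cc -o blend_sketch blend_sketch.c
 */
#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for MLT's smoothstep(): a 16.16 fixed-point ramp that
 * returns 0 below edge1 and 0x10000 at or above edge2. */
static inline int32_t smoothstep( int32_t edge1, int32_t edge2, uint32_t a )
{
	if ( (int32_t) a < edge1 )
		return 0;
	if ( (int32_t) a >= edge2 )
		return 0x10000;
	a = ( ( a - edge1 ) << 16 ) / ( edge2 - edge1 );
	return ( ( ( a * a ) >> 16 ) * ( ( 3 << 16 ) - ( 2 * a ) ) ) >> 16;
}

/* As in the patch: weight is 16.16 (0..0xFFFF = 0..100%), alpha is 0..255,
 * and the optional luma map turns the flat weight into a per-pixel wipe
 * threshold softened over "soft". */
static inline int calculate_mix( uint16_t *luma, int j, int soft, int weight, int alpha )
{
	return ( ( ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + soft, weight + soft ) ) * alpha ) >> 8;
}

/* Linear blend of two 8-bit samples with the ~16-bit mix factor. */
static inline uint8_t sample_mix( uint8_t dest, uint8_t src, int mix )
{
	return ( src * mix + dest * ( ( 1 << 16 ) - mix ) ) >> 16;
}

int main( void )
{
	uint8_t dest = 16, src = 235;      /* video-level black under white */
	int weight = ( 1 << 16 ) - 1;      /* 100% opacity, as composite_yuv now computes it */
	int alphas[ ] = { 0, 64, 128, 255 };
	int i;

	for ( i = 0; i < 4; i ++ )
	{
		int mix = calculate_mix( NULL, 0, 0, weight, alphas[ i ] );
		printf( "alpha %3d -> mix %5d -> blended sample %3d, stored alpha %3d\n",
			alphas[ i ], mix, sample_mix( dest, src, mix ), mix >> 8 );
	}
	return 0;
}

With a full-opacity weight, mix tracks the b-frame alpha (e.g. alpha 255 yields mix 65279, so the destination sample lands at 234 rather than 235: a one-LSB cost of keeping the whole pipeline in integer arithmetic), which also illustrates why the patch stores mix >> 8 as the composited alpha.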