* Copyright (C) 2003-2004 Ushodaya Enterprises Limited
* Author: Dan Dennedy <dan@dennedy.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "transition_composite.h"
#include <string.h>
#include <math.h>
-typedef void ( *composite_line_fn )( uint8_t *dest, uint8_t *src, int width_src, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven );
+typedef void ( *composite_line_fn )( uint8_t *dest, uint8_t *src, int width_src, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness );
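+/* Shared signature for the composite_line_yuv* functions below; the old
+ * trailing "uneven" argument is gone now that odd x offsets are folded
+ * into the source pointer once, before the per-line loop.
+ */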
/** Geometry struct.
*/
*p++ = ( image[ i ] - 16 ) * 299; // 299 ≈ 65535 / 219
}
+static inline int calculate_mix( uint16_t *luma, int j, int soft, int weight, int alpha )
+{
+ return ( ( ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + soft, weight + soft ) ) * alpha ) >> 8;
+}
+
+static inline uint8_t sample_mix( uint8_t dest, uint8_t src, int mix )
+{
+ return ( src * mix + dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+}
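+/* Worked example (illustrative only): with weight = 0xFFFF and
+ * alpha = 0xFF, calculate_mix() gives ( 65535 * 255 ) >> 8 = 65279,
+ * and sample_mix( 16, 235, 65279 ) = ( 235 * 65279 + 16 * 257 ) >> 16
+ * = 234, i.e. virtually the source sample; mix >> 8 rescales the
+ * 16-bit mix back to an 8-bit alpha when stored into alpha_a.
+ */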
/** Composite a source line over a destination line
*/
-static void composite_line_yuv( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
{
register int j;
- register int a;
register int mix;
for ( j = 0; j < width; j ++ )
{
- a = *alpha_b ++;
- mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
- mix = ( mix * a ) >> 8;
- *dest = ( ( *src++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ );
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *alpha_a = mix | *alpha_a;
+ *alpha_a = ( mix >> 8 ) | *alpha_a;
alpha_a ++;
}
}
-static void composite_line_yuv_or( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv_or( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
{
register int j;
- register int a;
register int mix;
for ( j = 0; j < width; j ++ )
{
- a = *alpha_b ++ | *alpha_a;
- mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
- mix = ( mix * a ) >> 8;
- *dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ | *alpha_a );
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *alpha_a = mix | *alpha_a;
- alpha_a ++;
+ *alpha_a ++ = mix >> 8;
}
}
-static void composite_line_yuv_and( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv_and( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
{
register int j;
- register int a;
register int mix;
for ( j = 0; j < width; j ++ )
{
- a = *alpha_b ++ & *alpha_a;
- mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
- mix = ( mix * a ) >> 8;
- *dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ & *alpha_a );
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *dest = ( *( src ++ ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *alpha_a = mix | *alpha_a;
- alpha_a ++;
+ *alpha_a ++ = mix >> 8;
}
}
-static void composite_line_yuv_xor( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int softness, int uneven_x )
+static void composite_line_yuv_xor( uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight, uint16_t *luma, int soft )
{
register int j;
- register int a;
register int mix;
for ( j = 0; j < width; j ++ )
{
- a = *alpha_b ++ ^ *alpha_a;
- mix = ( luma == NULL ) ? weight : smoothstep( luma[ j ], luma[ j ] + softness, weight + softness );
- mix = ( mix * a ) >> 8;
- *dest = ( *src++ * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ mix = calculate_mix( luma, j, soft, weight, *alpha_b ++ ^ *alpha_a );
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *dest = ( *( src ++ + uneven_x ) * mix + *dest * ( ( 1 << 16 ) - mix ) ) >> 16;
+ *dest = sample_mix( *dest, *src++, mix );
dest++;
- *alpha_a = mix | *alpha_a;
- alpha_a ++;
+ *alpha_a ++ = mix >> 8;
}
}
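+/* Note: the four composite_line_yuv* variants above are identical except
+ * for how the incoming alphas are combined before calculate_mix():
+ * plain b alpha, b | a, b & a, and b ^ a respectively.
+ */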
int ret = 0;
int i;
int x_src = 0, y_src = 0;
- int32_t weight = ( 1 << 16 ) * ( geometry.item.mix / 100 );
+ int32_t weight = ( ( 1 << 16 ) - 1 ) * ( geometry.item.mix / 100 );
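+ // 0xFFFF rather than 1 << 16 keeps a full-strength weight on the same
+ // 0..65535 scale as the luma map it is compared against in smoothstep().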
int step = ( field > -1 ) ? 2 : 1;
int bpp = 2;
int stride_src = width_src * bpp;
// Adjust to consumer scale
int x = rint( 0.5 + geometry.item.x * width_dest / geometry.nw );
int y = rint( 0.5 + geometry.item.y * height_dest / geometry.nh );
- int uneven_x = 2 * ( x % 2 );
+ int uneven_x = ( x % 2 );
// optimization points - no work to do
if ( width_src <= 0 || height_src <= 0 )
int alpha_b_stride = stride_src / bpp;
int alpha_a_stride = stride_dest / bpp;
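+ // When x lands on an odd column, advance the source by one packed
+ // pixel and trim the width so the 4:2:2 chroma order stays consistent;
+ // the line functions above no longer take an uneven argument.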
+ p_src += uneven_x * 2;
+ width_src -= 2 * uneven_x;
+ alpha_b += uneven_x;
+ uneven_x = 0;
+
// now do the compositing only to cropped extents
for ( i = 0; i < height_src; i += step )
{
- line_fn( p_dest, p_src, width_src, alpha_b, alpha_a, weight, p_luma, softness, uneven_x );
+ line_fn( p_dest, p_src, width_src, alpha_b, alpha_a, weight, p_luma, softness );
p_src += stride_src;
p_dest += stride_dest;
// Get the properties objects
mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );
mlt_properties properties = MLT_TRANSITION_PROPERTIES( this );
+ uint8_t resize_alpha = mlt_properties_get_int( b_props, "resize_alpha" );
- if ( mlt_properties_get_int( properties, "distort" ) == 0 && mlt_properties_get_int( b_props, "distort" ) == 0 && geometry->item.distort == 0 )
+ if ( mlt_properties_get_int( properties, "aligned" ) && mlt_properties_get_int( properties, "distort" ) == 0 && mlt_properties_get_int( b_props, "distort" ) == 0 && geometry->item.distort == 0 )
{
// Adjust b_frame pixel aspect
int normalised_width = geometry->item.w;
// ????: Shouldn't this be the default behaviour?
if ( mlt_properties_get_int( properties, "fill" ) && scaled_width > 0 && scaled_height > 0 )
{
- if ( scaled_height < normalised_height && scaled_width * normalised_height / scaled_height < normalised_width )
+ if ( scaled_height < normalised_height && scaled_width * normalised_height / scaled_height <= normalised_width )
{
scaled_width = rint( 0.5 + scaled_width * normalised_height / scaled_height );
scaled_height = normalised_height;
}
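+ // The <= above lets an image whose scaled width comes out exactly at
+ // the normalised width still take this height-filling branch.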
// We want to ensure that we bypass resize now...
- mlt_properties_set_int( b_props, "distort", mlt_properties_get_int( properties, "distort" ) );
+ if ( resize_alpha == 0 )
+ mlt_properties_set_int( b_props, "distort", mlt_properties_get_int( properties, "distort" ) );
+
+ // If we're not aligned, we want a non-transparent background
+ if ( mlt_properties_get_int( properties, "aligned" ) == 0 )
+ mlt_properties_set_int( b_props, "resize_alpha", 255 );
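+ // ( presumably the resize step uses resize_alpha as the alpha value
+ // for any padded border, so 255 makes that border opaque )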
// Take into consideration alignment for optimisation (titles are a special case)
if ( !mlt_properties_get_int( properties, "titles" ) )
ret = mlt_frame_get_image( b_frame, image, &format, width, height, 1 );
+ // Restore the b frame's original resize_alpha before returning
+ mlt_properties_set_int( b_props, "resize_alpha", resize_alpha );
+
return ret && image != NULL;
}
// Default factory
mlt_properties_set( properties, "factory", "fezzik" );
+ // Use alignment (and hence alpha of b frame)
+ mlt_properties_set_int( properties, "aligned", 1 );
+
// Inform apps and framework that this is a video only transition
mlt_properties_set_int( properties, "_transition_type", 1 );
}