Clean up license declarations and remove dv1394d references.
diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index c36c890..885e84b 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
@@ -3,19 +3,19 @@
  * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
  * Author: Charles Yates <charles.yates@pandora.be>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
  *
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 // Local header files
@@ -26,6 +26,9 @@
 
 // ffmpeg Header files
 #include <avformat.h>
+#ifdef SWSCALE
+#include <swscale.h>
+#endif
 
 // System header files
 #include <stdlib.h>
@@ -57,7 +60,7 @@ mlt_producer producer_avformat_init( char *file )
                if ( mlt_producer_init( this, NULL ) == 0 )
                {
                        // Get the properties
-                       mlt_properties properties = mlt_producer_properties( this );
+                       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
                        // Set the resource property (required for all producers)
                        mlt_properties_set( properties, "resource", file );
@@ -89,7 +92,10 @@ static void find_default_streams( AVFormatContext *context, int *audio_index, in
        for( i = 0; i < context->nb_streams; i++ ) 
        {
                // Get the codec context
-               AVCodecContext *codec_context = &context->streams[ i ]->codec;
+               AVCodecContext *codec_context = context->streams[ i ]->codec;
+
+               if ( avcodec_find_decoder( codec_context->codec_id ) == NULL )
+                       continue;
 
                // Determine the type and obtain the first index of each type
                switch( codec_context->codec_type ) 
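
A minimal sketch of the guard added in this hunk, assuming the same ffmpeg vintage the file already targets (streams[ i ]->codec, avcodec_find_decoder()); the helper name is illustrative and not part of the commit:

static int stream_is_decodable( AVFormatContext *context, int i )
{
	// Skip any stream for which no decoder is available
	AVCodecContext *codec_context = context->streams[ i ]->codec;
	return avcodec_find_decoder( codec_context->codec_id ) != NULL;
}
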
@@ -156,7 +162,7 @@ static int producer_open( mlt_producer this, char *file )
        AVFormatContext *context = NULL;
 
        // Get the properties
-       mlt_properties properties = mlt_producer_properties( this );
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
        // We will treat everything with the producer fps
        double fps = mlt_properties_get_double( properties, "fps" );
@@ -196,8 +202,7 @@ static int producer_open( mlt_producer this, char *file )
                        // These are required by video4linux (defaults)
                        params->width = 640;
                        params->height = 480;
-                       params->frame_rate = 25;
-                       params->frame_rate_base = 1;
+                       params->time_base= (AVRational){1,25};
                        params->device = file;
                        params->channels = 2;
                        params->sample_rate = 48000;
@@ -218,9 +223,9 @@ static int producer_open( mlt_producer this, char *file )
                                if ( t )
                                        t[0] = 0;
                                if ( !strcmp( name, "frame_rate" ) )
-                                       params->frame_rate = atoi( value );
+                                       params->time_base.den = atoi( value );
                                else if ( !strcmp( name, "frame_rate_base" ) )
-                                       params->frame_rate_base = atoi( value );
+                                       params->time_base.num = atoi( value );
                                else if ( !strcmp( name, "sample_rate" ) )
                                        params->sample_rate = atoi( value );
                                else if ( !strcmp( name, "channels" ) )
@@ -243,8 +248,7 @@ static int producer_open( mlt_producer this, char *file )
        }
 
        // Now attempt to open the file
-       error = av_open_input_file( &context, file, format, 0, params );
-       error = error < 0;
+       error = av_open_input_file( &context, file, format, 0, params ) < 0;
        
        // Cleanup AVFormatParameters
        free( standard );
@@ -277,23 +281,50 @@ static int producer_open( mlt_producer this, char *file )
                        find_default_streams( context, &audio_index, &video_index );
 
             if ( context->start_time != AV_NOPTS_VALUE )
-                mlt_properties_set_double( properties, "start_time", context->start_time );
+                mlt_properties_set_double( properties, "_start_time", context->start_time );
                        
                        // Check if we're seekable (something funny about mpeg here :-/)
                        if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) )
-                               mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) ) >= 0 );
+                               mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
                        else
                                av_bypass = 1;
 
                        // Store selected audio and video indexes on properties
                        mlt_properties_set_int( properties, "audio_index", audio_index );
                        mlt_properties_set_int( properties, "video_index", video_index );
+                       mlt_properties_set_int( properties, "_last_position", -1 );
+
+                       // Fetch the width, height and aspect ratio
+                       if ( video_index != -1 )
+                       {
+                               AVCodecContext *codec_context = context->streams[ video_index ]->codec;
+                               mlt_properties_set_int( properties, "width", codec_context->width );
+                               mlt_properties_set_int( properties, "height", codec_context->height );
+                               mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) );
+                       }
+
+                       // Read Metadata
+                       if (context->title != NULL) 
+                               mlt_properties_set(properties, "meta.attr.title.markup", context->title );
+                       if (context->author != NULL) 
+                               mlt_properties_set(properties, "meta.attr.author.markup", context->author );
+                       if (context->copyright != NULL) 
+                               mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
+                       if (context->comment != NULL) 
+                               mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
+                       if (context->album != NULL) 
+                               mlt_properties_set(properties, "meta.attr.album.markup", context->album );
+                       if (context->year != 0) 
+                               mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
+                       if (context->track != 0) 
+                               mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
                        
                        // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
                        if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 )
                        {
                                // We'll use the open one as our video_context
                                mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
+                               av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
 
                                // And open again for our audio context
                                av_open_input_file( &context, file, NULL, 0, NULL );
@@ -306,6 +337,7 @@ static int producer_open( mlt_producer this, char *file )
                        {
                                // We only have a video context
                                mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
+                               av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
                        }
                        else if ( audio_index != -1 )
                        {
@@ -334,7 +366,7 @@ static int producer_open( mlt_producer this, char *file )
 static double producer_time_of_frame( mlt_producer this, mlt_position position )
 {
        // Get the properties
-       mlt_properties properties = mlt_producer_properties( this );
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
        // Obtain the fps
        double fps = mlt_properties_get_double( properties, "fps" );
@@ -345,51 +377,44 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position )
 
 static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
 {
-       // EXPERIMENTAL IMAGE NORMALISATIONS
-       if ( pix_fmt == PIX_FMT_YUV420P && format == mlt_image_yuv422 )
+#ifdef SWSCALE
+       if ( format == mlt_image_yuv420p )
        {
-               register int i, j;
-               register int half = width >> 1;
-               register uint8_t *Y = ( ( AVPicture * )frame )->data[ 0 ];
-               register uint8_t *U = ( ( AVPicture * )frame )->data[ 1 ];
-               register uint8_t *V = ( ( AVPicture * )frame )->data[ 2 ];
-               register uint8_t *d = buffer;
-               register uint8_t *y, *u, *v;
-
-               i = height >> 1;
-               while ( i -- )
-               {
-                       y = Y;
-                       u = U;
-                       v = V;
-                       j = half;
-                       while ( j -- )
-                       {
-                               *d ++ = *y ++;
-                               *d ++ = *u ++;
-                               *d ++ = *y ++;
-                               *d ++ = *v ++;
-                       }
-
-                       Y += ( ( AVPicture * )frame )->linesize[ 0 ];
-                       y = Y;
-                       u = U;
-                       v = V;
-                       j = half;
-                       while ( j -- )
-                       {
-                               *d ++ = *y ++;
-                               *d ++ = *u ++;
-                               *d ++ = *y ++;
-                               *d ++ = *v ++;
-                       }
-
-                       Y += ( ( AVPicture * )frame )->linesize[ 0 ];
-                       U += ( ( AVPicture * )frame )->linesize[ 1 ];
-                       V += ( ( AVPicture * )frame )->linesize[ 2 ];
-               }
+               struct SwsContext *context = sws_getContext( width, height, pix_fmt,
+                       width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+               AVPicture output;
+               output.data[0] = buffer;
+               output.data[1] = buffer + width * height;
+               output.data[2] = buffer + ( 3 * width * height ) / 2;
+               output.linesize[0] = width;
+               output.linesize[1] = width >> 1;
+               output.linesize[2] = width >> 1;
+               sws_scale( context, frame->data, frame->linesize, 0, height,
+                       output.data, output.linesize);
+               sws_freeContext( context );
+       }
+       else if ( format == mlt_image_rgb24 )
+       {
+               struct SwsContext *context = sws_getContext( width, height, pix_fmt,
+                       width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+               AVPicture output;
+               avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
+               sws_scale( context, frame->data, frame->linesize, 0, height,
+                       output.data, output.linesize);
+               sws_freeContext( context );
+       }
+       else
+       {
+               struct SwsContext *context = sws_getContext( width, height, pix_fmt,
+                       width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+               AVPicture output;
+               avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
+               sws_scale( context, frame->data, frame->linesize, 0, height,
+                       output.data, output.linesize);
+               sws_freeContext( context );
        }
-       else if ( format == mlt_image_yuv420p )
+#else
+       if ( format == mlt_image_yuv420p )
        {
                AVPicture pict;
                pict.data[0] = buffer;
@@ -412,6 +437,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height );
                img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height );
        }
+#endif
 }
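
The swscale path above repeats the same three-call sequence for each output format. A hedged sketch of that sequence, pulled out as a helper for the default YUYV422 case; the function name is illustrative and the calls mirror the patch (same SWSCALE guard, same PIX_FMT_YUYV422 constant):

#ifdef SWSCALE
static void convert_to_yuyv422( AVFrame *frame, uint8_t *buffer, int pix_fmt,
	int width, int height )
{
	// One-shot converter from the codec's pix_fmt to packed YUYV422
	struct SwsContext *context = sws_getContext( width, height, pix_fmt,
		width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL );
	AVPicture output;

	// Describe the destination buffer as a packed YUYV picture
	avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );

	// Convert the full frame height in a single slice
	sws_scale( context, frame->data, frame->linesize, 0, height,
		output.data, output.linesize );

	sws_freeContext( context );
}
#endif

Note that, as in the patch, the context is created and freed on every frame; caching it per producer would avoid that cost but is outside this change.
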
 
 /** Get an image from a frame.
@@ -420,7 +446,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
 static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
 {
        // Get the properties from the frame
-       mlt_properties frame_properties = mlt_frame_properties( frame );
+       mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
        // Obtain the frame number of this frame
        mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
@@ -429,7 +455,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
 
        // Get the producer properties
-       mlt_properties properties = mlt_producer_properties( this );
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
        // Fetch the video_context
        AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
@@ -438,7 +464,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        int index = mlt_properties_get_int( properties, "video_index" );
 
        // Obtain the expected frame numer
-       mlt_position expected = mlt_properties_get_position( properties, "video_expected" );
+       mlt_position expected = mlt_properties_get_position( properties, "_video_expected" );
 
        // Calculate the real time code
        double real_timecode = producer_time_of_frame( this, position );
@@ -447,7 +473,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        AVStream *stream = context->streams[ index ];
 
        // Get codec context
-       AVCodecContext *codec_context = &stream->codec;
+       AVCodecContext *codec_context = stream->codec;
 
        // Packet
        AVPacket pkt;
@@ -462,10 +488,14 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        int ignore = 0;
 
        // Current time calcs
-       double current_time = mlt_properties_get_double( properties, "current_time" );
+       int current_position = mlt_properties_get_double( properties, "_current_position" );
 
        // We may want to use the source fps if available
        double source_fps = mlt_properties_get_double( properties, "source_fps" );
+       double fps = mlt_properties_get_double( properties, "fps" );
+
+       // This is the physical frame position in the source
+       int req_position = ( int )( position / fps * source_fps );
 
        // Get the seekable status
        int seekable = mlt_properties_get_int( properties, "seekable" );
@@ -476,6 +506,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        // Hopefully provide better support for streams...
        int av_bypass = mlt_properties_get_int( properties, "av_bypass" );
 
+       // Determines if we have to decode all frames in a sequence
+       int must_decode = 1;
+
        // Set the result arguments that we know here (only *buffer is now required)
        *width = codec_context->width;
        *height = codec_context->height;
@@ -501,10 +534,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        // Construct the output image
        *buffer = mlt_pool_alloc( size );
 
+       // Temporary hack to improve intra frame only
+       must_decode = strcmp( codec_context->codec->name, "mjpeg" ) &&
+                                 strcmp( codec_context->codec->name, "rawvideo" ) &&
+                                 strcmp( codec_context->codec->name, "dvvideo" );
+
        // Seek if necessary
        if ( position != expected )
        {
-               if ( position + 1 == expected )
+               if ( av_frame != NULL && position + 1 == expected )
                {
                        // We're paused - use last image
                        paused = 1;
@@ -512,24 +550,32 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                else if ( !seekable && position > expected && ( position - expected ) < 250 )
                {
                        // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       ignore = position - expected;
+                       ignore = ( int )( ( position - expected ) / fps * source_fps );
                }
                else if ( seekable && ( position < expected || position - expected >= 12 ) )
                {
-                       // Set to the real timecode
-                       av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 );
+                       // Calculate the timestamp for the requested frame
+                       int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE );
+                       if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
+                               timestamp += context->start_time;
+                       if ( must_decode )
+                               timestamp -= AV_TIME_BASE;
+                       if ( timestamp < 0 )
+                               timestamp = 0;
+
+                       // Set to the timestamp
+                       av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
        
                        // Remove the cached info relating to the previous position
-                       mlt_properties_set_double( properties, "current_time", real_timecode );
-
+                       mlt_properties_set_int( properties, "_current_position", -1 );
+                       mlt_properties_set_int( properties, "_last_position", -1 );
                        mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
                        av_frame = NULL;
                }
        }
-       
-       // Duplicate the last image if necessary
-       if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) &&
-                av_bypass == 0 )
+
+       // Duplicate the last image if necessary (see comment on rawvideo below)
+       if ( av_frame != NULL && ( paused || mlt_properties_get_int( properties, "_current_position" ) >= req_position ) && av_bypass == 0 )
        {
                // Duplicate it
                convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
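
A minimal, self-contained sketch of the new seek-target arithmetic in the hunk above, with illustrative numbers (frame 250 of a 25 fps source whose container starts at 0.5 s, long-GOP codec so must_decode is set); AV_TIME_BASE is ffmpeg's microsecond time base:

#include <stdio.h>
#include <stdint.h>

#define AV_TIME_BASE 1000000

int main( void )
{
	int req_position = 250;       /* physical frame wanted in the source   */
	double source_fps = 25.0;
	int64_t start_time = 500000;  /* context->start_time, in AV_TIME_BASE  */
	int must_decode = 1;

	int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE );
	timestamp += start_time;       /* rebase onto the container start time */
	if ( must_decode )
		timestamp -= AV_TIME_BASE; /* back up ~1s so decoding starts early */
	if ( timestamp < 0 )
		timestamp = 0;

	printf( "seek to %lld us\n", ( long long )timestamp );  /* 9500000 */
	return 0;
}

Combined with AVSEEK_FLAG_BACKWARD, av_seek_frame() then positions the demuxer at or before that timestamp.
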
@@ -541,19 +587,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        {
                int ret = 0;
                int got_picture = 0;
-               int must_decode = 1;
-
-               // Temporary hack to improve intra frame only
-               if ( !strcmp( codec_context->codec->name, "mjpeg" ) )
-                       must_decode = 0;
+               int int_position = 0;
 
-               memset( &pkt, 0, sizeof( pkt ) );
+               av_init_packet( &pkt );
 
                // Construct an AVFrame for YUV422 conversion
                if ( av_frame == NULL )
                {
-                       av_frame = calloc( 1, sizeof( AVFrame ) );
-                       mlt_properties_set_data( properties, "av_frame", av_frame, 0, free, NULL );
+                       av_frame = avcodec_alloc_frame( );
+                       mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
                }
 
                while( ret >= 0 && !got_picture )
@@ -565,24 +607,28 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                        if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 )
                        {
                                // Determine time code of the packet
-                               if ( pkt.pts != AV_NOPTS_VALUE )
-                                       current_time = ( double )pkt.pts / 1000000.0;
-                               else
-                                       current_time = real_timecode;
+                               int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps );
+                               if ( context->start_time != AV_NOPTS_VALUE )
+                                       int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE );
+
+                               int last_position = mlt_properties_get_int( properties, "_last_position" );
+                               if ( int_position == last_position )
+                                       int_position = last_position + 1;
+                               mlt_properties_set_int( properties, "_last_position", int_position );
 
                                // Decode the image
-                               if ( must_decode || current_time >= real_timecode )
+                               if ( must_decode || int_position >= req_position )
                                        ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size );
 
                                if ( got_picture )
                                {
                                        // Handle ignore
-                                       if ( ( int )( current_time * 100 ) < ( int )( real_timecode * 100 ) - 7 )
+                                       if ( int_position < req_position )
                                        {
                                                ignore = 0;
                                                got_picture = 0;
                                        }
-                                       else if ( current_time >= real_timecode )
+                                       else if ( int_position >= req_position )
                                        {
                                                ignore = 0;
                                        }
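
A small, self-contained sketch of the dts-to-frame-index mapping used above, with illustrative numbers (90 kHz stream time base, 25 fps source, container start time of 0.5 s):

#include <stdio.h>
#include <stdint.h>

#define AV_TIME_BASE 1000000

int main( void )
{
	double time_base = 1.0 / 90000.0;  /* av_q2d( stream->time_base )   */
	int64_t dts = 225000;              /* pkt.dts, i.e. 2.5 s in ticks  */
	double source_fps = 25.0;
	int64_t start_time = 500000;       /* context->start_time, in usecs */

	int int_position = ( int )( time_base * dts * source_fps );
	int_position -= ( int )( start_time * source_fps / AV_TIME_BASE );

	printf( "packet decodes to source frame %d\n", int_position );  /* 50 */
	return 0;
}
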
@@ -590,40 +636,34 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        {
                                                got_picture = 0;
                                        }
-                                       mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
                                }
                        }
 
+                       // Now handle the picture if we have one
+                       if ( got_picture )
+                       {
+                               mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
+                               mlt_properties_set_int( frame_properties, "top_field_first", av_frame->top_field_first );
+                               convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
+                               mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
+                               mlt_properties_set_double( properties, "_current_position", int_position );
+                       }
+
                        // We're finished with this packet regardless
                        av_free_packet( &pkt );
                }
+       }
 
-               // Now handle the picture if we have one
-               if ( got_picture )
-               {
-                       convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
-
-                       mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
+       // Very untidy - for rawvideo, the packet contains the frame, hence the free packet
+       // above will break the pause behaviour - so we wipe the frame now
+       if ( !strcmp( codec_context->codec->name, "rawvideo" ) )
+               mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
 
-                       if ( current_time == 0 && source_fps != 0 )
-                       {
-                               double fps = mlt_properties_get_double( properties, "fps" );
-                               current_time = ceil( source_fps * ( double )position / fps ) * ( 1 / source_fps );
-                               mlt_properties_set_double( properties, "current_time", current_time );
-                       }
-                       else
-                       {
-                               mlt_properties_set_double( properties, "current_time", current_time );
-                       }
-               }
-       }
-       
        // Set the field order property for this frame
-       mlt_properties_set_int( frame_properties, "top_field_first", 
-               mlt_properties_get_int( properties, "top_field_first" ) );
+       mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) );
 
        // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
-       mlt_properties_set_position( properties, "video_expected", position + 1 );
+       mlt_properties_set_position( properties, "_video_expected", position + 1 );
 
        return 0;
 }
@@ -634,7 +674,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 static void producer_set_up_video( mlt_producer this, mlt_frame frame )
 {
        // Get the properties
-       mlt_properties properties = mlt_producer_properties( this );
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
        // Fetch the video_context
        AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
@@ -643,7 +683,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame )
        int index = mlt_properties_get_int( properties, "video_index" );
 
        // Get the frame properties
-       mlt_properties frame_properties = mlt_frame_properties( frame );
+       mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
        if ( context != NULL && index != -1 )
        {
@@ -651,7 +691,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame )
                AVStream *stream = context->streams[ index ];
 
                // Get codec context
-               AVCodecContext *codec_context = &stream->codec;
+               AVCodecContext *codec_context = stream->codec;
 
                // Get the codec
                AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );
@@ -679,32 +719,41 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame )
                if ( codec != NULL )
                {
                        double source_fps = 0;
+                       int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" );
+                       double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
+                       double aspect_ratio;
 
                        // XXX: We won't know the real aspect ratio until an image is decoded
                        // but we do need it now (to satisfy filter_resize) - take a guess based
                        // on pal/ntsc
-                       if ( codec_context->sample_aspect_ratio.num > 0 )
+                       if ( force_aspect_ratio > 0.0 )
                        {
-                               mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) );
+                               aspect_ratio = force_aspect_ratio;
+                       }
+                       else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 )
+                       {
+                               aspect_ratio = av_q2d( codec_context->sample_aspect_ratio );
                        }
                        else
                        {
                                int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0;
-                               mlt_properties_set_double( properties, "aspect_ratio", is_pal ? 128.0/117.0 : 72.0/79.0 );
+                               aspect_ratio = is_pal ? 59.0/54.0 : 10.0/11.0;
                        }
 
-                       //fprintf( stderr, "AVFORMAT: sample aspect %f %dx%d\n", av_q2d( codec_context->sample_aspect_ratio ), codec_context->width, codec_context->height );
-
                        // Determine the fps
-                       source_fps = ( double )codec_context->frame_rate / ( codec_context->frame_rate_base == 0 ? 1 : codec_context->frame_rate_base );
+                       source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );
 
                        // We'll use fps if it's available
                        if ( source_fps > 0 && source_fps < 30 )
                                mlt_properties_set_double( properties, "source_fps", source_fps );
+                       else
+                               mlt_properties_set_double( properties, "source_fps", mlt_properties_get_double( properties, "fps" ) );
+                       mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
                        
                        // Set the width and height
                        mlt_properties_set_int( frame_properties, "width", codec_context->width );
                        mlt_properties_set_int( frame_properties, "height", codec_context->height );
+                       mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
 
                        mlt_frame_push_get_image( frame, producer_get_image );
                        mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
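
The fallback numbers in this hunk replace the old 128/117 and 72/79 guesses and the deprecated frame_rate fields. A short sketch of what they evaluate to, using the values from the patch (the NTSC frame rate is only an illustrative example):

#include <stdio.h>

int main( void )
{
	/* Default pixel-aspect guesses when the codec reports none */
	printf( "PAL  59/54 = %.4f\n", 59.0 / 54.0 );   /* ~1.0926 */
	printf( "NTSC 10/11 = %.4f\n", 10.0 / 11.0 );   /* ~0.9091 */

	/* source_fps = time_base.den / time_base.num, e.g. NTSC 30000/1001 */
	printf( "NTSC fps   = %.3f\n", 30000.0 / 1001.0 );  /* 29.970 */
	return 0;
}
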
@@ -726,7 +775,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame )
 static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
 {
        // Get the properties from the frame
-       mlt_properties frame_properties = mlt_frame_properties( frame );
+       mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
        // Obtain the frame number of this frame
        mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
@@ -735,7 +784,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
        mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
 
        // Get the producer properties
-       mlt_properties properties = mlt_producer_properties( this );
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
        // Fetch the audio_context
        AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
@@ -747,7 +796,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
        int seekable = mlt_properties_get_int( properties, "seekable" );
 
        // Obtain the expected frame numer
-       mlt_position expected = mlt_properties_get_position( properties, "audio_expected" );
+       mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" );
 
        // Obtain the resample context if it exists (not always needed)
        ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
@@ -756,7 +805,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
        int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
 
        // Get amount of audio used
-       int audio_used =  mlt_properties_get_int( properties, "audio_used" );
+       int audio_used =  mlt_properties_get_int( properties, "_audio_used" );
 
        // Calculate the real time code
        double real_timecode = producer_time_of_frame( this, position );
@@ -765,7 +814,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
        AVStream *stream = context->streams[ index ];
 
        // Get codec context
-       AVCodecContext *codec_context = &stream->codec;
+       AVCodecContext *codec_context = stream->codec;
 
        // Packet
        AVPacket pkt;
@@ -817,7 +866,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
                else if ( position < expected || position - expected >= 12 )
                {
                        // Set to the real timecode
-                       if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ) != 0 )
+                       if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 )
                                paused = 1;
 
                        // Clear the usage in the audio buffer
@@ -832,7 +881,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
                int got_audio = 0;
                int16_t *temp = mlt_pool_alloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE );
 
-               memset( &pkt, 0, sizeof( pkt ) );
+               av_init_packet( &pkt );
 
                while( ret >= 0 && !got_audio )
                {
@@ -887,7 +936,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
                                }
 
                                // If we're behind, ignore this packet
-                               float current_pts = (float)pkt.pts / 1000000.0;
+                               float current_pts = av_q2d( stream->time_base ) * pkt.pts;
                                if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) )
                                        ignore = 1;
                        }
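
A one-line sketch of the new pts handling above: packet timestamps are scaled by the stream time base instead of being assumed to be microseconds (illustrative values only):

#include <stdio.h>
#include <stdint.h>

int main( void )
{
	double time_base = 1.0 / 90000.0;  /* av_q2d( stream->time_base ) */
	int64_t pts = 180000;              /* pkt.pts in stream ticks     */
	printf( "packet pts = %.2f s\n", time_base * ( double )pts );  /* 2.00 s */
	return 0;
}
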
@@ -912,7 +961,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
                }
                
                // Store the number of audio samples still available
-               mlt_properties_set_int( properties, "audio_used", audio_used );
+               mlt_properties_set_int( properties, "_audio_used", audio_used );
 
                // Release the temporary audio
                mlt_pool_release( temp );
@@ -920,13 +969,12 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
        else
        {
                // Get silence and don't touch the context
-               frame->get_audio = NULL;
                mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
        }
 
        // Regardless of speed (other than paused), we expect to get the next frame
        if ( !paused )
-               mlt_properties_set_position( properties, "audio_expected", position + 1 );
+               mlt_properties_set_position( properties, "_audio_expected", position + 1 );
 
        return 0;
 }
@@ -937,7 +985,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form
 static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
 {
        // Get the properties
-       mlt_properties properties = mlt_producer_properties( this );
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
 
        // Fetch the audio_context
        AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
@@ -949,13 +997,13 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
        if ( context != NULL && index != -1 )
        {
                // Get the frame properties
-               mlt_properties frame_properties = mlt_frame_properties( frame );
+               mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
                // Get the audio stream
                AVStream *stream = context->streams[ index ];
 
                // Get codec context
-               AVCodecContext *codec_context = &stream->codec;
+               AVCodecContext *codec_context = stream->codec;
 
                // Get the codec
                AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL );
@@ -983,8 +1031,10 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
                // No codec, no show...
                if ( codec != NULL )
                {
-                       frame->get_audio = producer_get_audio;
+                       mlt_frame_push_audio( frame, producer_get_audio );
                        mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
+                       mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
+                       mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
                }
        }
 }
@@ -1001,7 +1051,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index
        mlt_frame_set_position( *frame, mlt_producer_position( this ) );
 
        // Set the position of this producer
-       mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_frame( this ) );
+       mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );
 
        // Set up the video
        producer_set_up_video( this, *frame );
@@ -1010,7 +1060,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index
        producer_set_up_audio( this, *frame );
 
        // Set the aspect_ratio
-       mlt_properties_set_double( mlt_frame_properties( *frame ), "aspect_ratio", mlt_properties_get_double( mlt_producer_properties( this ), "aspect_ratio" ) );
+       mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );
 
        // Calculate the next timecode
        mlt_producer_prepare_next( this );