X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=885e84be7117d136521276e2faba2abe0a6ba6b6;hb=bf3264b9e340ba5c11cbf59835a8af3db94e0cc2;hp=943b8476d12f8722c04ad40d6c0c3159d34f1107;hpb=19d5d8f23b0930d8d10b0c9075830415cd5fec4d;p=melted diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c index 943b847..885e84b 100644 --- a/src/modules/avformat/producer_avformat.c +++ b/src/modules/avformat/producer_avformat.c @@ -3,19 +3,19 @@ * Copyright (C) 2003-2004 Ushodaya Enterprises Limited * Author: Charles Yates * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. * - * This program is distributed in the hope that it will be useful, + * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ // Local header files @@ -25,21 +25,24 @@ #include // ffmpeg Header files -#include +#include +#ifdef SWSCALE +#include +#endif // System header files #include #include #include +#include + +void avformat_lock( ); +void avformat_unlock( ); // Forward references. static int producer_open( mlt_producer this, char *file ); static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index ); -// A static flag used to determine if avformat has been initialised -static int avformat_initialised = 0; -static pthread_mutex_t avformat_mutex; - /** Constructor for libavformat. */ @@ -57,25 +60,14 @@ mlt_producer producer_avformat_init( char *file ) if ( mlt_producer_init( this, NULL ) == 0 ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Set the resource property (required for all producers) mlt_properties_set( properties, "resource", file ); - // TEST: audio sync tweaking - mlt_properties_set_double( properties, "discrepancy", 1 ); - // Register our get_frame implementation this->get_frame = producer_get_frame; - // Initialise avformat if necessary - if ( avformat_initialised == 0 ) - { - pthread_mutex_init( &avformat_mutex, NULL ); - avformat_initialised = 1; - av_register_all( ); - } - // Open the file if ( producer_open( this, file ) != 0 ) { @@ -92,7 +84,7 @@ mlt_producer producer_avformat_init( char *file ) /** Find the default streams. 
*/ -void find_default_streams( AVFormatContext *context, int *audio_index, int *video_index ) +static void find_default_streams( AVFormatContext *context, int *audio_index, int *video_index ) { int i; @@ -100,7 +92,10 @@ void find_default_streams( AVFormatContext *context, int *audio_index, int *vide for( i = 0; i < context->nb_streams; i++ ) { // Get the codec context - AVCodecContext *codec_context = &context->streams[ i ]->codec; + AVCodecContext *codec_context = context->streams[ i ]->codec; + + if ( avcodec_find_decoder( codec_context->codec_id ) == NULL ) + continue; // Determine the type and obtain the first index of each type switch( codec_context->codec_type ) @@ -122,48 +117,40 @@ void find_default_streams( AVFormatContext *context, int *audio_index, int *vide /** Producer file destructor. */ -void producer_file_close( void *context ) +static void producer_file_close( void *context ) { if ( context != NULL ) { // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); + avformat_lock( ); // Close the file av_close_input_file( context ); // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); + avformat_unlock( ); } } /** Producer file destructor. */ -void producer_codec_close( void *codec ) +static void producer_codec_close( void *codec ) { if ( codec != NULL ) { // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); + avformat_lock( ); // Close the file avcodec_close( codec ); // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); + avformat_unlock( ); } } /** Open the file. - - NOTE: We need to have a valid [PAL or NTSC] frame rate before we can determine the - number of frames in the file. However, this is at odds with the way things work - the - constructor needs to provide in/out points before the user of the producer is able - to specify properties :-/. However, the PAL/NTSC distinction applies to all producers - and while we currently accept whatever the producer provides, this will not work in - the more general case. Plans are afoot... and this one will work without modification - (in theory anyway ;-)). 
*/ static int producer_open( mlt_producer this, char *file ) @@ -175,16 +162,97 @@ static int producer_open( mlt_producer this, char *file ) AVFormatContext *context = NULL; // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // We will treat everything with the producer fps double fps = mlt_properties_get_double( properties, "fps" ); // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); + avformat_lock( ); + + // If "MRL", then create AVInputFormat + AVInputFormat *format = NULL; + AVFormatParameters *params = NULL; + char *standard = NULL; + char *mrl = strchr( file, ':' ); + + // AV option (0 = both, 1 = video, 2 = audio) + int av = 0; + + // Setting lowest log level + av_log_set_level( -1 ); + + // Only if there is not a protocol specification that avformat can handle + if ( mrl && !url_exist( file ) ) + { + // 'file' becomes format abbreviation + mrl[0] = 0; + + // Lookup the format + format = av_find_input_format( file ); + + // Eat the format designator + file = ++mrl; + + if ( format ) + { + // Allocate params + params = calloc( sizeof( AVFormatParameters ), 1 ); + + // These are required by video4linux (defaults) + params->width = 640; + params->height = 480; + params->time_base= (AVRational){1,25}; + params->device = file; + params->channels = 2; + params->sample_rate = 48000; + } + + // Parse out params + mrl = strchr( file, '?' ); + while ( mrl ) + { + mrl[0] = 0; + char *name = strdup( ++mrl ); + char *value = strchr( name, ':' ); + if ( value ) + { + value[0] = 0; + value++; + char *t = strchr( value, '&' ); + if ( t ) + t[0] = 0; + if ( !strcmp( name, "frame_rate" ) ) + params->time_base.den = atoi( value ); + else if ( !strcmp( name, "frame_rate_base" ) ) + params->time_base.num = atoi( value ); + else if ( !strcmp( name, "sample_rate" ) ) + params->sample_rate = atoi( value ); + else if ( !strcmp( name, "channels" ) ) + params->channels = atoi( value ); + else if ( !strcmp( name, "width" ) ) + params->width = atoi( value ); + else if ( !strcmp( name, "height" ) ) + params->height = atoi( value ); + else if ( !strcmp( name, "standard" ) ) + { + standard = strdup( value ); + params->standard = standard; + } + else if ( !strcmp( name, "av" ) ) + av = atoi( value ); + } + free( name ); + mrl = strchr( mrl, '&' ); + } + } // Now attempt to open the file - error = av_open_input_file( &context, file, NULL, 0, NULL ) < 0; + error = av_open_input_file( &context, file, format, 0, params ) < 0; + + // Cleanup AVFormatParameters + free( standard ); + free( params ); // If successful, then try to get additional info if ( error == 0 ) @@ -198,28 +266,65 @@ static int producer_open( mlt_producer this, char *file ) // We will default to the first audio and video streams found int audio_index = -1; int video_index = -1; + int av_bypass = 0; // Now set properties where we can (use default unknowns if required) if ( context->duration != AV_NOPTS_VALUE ) { // This isn't going to be accurate for all formats mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps ); - mlt_properties_set_position( properties, "out", frames - 1 ); - mlt_properties_set_position( properties, "length", frames ); + mlt_properties_set_position( properties, "out", frames - 2 ); + mlt_properties_set_position( properties, "length", frames - 1 ); } // Find default audio and video streams find_default_streams( context, &audio_index, &video_index ); + if ( 
context->start_time != AV_NOPTS_VALUE ) + mlt_properties_set_double( properties, "_start_time", context->start_time ); + + // Check if we're seekable (something funny about mpeg here :-/) + if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) ) + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 ); + else + av_bypass = 1; + // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); + mlt_properties_set_int( properties, "_last_position", -1 ); + // Fetch the width, height and aspect ratio + if ( video_index != -1 ) + { + AVCodecContext *codec_context = context->streams[ video_index ]->codec; + mlt_properties_set_int( properties, "width", codec_context->width ); + mlt_properties_set_int( properties, "height", codec_context->height ); + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } + + // Read Metadata + if (context->title != NULL) + mlt_properties_set(properties, "meta.attr.title.markup", context->title ); + if (context->author != NULL) + mlt_properties_set(properties, "meta.attr.author.markup", context->author ); + if (context->copyright != NULL) + mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright ); + if (context->comment != NULL) + mlt_properties_set(properties, "meta.attr.comment.markup", context->comment ); + if (context->album != NULL) + mlt_properties_set(properties, "meta.attr.album.markup", context->album ); + if (context->year != 0) + mlt_properties_set_int(properties, "meta.attr.year.markup", context->year ); + if (context->track != 0) + mlt_properties_set_int(properties, "meta.attr.track.markup", context->track ); + // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) - if ( audio_index != -1 && video_index != -1 ) + if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 ) { // We'll use the open one as our video_context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); + av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD ); // And open again for our audio context av_open_input_file( &context, file, NULL, 0, NULL ); @@ -228,10 +333,11 @@ static int producer_open( mlt_producer this, char *file ) // Audio context mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL ); } - else if ( video_index != -1 ) + else if ( av != 2 && video_index != -1 ) { // We only have a video context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); + av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD ); } else if ( audio_index != -1 ) { @@ -243,11 +349,13 @@ static int producer_open( mlt_producer this, char *file ) // Something has gone wrong error = -1; } + + mlt_properties_set_int( properties, "av_bypass", av_bypass ); } } // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); + avformat_unlock( ); return error; } @@ -258,7 +366,7 @@ static int producer_open( mlt_producer this, char *file ) static double producer_time_of_frame( mlt_producer this, mlt_position position ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Obtain the fps double fps = 
mlt_properties_get_double( properties, "fps" ); @@ -267,13 +375,78 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position ) return ( double )position / fps; } +static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) +{ +#ifdef SWSCALE + if ( format == mlt_image_yuv420p ) + { + struct SwsContext *context = sws_getContext( width, height, pix_fmt, + width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL); + AVPicture output; + output.data[0] = buffer; + output.data[1] = buffer + width * height; + output.data[2] = buffer + ( 3 * width * height ) / 2; + output.linesize[0] = width; + output.linesize[1] = width >> 1; + output.linesize[2] = width >> 1; + sws_scale( context, frame->data, frame->linesize, 0, height, + output.data, output.linesize); + sws_freeContext( context ); + } + else if ( format == mlt_image_rgb24 ) + { + struct SwsContext *context = sws_getContext( width, height, pix_fmt, + width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL); + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + sws_scale( context, frame->data, frame->linesize, 0, height, + output.data, output.linesize); + sws_freeContext( context ); + } + else + { + struct SwsContext *context = sws_getContext( width, height, pix_fmt, + width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL); + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height ); + sws_scale( context, frame->data, frame->linesize, 0, height, + output.data, output.linesize); + sws_freeContext( context ); + } +#else + if ( format == mlt_image_yuv420p ) + { + AVPicture pict; + pict.data[0] = buffer; + pict.data[1] = buffer + width * height; + pict.data[2] = buffer + ( 3 * width * height ) / 2; + pict.linesize[0] = width; + pict.linesize[1] = width >> 1; + pict.linesize[2] = width >> 1; + img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); + } + else if ( format == mlt_image_rgb24 ) + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height ); + } + else + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height ); + img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height ); + } +#endif +} + /** Get an image from a frame. 
*/ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable ) { // Get the properties from the frame - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Obtain the frame number of this frame mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" ); @@ -282,7 +455,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL ); // Get the producer properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the video_context AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL ); @@ -291,7 +464,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int index = mlt_properties_get_int( properties, "video_index" ); // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "video_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_video_expected" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -300,13 +473,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; // Get the conversion frame - AVPicture *output = mlt_properties_get_data( properties, "video_output_frame", NULL ); + AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL ); // Special case pause handling flag int paused = 0; @@ -315,78 +488,115 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int ignore = 0; // Current time calcs - double current_time = 0; + int current_position = mlt_properties_get_double( properties, "_current_position" ); + + // We may want to use the source fps if available + double source_fps = mlt_properties_get_double( properties, "source_fps" ); + double fps = mlt_properties_get_double( properties, "fps" ); + + // This is the physical frame position in the source + int req_position = ( int )( position / fps * source_fps ); + + // Get the seekable status + int seekable = mlt_properties_get_int( properties, "seekable" ); + + // Generate the size in bytes + int size = 0; + + // Hopefully provide better support for streams... 
+ int av_bypass = mlt_properties_get_int( properties, "av_bypass" ); + + // Determines if we have to decode all frames in a sequence + int must_decode = 1; // Set the result arguments that we know here (only *buffer is now required) - *format = mlt_image_yuv422; *width = codec_context->width; *height = codec_context->height; + switch ( *format ) + { + case mlt_image_yuv420p: + size = *width * 3 * ( *height + 1 ) / 2; + break; + case mlt_image_rgb24: + size = *width * ( *height + 1 ) * 3; + break; + default: + *format = mlt_image_yuv422; + size = *width * ( *height + 1 ) * 2; + break; + } + // Set this on the frame properties mlt_properties_set_int( frame_properties, "width", *width ); mlt_properties_set_int( frame_properties, "height", *height ); - // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); + // Construct the output image + *buffer = mlt_pool_alloc( size ); - // Construct an AVFrame for YUV422 conversion - if ( output == NULL ) - { - int size = avpicture_get_size( PIX_FMT_YUV422, *width, *height ); - uint8_t *buf = malloc( size ); - output = malloc( sizeof( AVPicture ) ); - avpicture_fill( output, buf, PIX_FMT_YUV422, *width, *height ); - mlt_properties_set_data( properties, "video_output_frame", output, 0, av_free, NULL ); - mlt_properties_set_data( properties, "video_output_buffer", buf, 0, free, NULL ); - } + // Temporary hack to improve intra frame only + must_decode = strcmp( codec_context->codec->name, "mjpeg" ) && + strcmp( codec_context->codec->name, "rawvideo" ) && + strcmp( codec_context->codec->name, "dvvideo" ); // Seek if necessary if ( position != expected ) { - if ( position + 1 == expected ) + if ( av_frame != NULL && position + 1 == expected ) { // We're paused - use last image paused = 1; } - else if ( position > expected && ( position - expected ) < 250 ) + else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames - ignore = position - expected; + ignore = ( int )( ( position - expected ) / fps * source_fps ); } - else + else if ( seekable && ( position < expected || position - expected >= 12 ) ) { - // Set to the real timecode - av_seek_frame( context, -1, real_timecode * 1000000.0 ); + // Calculate the timestamp for the requested frame + int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE ); + if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE ) + timestamp += context->start_time; + if ( must_decode ) + timestamp -= AV_TIME_BASE; + if ( timestamp < 0 ) + timestamp = 0; + + // Set to the timestamp + av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD ); // Remove the cached info relating to the previous position - mlt_properties_set_double( properties, "current_time", 0 ); - mlt_properties_set_data( properties, "current_image", NULL, 0, NULL, NULL ); + mlt_properties_set_int( properties, "_current_position", -1 ); + mlt_properties_set_int( properties, "_last_position", -1 ); + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + av_frame = NULL; } } - - // Duplicate the last image if necessary - if ( mlt_properties_get_data( properties, "current_image", NULL ) != NULL && - ( paused || mlt_properties_get_double( properties, "current_time" ) > real_timecode ) ) - { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); + // Duplicate the last image if necessary (see comment on rawvideo below) + if ( 
av_frame != NULL && ( paused || mlt_properties_get_int( properties, "_current_position" ) >= req_position ) && av_bypass == 0 ) + { // Duplicate it - *buffer = malloc( size ); - memcpy( *buffer, image, size ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); // Set this on the frame properties - mlt_properties_set_data( frame_properties, "image", *buffer, size, free, NULL ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); } else { int ret = 0; int got_picture = 0; - AVFrame frame; + int int_position = 0; + + av_init_packet( &pkt ); - memset( &pkt, 0, sizeof( pkt ) ); - memset( &frame, 0, sizeof( frame ) ); + // Construct an AVFrame for YUV422 conversion + if ( av_frame == NULL ) + { + av_frame = avcodec_alloc_frame( ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL ); + } while( ret >= 0 && !got_picture ) { @@ -396,61 +606,64 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // We only deal with video from the selected video_index if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 ) { + // Determine time code of the packet + int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps ); + if ( context->start_time != AV_NOPTS_VALUE ) + int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE ); + + int last_position = mlt_properties_get_int( properties, "_last_position" ); + if ( int_position == last_position ) + int_position = last_position + 1; + mlt_properties_set_int( properties, "_last_position", int_position ); + // Decode the image - // Wouldn't it be great if I could use this... - //if ( (float)pkt.pts / 1000000.0 >= real_timecode ) - ret = avcodec_decode_video( codec_context, &frame, &got_picture, pkt.data, pkt.size ); + if ( must_decode || int_position >= req_position ) + ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); - // Handle ignore - if ( (float)pkt.pts / 1000000.0 < real_timecode ) - { - ignore = 0; - got_picture = 0; - } - else if ( (float)pkt.pts / 1000000.0 >= real_timecode ) + if ( got_picture ) { - ignore = 0; - } - else if ( got_picture && ignore -- ) - { - got_picture = 0; + // Handle ignore + if ( int_position < req_position ) + { + ignore = 0; + got_picture = 0; + } + else if ( int_position >= req_position ) + { + ignore = 0; + } + else if ( ignore -- ) + { + got_picture = 0; + } } + } - current_time = ( double )pkt.pts / 1000000.0; + // Now handle the picture if we have one + if ( got_picture ) + { + mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame ); + mlt_properties_set_int( frame_properties, "top_field_first", av_frame->top_field_first ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); + mlt_properties_set_double( properties, "_current_position", int_position ); } // We're finished with this packet regardless av_free_packet( &pkt ); } + } - // Now handle the picture if we have one - if ( got_picture ) - { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); + // Very untidy - for rawvideo, the packet contains the frame, hence the free packet + // above will break the pause behaviour - so we wipe the frame now + if ( !strcmp( codec_context->codec->name, 
"rawvideo" ) ) + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); - if ( image == NULL || size != *width * *height * 2 ) - { - size = *width * *height * 2; - image = malloc( size ); - mlt_properties_set_data( properties, "current_image", image, size, free, NULL ); - } - - *buffer = malloc( size ); - img_convert( output, PIX_FMT_YUV422, (AVPicture *)&frame, codec_context->pix_fmt, *width, *height ); - memcpy( image, output->data[ 0 ], size ); - memcpy( *buffer, output->data[ 0 ], size ); - mlt_properties_set_data( frame_properties, "image", *buffer, size, free, NULL ); - mlt_properties_set_double( properties, "current_time", current_time ); - } - } + // Set the field order property for this frame + mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) ); // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "video_expected", position + 1 ); - - // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); + mlt_properties_set_position( properties, "_video_expected", position + 1 ); return 0; } @@ -461,7 +674,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form static void producer_set_up_video( mlt_producer this, mlt_frame frame ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the video_context AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL ); @@ -469,19 +682,16 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // Get the video_index int index = mlt_properties_get_int( properties, "video_index" ); - // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); + // Get the frame properties + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); if ( context != NULL && index != -1 ) { - // Get the frame properties - mlt_properties frame_properties = mlt_frame_properties( frame ); - // Get the video stream AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL ); @@ -495,24 +705,8 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // If we don't have a codec and we can't initialise it, we can't do much more... if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 ) { - double aspect_ratio = 0; - - // Set aspect ratio - if ( codec_context->sample_aspect_ratio.num == 0) - aspect_ratio = 0; - else - aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ) * codec_context->width / codec_context->height; - - if (aspect_ratio <= 0.0) - aspect_ratio = ( double )codec_context->width / ( double )codec_context->height; - - mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); - // Now store the codec with its destructor mlt_properties_set_data( properties, "video_codec", codec_context, 0, producer_codec_close, NULL ); - - // Set to the real timecode - av_seek_frame( context, -1, 0 ); } else { @@ -524,13 +718,55 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // No codec, no show... 
if ( codec != NULL ) { + double source_fps = 0; + int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" ); + double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" ); + double aspect_ratio; + + // XXX: We won't know the real aspect ratio until an image is decoded + // but we do need it now (to satisfy filter_resize) - take a guess based + // on pal/ntsc + if ( force_aspect_ratio > 0.0 ) + { + aspect_ratio = force_aspect_ratio; + } + else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 ) + { + aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); + } + else + { + int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; + aspect_ratio = is_pal ? 59.0/54.0 : 10.0/11.0; + } + + // Determine the fps + source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num ); + + // We'll use fps if it's available + if ( source_fps > 0 && source_fps < 30 ) + mlt_properties_set_double( properties, "source_fps", source_fps ); + else + mlt_properties_set_double( properties, "source_fps", mlt_properties_get_double( properties, "fps" ) ); + mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); + + // Set the width and height + mlt_properties_set_int( frame_properties, "width", codec_context->width ); + mlt_properties_set_int( frame_properties, "height", codec_context->height ); + mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio ); + mlt_frame_push_get_image( frame, producer_get_image ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); } + else + { + mlt_properties_set_int( frame_properties, "test_image", 1 ); + } + } + else + { + mlt_properties_set_int( frame_properties, "test_image", 1 ); } - - // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); } /** Get the audio from a frame. 
@@ -539,7 +775,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples ) { // Get the properties from the frame - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Obtain the frame number of this frame mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" ); @@ -548,7 +784,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL ); // Get the producer properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the audio_context AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL ); @@ -556,8 +792,11 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Get the audio_index int index = mlt_properties_get_int( properties, "audio_index" ); + // Get the seekable status + int seekable = mlt_properties_get_int( properties, "seekable" ); + // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "audio_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" ); // Obtain the resample context if it exists (not always needed) ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL ); @@ -566,7 +805,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL ); // Get amount of audio used - int audio_used = mlt_properties_get_int( properties, "audio_used" ); + int audio_used = mlt_properties_get_int( properties, "_audio_used" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -575,7 +814,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; @@ -586,11 +825,8 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Flag for paused (silence) int paused = 0; - // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); - // Check for resample and create if necessary - if ( resample == NULL ) + if ( resample == NULL && codec_context->channels <= 2 ) { // Create the resampler resample = audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate ); @@ -598,15 +834,20 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // And store it on properties mlt_properties_set_data( properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL ); } + else if ( resample == NULL ) + { + *channels = codec_context->channels; + *frequency = codec_context->sample_rate; + } // Check for audio buffer and create if necessary if ( audio_buffer == NULL ) { // Allocate the audio buffer - audio_buffer = malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) ); + audio_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) ); // And store it on properties 
for reuse - mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, free, NULL ); + mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL ); } // Seek if necessary @@ -617,15 +858,16 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // We're paused - silence required paused = 1; } - else if ( position > expected && ( position - expected ) < 250 ) + else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames ignore = position - expected; } - else + else if ( position < expected || position - expected >= 12 ) { // Set to the real timecode - av_seek_frame( context, -1, real_timecode * 1000000.0 ); + if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 ) + paused = 1; // Clear the usage in the audio buffer audio_used = 0; @@ -637,9 +879,9 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form { int ret = 0; int got_audio = 0; - int16_t temp[ AVCODEC_MAX_AUDIO_FRAME_SIZE / 2 ]; + int16_t *temp = mlt_pool_alloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE ); - memset( &pkt, 0, sizeof( pkt ) ); + av_init_packet( &pkt ); while( ret >= 0 && !got_audio ) { @@ -657,26 +899,32 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form uint8_t *ptr = pkt.data; int data_size; - if ( ptr == NULL || len == 0 ) - break; - - // We only deal with video from the selected video_index - while ( ret >= 0 && pkt.stream_index == index && len > 0 ) + // We only deal with audio from the selected audio_index + while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 ) { // Decode the audio ret = avcodec_decode_audio( codec_context, temp, &data_size, ptr, len ); if ( ret < 0 ) + { + ret = 0; break; + } len -= ret; ptr += ret; if ( data_size > 0 ) { - int size_out = audio_resample( resample, &audio_buffer[ audio_used * *channels ], temp, data_size / ( codec_context->channels * sizeof( int16_t ) ) ); - - audio_used += size_out; + if ( resample != NULL ) + { + audio_used += audio_resample( resample, &audio_buffer[ audio_used * *channels ], temp, data_size / ( codec_context->channels * sizeof( int16_t ) ) ); + } + else + { + memcpy( &audio_buffer[ audio_used * *channels ], temp, data_size ); + audio_used += data_size / ( codec_context->channels * sizeof( int16_t ) ); + } // Handle ignore while ( ignore && audio_used > *samples ) @@ -688,9 +936,8 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form } // If we're behind, ignore this packet - float current_pts = (float)pkt.pts / 1000000.0; - double discrepancy = mlt_properties_get_double( properties, "discrepancy" ); - if ( discrepancy * current_pts < real_timecode ) + float current_pts = av_q2d( stream->time_base ) * pkt.pts; + if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) ) ignore = 1; } @@ -698,37 +945,36 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form av_free_packet( &pkt ); } + *buffer = mlt_pool_alloc( *samples * *channels * sizeof( int16_t ) ); + mlt_properties_set_data( frame_properties, "audio", *buffer, 0, ( mlt_destructor )mlt_pool_release, NULL ); + // Now handle the audio if we have enough if ( audio_used >= *samples ) { - *buffer = malloc( *samples * *channels * sizeof( int16_t ) ); memcpy( 
*buffer, audio_buffer, *samples * *channels * sizeof( int16_t ) ); audio_used -= *samples; memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) ); - mlt_properties_set_data( frame_properties, "audio", *buffer, 0, free, NULL ); } else { - frame->get_audio = NULL; - mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples ); - audio_used = 0; + memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) ); } // Store the number of audio samples still available - mlt_properties_set_int( properties, "audio_used", audio_used ); + mlt_properties_set_int( properties, "_audio_used", audio_used ); + + // Release the temporary audio + mlt_pool_release( temp ); } else { // Get silence and don't touch the context - frame->get_audio = NULL; mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples ); } - // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "audio_expected", position + 1 ); - - // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); + // Regardless of speed (other than paused), we expect to get the next frame + if ( !paused ) + mlt_properties_set_position( properties, "_audio_expected", position + 1 ); return 0; } @@ -739,7 +985,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the audio_context AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL ); @@ -747,20 +993,17 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) // Get the audio_index int index = mlt_properties_get_int( properties, "audio_index" ); - // Lock the mutex now - pthread_mutex_lock( &avformat_mutex ); - // Deal with audio context if ( context != NULL && index != -1 ) { // Get the frame properties - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Get the audio stream AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL ); @@ -776,6 +1019,7 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) { // Now store the codec with its destructor mlt_properties_set_data( properties, "audio_codec", codec_context, 0, producer_codec_close, NULL ); + } else { @@ -787,13 +1031,12 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) // No codec, no show... if ( codec != NULL ) { - frame->get_audio = producer_get_audio; + mlt_frame_push_audio( frame, producer_get_audio ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); + mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate ); + mlt_properties_set_int( frame_properties, "channels", codec_context->channels ); } } - - // Unlock the mutex now - pthread_mutex_unlock( &avformat_mutex ); } /** Our get frame implementation. 
@@ -808,7 +1051,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index mlt_frame_set_position( *frame, mlt_producer_position( this ) ); // Set the position of this producer - mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_get_in( this ) + mlt_producer_position( this ) ); + mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) ); // Set up the video producer_set_up_video( this, *frame ); @@ -817,7 +1060,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index producer_set_up_audio( this, *frame ); // Set the aspect_ratio - mlt_properties_set_double( mlt_frame_properties( *frame ), "aspect_ratio", mlt_properties_get_double( mlt_producer_properties( this ), "aspect_ratio" ) ); + mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) ); // Calculate the next timecode mlt_producer_prepare_next( this );
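
Note on the seek handling introduced in producer_get_image above: the producer converts an MLT frame position into an avformat timestamp before calling av_seek_frame(). The sketch below is illustrative only and is not part of the patch; it simply consolidates the arithmetic from that hunk into one helper. The function name is hypothetical, and it assumes the same ffmpeg headers (AV_TIME_BASE, AV_NOPTS_VALUE, AVFormatContext) already included by this file.

/* Illustrative consolidation of the seek arithmetic used in producer_get_image.
   req_position is the requested position expressed in source frames; it is
   scaled to AV_TIME_BASE units, offset by the container start time, and backed
   up by roughly one second when the codec requires preceding frames to be
   decoded (must_decode), then clamped at zero. */
static int64_t position_to_timestamp( AVFormatContext *context, int req_position,
	double source_fps, int must_decode )
{
	int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE );

	// Account for streams that do not start at pts 0
	if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
		timestamp += context->start_time;

	// Seek early enough that inter-coded frames before req_position can be rebuilt
	if ( must_decode )
		timestamp -= AV_TIME_BASE;

	return timestamp < 0 ? 0 : timestamp;
}

/* Typical use, matching the call in the patch:
   av_seek_frame( context, -1,
       position_to_timestamp( context, req_position, source_fps, must_decode ),
       AVSEEK_FLAG_BACKWARD ); */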