X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=a800708a1c220135aebfc30109b7395e59577cb3;hb=b75d860fffba41cd36fa3b3031cb0e6fedd750af;hp=9da63d480c0aedc1ed28178d0779fab7d1b805f6;hpb=757e8d55530954c3002b71f78d5027a222e810f7;p=melted

diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index 9da63d4..a800708 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
@@ -57,14 +57,11 @@ mlt_producer producer_avformat_init( char *file )
 	if ( mlt_producer_init( this, NULL ) == 0 )
 	{
 		// Get the properties
-		mlt_properties properties = mlt_producer_properties( this );
+		mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

 		// Set the resource property (required for all producers)
 		mlt_properties_set( properties, "resource", file );

-		// TEST: audio sync tweaking
-		mlt_properties_set_double( properties, "discrepancy", 1 );
-
 		// Register our get_frame implementation
 		this->get_frame = producer_get_frame;

@@ -92,7 +89,10 @@ static void find_default_streams( AVFormatContext *context, int *audio_index, in
 	for( i = 0; i < context->nb_streams; i++ )
 	{
 		// Get the codec context
-		AVCodecContext *codec_context = &context->streams[ i ]->codec;
+		AVCodecContext *codec_context = context->streams[ i ]->codec;
+
+		if ( avcodec_find_decoder( codec_context->codec_id ) == NULL )
+			continue;

 		// Determine the type and obtain the first index of each type
 		switch( codec_context->codec_type )
@@ -159,7 +159,7 @@ static int producer_open( mlt_producer this, char *file )
 	AVFormatContext *context = NULL;

 	// Get the properties
-	mlt_properties properties = mlt_producer_properties( this );
+	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

 	// We will treat everything with the producer fps
 	double fps = mlt_properties_get_double( properties, "fps" );
@@ -172,7 +172,13 @@ static int producer_open( mlt_producer this, char *file )
 	AVFormatParameters *params = NULL;
 	char *standard = NULL;
 	char *mrl = strchr( file, ':' );
+
+	// AV option (0 = both, 1 = video, 2 = audio)
+	int av = 0;

+	// Setting lowest log level
+	av_log_set_level( -1 );
+
 	// Only if there is not a protocol specification that avformat can handle
 	if ( mrl && !url_exist( file ) )
 	{
@@ -193,8 +199,7 @@ static int producer_open( mlt_producer this, char *file )
 			// These are required by video4linux (defaults)
 			params->width = 640;
 			params->height = 480;
-			params->frame_rate = 25;
-			params->frame_rate_base = 1;
+			params->time_base= (AVRational){1,25};
 			params->device = file;
 			params->channels = 2;
 			params->sample_rate = 48000;
@@ -215,9 +220,9 @@ static int producer_open( mlt_producer this, char *file )
 				if ( t )
 					t[0] = 0;
 				if ( !strcmp( name, "frame_rate" ) )
-					params->frame_rate = atoi( value );
+					params->time_base.den = atoi( value );
 				else if ( !strcmp( name, "frame_rate_base" ) )
-					params->frame_rate_base = atoi( value );
+					params->time_base.num = atoi( value );
 				else if ( !strcmp( name, "sample_rate" ) )
 					params->sample_rate = atoi( value );
 				else if ( !strcmp( name, "channels" ) )
@@ -231,6 +236,8 @@ static int producer_open( mlt_producer this, char *file )
 					standard = strdup( value );
 					params->standard = standard;
 				}
+				else if ( !strcmp( name, "av" ) )
+					av = atoi( value );
 			}
 			free( name );
 			mrl = strchr( mrl, '&' );
@@ -238,8 +245,7 @@ static int producer_open( mlt_producer this, char *file )
 	}

 	// Now attempt to open the file
-	error = av_open_input_file( &context, file, format, 0, params );
-	error = error < 0;
+ error = av_open_input_file( &context, file, format, 0, params ) < 0; // Cleanup AVFormatParameters free( standard ); @@ -257,6 +263,7 @@ static int producer_open( mlt_producer this, char *file ) // We will default to the first audio and video streams found int audio_index = -1; int video_index = -1; + int av_bypass = 0; // Now set properties where we can (use default unknowns if required) if ( context->duration != AV_NOPTS_VALUE ) @@ -270,15 +277,35 @@ static int producer_open( mlt_producer this, char *file ) // Find default audio and video streams find_default_streams( context, &audio_index, &video_index ); + if ( context->start_time != AV_NOPTS_VALUE ) + mlt_properties_set_double( properties, "_start_time", context->start_time ); + + // Check if we're seekable (something funny about mpeg here :-/) + if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) ) + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 ); + else + av_bypass = 1; + // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); + mlt_properties_set_int( properties, "_last_position", -1 ); + + // Fetch the width, height and aspect ratio + if ( video_index != -1 ) + { + AVCodecContext *codec_context = context->streams[ video_index ]->codec; + mlt_properties_set_int( properties, "width", codec_context->width ); + mlt_properties_set_int( properties, "height", codec_context->height ); + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) - if ( audio_index != -1 && video_index != -1 ) + if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 ) { // We'll use the open one as our video_context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); + av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD ); // And open again for our audio context av_open_input_file( &context, file, NULL, 0, NULL ); @@ -287,10 +314,11 @@ static int producer_open( mlt_producer this, char *file ) // Audio context mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL ); } - else if ( video_index != -1 ) + else if ( av != 2 && video_index != -1 ) { // We only have a video context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); + av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD ); } else if ( audio_index != -1 ) { @@ -302,6 +330,8 @@ static int producer_open( mlt_producer this, char *file ) // Something has gone wrong error = -1; } + + mlt_properties_set_int( properties, "av_bypass", av_bypass ); } } @@ -317,7 +347,7 @@ static int producer_open( mlt_producer this, char *file ) static double producer_time_of_frame( mlt_producer this, mlt_position position ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Obtain the fps double fps = mlt_properties_get_double( properties, "fps" ); @@ -326,13 +356,40 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position ) return ( double )position / fps; } +static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format 
format, int width, int height ) +{ + if ( format == mlt_image_yuv420p ) + { + AVPicture pict; + pict.data[0] = buffer; + pict.data[1] = buffer + width * height; + pict.data[2] = buffer + ( 3 * width * height ) / 2; + pict.linesize[0] = width; + pict.linesize[1] = width >> 1; + pict.linesize[2] = width >> 1; + img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); + } + else if ( format == mlt_image_rgb24 ) + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height ); + } + else + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height ); + img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height ); + } +} + /** Get an image from a frame. */ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable ) { // Get the properties from the frame - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Obtain the frame number of this frame mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" ); @@ -341,7 +398,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL ); // Get the producer properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the video_context AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL ); @@ -350,7 +407,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int index = mlt_properties_get_int( properties, "video_index" ); // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "video_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_video_expected" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -359,13 +416,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; // Get the conversion frame - AVPicture *output = mlt_properties_get_data( properties, "video_output_frame", NULL ); + AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL ); // Special case pause handling flag int paused = 0; @@ -374,69 +431,97 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int ignore = 0; // Current time calcs - double current_time = mlt_properties_get_double( properties, "current_time" ); + int current_position = mlt_properties_get_double( properties, "_current_position" ); // We may want to use the source fps if available double source_fps = mlt_properties_get_double( properties, "source_fps" ); + double fps = mlt_properties_get_double( properties, "fps" ); + + // This is the physical frame position in the source + int req_position = ( int )( position / fps * source_fps ); + + // Get the seekable status + int seekable = mlt_properties_get_int( properties, "seekable" ); + + // Generate the size in bytes + int size = 0; + + // Hopefully provide 
better support for streams... + int av_bypass = mlt_properties_get_int( properties, "av_bypass" ); + + // Determines if we have to decode all frames in a sequence + int must_decode = 1; // Set the result arguments that we know here (only *buffer is now required) - *format = mlt_image_yuv422; *width = codec_context->width; *height = codec_context->height; + switch ( *format ) + { + case mlt_image_yuv420p: + size = *width * 3 * ( *height + 1 ) / 2; + break; + case mlt_image_rgb24: + size = *width * ( *height + 1 ) * 3; + break; + default: + *format = mlt_image_yuv422; + size = *width * ( *height + 1 ) * 2; + break; + } + // Set this on the frame properties mlt_properties_set_int( frame_properties, "width", *width ); mlt_properties_set_int( frame_properties, "height", *height ); - // Lock the mutex now - avformat_lock( ); + // Construct the output image + *buffer = mlt_pool_alloc( size ); - // Construct an AVFrame for YUV422 conversion - if ( output == NULL ) - { - int size = avpicture_get_size( PIX_FMT_YUV422, *width, *height + 1 ); - uint8_t *buf = mlt_pool_alloc( size ); - output = mlt_pool_alloc( sizeof( AVPicture ) ); - avpicture_fill( output, buf, PIX_FMT_YUV422, *width, *height ); - mlt_properties_set_data( properties, "video_output_frame", output, 0, ( mlt_destructor )mlt_pool_release, NULL ); - mlt_properties_set_data( properties, "video_output_buffer", buf, 0, ( mlt_destructor )mlt_pool_release, NULL ); - } + // Temporary hack to improve intra frame only + must_decode = strcmp( codec_context->codec->name, "mjpeg" ) && + strcmp( codec_context->codec->name, "rawvideo" ) && + strcmp( codec_context->codec->name, "dvvideo" ); // Seek if necessary if ( position != expected ) { - if ( position + 1 == expected ) + if ( av_frame != NULL && position + 1 == expected ) { // We're paused - use last image paused = 1; } - else if ( position > expected && ( position - expected ) < 250 ) + else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames - ignore = position - expected; + ignore = ( int )( ( position - expected ) / fps * source_fps ); } - else + else if ( seekable && ( position < expected || position - expected >= 12 ) ) { - // Set to the real timecode - av_seek_frame( context, -1, real_timecode * 1000000.0 ); + // Calculate the timestamp for the requested frame + int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE ); + if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE ) + timestamp += context->start_time; + if ( must_decode ) + timestamp -= AV_TIME_BASE; + if ( timestamp < 0 ) + timestamp = 0; + + // Set to the timestamp + av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD ); // Remove the cached info relating to the previous position - mlt_properties_set_double( properties, "current_time", real_timecode ); - mlt_properties_set_data( properties, "current_image", NULL, 0, NULL, NULL ); + mlt_properties_set_int( properties, "_current_position", -1 ); + mlt_properties_set_int( properties, "_last_position", -1 ); + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + av_frame = NULL; } } - - // Duplicate the last image if necessary - if ( mlt_properties_get_data( properties, "current_image", NULL ) != NULL && - ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) ) - { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, 
"current_image", &size ); + // Duplicate the last image if necessary (see comment on rawvideo below) + if ( av_frame != NULL && ( paused || mlt_properties_get_int( properties, "_current_position" ) >= req_position ) && av_bypass == 0 ) + { // Duplicate it - *buffer = mlt_pool_alloc( size ); - memcpy( *buffer, image, size ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); // Set this on the frame properties mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); @@ -445,10 +530,16 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { int ret = 0; int got_picture = 0; - AVFrame frame; + int int_position = 0; + + av_init_packet( &pkt ); - memset( &pkt, 0, sizeof( pkt ) ); - memset( &frame, 0, sizeof( frame ) ); + // Construct an AVFrame for YUV422 conversion + if ( av_frame == NULL ) + { + av_frame = avcodec_alloc_frame( ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL ); + } while( ret >= 0 && !got_picture ) { @@ -458,130 +549,64 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // We only deal with video from the selected video_index if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 ) { + // Determine time code of the packet + int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps ); + if ( context->start_time != AV_NOPTS_VALUE ) + int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE ); + + int last_position = mlt_properties_get_int( properties, "_last_position" ); + if ( int_position == last_position ) + int_position = last_position + 1; + mlt_properties_set_int( properties, "_last_position", int_position ); + // Decode the image - ret = avcodec_decode_video( codec_context, &frame, &got_picture, pkt.data, pkt.size ); + if ( must_decode || int_position >= req_position ) + ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); if ( got_picture ) { - if ( pkt.pts != AV_NOPTS_VALUE && pkt.pts != 0 ) - current_time = ( double )pkt.pts / 1000000.0; - else - current_time = real_timecode; - // Handle ignore - if ( ( int )( current_time * 100 ) < ( int )( real_timecode * 100 ) - 7 ) + if ( int_position < req_position ) { ignore = 0; got_picture = 0; } - else if ( current_time >= real_timecode ) + else if ( int_position >= req_position ) { - //current_time = real_timecode; ignore = 0; } else if ( ignore -- ) { got_picture = 0; } - mlt_properties_set_int( properties, "top_field_first", frame.top_field_first ); } } - // We're finished with this packet regardless - av_free_packet( &pkt ); - } - - // Now handle the picture if we have one - if ( got_picture ) - { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - - if ( image == NULL || size != *width * *height * 2 ) + // Now handle the picture if we have one + if ( got_picture ) { - size = *width * ( *height + 1 ) * 2; - image = mlt_pool_alloc( size ); - mlt_properties_set_data( properties, "current_image", image, size, ( mlt_destructor )mlt_pool_release, NULL ); + mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame ); + mlt_properties_set_int( frame_properties, "top_field_first", av_frame->top_field_first ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, 
(mlt_destructor)mlt_pool_release, NULL ); + mlt_properties_set_double( properties, "_current_position", int_position ); } - *buffer = mlt_pool_alloc( size ); - - // EXPERIMENTAL IMAGE NORMALISATIONS - if ( codec_context->pix_fmt == PIX_FMT_YUV420P ) - { - register int i, j; - register int half = *width >> 1; - register uint8_t *Y = ( ( AVPicture * )&frame )->data[ 0 ]; - register uint8_t *U = ( ( AVPicture * )&frame )->data[ 1 ]; - register uint8_t *V = ( ( AVPicture * )&frame )->data[ 2 ]; - register uint8_t *d = *buffer; - register uint8_t *y, *u, *v; - - i = *height >> 1; - while ( i -- ) - { - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - U += ( ( AVPicture * )&frame )->linesize[ 1 ]; - V += ( ( AVPicture * )&frame )->linesize[ 2 ]; - } - } - else - { - img_convert( output, PIX_FMT_YUV422, (AVPicture *)&frame, codec_context->pix_fmt, *width, *height ); - memcpy( *buffer, output->data[ 0 ], size ); - } - - memcpy( image, *buffer, size ); - mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); - - if ( current_time == 0 && source_fps != 0 ) - { - double fps = mlt_properties_get_double( properties, "fps" ); - current_time = ceil( source_fps * ( double )position / fps ) * ( 1 / source_fps ); - mlt_properties_set_double( properties, "current_time", current_time ); - } - else - { - mlt_properties_set_double( properties, "current_time", current_time ); - } + // We're finished with this packet regardless + av_free_packet( &pkt ); } } - + + // Very untidy - for rawvideo, the packet contains the frame, hence the free packet + // above will break the pause behaviour - so we wipe the frame now + if ( !strcmp( codec_context->codec->name, "rawvideo" ) ) + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + // Set the field order property for this frame - mlt_properties_set_int( frame_properties, "top_field_first", - mlt_properties_get_int( properties, "top_field_first" ) ); + mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) ); // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "video_expected", position + 1 ); - - // Unlock the mutex now - avformat_unlock( ); + mlt_properties_set_position( properties, "_video_expected", position + 1 ); return 0; } @@ -592,7 +617,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form static void producer_set_up_video( mlt_producer this, mlt_frame frame ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the video_context AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL ); @@ -601,10 +626,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) int index = mlt_properties_get_int( properties, "video_index" ); // Get the frame properties - mlt_properties frame_properties = mlt_frame_properties( frame ); - - // Lock the mutex now - avformat_lock( ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); if ( context != NULL && index != 
-1 ) { @@ -612,7 +634,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL ); @@ -639,26 +661,42 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // No codec, no show... if ( codec != NULL ) { - double aspect_ratio = 1; double source_fps = 0; - - // Set aspect ratio - if ( codec_context->sample_aspect_ratio.num > 0 ) + int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" ); + double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" ); + double aspect_ratio; + + // XXX: We won't know the real aspect ratio until an image is decoded + // but we do need it now (to satisfy filter_resize) - take a guess based + // on pal/ntsc + if ( force_aspect_ratio > 0.0 ) + { + aspect_ratio = force_aspect_ratio; + } + else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 ) + { aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); - - mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); - //fprintf( stderr, "AVFORMAT: sample aspect %f %dx%d\n", av_q2d( codec_context->sample_aspect_ratio ), codec_context->width, codec_context->height ); + } + else + { + int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; + aspect_ratio = is_pal ? 59.0/54.0 : 10.0/11.0; + } // Determine the fps - source_fps = ( double )codec_context->frame_rate / ( codec_context->frame_rate_base == 0 ? 1 : codec_context->frame_rate_base ); + source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num ); // We'll use fps if it's available if ( source_fps > 0 && source_fps < 30 ) mlt_properties_set_double( properties, "source_fps", source_fps ); + else + mlt_properties_set_double( properties, "source_fps", mlt_properties_get_double( properties, "fps" ) ); + mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); // Set the width and height mlt_properties_set_int( frame_properties, "width", codec_context->width ); mlt_properties_set_int( frame_properties, "height", codec_context->height ); + mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio ); mlt_frame_push_get_image( frame, producer_get_image ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); @@ -672,9 +710,6 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) { mlt_properties_set_int( frame_properties, "test_image", 1 ); } - - // Unlock the mutex now - avformat_unlock( ); } /** Get the audio from a frame. 
@@ -683,7 +718,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples ) { // Get the properties from the frame - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Obtain the frame number of this frame mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" ); @@ -692,7 +727,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL ); // Get the producer properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the audio_context AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL ); @@ -700,8 +735,11 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Get the audio_index int index = mlt_properties_get_int( properties, "audio_index" ); + // Get the seekable status + int seekable = mlt_properties_get_int( properties, "seekable" ); + // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "audio_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" ); // Obtain the resample context if it exists (not always needed) ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL ); @@ -710,7 +748,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL ); // Get amount of audio used - int audio_used = mlt_properties_get_int( properties, "audio_used" ); + int audio_used = mlt_properties_get_int( properties, "_audio_used" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -719,7 +757,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; @@ -729,10 +767,6 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Flag for paused (silence) int paused = 0; - int locked = 0; - - // Lock the mutex now - avformat_lock( ); // Check for resample and create if necessary if ( resample == NULL && codec_context->channels <= 2 ) @@ -767,20 +801,19 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // We're paused - silence required paused = 1; } - else if ( position > expected && ( position - expected ) < 250 ) + else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames ignore = position - expected; } - else + else if ( position < expected || position - expected >= 12 ) { // Set to the real timecode - av_seek_frame( context, -1, real_timecode * 1000000.0 ); + if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 ) + paused = 1; // Clear the usage in the audio buffer audio_used = 0; - - locked = 1; } } @@ -791,7 +824,7 
@@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int got_audio = 0; int16_t *temp = mlt_pool_alloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE ); - memset( &pkt, 0, sizeof( pkt ) ); + av_init_packet( &pkt ); while( ret >= 0 && !got_audio ) { @@ -846,26 +879,8 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form } // If we're behind, ignore this packet - float current_pts = (float)pkt.pts / 1000000.0; - double discrepancy = mlt_properties_get_double( properties, "discrepancy" ); - if ( current_pts != 0 && real_timecode != 0 ) - { - if ( discrepancy != 1 ) - discrepancy = ( discrepancy + ( real_timecode / current_pts ) ) / 2; - else - discrepancy = real_timecode / current_pts; - if ( discrepancy > 0.9 && discrepancy < 1.1 ) - discrepancy = 1.0; - else - discrepancy = floor( discrepancy + 0.5 ); - - if ( discrepancy == 0 ) - discrepancy = 1.0; - - mlt_properties_set_double( properties, "discrepancy", discrepancy ); - } - - if ( !ignore && discrepancy * current_pts <= ( real_timecode - 0.02 ) ) + float current_pts = av_q2d( stream->time_base ) * pkt.pts; + if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) ) ignore = 1; } @@ -889,7 +904,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form } // Store the number of audio samples still available - mlt_properties_set_int( properties, "audio_used", audio_used ); + mlt_properties_set_int( properties, "_audio_used", audio_used ); // Release the temporary audio mlt_pool_release( temp ); @@ -897,15 +912,12 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form else { // Get silence and don't touch the context - frame->get_audio = NULL; mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples ); } - // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "audio_expected", position + 1 ); - - // Unlock the mutex now - avformat_unlock( ); + // Regardless of speed (other than paused), we expect to get the next frame + if ( !paused ) + mlt_properties_set_position( properties, "_audio_expected", position + 1 ); return 0; } @@ -916,7 +928,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the audio_context AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL ); @@ -924,20 +936,17 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) // Get the audio_index int index = mlt_properties_get_int( properties, "audio_index" ); - // Lock the mutex now - avformat_lock( ); - // Deal with audio context if ( context != NULL && index != -1 ) { // Get the frame properties - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Get the audio stream AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL ); @@ -965,13 +974,12 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) // No codec, no show... 
 		if ( codec != NULL )
 		{
-			frame->get_audio = producer_get_audio;
+			mlt_frame_push_audio( frame, producer_get_audio );
 			mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
+			mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
+			mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
 		}
 	}
-
-	// Unlock the mutex now
-	avformat_unlock( );
 }

 /** Our get frame implementation.
@@ -986,7 +994,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index
 	mlt_frame_set_position( *frame, mlt_producer_position( this ) );

 	// Set the position of this producer
-	mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_get_in( this ) + mlt_producer_position( this ) );
+	mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );

 	// Set up the video
 	producer_set_up_video( this, *frame );
@@ -995,7 +1003,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index
 	producer_set_up_audio( this, *frame );

 	// Set the aspect_ratio
-	mlt_properties_set_double( mlt_frame_properties( *frame ), "aspect_ratio", mlt_properties_get_double( mlt_producer_properties( this ), "aspect_ratio" ) );
+	mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );

 	// Calculate the next timecode
 	mlt_producer_prepare_next( this );
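
Illustrative sketch (not part of the patch above): the new seek path in producer_get_image converts an MLT frame position into a demuxer timestamp before calling av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD ). A minimal standalone rendering of that arithmetic follows; the helper name and signature are invented for illustration, and fps, source_fps and must_decode are assumed to be computed exactly as in the hunks above.

static int64_t avformat_seek_timestamp( AVFormatContext *context, int position,
	double fps, double source_fps, int must_decode )
{
	// Physical frame position in the source stream
	int req_position = ( int )( position / fps * source_fps );

	// Scale to AV_TIME_BASE (microsecond) units for av_seek_frame( ) with stream_index -1
	int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE );

	// Demuxers that report a start time need that offset applied
	if ( context->start_time != AV_NOPTS_VALUE )
		timestamp += context->start_time;

	// Codecs that must decode every frame since the previous key frame are given
	// a one second back-off so decoding can catch up to the requested frame
	if ( must_decode )
		timestamp -= AV_TIME_BASE;

	return timestamp < 0 ? 0 : timestamp;
}

After seeking, the patched decode loop then walks packets forward, deriving each picture's position from pkt.dts scaled by the stream time_base and source_fps, and discards pictures until that position reaches req_position.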