X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=7a176d88ae53edb7a447df7a9afe16027eb7f806;hb=3e021eb2f553fada306bc4879fabda445e75450d;hp=ed7694d5b0a8c5e5fc06a214ed422f4b026e4eee;hpb=bd208d01a2a792e698a9b4884b43602b2f245a8f;p=melted diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c index ed7694d..7a176d8 100644 --- a/src/modules/avformat/producer_avformat.c +++ b/src/modules/avformat/producer_avformat.c @@ -25,7 +25,7 @@ #include // ffmpeg Header files -#include +#include // System header files #include @@ -62,9 +62,6 @@ mlt_producer producer_avformat_init( char *file ) // Set the resource property (required for all producers) mlt_properties_set( properties, "resource", file ); - // TEST: audio sync tweaking - mlt_properties_set_double( properties, "discrepancy", 1 ); - // Register our get_frame implementation this->get_frame = producer_get_frame; @@ -172,6 +169,9 @@ static int producer_open( mlt_producer this, char *file ) AVFormatParameters *params = NULL; char *standard = NULL; char *mrl = strchr( file, ':' ); + + // AV option (0 = both, 1 = video, 2 = audio) + int av = 0; // Only if there is not a protocol specification that avformat can handle if ( mrl && !url_exist( file ) ) @@ -231,6 +231,8 @@ static int producer_open( mlt_producer this, char *file ) standard = strdup( value ); params->standard = standard; } + else if ( !strcmp( name, "av" ) ) + av = atoi( value ); } free( name ); mrl = strchr( mrl, '&' ); @@ -270,12 +272,19 @@ static int producer_open( mlt_producer this, char *file ) // Find default audio and video streams find_default_streams( context, &audio_index, &video_index ); + if ( context->start_time != AV_NOPTS_VALUE ) + mlt_properties_set_double( properties, "start_time", context->start_time ); + + // Check if we're seekable (something funny about mpeg here :-/) + if ( strcmp( file, "pipe:" ) ) + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) ) >= 0 ); + // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) - if ( audio_index != -1 && video_index != -1 ) + if ( av == 0 && strcmp( file, "pipe:" ) && audio_index != -1 && video_index != -1 ) { // We'll use the open one as our video_context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -287,7 +296,7 @@ static int producer_open( mlt_producer this, char *file ) // Audio context mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL ); } - else if ( video_index != -1 ) + else if ( av != 2 && video_index != -1 ) { // We only have a video context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -326,6 +335,77 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position ) return ( double )position / fps; } +static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) +{ + // EXPERIMENTAL IMAGE NORMALISATIONS + if ( pix_fmt == PIX_FMT_YUV420P && format == mlt_image_yuv422 ) + { + register int i, j; + register int half = width >> 1; + register uint8_t *Y = ( ( AVPicture * )frame 
)->data[ 0 ]; + register uint8_t *U = ( ( AVPicture * )frame )->data[ 1 ]; + register uint8_t *V = ( ( AVPicture * )frame )->data[ 2 ]; + register uint8_t *d = buffer; + register uint8_t *y, *u, *v; + + i = height >> 1; + while ( i -- ) + { + y = Y; + u = U; + v = V; + j = half; + while ( j -- ) + { + *d ++ = *y ++; + *d ++ = *u ++; + *d ++ = *y ++; + *d ++ = *v ++; + } + + Y += ( ( AVPicture * )frame )->linesize[ 0 ]; + y = Y; + u = U; + v = V; + j = half; + while ( j -- ) + { + *d ++ = *y ++; + *d ++ = *u ++; + *d ++ = *y ++; + *d ++ = *v ++; + } + + Y += ( ( AVPicture * )frame )->linesize[ 0 ]; + U += ( ( AVPicture * )frame )->linesize[ 1 ]; + V += ( ( AVPicture * )frame )->linesize[ 2 ]; + } + } + else if ( format == mlt_image_yuv420p ) + { + AVPicture pict; + pict.data[0] = buffer; + pict.data[1] = buffer + width * height; + pict.data[2] = buffer + ( 3 * width * height ) / 2; + pict.linesize[0] = width; + pict.linesize[1] = width >> 1; + pict.linesize[2] = width >> 1; + img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); + } + else if ( format == mlt_image_rgb24 ) + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height ); + } + else + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height ); + img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height ); + } +} + /** Get an image from a frame. */ @@ -365,7 +445,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form AVPacket pkt; // Get the conversion frame - AVPicture *output = mlt_properties_get_data( properties, "video_output_frame", NULL ); + AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL ); // Special case pause handling flag int paused = 0; @@ -379,28 +459,36 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // We may want to use the source fps if available double source_fps = mlt_properties_get_double( properties, "source_fps" ); + // Get the seekable status + int seekable = mlt_properties_get_int( properties, "seekable" ); + + // Generate the size in bytes + int size = 0; + // Set the result arguments that we know here (only *buffer is now required) - *format = mlt_image_yuv422; *width = codec_context->width; *height = codec_context->height; + switch ( *format ) + { + case mlt_image_yuv420p: + size = *width * 3 * ( *height + 1 ) / 2; + break; + case mlt_image_rgb24: + size = *width * ( *height + 1 ) * 3; + break; + default: + *format = mlt_image_yuv422; + size = *width * ( *height + 1 ) * 2; + break; + } + // Set this on the frame properties mlt_properties_set_int( frame_properties, "width", *width ); mlt_properties_set_int( frame_properties, "height", *height ); - // Lock the mutex now - avformat_lock( ); - - // Construct an AVFrame for YUV422 conversion - if ( output == NULL ) - { - int size = avpicture_get_size( PIX_FMT_YUV422, *width, *height + 1 ); - uint8_t *buf = mlt_pool_alloc( size ); - output = mlt_pool_alloc( sizeof( AVPicture ) ); - avpicture_fill( output, buf, PIX_FMT_YUV422, *width, *height ); - mlt_properties_set_data( properties, "video_output_frame", output, 0, ( mlt_destructor )mlt_pool_release, NULL ); - mlt_properties_set_data( properties, "video_output_buffer", buf, 0, ( mlt_destructor )mlt_pool_release, NULL ); - } + // Construct the output image + *buffer = mlt_pool_alloc( size ); // Seek if necessary if ( position 
!= expected ) @@ -410,33 +498,30 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // We're paused - use last image paused = 1; } - else if ( position > expected && ( position - expected ) < 250 ) + else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames ignore = position - expected; } - else + else if ( codec_context->gop_size == 0 || ( position < expected || position - expected >= 12 ) ) { // Set to the real timecode - av_seek_frame( context, -1, real_timecode * 1000000.0 ); + av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ); // Remove the cached info relating to the previous position mlt_properties_set_double( properties, "current_time", real_timecode ); - mlt_properties_set_data( properties, "current_image", NULL, 0, NULL, NULL ); + + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + av_frame = NULL; } } // Duplicate the last image if necessary - if ( mlt_properties_get_data( properties, "current_image", NULL ) != NULL && - ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) ) + if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) && + strcmp( mlt_properties_get( properties, "resource" ), "pipe:" ) ) { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - // Duplicate it - *buffer = mlt_pool_alloc( size ); - memcpy( *buffer, image, size ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); // Set this on the frame properties mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); @@ -445,10 +530,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { int ret = 0; int got_picture = 0; - AVFrame frame; memset( &pkt, 0, sizeof( pkt ) ); - memset( &frame, 0, sizeof( frame ) ); + + // Construct an AVFrame for YUV422 conversion + if ( av_frame == NULL ) + { + av_frame = calloc( 1, sizeof( AVFrame ) ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, free, NULL ); + } while( ret >= 0 && !got_picture ) { @@ -459,11 +549,11 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 ) { // Decode the image - ret = avcodec_decode_video( codec_context, &frame, &got_picture, pkt.data, pkt.size ); + ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); if ( got_picture ) { - if ( pkt.pts != AV_NOPTS_VALUE && pkt.pts != 0 ) + if ( pkt.pts != AV_NOPTS_VALUE ) current_time = ( double )pkt.pts / 1000000.0; else current_time = real_timecode; @@ -483,7 +573,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { got_picture = 0; } - mlt_properties_set_int( properties, "top_field_first", frame.top_field_first ); + mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first ); } } @@ -494,71 +584,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Now handle the picture if we have one if ( got_picture ) { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - - if ( image == NULL || size 
!= *width * *height * 2 ) - { - size = *width * ( *height + 1 ) * 2; - image = mlt_pool_alloc( size ); - mlt_properties_set_data( properties, "current_image", image, size, ( mlt_destructor )mlt_pool_release, NULL ); - } - - *buffer = mlt_pool_alloc( size ); - - // EXPERIMENTAL IMAGE NORMALISATIONS - if ( codec_context->pix_fmt == PIX_FMT_YUV420P ) - { - register int i, j; - register int half = *width >> 1; - register uint8_t *Y = ( ( AVPicture * )&frame )->data[ 0 ]; - register uint8_t *U = ( ( AVPicture * )&frame )->data[ 1 ]; - register uint8_t *V = ( ( AVPicture * )&frame )->data[ 2 ]; - register uint8_t *d = *buffer; - register uint8_t *y, *u, *v; - - i = *height >> 1; - while ( i -- ) - { - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - U += ( ( AVPicture * )&frame )->linesize[ 1 ]; - V += ( ( AVPicture * )&frame )->linesize[ 2 ]; - } - } - else - { - img_convert( output, PIX_FMT_YUV422, (AVPicture *)&frame, codec_context->pix_fmt, *width, *height ); - memcpy( *buffer, output->data[ 0 ], size ); - } + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); - memcpy( image, *buffer, size ); - mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); if ( current_time == 0 && source_fps != 0 ) { @@ -580,9 +608,6 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Regardless of speed, we expect to get the next frame (cos we ain't too bright) mlt_properties_set_position( properties, "video_expected", position + 1 ); - // Unlock the mutex now - avformat_unlock( ); - return 0; } @@ -603,9 +628,6 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // Get the frame properties mlt_properties frame_properties = mlt_frame_properties( frame ); - // Lock the mutex now - avformat_lock( ); - if ( context != NULL && index != -1 ) { // Get the video stream @@ -639,14 +661,21 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // No codec, no show... if ( codec != NULL ) { - double aspect_ratio = 1; double source_fps = 0; - // Set aspect ratio + // XXX: We won't know the real aspect ratio until an image is decoded + // but we do need it now (to satisfy filter_resize) - take a guess based + // on pal/ntsc if ( codec_context->sample_aspect_ratio.num > 0 ) - aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); + { + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } + else + { + int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; + mlt_properties_set_double( properties, "aspect_ratio", is_pal ? 
128.0/117.0 : 72.0/79.0 ); + } - mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); //fprintf( stderr, "AVFORMAT: sample aspect %f %dx%d\n", av_q2d( codec_context->sample_aspect_ratio ), codec_context->width, codec_context->height ); // Determine the fps @@ -672,9 +701,6 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) { mlt_properties_set_int( frame_properties, "test_image", 1 ); } - - // Unlock the mutex now - avformat_unlock( ); } /** Get the audio from a frame. @@ -700,6 +726,9 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Get the audio_index int index = mlt_properties_get_int( properties, "audio_index" ); + // Get the seekable status + int seekable = mlt_properties_get_int( properties, "seekable" ); + // Obtain the expected frame numer mlt_position expected = mlt_properties_get_position( properties, "audio_expected" ); @@ -729,10 +758,6 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Flag for paused (silence) int paused = 0; - int locked = 0; - - // Lock the mutex now - avformat_lock( ); // Check for resample and create if necessary if ( resample == NULL && codec_context->channels <= 2 ) @@ -767,20 +792,19 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // We're paused - silence required paused = 1; } - else if ( position > expected && ( position - expected ) < 250 ) + else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames ignore = position - expected; } - else + else if ( position < expected || position - expected >= 12 ) { // Set to the real timecode - av_seek_frame( context, -1, real_timecode * 1000000.0 ); + if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ) != 0 ) + paused = 1; // Clear the usage in the audio buffer audio_used = 0; - - locked = 1; } } @@ -847,25 +871,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // If we're behind, ignore this packet float current_pts = (float)pkt.pts / 1000000.0; - double discrepancy = mlt_properties_get_double( properties, "discrepancy" ); - if ( current_pts != 0 && real_timecode != 0 ) - { - if ( discrepancy != 1 ) - discrepancy = ( discrepancy + ( real_timecode / current_pts ) ) / 2; - else - discrepancy = real_timecode / current_pts; - if ( discrepancy > 0.9 && discrepancy < 1.1 ) - discrepancy = 1.0; - else - discrepancy = floor( discrepancy + 0.5 ); - - if ( discrepancy == 0 ) - discrepancy = 1.0; - - mlt_properties_set_double( properties, "discrepancy", discrepancy ); - } - - if ( !ignore && discrepancy * current_pts <= ( real_timecode - 0.02 ) ) + if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) ) ignore = 1; } @@ -901,11 +907,9 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples ); } - // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "audio_expected", position + 1 ); - - // Unlock the mutex now - avformat_unlock( ); + // Regardless of speed (other than paused), we expect to get the next frame + if ( !paused ) + mlt_properties_set_position( properties, "audio_expected", position + 1 ); return 0; } @@ -924,9 +928,6 @@ static void producer_set_up_audio( 
mlt_producer this, mlt_frame frame ) // Get the audio_index int index = mlt_properties_get_int( properties, "audio_index" ); - // Lock the mutex now - avformat_lock( ); - // Deal with audio context if ( context != NULL && index != -1 ) { @@ -969,9 +970,6 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); } } - - // Unlock the mutex now - avformat_unlock( ); } /** Our get frame implementation. @@ -986,7 +984,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index mlt_frame_set_position( *frame, mlt_producer_position( this ) ); // Set the position of this producer - mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_get_in( this ) + mlt_producer_position( this ) ); + mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_position( this ) ); // Set up the video producer_set_up_video( this, *frame );
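
Usage sketch: with this change, producer_get_image() honours the caller's requested image format (mlt_image_rgb24 or mlt_image_yuv420p as well as the default mlt_image_yuv422) via convert_image()/img_convert(), producer_open() recognises an "av" option in the resource string (0 = both, 1 = video, 2 = audio), and a "seekable" property is probed with av_seek_frame(). The caller below is only a sketch under assumptions not confirmed by this diff: the MLT framework API of this era (mlt_factory_init()/mlt_factory_producer() with two arguments, mlt_producer_properties() as a function, as the hunks above use mlt_frame_properties()), and "clip.mpg" as a placeholder file name.

/* Hypothetical caller - assumed MLT 0.x era API, placeholder file name. */
#include <stdio.h>
#include <framework/mlt.h>

int main( void )
{
	mlt_factory_init( NULL );

	/* The "av" option parsed in producer_open() could restrict this to video or audio only. */
	mlt_producer producer = mlt_factory_producer( "avformat", "clip.mpg" );
	if ( producer == NULL )
		return 1;

	mlt_frame frame = NULL;
	mlt_service_get_frame( mlt_producer_service( producer ), &frame, 0 );

	/* Request rgb24 - with this patch the producer converts via convert_image()
	   instead of forcing the format back to mlt_image_yuv422. */
	uint8_t *image = NULL;
	mlt_image_format format = mlt_image_rgb24;
	int width = 0;
	int height = 0;
	mlt_frame_get_image( frame, &image, &format, &width, &height, 0 );

	fprintf( stderr, "decoded %dx%d, seekable = %d\n", width, height,
		mlt_properties_get_int( mlt_producer_properties( producer ), "seekable" ) );

	mlt_frame_close( frame );
	mlt_producer_close( producer );
	mlt_factory_close( );
	return 0;
}

Requesting yuv420p or rgb24 directly appears to be the point of the new size/format switch in producer_get_image(): a consumer that wants one of those formats can skip a second conversion downstream.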