X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=85ee4a5a2b17f0d256932fe4d293fc50f3ef9a34;hb=112bed9a4d15f4b52835dfc55fdf301f0a39f6d1;hp=1a5e2ee7ca7d36a312e54829cae21e385108c99b;hpb=cf78109dc7e67f65d74f8139369b583ad0f6398e;p=melted diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c index 1a5e2ee..85ee4a5 100644 --- a/src/modules/avformat/producer_avformat.c +++ b/src/modules/avformat/producer_avformat.c @@ -169,7 +169,13 @@ static int producer_open( mlt_producer this, char *file ) AVFormatParameters *params = NULL; char *standard = NULL; char *mrl = strchr( file, ':' ); + + // AV option (0 = both, 1 = video, 2 = audio) + int av = 0; + // Setting lowest log level + av_log_set_level( -1 ); + // Only if there is not a protocol specification that avformat can handle if ( mrl && !url_exist( file ) ) { @@ -228,6 +234,8 @@ static int producer_open( mlt_producer this, char *file ) standard = strdup( value ); params->standard = standard; } + else if ( !strcmp( name, "av" ) ) + av = atoi( value ); } free( name ); mrl = strchr( mrl, '&' ); @@ -254,6 +262,7 @@ static int producer_open( mlt_producer this, char *file ) // We will default to the first audio and video streams found int audio_index = -1; int video_index = -1; + int av_bypass = 0; // Now set properties where we can (use default unknowns if required) if ( context->duration != AV_NOPTS_VALUE ) @@ -271,14 +280,17 @@ static int producer_open( mlt_producer this, char *file ) mlt_properties_set_double( properties, "start_time", context->start_time ); // Check if we're seekable (something funny about mpeg here :-/) - mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) ) >= 0 ); + if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) ) + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 ); + else + av_bypass = 1; // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) - if ( audio_index != -1 && video_index != -1 ) + if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 ) { // We'll use the open one as our video_context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -290,7 +302,7 @@ static int producer_open( mlt_producer this, char *file ) // Audio context mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL ); } - else if ( video_index != -1 ) + else if ( av != 2 && video_index != -1 ) { // We only have a video context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -305,6 +317,8 @@ static int producer_open( mlt_producer this, char *file ) // Something has gone wrong error = -1; } + + mlt_properties_set_int( properties, "av_bypass", av_bypass ); } } @@ -329,6 +343,77 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position ) return ( double )position / fps; } +static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) +{ + // EXPERIMENTAL IMAGE NORMALISATIONS + if ( 
pix_fmt == PIX_FMT_YUV420P && format == mlt_image_yuv422 ) + { + register int i, j; + register int half = width >> 1; + register uint8_t *Y = ( ( AVPicture * )frame )->data[ 0 ]; + register uint8_t *U = ( ( AVPicture * )frame )->data[ 1 ]; + register uint8_t *V = ( ( AVPicture * )frame )->data[ 2 ]; + register uint8_t *d = buffer; + register uint8_t *y, *u, *v; + + i = height >> 1; + while ( i -- ) + { + y = Y; + u = U; + v = V; + j = half; + while ( j -- ) + { + *d ++ = *y ++; + *d ++ = *u ++; + *d ++ = *y ++; + *d ++ = *v ++; + } + + Y += ( ( AVPicture * )frame )->linesize[ 0 ]; + y = Y; + u = U; + v = V; + j = half; + while ( j -- ) + { + *d ++ = *y ++; + *d ++ = *u ++; + *d ++ = *y ++; + *d ++ = *v ++; + } + + Y += ( ( AVPicture * )frame )->linesize[ 0 ]; + U += ( ( AVPicture * )frame )->linesize[ 1 ]; + V += ( ( AVPicture * )frame )->linesize[ 2 ]; + } + } + else if ( format == mlt_image_yuv420p ) + { + AVPicture pict; + pict.data[0] = buffer; + pict.data[1] = buffer + width * height; + pict.data[2] = buffer + ( 3 * width * height ) / 2; + pict.linesize[0] = width; + pict.linesize[1] = width >> 1; + pict.linesize[2] = width >> 1; + img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); + } + else if ( format == mlt_image_rgb24 ) + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height ); + } + else + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height ); + img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height ); + } +} + /** Get an image from a frame. */ @@ -368,7 +453,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form AVPacket pkt; // Get the conversion frame - AVPicture *output = mlt_properties_get_data( properties, "video_output_frame", NULL ); + AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL ); // Special case pause handling flag int paused = 0; @@ -385,25 +470,36 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Get the seekable status int seekable = mlt_properties_get_int( properties, "seekable" ); + // Generate the size in bytes + int size = 0; + + // Hopefully provide better support for streams... 
+ int av_bypass = mlt_properties_get_int( properties, "av_bypass" ); + // Set the result arguments that we know here (only *buffer is now required) - *format = mlt_image_yuv422; *width = codec_context->width; *height = codec_context->height; + switch ( *format ) + { + case mlt_image_yuv420p: + size = *width * 3 * ( *height + 1 ) / 2; + break; + case mlt_image_rgb24: + size = *width * ( *height + 1 ) * 3; + break; + default: + *format = mlt_image_yuv422; + size = *width * ( *height + 1 ) * 2; + break; + } + // Set this on the frame properties mlt_properties_set_int( frame_properties, "width", *width ); mlt_properties_set_int( frame_properties, "height", *height ); - // Construct an AVFrame for YUV422 conversion - if ( output == NULL ) - { - int size = avpicture_get_size( PIX_FMT_YUV422, *width, *height + 1 ); - uint8_t *buf = mlt_pool_alloc( size ); - output = mlt_pool_alloc( sizeof( AVPicture ) ); - avpicture_fill( output, buf, PIX_FMT_YUV422, *width, *height ); - mlt_properties_set_data( properties, "video_output_frame", output, 0, ( mlt_destructor )mlt_pool_release, NULL ); - mlt_properties_set_data( properties, "video_output_buffer", buf, 0, ( mlt_destructor )mlt_pool_release, NULL ); - } + // Construct the output image + *buffer = mlt_pool_alloc( size ); // Seek if necessary if ( position != expected ) @@ -418,28 +514,25 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Fast forward - seeking is inefficient for small distances - just ignore following frames ignore = position - expected; } - else if ( position < expected || position - expected >= 12 ) + else if ( seekable && ( position < expected || position - expected >= 12 ) ) { // Set to the real timecode - av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ); + av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ); // Remove the cached info relating to the previous position mlt_properties_set_double( properties, "current_time", real_timecode ); - mlt_properties_set_data( properties, "current_image", NULL, 0, NULL, NULL ); + + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + av_frame = NULL; } } // Duplicate the last image if necessary - if ( mlt_properties_get_data( properties, "current_image", NULL ) != NULL && - ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) ) + if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) && + av_bypass == 0 ) { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - // Duplicate it - *buffer = mlt_pool_alloc( size ); - memcpy( *buffer, image, size ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); // Set this on the frame properties mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); @@ -448,10 +541,20 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { int ret = 0; int got_picture = 0; - AVFrame frame; + int must_decode = 1; + + // Temporary hack to improve intra frame only + if ( !strcmp( codec_context->codec->name, "mjpeg" ) ) + must_decode = 0; memset( &pkt, 0, sizeof( pkt ) ); - memset( &frame, 0, sizeof( frame ) ); + + // Construct an AVFrame for YUV422 conversion + if ( av_frame == NULL ) + { + 
av_frame = calloc( 1, sizeof( AVFrame ) ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, free, NULL ); + } while( ret >= 0 && !got_picture ) { @@ -461,16 +564,18 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // We only deal with video from the selected video_index if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 ) { + // Determine time code of the packet + if ( pkt.pts != AV_NOPTS_VALUE ) + current_time = ( double )pkt.pts / 1000000.0; + else + current_time = real_timecode; + // Decode the image - ret = avcodec_decode_video( codec_context, &frame, &got_picture, pkt.data, pkt.size ); + if ( must_decode || current_time >= real_timecode ) + ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); if ( got_picture ) { - if ( pkt.pts != AV_NOPTS_VALUE ) - current_time = ( double )pkt.pts / 1000000.0; - else - current_time = real_timecode; - // Handle ignore if ( ( int )( current_time * 100 ) < ( int )( real_timecode * 100 ) - 7 ) { @@ -479,14 +584,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form } else if ( current_time >= real_timecode ) { - //current_time = real_timecode; ignore = 0; } else if ( ignore -- ) { got_picture = 0; } - mlt_properties_set_int( properties, "top_field_first", frame.top_field_first ); + mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first ); } } @@ -497,71 +601,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Now handle the picture if we have one if ( got_picture ) { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - - if ( image == NULL || size != *width * *height * 2 ) - { - size = *width * ( *height + 1 ) * 2; - image = mlt_pool_alloc( size ); - mlt_properties_set_data( properties, "current_image", image, size, ( mlt_destructor )mlt_pool_release, NULL ); - } - - *buffer = mlt_pool_alloc( size ); - - // EXPERIMENTAL IMAGE NORMALISATIONS - if ( codec_context->pix_fmt == PIX_FMT_YUV420P ) - { - register int i, j; - register int half = *width >> 1; - register uint8_t *Y = ( ( AVPicture * )&frame )->data[ 0 ]; - register uint8_t *U = ( ( AVPicture * )&frame )->data[ 1 ]; - register uint8_t *V = ( ( AVPicture * )&frame )->data[ 2 ]; - register uint8_t *d = *buffer; - register uint8_t *y, *u, *v; - - i = *height >> 1; - while ( i -- ) - { - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - U += ( ( AVPicture * )&frame )->linesize[ 1 ]; - V += ( ( AVPicture * )&frame )->linesize[ 2 ]; - } - } - else - { - img_convert( output, PIX_FMT_YUV422, (AVPicture *)&frame, codec_context->pix_fmt, *width, *height ); - memcpy( *buffer, output->data[ 0 ], size ); - } + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); - memcpy( image, *buffer, size ); - mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); if ( current_time == 0 && source_fps != 0 ) { @@ -636,14 +678,22 @@ static void 
producer_set_up_video( mlt_producer this, mlt_frame frame ) // No codec, no show... if ( codec != NULL ) { - double aspect_ratio = 1; double source_fps = 0; + int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" ); - // Set aspect ratio - if ( codec_context->sample_aspect_ratio.num > 0 ) - aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); + // XXX: We won't know the real aspect ratio until an image is decoded + // but we do need it now (to satisfy filter_resize) - take a guess based + // on pal/ntsc + if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 ) + { + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } + else + { + int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; + mlt_properties_set_double( properties, "aspect_ratio", is_pal ? 128.0/117.0 : 72.0/79.0 ); + } - mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); //fprintf( stderr, "AVFORMAT: sample aspect %f %dx%d\n", av_q2d( codec_context->sample_aspect_ratio ), codec_context->width, codec_context->height ); // Determine the fps @@ -768,7 +818,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form else if ( position < expected || position - expected >= 12 ) { // Set to the real timecode - if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ) != 0 ) + if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 ) paused = 1; // Clear the usage in the audio buffer @@ -952,7 +1002,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index mlt_frame_set_position( *frame, mlt_producer_position( this ) ); // Set the position of this producer - mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_position( this ) ); + mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_frame( this ) ); // Set up the video producer_set_up_video( this, *frame );
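
A stand-alone sketch (not taken from the patch itself): the code below approximates the planar YUV420P to packed 4:2:2 (YUYV) fast path that the new convert_image() helper implements, using only standard C so the interleave logic can be compiled and checked in isolation. The plane arrays, strides and the 4x2 test frame are hypothetical stand-ins for the AVPicture data[]/linesize[] fields the real code reads from the decoded AVFrame.

/* Minimal approximation of the YUV420P -> packed YUYV interleave used by
 * convert_image(); plane layout and the "one chroma line serves two luma
 * lines" handling mirror the patch, while sizes here are test values only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void yuv420p_to_yuyv( const uint8_t *y_plane, const uint8_t *u_plane,
	const uint8_t *v_plane, int y_stride, int uv_stride,
	uint8_t *dst, int width, int height )
{
	int half = width / 2;
	int row, i;
	for ( row = 0; row < height; row ++ )
	{
		const uint8_t *y = y_plane + row * y_stride;
		/* 4:2:0 chroma: each U/V line is reused for two luma lines */
		const uint8_t *u = u_plane + ( row / 2 ) * uv_stride;
		const uint8_t *v = v_plane + ( row / 2 ) * uv_stride;
		for ( i = 0; i < half; i ++ )
		{
			*dst ++ = *y ++;	/* Y0 */
			*dst ++ = *u ++;	/* U  */
			*dst ++ = *y ++;	/* Y1 */
			*dst ++ = *v ++;	/* V  */
		}
	}
}

int main( void )
{
	/* Tiny synthetic 4x2 frame; strides equal the plane widths */
	int width = 4, height = 2;
	uint8_t y_plane[ 8 ], u_plane[ 2 ], v_plane[ 2 ], out[ 16 ];
	size_t i;

	memset( y_plane, 0x80, sizeof( y_plane ) );
	memset( u_plane, 0x40, sizeof( u_plane ) );
	memset( v_plane, 0xc0, sizeof( v_plane ) );

	yuv420p_to_yuyv( y_plane, u_plane, v_plane, width, width / 2, out, width, height );

	/* Expect the four-byte pattern 80 40 80 c0 repeated four times */
	for ( i = 0; i < sizeof( out ); i ++ )
		printf( "%02x ", out[ i ] );
	printf( "\n" );

	return 0;
}

As in the patch, producer_get_image() now keeps the decoded AVFrame on the producer properties and re-runs this conversion (or img_convert() for the other target formats) whenever a frame has to be duplicated, instead of caching a pre-converted image buffer per position.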