X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=7a176d88ae53edb7a447df7a9afe16027eb7f806;hb=3e021eb2f553fada306bc4879fabda445e75450d;hp=1a5e2ee7ca7d36a312e54829cae21e385108c99b;hpb=cf78109dc7e67f65d74f8139369b583ad0f6398e;p=melted diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c index 1a5e2ee..7a176d8 100644 --- a/src/modules/avformat/producer_avformat.c +++ b/src/modules/avformat/producer_avformat.c @@ -169,6 +169,9 @@ static int producer_open( mlt_producer this, char *file ) AVFormatParameters *params = NULL; char *standard = NULL; char *mrl = strchr( file, ':' ); + + // AV option (0 = both, 1 = video, 2 = audio) + int av = 0; // Only if there is not a protocol specification that avformat can handle if ( mrl && !url_exist( file ) ) @@ -228,6 +231,8 @@ static int producer_open( mlt_producer this, char *file ) standard = strdup( value ); params->standard = standard; } + else if ( !strcmp( name, "av" ) ) + av = atoi( value ); } free( name ); mrl = strchr( mrl, '&' ); @@ -271,14 +276,15 @@ static int producer_open( mlt_producer this, char *file ) mlt_properties_set_double( properties, "start_time", context->start_time ); // Check if we're seekable (something funny about mpeg here :-/) - mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) ) >= 0 ); + if ( strcmp( file, "pipe:" ) ) + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) ) >= 0 ); // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) - if ( audio_index != -1 && video_index != -1 ) + if ( av == 0 && strcmp( file, "pipe:" ) && audio_index != -1 && video_index != -1 ) { // We'll use the open one as our video_context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -290,7 +296,7 @@ static int producer_open( mlt_producer this, char *file ) // Audio context mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL ); } - else if ( video_index != -1 ) + else if ( av != 2 && video_index != -1 ) { // We only have a video context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -329,6 +335,77 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position ) return ( double )position / fps; } +static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) +{ + // EXPERIMENTAL IMAGE NORMALISATIONS + if ( pix_fmt == PIX_FMT_YUV420P && format == mlt_image_yuv422 ) + { + register int i, j; + register int half = width >> 1; + register uint8_t *Y = ( ( AVPicture * )frame )->data[ 0 ]; + register uint8_t *U = ( ( AVPicture * )frame )->data[ 1 ]; + register uint8_t *V = ( ( AVPicture * )frame )->data[ 2 ]; + register uint8_t *d = buffer; + register uint8_t *y, *u, *v; + + i = height >> 1; + while ( i -- ) + { + y = Y; + u = U; + v = V; + j = half; + while ( j -- ) + { + *d ++ = *y ++; + *d ++ = *u ++; + *d ++ = *y ++; + *d ++ = *v ++; + } + + Y += ( ( AVPicture * )frame )->linesize[ 0 ]; + y = Y; + u = U; + v = V; + j = half; + while ( j 
-- ) + { + *d ++ = *y ++; + *d ++ = *u ++; + *d ++ = *y ++; + *d ++ = *v ++; + } + + Y += ( ( AVPicture * )frame )->linesize[ 0 ]; + U += ( ( AVPicture * )frame )->linesize[ 1 ]; + V += ( ( AVPicture * )frame )->linesize[ 2 ]; + } + } + else if ( format == mlt_image_yuv420p ) + { + AVPicture pict; + pict.data[0] = buffer; + pict.data[1] = buffer + width * height; + pict.data[2] = buffer + ( 3 * width * height ) / 2; + pict.linesize[0] = width; + pict.linesize[1] = width >> 1; + pict.linesize[2] = width >> 1; + img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); + } + else if ( format == mlt_image_rgb24 ) + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height ); + } + else + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height ); + img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height ); + } +} + /** Get an image from a frame. */ @@ -368,7 +445,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form AVPacket pkt; // Get the conversion frame - AVPicture *output = mlt_properties_get_data( properties, "video_output_frame", NULL ); + AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL ); // Special case pause handling flag int paused = 0; @@ -385,25 +462,33 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Get the seekable status int seekable = mlt_properties_get_int( properties, "seekable" ); + // Generate the size in bytes + int size = 0; + // Set the result arguments that we know here (only *buffer is now required) - *format = mlt_image_yuv422; *width = codec_context->width; *height = codec_context->height; + switch ( *format ) + { + case mlt_image_yuv420p: + size = *width * 3 * ( *height + 1 ) / 2; + break; + case mlt_image_rgb24: + size = *width * ( *height + 1 ) * 3; + break; + default: + *format = mlt_image_yuv422; + size = *width * ( *height + 1 ) * 2; + break; + } + // Set this on the frame properties mlt_properties_set_int( frame_properties, "width", *width ); mlt_properties_set_int( frame_properties, "height", *height ); - // Construct an AVFrame for YUV422 conversion - if ( output == NULL ) - { - int size = avpicture_get_size( PIX_FMT_YUV422, *width, *height + 1 ); - uint8_t *buf = mlt_pool_alloc( size ); - output = mlt_pool_alloc( sizeof( AVPicture ) ); - avpicture_fill( output, buf, PIX_FMT_YUV422, *width, *height ); - mlt_properties_set_data( properties, "video_output_frame", output, 0, ( mlt_destructor )mlt_pool_release, NULL ); - mlt_properties_set_data( properties, "video_output_buffer", buf, 0, ( mlt_destructor )mlt_pool_release, NULL ); - } + // Construct the output image + *buffer = mlt_pool_alloc( size ); // Seek if necessary if ( position != expected ) @@ -418,28 +503,25 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Fast forward - seeking is inefficient for small distances - just ignore following frames ignore = position - expected; } - else if ( position < expected || position - expected >= 12 ) + else if ( codec_context->gop_size == 0 || ( position < expected || position - expected >= 12 ) ) { // Set to the real timecode av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ); // Remove the cached info relating to the previous position mlt_properties_set_double( properties, 
"current_time", real_timecode ); - mlt_properties_set_data( properties, "current_image", NULL, 0, NULL, NULL ); + + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + av_frame = NULL; } } // Duplicate the last image if necessary - if ( mlt_properties_get_data( properties, "current_image", NULL ) != NULL && - ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) ) + if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) && + strcmp( mlt_properties_get( properties, "resource" ), "pipe:" ) ) { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - // Duplicate it - *buffer = mlt_pool_alloc( size ); - memcpy( *buffer, image, size ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); // Set this on the frame properties mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); @@ -448,10 +530,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { int ret = 0; int got_picture = 0; - AVFrame frame; memset( &pkt, 0, sizeof( pkt ) ); - memset( &frame, 0, sizeof( frame ) ); + + // Construct an AVFrame for YUV422 conversion + if ( av_frame == NULL ) + { + av_frame = calloc( 1, sizeof( AVFrame ) ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, free, NULL ); + } while( ret >= 0 && !got_picture ) { @@ -462,7 +549,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 ) { // Decode the image - ret = avcodec_decode_video( codec_context, &frame, &got_picture, pkt.data, pkt.size ); + ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); if ( got_picture ) { @@ -486,7 +573,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { got_picture = 0; } - mlt_properties_set_int( properties, "top_field_first", frame.top_field_first ); + mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first ); } } @@ -497,71 +584,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Now handle the picture if we have one if ( got_picture ) { - // Get current image and size - int size = 0; - uint8_t *image = mlt_properties_get_data( properties, "current_image", &size ); - - if ( image == NULL || size != *width * *height * 2 ) - { - size = *width * ( *height + 1 ) * 2; - image = mlt_pool_alloc( size ); - mlt_properties_set_data( properties, "current_image", image, size, ( mlt_destructor )mlt_pool_release, NULL ); - } - - *buffer = mlt_pool_alloc( size ); - - // EXPERIMENTAL IMAGE NORMALISATIONS - if ( codec_context->pix_fmt == PIX_FMT_YUV420P ) - { - register int i, j; - register int half = *width >> 1; - register uint8_t *Y = ( ( AVPicture * )&frame )->data[ 0 ]; - register uint8_t *U = ( ( AVPicture * )&frame )->data[ 1 ]; - register uint8_t *V = ( ( AVPicture * )&frame )->data[ 2 ]; - register uint8_t *d = *buffer; - register uint8_t *y, *u, *v; - - i = *height >> 1; - while ( i -- ) - { - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - y = Y; - u = U; - v = V; - j = half; - 
while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )&frame )->linesize[ 0 ]; - U += ( ( AVPicture * )&frame )->linesize[ 1 ]; - V += ( ( AVPicture * )&frame )->linesize[ 2 ]; - } - } - else - { - img_convert( output, PIX_FMT_YUV422, (AVPicture *)&frame, codec_context->pix_fmt, *width, *height ); - memcpy( *buffer, output->data[ 0 ], size ); - } - - memcpy( image, *buffer, size ); - mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); if ( current_time == 0 && source_fps != 0 ) { @@ -636,14 +661,21 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // No codec, no show... if ( codec != NULL ) { - double aspect_ratio = 1; double source_fps = 0; - // Set aspect ratio + // XXX: We won't know the real aspect ratio until an image is decoded + // but we do need it now (to satisfy filter_resize) - take a guess based + // on pal/ntsc if ( codec_context->sample_aspect_ratio.num > 0 ) - aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); + { + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } + else + { + int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; + mlt_properties_set_double( properties, "aspect_ratio", is_pal ? 128.0/117.0 : 72.0/79.0 ); + } - mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); //fprintf( stderr, "AVFORMAT: sample aspect %f %dx%d\n", av_q2d( codec_context->sample_aspect_ratio ), codec_context->width, codec_context->height ); // Determine the fps
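
For reference, below is a minimal standalone sketch of the fast path the new convert_image() takes when the decoder delivers PIX_FMT_YUV420P and the caller asks for mlt_image_yuv422: each chroma row of the planar 4:2:0 frame is reused for two consecutive luma rows, and the samples are interleaved into packed Y U Y V output. The helper name, the flat pointer/stride signature, and the assumptions of a tightly packed destination (stride = 2 * width) and even dimensions are illustrative only; the patch itself reads the plane pointers and linesize[] values straight from the decoded AVFrame.

#include <stdint.h>

/* Planar 4:2:0 -> packed 4:2:2 (Y U Y V), mirroring the YUV420P branch of
 * convert_image(). Hypothetical helper, not part of the patch itself. */
static void yuv420p_to_yuv422( const uint8_t *Y, int y_stride,
                               const uint8_t *U, int u_stride,
                               const uint8_t *V, int v_stride,
                               uint8_t *dst, int width, int height )
{
	int i, j;
	int half = width >> 1;

	for ( i = 0; i < height >> 1; i ++ )
	{
		const uint8_t *y = Y, *u = U, *v = V;

		/* First luma row of the pair. */
		for ( j = 0; j < half; j ++ )
		{
			*dst ++ = *y ++;	/* Y0 */
			*dst ++ = *u ++;	/* U  */
			*dst ++ = *y ++;	/* Y1 */
			*dst ++ = *v ++;	/* V  */
		}

		/* Second luma row shares the same chroma row, since 4:2:0
		 * halves the chroma resolution vertically as well. */
		Y += y_stride;
		y = Y; u = U; v = V;
		for ( j = 0; j < half; j ++ )
		{
			*dst ++ = *y ++;
			*dst ++ = *u ++;
			*dst ++ = *y ++;
			*dst ++ = *v ++;
		}

		/* Advance every plane to the next row pair. */
		Y += y_stride;
		U += u_stride;
		V += v_stride;
	}
}

The buffer this fills is width * height * 2 bytes, which corresponds to the mlt_image_yuv422 case of the size switch added to producer_get_image(); the extra row in width * ( height + 1 ) * 2 there is allocation headroom, matching what the removed avpicture_get_size( PIX_FMT_YUV422, *width, *height + 1 ) call used to reserve.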