X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=d0ff408caec8298d38152e0f3ce1b72bedbdbbcd;hb=6269c02322276247410e28545a1a4e07af6e5e5b;hp=f4ccb01669a70444f3f25d8e774897b70d3abdcf;hpb=db7368b4d5da5086d80c5004ee38d5e4ecc84c8b;p=melted diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c index f4ccb01..d0ff408 100644 --- a/src/modules/avformat/producer_avformat.c +++ b/src/modules/avformat/producer_avformat.c @@ -89,7 +89,7 @@ static void find_default_streams( AVFormatContext *context, int *audio_index, in for( i = 0; i < context->nb_streams; i++ ) { // Get the codec context - AVCodecContext *codec_context = &context->streams[ i ]->codec; + AVCodecContext *codec_context = context->streams[ i ]->codec; // Determine the type and obtain the first index of each type switch( codec_context->codec_type ) @@ -242,8 +242,7 @@ static int producer_open( mlt_producer this, char *file ) } // Now attempt to open the file - error = av_open_input_file( &context, file, format, 0, params ); - error = error < 0; + error = av_open_input_file( &context, file, format, 0, params ) < 0; // Cleanup AVFormatParameters free( standard ); @@ -276,17 +275,26 @@ static int producer_open( mlt_producer this, char *file ) find_default_streams( context, &audio_index, &video_index ); if ( context->start_time != AV_NOPTS_VALUE ) - mlt_properties_set_double( properties, "start_time", context->start_time ); + mlt_properties_set_double( properties, "_start_time", context->start_time ); // Check if we're seekable (something funny about mpeg here :-/) if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) ) - mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 ); + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 ); else av_bypass = 1; // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); + + // Fetch the width, height and aspect ratio + if ( video_index != -1 ) + { + AVCodecContext *codec_context = context->streams[ video_index ]->codec; + mlt_properties_set_int( properties, "width", codec_context->width ); + mlt_properties_set_int( properties, "height", codec_context->height ); + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 ) @@ -344,51 +352,7 @@ static double producer_time_of_frame( mlt_producer this, mlt_position position ) static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) { - // EXPERIMENTAL IMAGE NORMALISATIONS - if ( pix_fmt == PIX_FMT_YUV420P && format == mlt_image_yuv422 ) - { - register int i, j; - register int half = width >> 1; - register uint8_t *Y = ( ( AVPicture * )frame )->data[ 0 ]; - register uint8_t *U = ( ( AVPicture * )frame )->data[ 1 ]; - register uint8_t *V = ( ( AVPicture * )frame )->data[ 2 ]; - register uint8_t *d = buffer; - register uint8_t *y, *u, *v; - - i = height >> 1; - while ( i -- ) - { - y = Y; - u = U; - v = V; - j = half; - while 
( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )frame )->linesize[ 0 ]; - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )frame )->linesize[ 0 ]; - U += ( ( AVPicture * )frame )->linesize[ 1 ]; - V += ( ( AVPicture * )frame )->linesize[ 2 ]; - } - } - else if ( format == mlt_image_yuv420p ) + if ( format == mlt_image_yuv420p ) { AVPicture pict; pict.data[0] = buffer; @@ -437,7 +401,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int index = mlt_properties_get_int( properties, "video_index" ); // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "video_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_video_expected" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -446,7 +410,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; @@ -461,7 +425,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int ignore = 0; // Current time calcs - double current_time = mlt_properties_get_double( properties, "current_time" ); + double current_time = mlt_properties_get_double( properties, "_current_time" ); // We may want to use the source fps if available double source_fps = mlt_properties_get_double( properties, "source_fps" ); @@ -503,7 +467,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Seek if necessary if ( position != expected ) { - if ( position + 1 == expected ) + if ( av_frame != NULL && position + 1 == expected ) { // We're paused - use last image paused = 1; @@ -516,19 +480,18 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form else if ( seekable && ( position < expected || position - expected >= 12 ) ) { // Set to the real timecode - av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ); + av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ); // Remove the cached info relating to the previous position - mlt_properties_set_double( properties, "current_time", real_timecode ); + mlt_properties_set_double( properties, "_current_time", real_timecode ); mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); av_frame = NULL; } } - // Duplicate the last image if necessary - if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) && - av_bypass == 0 ) + // Duplicate the last image if necessary (see comment on rawvideo below) + if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "_current_time" ) >= real_timecode ) && av_bypass == 0 ) { // Duplicate it convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); @@ -543,16 +506,16 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int must_decode = 1; // Temporary hack to improve intra frame only - if ( !strcmp( codec_context->codec->name, "mjpeg" ) ) + if ( !strcmp( 
codec_context->codec->name, "mjpeg" ) || !strcmp( codec_context->codec->name, "rawvideo" ) ) must_decode = 0; - memset( &pkt, 0, sizeof( pkt ) ); + av_init_packet( &pkt ); // Construct an AVFrame for YUV422 conversion if ( av_frame == NULL ) { - av_frame = calloc( 1, sizeof( AVFrame ) ); - mlt_properties_set_data( properties, "av_frame", av_frame, 0, free, NULL ); + av_frame = avcodec_alloc_frame( ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL ); } while( ret >= 0 && !got_picture ) @@ -589,40 +552,46 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form { got_picture = 0; } - mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first ); } } - // We're finished with this packet regardless - av_free_packet( &pkt ); - } + // Now handle the picture if we have one + if ( got_picture ) + { + mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame ); + mlt_properties_set_int( frame_properties, "top_field_first", av_frame->top_field_first ); - // Now handle the picture if we have one - if ( got_picture ) - { - convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); + convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); - mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); - if ( current_time == 0 && source_fps != 0 ) - { - double fps = mlt_properties_get_double( properties, "fps" ); - current_time = ceil( source_fps * ( double )position / fps ) * ( 1 / source_fps ); - mlt_properties_set_double( properties, "current_time", current_time ); - } - else - { - mlt_properties_set_double( properties, "current_time", current_time ); + if ( current_time == 0 && source_fps != 0 ) + { + double fps = mlt_properties_get_double( properties, "fps" ); + current_time = ceil( source_fps * ( double )position / fps ) * ( 1 / source_fps ); + mlt_properties_set_double( properties, "_current_time", current_time ); + } + else + { + mlt_properties_set_double( properties, "_current_time", current_time ); + } } + + // We're finished with this packet regardless + av_free_packet( &pkt ); } } - + + // Very untidy - for rawvideo, the packet contains the frame, hence the free packet + // above will break the pause behaviour - so we wipe the frame now + if ( !strcmp( codec_context->codec->name, "rawvideo" ) ) + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + // Set the field order property for this frame - mlt_properties_set_int( frame_properties, "top_field_first", - mlt_properties_get_int( properties, "top_field_first" ) ); + mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) ); // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "video_expected", position + 1 ); + mlt_properties_set_position( properties, "_video_expected", position + 1 ); return 0; } @@ -650,7 +619,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL ); @@ -679,18 +648,24 @@ static 
void producer_set_up_video( mlt_producer this, mlt_frame frame ) { double source_fps = 0; int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" ); + double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" ); + double aspect_ratio; // XXX: We won't know the real aspect ratio until an image is decoded // but we do need it now (to satisfy filter_resize) - take a guess based // on pal/ntsc - if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 ) + if ( force_aspect_ratio > 0.0 ) { - mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + aspect_ratio = force_aspect_ratio; + } + else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 ) + { + aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); } else { int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; - mlt_properties_set_double( properties, "aspect_ratio", is_pal ? 59.0/54.0 : 10.0/11.0 ); + aspect_ratio = is_pal ? 59.0/54.0 : 10.0/11.0; } // Determine the fps @@ -699,10 +674,12 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // We'll use fps if it's available if ( source_fps > 0 && source_fps < 30 ) mlt_properties_set_double( properties, "source_fps", source_fps ); + mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); // Set the width and height mlt_properties_set_int( frame_properties, "width", codec_context->width ); mlt_properties_set_int( frame_properties, "height", codec_context->height ); + mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio ); mlt_frame_push_get_image( frame, producer_get_image ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); @@ -745,7 +722,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int seekable = mlt_properties_get_int( properties, "seekable" ); // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "audio_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" ); // Obtain the resample context if it exists (not always needed) ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL ); @@ -754,7 +731,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL ); // Get amount of audio used - int audio_used = mlt_properties_get_int( properties, "audio_used" ); + int audio_used = mlt_properties_get_int( properties, "_audio_used" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -763,7 +740,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; @@ -815,7 +792,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form else if ( position < expected || position - expected >= 12 ) { // Set to the real timecode - if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 ) + if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, 
AVSEEK_FLAG_BACKWARD ) != 0 ) paused = 1; // Clear the usage in the audio buffer @@ -830,7 +807,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int got_audio = 0; int16_t *temp = mlt_pool_alloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE ); - memset( &pkt, 0, sizeof( pkt ) ); + av_init_packet( &pkt ); while( ret >= 0 && !got_audio ) { @@ -910,7 +887,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form } // Store the number of audio samples still available - mlt_properties_set_int( properties, "audio_used", audio_used ); + mlt_properties_set_int( properties, "_audio_used", audio_used ); // Release the temporary audio mlt_pool_release( temp ); @@ -923,7 +900,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Regardless of speed (other than paused), we expect to get the next frame if ( !paused ) - mlt_properties_set_position( properties, "audio_expected", position + 1 ); + mlt_properties_set_position( properties, "_audio_expected", position + 1 ); return 0; } @@ -952,7 +929,7 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL ); @@ -982,6 +959,8 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) { mlt_frame_push_audio( frame, producer_get_audio ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); + mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate ); + mlt_properties_set_int( frame_properties, "channels", codec_context->channels ); } } }
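
Below is a minimal, standalone sketch (not taken from the patch itself) of the stream-probing pattern the hunks above rely on: open the file, probe the stream info, then read width, height and sample aspect ratio through streams[ i ]->codec, which this change treats as a pointer rather than an embedded struct. It assumes the same era of libavformat API the patch targets ( av_open_input_file, av_find_stream_info, CODEC_TYPE_VIDEO ); the function name probe_video_geometry and the header path are illustrative only, not identifiers from the tree.

#include <stdio.h>
#include <avformat.h>	/* newer FFmpeg layouts install this as <libavformat/avformat.h> */

/* Illustrative helper (not part of the patch): report the geometry of the
   first video stream, mirroring the probing done in producer_open() and the
   "Fetch the width, height and aspect ratio" hunk above. */
static int probe_video_geometry( const char *file )
{
	AVFormatContext *context = NULL;
	unsigned int i;
	int found = -1;

	av_register_all( );

	/* Same open/probe sequence the producer uses */
	if ( av_open_input_file( &context, file, NULL, 0, NULL ) < 0 )
		return -1;

	if ( av_find_stream_info( context ) < 0 )
	{
		av_close_input_file( context );
		return -1;
	}

	for ( i = 0; i < context->nb_streams && found < 0; i++ )
	{
		/* As in the patch, streams[ i ]->codec is now a pointer */
		AVCodecContext *codec_context = context->streams[ i ]->codec;
		if ( codec_context->codec_type == CODEC_TYPE_VIDEO )
		{
			printf( "%dx%d sar=%f\n", codec_context->width, codec_context->height,
				av_q2d( codec_context->sample_aspect_ratio ) );
			found = i;
		}
	}

	av_close_input_file( context );
	return found >= 0 ? 0 : -1;
}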