// ffmpeg Header files
#include <avformat.h>
+#include <opt.h>
#ifdef SWSCALE
-#include <swscale.h>
+# include <swscale.h>
+#endif
+#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+# include "audioconvert.h"
#endif
// System header files
/** Find the default streams.
*/
+// Find the default (first) audio and video stream indices and, as a side
+// effect, publish per-stream metadata on meta_media under "meta.media.*"
+// keys (stream type, frame rate, aspect ratios, codec name, bit rate, etc.).
+// Streams whose codec cannot be found are skipped. Returns meta_media.
-static void find_default_streams( AVFormatContext *context, int *audio_index, int *video_index )
+static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatContext *context, int *audio_index, int *video_index )
 {
 	int i;
+	char key[200];
+
+	mlt_properties_set_int( meta_media, "meta.media.nb_streams", context->nb_streams );

 	// Allow for multiple audio and video streams in the file and select first of each (if available)
 	for( i = 0; i < context->nb_streams; i++ )
 	{
 		// Get the codec context
-		AVCodecContext *codec_context = context->streams[ i ]->codec;
+		AVStream *stream = context->streams[ i ];
+		if ( ! stream ) continue;
+		AVCodecContext *codec_context = stream->codec;
+		if ( ! codec_context ) continue;
+		AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
+		if ( ! codec ) continue;
-		if ( avcodec_find_decoder( codec_context->codec_id ) == NULL )
-			continue;
+		// Prime key with the stream.type property name; the switch below
+		// writes "video"/"audio" through it before reusing key for other
+		// per-stream properties.
+		snprintf( key, sizeof(key), "meta.media.%d.stream.type", i );

 		// Determine the type and obtain the first index of each type
-		switch( codec_context->codec_type )
+		switch( codec_context->codec_type )
 		{
 			case CODEC_TYPE_VIDEO:
 				if ( *video_index < 0 )
 					*video_index = i;
+				mlt_properties_set( meta_media, key, "video" );
+				snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
+				mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->r_frame_rate ) );
+				snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
+				mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
+				snprintf( key, sizeof(key), "meta.media.%d.codec.pix_fmt", i );
+				mlt_properties_set( meta_media, key, avcodec_get_pix_fmt_name( codec_context->pix_fmt ) );
+				snprintf( key, sizeof(key), "meta.media.%d.codec.sample_aspect_ratio", i );
+				mlt_properties_set_double( meta_media, key, av_q2d( codec_context->sample_aspect_ratio ) );
 				break;
 			case CODEC_TYPE_AUDIO:
 				if ( *audio_index < 0 )
 					*audio_index = i;
+				mlt_properties_set( meta_media, key, "audio" );
+				snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
+				mlt_properties_set( meta_media, key, avcodec_get_sample_fmt_name( codec_context->sample_fmt ) );
+				snprintf( key, sizeof(key), "meta.media.%d.codec.sample_rate", i );
+				mlt_properties_set_int( meta_media, key, codec_context->sample_rate );
+				snprintf( key, sizeof(key), "meta.media.%d.codec.channels", i );
+				mlt_properties_set_int( meta_media, key, codec_context->channels );
 				break;
 			default:
 				break;
 		}
+//		snprintf( key, sizeof(key), "meta.media.%d.stream.time_base", i );
+//		mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->time_base ) );
+		// Common codec-level properties published for every usable stream.
+		snprintf( key, sizeof(key), "meta.media.%d.codec.name", i );
+		mlt_properties_set( meta_media, key, codec->name );
+		snprintf( key, sizeof(key), "meta.media.%d.codec.long_name", i );
+		mlt_properties_set( meta_media, key, codec->long_name );
+		snprintf( key, sizeof(key), "meta.media.%d.codec.bit_rate", i );
+		mlt_properties_set_int( meta_media, key, codec_context->bit_rate );
+//		snprintf( key, sizeof(key), "meta.media.%d.codec.time_base", i );
+//		mlt_properties_set_double( meta_media, key, av_q2d( codec_context->time_base ) );
+		snprintf( key, sizeof(key), "meta.media.%d.codec.profile", i );
+		mlt_properties_set_int( meta_media, key, codec_context->profile );
+		snprintf( key, sizeof(key), "meta.media.%d.codec.level", i );
+		mlt_properties_set_int( meta_media, key, codec_context->level );
 	}
+
+	return meta_media;
 }
/** Producer file destructor.
}
}
+// Returns nonzero when the raw DV frame in pkt is PAL, by testing the DSF
+// bit (bit 7) of the fourth header byte.
+// NOTE(review): assumes pkt->data is non-NULL and pkt->size >= 4 — a
+// truncated packet would read out of bounds; confirm callers guarantee a
+// complete DV header before calling.
+static inline int dv_is_pal( AVPacket *pkt )
+{
+	return pkt->data[3] & 0x80;
+}
+
+// Returns nonzero when the raw DV frame in pkt signals a 16:9 display.
+// Scans the VAUX area for a Video Source Control pack (id 0x61) and checks
+// the 3-bit display-select field; values 2 and 7 indicate widescreen.
+// NOTE(review): the loop guards pkt->data[ i ] with i < pkt->size, but the
+// subsequent pkt->data[ i + 2 ] read is not bounds-checked — a packet
+// truncated inside the last pack could read up to 2 bytes past the buffer;
+// verify inputs are whole DIF blocks.
+static int dv_is_wide( AVPacket *pkt )
+{
+	int i = 80 /* block size */ *3 /* VAUX starts at block 3 */ +3 /* skip block header */;
+
+	for ( ; i < pkt->size; i += 5 /* packet size */ )
+	{
+		if ( pkt->data[ i ] == 0x61 )
+		{
+			uint8_t x = pkt->data[ i + 2 ] & 0x7;
+			return ( x == 2 ) || ( x == 7 );
+		}
+	}
+	return 0;
+}
+
+// Compute the sample (pixel) aspect ratio for a video stream as a double.
+//
+// For DV video: when a raw DV packet is supplied, read the PAL/NTSC and
+// widescreen flags directly from the bitstream and return the canonical
+// 720-wide ITU-R 601 ratios; when pkt is NULL, coerce FFmpeg's
+// 704-width-based DV ratios to those same values (rationale in the comment
+// below). For all other codecs: prefer the codec's sample_aspect_ratio,
+// falling back to the stream's where the libavformat version provides one.
+// Returns 1.0 when no aspect information is available. pkt may be NULL.
+static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
+{
+	double aspect_ratio = 1.0;
+
+	if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
+	{
+		if ( pkt )
+		{
+			if ( dv_is_pal( pkt ) )
+			{
+				aspect_ratio = dv_is_wide( pkt )
+					? 64.0/45.0 // 16:9 PAL
+					: 16.0/15.0; // 4:3 PAL
+			}
+			else
+			{
+				aspect_ratio = dv_is_wide( pkt )
+					? 32.0/27.0 // 16:9 NTSC
+					: 8.0/9.0; // 4:3 NTSC
+			}
+		}
+		else
+		{
+			AVRational ar =
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
+				stream->sample_aspect_ratio;
+#else
+				codec_context->sample_aspect_ratio;
+#endif
+			// Override FFmpeg's notion of DV aspect ratios, which are
+			// based upon a width of 704. Since we do not have a normaliser
+			// that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
+			// we just coerce the values to facilitate a passive behaviour through
+			// the rescale normaliser when using equivalent producers and consumers.
+			// = display_aspect / (width * height)
+			if ( ar.num == 10 && ar.den == 11 )
+				aspect_ratio = 8.0/9.0; // 4:3 NTSC
+			else if ( ar.num == 59 && ar.den == 54 )
+				aspect_ratio = 16.0/15.0; // 4:3 PAL
+			else if ( ar.num == 40 && ar.den == 33 )
+				aspect_ratio = 32.0/27.0; // 16:9 NTSC
+			else if ( ar.num == 118 && ar.den == 81 )
+				aspect_ratio = 64.0/45.0; // 16:9 PAL
+		}
+	}
+	else
+	{
+		// Non-DV: codec-level SAR wins; stream-level SAR is the fallback
+		// (only meaningful on libavformat >= 52.21.0, else forced invalid).
+		AVRational codec_sar = codec_context->sample_aspect_ratio;
+		AVRational stream_sar =
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
+			stream->sample_aspect_ratio;
+#else
+			{ 0, 1 };
+#endif
+		if ( codec_sar.num > 0 )
+			aspect_ratio = av_q2d( codec_sar );
+		else if ( stream_sar.num > 0 )
+			aspect_ratio = av_q2d( stream_sar );
+	}
+	return aspect_ratio;
+}
+
/** Open the file.
*/
}
// Find default audio and video streams
- find_default_streams( context, &audio_index, &video_index );
+ find_default_streams( properties, context, &audio_index, &video_index );
if ( context->start_time != AV_NOPTS_VALUE )
mlt_properties_set_double( properties, "_start_time", context->start_time );
AVCodecContext *codec_context = context->streams[ video_index ]->codec;
mlt_properties_set_int( properties, "width", codec_context->width );
mlt_properties_set_int( properties, "height", codec_context->height );
- mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) );
+
+ if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
+ {
+ // Fetch the first frame of DV so we can read it directly
+ AVPacket pkt;
+ int ret = 0;
+ while ( ret >= 0 )
+ {
+ ret = av_read_frame( context, &pkt );
+ if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
+ {
+ mlt_properties_set_double( properties, "aspect_ratio",
+ get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
+ break;
+ }
+ }
+ }
+ else
+ {
+ mlt_properties_set_double( properties, "aspect_ratio",
+ get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
+ }
}
// Read Metadata
#endif
}
+/** Allocate the image buffer and set it on the frame.
+
+    Sets "width"/"height" on frame_properties from the codec context, sizes
+    the buffer for *format (coercing unknown formats to yuv422), allocates it
+    from the mlt pool, and attaches it to frame_properties as "image" with
+    mlt_pool_release as destructor (the frame owns the buffer).
+
+    Returns the buffer size in bytes, or 0 when the codec has no dimensions
+    yet or allocation fails — callers must check before writing into *buffer.
+*/
+
+static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *codec_context, uint8_t **buffer, mlt_image_format *format, int *width, int *height )
+{
+	int size = 0;
+
+	// Dimensions are unknown until the first frame decodes; nothing to do.
+	if ( codec_context->width == 0 || codec_context->height == 0 )
+		return size;
+
+	*width = codec_context->width;
+	*height = codec_context->height;
+	mlt_properties_set_int( frame_properties, "width", *width );
+	mlt_properties_set_int( frame_properties, "height", *height );
+
+	// The ( *height + 1 ) padding gives converters one spare row.
+	switch ( *format )
+	{
+		case mlt_image_yuv420p:
+			size = *width * 3 * ( *height + 1 ) / 2;
+			break;
+		case mlt_image_rgb24:
+			size = *width * ( *height + 1 ) * 3;
+			break;
+		default:
+			*format = mlt_image_yuv422;
+			size = *width * ( *height + 1 ) * 2;
+			break;
+	}
+
+	// Construct the output image
+	*buffer = mlt_pool_alloc( size );
+	if ( *buffer )
+		mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
+	else
+		size = 0;
+
+	return size;
+}
+
/** Get an image from a frame.
*/
// Get the seekable status
int seekable = mlt_properties_get_int( properties, "seekable" );
- // Generate the size in bytes
- int size = 0;
-
// Hopefully provide better support for streams...
int av_bypass = mlt_properties_get_int( properties, "av_bypass" );
// Determines if we have to decode all frames in a sequence
int must_decode = 1;
- // Set the result arguments that we know here (only *buffer is now required)
- *width = codec_context->width;
- *height = codec_context->height;
-
- switch ( *format )
- {
- case mlt_image_yuv420p:
- size = *width * 3 * ( *height + 1 ) / 2;
- break;
- case mlt_image_rgb24:
- size = *width * ( *height + 1 ) * 3;
- break;
- default:
- *format = mlt_image_yuv422;
- size = *width * ( *height + 1 ) * 2;
- break;
- }
-
- // Set this on the frame properties
- mlt_properties_set_int( frame_properties, "width", *width );
- mlt_properties_set_int( frame_properties, "height", *height );
-
- // Construct the output image
- *buffer = mlt_pool_alloc( size );
-
// Temporary hack to improve intra frame only
must_decode = strcmp( codec_context->codec->name, "mjpeg" ) &&
strcmp( codec_context->codec->name, "rawvideo" ) &&
if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 )
{
// Duplicate it
- convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
-
- // Set this on the frame properties
- mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL );
+ if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
+ convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
+ else
+ mlt_frame_get_image( frame, buffer, format, width, height, writable );
}
else
{
// Construct an AVFrame for YUV422 conversion
if ( av_frame == NULL )
- {
av_frame = avcodec_alloc_frame( );
- mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
- }
while( ret >= 0 && !got_picture )
{
// Now handle the picture if we have one
if ( got_picture )
{
- mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
- mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
- convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
- mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
- mlt_properties_set_int( properties, "_current_position", int_position );
- mlt_properties_set_int( properties, "_got_picture", 1 );
+ if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
+ {
+ convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
+ mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
+ mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
+ mlt_properties_set_int( properties, "_current_position", int_position );
+ mlt_properties_set_int( properties, "_got_picture", 1 );
+ mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
+ }
+ else
+ {
+ got_picture = 0;
+ }
}
}
if ( !got_picture )
return 0;
}
+/** Process properties as AVOptions and apply to AV context obj
+
+    For each property whose name matches an AVOption on obj (filtered by
+    flags, e.g. AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM), set
+    the option from the property's string value. Unrecognised properties are
+    silently ignored, as are option-setting failures (return values of
+    av_set_string/av_set_string2 are not checked).
+*/
+
+static void apply_properties( void *obj, mlt_properties properties, int flags )
+{
+	int i;
+	int count = mlt_properties_count( properties );
+	for ( i = 0; i < count; i++ )
+	{
+		const char *opt_name = mlt_properties_get_name( properties, i );
+		const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
+		if ( opt != NULL )
+#if LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
+			// av_set_string2 (lavc >= 51.59.0) takes an alloc flag; 0 means
+			// obj keeps no reference to our string.
+			av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
+#else
+			av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
+#endif
+	}
+}
+
/** Set up video handling.
*/
context = mlt_properties_get_data( properties, "video_context", NULL );
mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
mlt_events_unblock( properties, this );
+
+ // Process properties as AVOptions
+ apply_properties( context, properties, AV_OPT_FLAG_DECODING_PARAM );
}
// Exception handling for video_index
if ( context && index >= (int) context->nb_streams )
{
+ // Get the last video stream
for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO; --index );
mlt_properties_set_int( properties, "video_index", index );
}
if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO )
{
+ // Invalidate the video stream
index = -1;
mlt_properties_set_int( properties, "video_index", index );
}
- // Update the video properties if the index changed
- if ( index > -1 && index != mlt_properties_get_int( properties, "_video_index" ) )
- {
- // Fetch the width, height and aspect ratio
- AVCodecContext *codec_context = context->streams[ index ]->codec;
- mlt_properties_set_int( properties, "_video_index", index );
- mlt_properties_set_data( properties, "video_codec", NULL, 0, NULL, NULL );
- mlt_properties_set_int( properties, "width", codec_context->width );
- mlt_properties_set_int( properties, "height", codec_context->height );
- mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) );
- }
-
// Get the frame properties
mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
- if ( context != NULL && index > -1 )
+ if ( context && index > -1 )
{
// Get the video stream
AVStream *stream = context->streams[ index ];
// Get the codec
AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );
+ // Update the video properties if the index changed
+ if ( index != mlt_properties_get_int( properties, "_video_index" ) )
+ {
+ // Reset the video properties if the index changed
+ mlt_properties_set_int( properties, "_video_index", index );
+ mlt_properties_set_data( properties, "video_codec", NULL, 0, NULL, NULL );
+ mlt_properties_set_int( properties, "width", codec_context->width );
+ mlt_properties_set_int( properties, "height", codec_context->height );
+ // TODO: get the first usable AVPacket and reset the stream position
+ mlt_properties_set_double( properties, "aspect_ratio",
+ get_aspect_ratio( context->streams[ index ], codec_context, NULL ) );
+ codec = NULL;
+ }
+
// Initialise the codec if necessary
if ( codec == NULL )
{
{
// Remember that we can't use this later
mlt_properties_set_int( properties, "video_index", -1 );
+ index = -1;
}
avformat_unlock( );
+
+ // Process properties as AVOptions
+ apply_properties( codec_context, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
}
// No codec, no show...
- if ( codec != NULL )
+ if ( codec && index > -1 )
{
double source_fps = 0;
- int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" );
double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
- double aspect_ratio;
-
- if ( strcmp( codec_context->codec->name, "dvvideo" ) == 0 )
- {
- // Override FFmpeg's notion of DV aspect ratios, which are
- // based upon a width of 704. Since we do not have a normaliser
- // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
- // we just coerce the values to facilitate a passive behaviour through
- // the rescale normaliser when using equivalent producers and consumers.
- // = display_aspect / (width * height)
- if ( codec_context->sample_aspect_ratio.num == 10 &&
- codec_context->sample_aspect_ratio.den == 11 )
- force_aspect_ratio = 8.0/9.0; // 4:3 NTSC
- else if ( codec_context->sample_aspect_ratio.num == 59 &&
- codec_context->sample_aspect_ratio.den == 54 )
- force_aspect_ratio = 16.0/15.0; // 4:3 PAL
- else if ( codec_context->sample_aspect_ratio.num == 40 &&
- codec_context->sample_aspect_ratio.den == 33 )
- force_aspect_ratio = 32.0/27.0; // 16:9 NTSC
- else if ( codec_context->sample_aspect_ratio.num == 118 &&
- codec_context->sample_aspect_ratio.den == 81 )
- force_aspect_ratio = 64.0/45.0; // 16:9 PAL
- }
-
- // XXX: We won't know the real aspect ratio until an image is decoded
- // but we do need it now (to satisfy filter_resize) - take a guess based
- // on pal/ntsc
- if ( force_aspect_ratio > 0.0 )
- {
- aspect_ratio = force_aspect_ratio;
- }
- else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 )
- {
- aspect_ratio = av_q2d( codec_context->sample_aspect_ratio );
- }
- else
- {
- aspect_ratio = 1.0;
- }
+ double aspect_ratio = ( force_aspect_ratio > 0.0 ) ?
+ force_aspect_ratio : mlt_properties_get_double( properties, "aspect_ratio" );
// Determine the fps
source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );
// Obtain the resample context if it exists (not always needed)
ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
- // Obtain the audio buffer
+#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+ // Get the format converter context if it exists
+ AVAudioConvert *convert = mlt_properties_get_data( properties, "audio_convert", NULL );
+#endif
+
+ // Obtain the audio buffers
int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
+ int16_t *decode_buffer = mlt_properties_get_data( properties, "decode_buffer", NULL );
+ int16_t *convert_buffer = mlt_properties_get_data( properties, "convert_buffer", NULL );
// Get amount of audio used
int audio_used = mlt_properties_get_int( properties, "_audio_used" );
*frequency = codec_context->sample_rate;
}
+#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+ // Check for audio format converter and create if necessary
+ // TODO: support higher resolutions than 16-bit.
+ if ( convert == NULL && codec_context->sample_fmt != SAMPLE_FMT_S16 )
+ {
+ // Create single channel converter for interleaved with no mixing matrix
+ convert = av_audio_convert_alloc( SAMPLE_FMT_S16, 1, codec_context->sample_fmt, 1, NULL, 0 );
+ mlt_properties_set_data( properties, "audio_convert", convert, 0, ( mlt_destructor )av_audio_convert_free, NULL );
+ }
+#endif
+
// Check for audio buffer and create if necessary
if ( audio_buffer == NULL )
{
mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
}
+ // Check for decoder buffer and create if necessary
+ if ( decode_buffer == NULL )
+ {
+ // Allocate the audio buffer
+ decode_buffer = av_malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
+
+ // And store it on properties for reuse
+ mlt_properties_set_data( properties, "decode_buffer", decode_buffer, 0, ( mlt_destructor )av_free, NULL );
+ }
+
+#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+ // Check for format converter buffer and create if necessary
+ if ( resample && convert && convert_buffer == NULL )
+ {
+ // Allocate the audio buffer
+ convert_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
+
+ // And store it on properties for reuse
+ mlt_properties_set_data( properties, "convert_buffer", convert_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
+ }
+#endif
+
// Seek if necessary
if ( position != expected )
{
{
int ret = 0;
int got_audio = 0;
- int16_t *temp = av_malloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE );
av_init_packet( &pkt );
// Decode the audio
#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
- ret = avcodec_decode_audio2( codec_context, temp, &data_size, ptr, len );
+ ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
#else
- ret = avcodec_decode_audio( codec_context, temp, &data_size, ptr, len );
+ ret = avcodec_decode_audio( codec_context, decode_buffer, &data_size, ptr, len );
#endif
if ( ret < 0 )
{
if ( data_size > 0 )
{
- if ( resample != NULL )
+ int src_stride[6]= { av_get_bits_per_sample_format( codec_context->sample_fmt ) / 8 };
+ int dst_stride[6]= { av_get_bits_per_sample_format( SAMPLE_FMT_S16 ) / 8 };
+
+ if ( resample )
{
- audio_used += audio_resample( resample, &audio_buffer[ audio_used * *channels ], temp, data_size / ( codec_context->channels * sizeof( int16_t ) ) );
+ int16_t *source = decode_buffer;
+ int16_t *dest = &audio_buffer[ audio_used * *channels ];
+ int convert_samples = data_size / src_stride[0];
+
+#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+ if ( convert )
+ {
+ const void *src_buf[6] = { decode_buffer };
+ void *dst_buf[6] = { convert_buffer };
+ av_audio_convert( convert, dst_buf, dst_stride, src_buf, src_stride, convert_samples );
+ source = convert_buffer;
+ }
+#endif
+ audio_used += audio_resample( resample, dest, source, convert_samples / codec_context->channels );
}
else
{
- memcpy( &audio_buffer[ audio_used * *channels ], temp, data_size );
- audio_used += data_size / ( codec_context->channels * sizeof( int16_t ) );
+#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+ if ( convert )
+ {
+ const void *src_buf[6] = { decode_buffer };
+ void *dst_buf[6] = { &audio_buffer[ audio_used * *channels ] };
+ av_audio_convert( convert, dst_buf, dst_stride, src_buf, src_stride, data_size / src_stride[0] );
+ }
+ else
+#endif
+ {
+ memcpy( &audio_buffer[ audio_used * *channels ], decode_buffer, data_size );
+ }
+ audio_used += data_size / *channels / src_stride[0];
}
// Handle ignore
// Store the number of audio samples still available
mlt_properties_set_int( properties, "_audio_used", audio_used );
-
- // Release the temporary audio
- av_free( temp );
}
else
{
}
// Update the audio properties if the index changed
- if ( index > -1 && index != mlt_properties_get_int( properties, "_audio_index" ) ) {
+ if ( index > -1 && index != mlt_properties_get_int( properties, "_audio_index" ) )
+ {
mlt_properties_set_int( properties, "_audio_index", index );
mlt_properties_set_data( properties, "audio_codec", NULL, 0, NULL, NULL );
}
{
// Remember that we can't use this later
mlt_properties_set_int( properties, "audio_index", -1 );
+ index = -1;
}
avformat_unlock( );
+
+ // Process properties as AVOptions
+ apply_properties( codec_context, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
}
// No codec, no show...
- if ( codec != NULL )
+ if ( codec && index > -1 )
{
mlt_frame_push_audio( frame, producer_get_audio );
mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );