14a97bf7d853b361a138a21b807b266e620cb846
[melted] / src / modules / avformat / producer_avformat.c
1 /*
2 * producer_avformat.c -- avformat producer
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 // MLT Header files
23 #include <framework/mlt_producer.h>
24 #include <framework/mlt_frame.h>
25
26 // ffmpeg Header files
27 #include <avformat.h>
28 #include <opt.h>
29 #ifdef SWSCALE
30 # include <swscale.h>
31 #endif
32 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
33 # include "audioconvert.h"
34 #endif
35
36 // System header files
37 #include <stdlib.h>
38 #include <string.h>
39 #include <pthread.h>
40 #include <math.h>
41
42 void avformat_lock( );
43 void avformat_unlock( );
44
45 // Forward references.
46 static int producer_open( mlt_producer this, mlt_profile profile, char *file );
47 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );
48
49 /** Constructor for libavformat.
50 */
51
52 mlt_producer producer_avformat_init( mlt_profile profile, char *file )
53 {
54 mlt_producer this = NULL;
55
56 // Check that we have a non-NULL argument
57 if ( file != NULL )
58 {
59 // Construct the producer
60 this = calloc( 1, sizeof( struct mlt_producer_s ) );
61
62 // Initialise it
63 if ( mlt_producer_init( this, NULL ) == 0 )
64 {
65 // Get the properties
66 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
67
68 // Set the resource property (required for all producers)
69 mlt_properties_set( properties, "resource", file );
70
71 // Register our get_frame implementation
72 this->get_frame = producer_get_frame;
73
74 // Open the file
75 if ( producer_open( this, profile, file ) != 0 )
76 {
77 // Clean up
78 mlt_producer_close( this );
79 this = NULL;
80 }
81 else
82 {
83 // Close the file to release resources for large playlists - reopen later as needed
84 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
85 mlt_properties_set_data( properties, "audio_context", NULL, 0, NULL, NULL );
86 mlt_properties_set_data( properties, "video_context", NULL, 0, NULL, NULL );
87
88 // Default the user-selectable indices from the auto-detected indices
89 mlt_properties_set_int( properties, "audio_index", mlt_properties_get_int( properties, "_audio_index" ) );
90 mlt_properties_set_int( properties, "video_index", mlt_properties_get_int( properties, "_video_index" ) );
91 }
92 }
93 }
94
95 return this;
96 }
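/* A minimal usage sketch of this constructor (illustrative only - the profile
 * name and file path below are made up, and in a normal build this entry point
 * is reached through the module factory rather than called directly):
 *
 *     #include <framework/mlt.h>
 *
 *     mlt_profile profile = mlt_profile_init( "dv_pal" );
 *     mlt_producer producer = producer_avformat_init( profile, "clip.dv" );
 *     if ( producer != NULL )
 *     {
 *         mlt_properties props = MLT_PRODUCER_PROPERTIES( producer );
 *         int video_index = mlt_properties_get_int( props, "video_index" );
 *         mlt_position length = mlt_properties_get_position( props, "length" );
 *         mlt_producer_close( producer );
 *     }
 */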
97
98 /** Find the default streams.
99 */
100
101 static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatContext *context, int *audio_index, int *video_index )
102 {
103 int i;
104 char key[200];
105
106 mlt_properties_set_int( meta_media, "meta.media.nb_streams", context->nb_streams );
107
108 // Allow for multiple audio and video streams in the file and select first of each (if available)
109 for( i = 0; i < context->nb_streams; i++ )
110 {
111 // Get the codec context
112 AVStream *stream = context->streams[ i ];
113 if ( ! stream ) continue;
114 AVCodecContext *codec_context = stream->codec;
115 if ( ! codec_context ) continue;
116 AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
117 if ( ! codec ) continue;
118
119 snprintf( key, sizeof(key), "meta.media.%d.stream.type", i );
120
121 // Determine the type and obtain the first index of each type
122 switch( codec_context->codec_type )
123 {
124 case CODEC_TYPE_VIDEO:
125 if ( *video_index < 0 )
126 *video_index = i;
127 mlt_properties_set( meta_media, key, "video" );
128 snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
129 mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->r_frame_rate ) );
130 snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
131 mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
132 snprintf( key, sizeof(key), "meta.media.%d.codec.pix_fmt", i );
133 mlt_properties_set( meta_media, key, avcodec_get_pix_fmt_name( codec_context->pix_fmt ) );
134 snprintf( key, sizeof(key), "meta.media.%d.codec.sample_aspect_ratio", i );
135 mlt_properties_set_double( meta_media, key, av_q2d( codec_context->sample_aspect_ratio ) );
136 break;
137 case CODEC_TYPE_AUDIO:
138 if ( *audio_index < 0 )
139 *audio_index = i;
140 mlt_properties_set( meta_media, key, "audio" );
141 snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
142 mlt_properties_set( meta_media, key, avcodec_get_sample_fmt_name( codec_context->sample_fmt ) );
143 snprintf( key, sizeof(key), "meta.media.%d.codec.sample_rate", i );
144 mlt_properties_set_int( meta_media, key, codec_context->sample_rate );
145 snprintf( key, sizeof(key), "meta.media.%d.codec.channels", i );
146 mlt_properties_set_int( meta_media, key, codec_context->channels );
147 break;
148 default:
149 break;
150 }
151 // snprintf( key, sizeof(key), "meta.media.%d.stream.time_base", i );
152 // mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->time_base ) );
153 snprintf( key, sizeof(key), "meta.media.%d.codec.name", i );
154 mlt_properties_set( meta_media, key, codec->name );
155 snprintf( key, sizeof(key), "meta.media.%d.codec.long_name", i );
156 mlt_properties_set( meta_media, key, codec->long_name );
157 snprintf( key, sizeof(key), "meta.media.%d.codec.bit_rate", i );
158 mlt_properties_set_int( meta_media, key, codec_context->bit_rate );
159 // snprintf( key, sizeof(key), "meta.media.%d.codec.time_base", i );
160 // mlt_properties_set_double( meta_media, key, av_q2d( codec_context->time_base ) );
161 snprintf( key, sizeof(key), "meta.media.%d.codec.profile", i );
162 mlt_properties_set_int( meta_media, key, codec_context->profile );
163 snprintf( key, sizeof(key), "meta.media.%d.codec.level", i );
164 mlt_properties_set_int( meta_media, key, codec_context->level );
165 }
166
167 return meta_media;
168 }
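/* For illustration, a file with one video and one audio stream would yield
 * properties along these lines (hypothetical values - the actual numbers and
 * codec names depend entirely on the media being probed):
 *
 *     meta.media.nb_streams            2
 *     meta.media.0.stream.type         video
 *     meta.media.0.stream.frame_rate   25
 *     meta.media.0.codec.name          dvvideo
 *     meta.media.0.codec.pix_fmt       yuv420p
 *     meta.media.1.stream.type         audio
 *     meta.media.1.codec.name          pcm_s16le
 *     meta.media.1.codec.sample_rate   48000
 *     meta.media.1.codec.channels      2
 */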
169
170 /** Producer file destructor.
171 */
172
173 static void producer_file_close( void *context )
174 {
175 if ( context != NULL )
176 {
177 // Lock the mutex now
178 avformat_lock( );
179
180 // Close the file
181 av_close_input_file( context );
182
183 // Unlock the mutex now
184 avformat_unlock( );
185 }
186 }
187
188 /** Producer codec destructor.
189 */
190
191 static void producer_codec_close( void *codec )
192 {
193 if ( codec != NULL )
194 {
195 // Lock the mutex now
196 avformat_lock( );
197
198 // Close the codec
199 avcodec_close( codec );
200
201 // Unlock the mutex now
202 avformat_unlock( );
203 }
204 }
205
206 static inline int dv_is_pal( AVPacket *pkt )
207 {
208 return pkt->data[3] & 0x80;
209 }
210
211 static int dv_is_wide( AVPacket *pkt )
212 {
213 int i = 80 /* block size */ *3 /* VAUX starts at block 3 */ +3 /* skip block header */;
214
215 for ( ; i < pkt->size; i += 5 /* packet size */ )
216 {
217 if ( pkt->data[ i ] == 0x61 )
218 {
219 uint8_t x = pkt->data[ i + 2 ] & 0x7;
220 return ( x == 2 ) || ( x == 7 );
221 }
222 }
223 return 0;
224 }
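/* In other words: dv_is_pal() tests the DSF bit (byte 3, bit 7) of the DIF
 * header, which is set for 625/50 (PAL) material, and dv_is_wide() walks the
 * 5-byte packs of the first VAUX DIF block looking for the source control
 * pack (id 0x61), whose third byte carries the display aspect code in its
 * low three bits (2 or 7 meaning 16:9).
 */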
225
226 static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
227 {
228 double aspect_ratio = 1.0;
229
230 if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
231 {
232 if ( pkt )
233 {
234 if ( dv_is_pal( pkt ) )
235 {
236 aspect_ratio = dv_is_wide( pkt )
237 ? 64.0/45.0 // 16:9 PAL
238 : 16.0/15.0; // 4:3 PAL
239 }
240 else
241 {
242 aspect_ratio = dv_is_wide( pkt )
243 ? 32.0/27.0 // 16:9 NTSC
244 : 8.0/9.0; // 4:3 NTSC
245 }
246 }
247 else
248 {
249 AVRational ar =
250 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
251 stream->sample_aspect_ratio;
252 #else
253 codec_context->sample_aspect_ratio;
254 #endif
255 // Override FFmpeg's notion of DV aspect ratios, which are
256 // based upon a width of 704. Since we do not have a normaliser
257 // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
258 // we just coerce the values to facilitate a passive behaviour through
259 // the rescale normaliser when using equivalent producers and consumers.
260 // sample_aspect = display_aspect * height / width
261 if ( ar.num == 10 && ar.den == 11 )
262 aspect_ratio = 8.0/9.0; // 4:3 NTSC
263 else if ( ar.num == 59 && ar.den == 54 )
264 aspect_ratio = 16.0/15.0; // 4:3 PAL
265 else if ( ar.num == 40 && ar.den == 33 )
266 aspect_ratio = 32.0/27.0; // 16:9 NTSC
267 else if ( ar.num == 118 && ar.den == 81 )
268 aspect_ratio = 64.0/45.0; // 16:9 PAL
269 }
270 }
271 else
272 {
273 AVRational codec_sar = codec_context->sample_aspect_ratio;
274 AVRational stream_sar =
275 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
276 stream->sample_aspect_ratio;
277 #else
278 { 0, 1 };
279 #endif
280 if ( codec_sar.num > 0 )
281 aspect_ratio = av_q2d( codec_sar );
282 else if ( stream_sar.num > 0 )
283 aspect_ratio = av_q2d( stream_sar );
284 }
285 return aspect_ratio;
286 }
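/* The coerced sample aspect ratios above can be sanity-checked against the DV
 * frame geometry: 720x480 NTSC at 8/9 gives a display width of
 * 720 * 8 / 9 = 640, i.e. 640:480 = 4:3, and 32/27 gives about 853, i.e. ~16:9;
 * 720x576 PAL at 16/15 gives 768:576 = 4:3, and 64/45 gives 1024:576 = 16:9.
 */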
287
288 /** Open the file.
289 */
290
291 static int producer_open( mlt_producer this, mlt_profile profile, char *file )
292 {
293 // Return an error code (0 == no error)
294 int error = 0;
295
296 // Context for avformat
297 AVFormatContext *context = NULL;
298
299 // Get the properties
300 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
301
302 // We will treat everything with the producer fps
303 double fps = mlt_profile_fps( profile );
304
305 // Lock the mutex now
306 avformat_lock( );
307
308 // If "MRL", then create AVInputFormat
309 AVInputFormat *format = NULL;
310 AVFormatParameters *params = NULL;
311 char *standard = NULL;
312 char *mrl = strchr( file, ':' );
313
314 // AV option (0 = both, 1 = video, 2 = audio)
315 int av = 0;
316
317 // Setting lowest log level
318 av_log_set_level( -1 );
319
320 // Only if there is not a protocol specification that avformat can handle
321 if ( mrl && !url_exist( file ) )
322 {
323 // 'file' becomes format abbreviation
324 mrl[0] = 0;
325
326 // Lookup the format
327 format = av_find_input_format( file );
328
329 // Eat the format designator
330 file = ++mrl;
331
332 if ( format )
333 {
334 // Allocate params
335 params = calloc( sizeof( AVFormatParameters ), 1 );
336
337 // These are required by video4linux (defaults)
338 params->width = 640;
339 params->height = 480;
340 params->time_base= (AVRational){1,25};
341 // params->device = file;
342 params->channels = 2;
343 params->sample_rate = 48000;
344 }
345
346 // XXX: this does not work anymore since avdevice
347 // TODO: make producer_avdevice?
348 // Parse out params
349 mrl = strchr( file, '?' );
350 while ( mrl )
351 {
352 mrl[0] = 0;
353 char *name = strdup( ++mrl );
354 char *value = strchr( name, ':' );
355 if ( value )
356 {
357 value[0] = 0;
358 value++;
359 char *t = strchr( value, '&' );
360 if ( t )
361 t[0] = 0;
362 if ( !strcmp( name, "frame_rate" ) )
363 params->time_base.den = atoi( value );
364 else if ( !strcmp( name, "frame_rate_base" ) )
365 params->time_base.num = atoi( value );
366 else if ( !strcmp( name, "sample_rate" ) )
367 params->sample_rate = atoi( value );
368 else if ( !strcmp( name, "channels" ) )
369 params->channels = atoi( value );
370 else if ( !strcmp( name, "width" ) )
371 params->width = atoi( value );
372 else if ( !strcmp( name, "height" ) )
373 params->height = atoi( value );
374 else if ( !strcmp( name, "standard" ) )
375 {
376 standard = strdup( value );
377 params->standard = standard;
378 }
379 else if ( !strcmp( name, "av" ) )
380 av = atoi( value );
381 }
382 free( name );
383 mrl = strchr( mrl, '&' );
384 }
385 }
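/* A hypothetical MRL in the form this parser expects (format designator, then
 * device or file, then '&'-separated "name:value" overrides after '?'); the
 * device path and values here are purely illustrative:
 *
 *     video4linux:/dev/video0?width:320&height:240&frame_rate:25&av:1
 *
 * "video4linux" selects the AVInputFormat, and each recognised name overrides
 * one of the defaults in AVFormatParameters above ("av" selects video only).
 */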
386
387 // Now attempt to open the file
388 error = av_open_input_file( &context, file, format, 0, params ) < 0;
389
390 // Cleanup AVFormatParameters
391 free( standard );
392 free( params );
393
394 // If successful, then try to get additional info
395 if ( error == 0 )
396 {
397 // Get the stream info
398 error = av_find_stream_info( context ) < 0;
399
400 // Continue if no error
401 if ( error == 0 )
402 {
403 // We will default to the first audio and video streams found
404 int audio_index = -1;
405 int video_index = -1;
406 int av_bypass = 0;
407
408 // Now set properties where we can (use default unknowns if required)
409 if ( context->duration != AV_NOPTS_VALUE )
410 {
411 // This isn't going to be accurate for all formats
412 mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
413 mlt_properties_set_position( properties, "out", frames - 1 );
414 mlt_properties_set_position( properties, "length", frames );
415 }
416
417 // Find default audio and video streams
418 find_default_streams( properties, context, &audio_index, &video_index );
419
420 if ( context->start_time != AV_NOPTS_VALUE )
421 mlt_properties_set_double( properties, "_start_time", context->start_time );
422
423 // Check if we're seekable (something funny about mpeg here :-/)
424 if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 7 ) && strncmp( file, "udp:", 4 ) && strncmp( file, "tcp:", 4 ) && strncmp( file, "rtsp:", 5 ) && strncmp( file, "rtp:", 4 ) )
425 {
426 mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
427 mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
428 av_open_input_file( &context, file, NULL, 0, NULL );
429 av_find_stream_info( context );
430 }
431 else
432 av_bypass = 1;
433
434 // Store selected audio and video indexes on properties
435 mlt_properties_set_int( properties, "_audio_index", audio_index );
436 mlt_properties_set_int( properties, "_video_index", video_index );
437 mlt_properties_set_int( properties, "_last_position", -1 );
438
439 // Fetch the width, height and aspect ratio
440 if ( video_index != -1 )
441 {
442 AVCodecContext *codec_context = context->streams[ video_index ]->codec;
443 mlt_properties_set_int( properties, "width", codec_context->width );
444 mlt_properties_set_int( properties, "height", codec_context->height );
445
446 if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
447 {
448 // Fetch the first frame of DV so we can read it directly
449 AVPacket pkt;
450 int ret = 0;
451 while ( ret >= 0 )
452 {
453 ret = av_read_frame( context, &pkt );
454 if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
455 {
456 mlt_properties_set_double( properties, "aspect_ratio",
457 get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
458 break;
459 }
460 }
461 }
462 else
463 {
464 mlt_properties_set_double( properties, "aspect_ratio",
465 get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
466 }
467 }
468
469 // Read Metadata
470 if (context->title != NULL)
471 mlt_properties_set(properties, "meta.attr.title.markup", context->title );
472 if (context->author != NULL)
473 mlt_properties_set(properties, "meta.attr.author.markup", context->author );
474 if (context->copyright != NULL)
475 mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
476 if (context->comment != NULL)
477 mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
478 if (context->album != NULL)
479 mlt_properties_set(properties, "meta.attr.album.markup", context->album );
480 if (context->year != 0)
481 mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
482 if (context->track != 0)
483 mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
484
485 // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
486 if ( av == 0 && audio_index != -1 && video_index != -1 )
487 {
488 // We'll use the open one as our video_context
489 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
490
491 // And open again for our audio context
492 av_open_input_file( &context, file, NULL, 0, NULL );
493 av_find_stream_info( context );
494
495 // Audio context
496 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
497 }
498 else if ( av != 2 && video_index != -1 )
499 {
500 // We only have a video context
501 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
502 }
503 else if ( audio_index != -1 )
504 {
505 // We only have an audio context
506 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
507 }
508 else
509 {
510 // Something has gone wrong
511 error = -1;
512 }
513
514 mlt_properties_set_int( properties, "av_bypass", av_bypass );
515 }
516 }
517
518 // Unlock the mutex now
519 avformat_unlock( );
520
521 return error;
522 }
523
524 /** Convert a frame position to a time code.
525 */
526
527 static double producer_time_of_frame( mlt_producer this, mlt_position position )
528 {
529 return ( double )position / mlt_producer_get_fps( this );
530 }
531
532 static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
533 {
534 #ifdef SWSCALE
535 if ( format == mlt_image_yuv420p )
536 {
537 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
538 width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
539 AVPicture output;
540 output.data[0] = buffer;
541 output.data[1] = buffer + width * height;
542 output.data[2] = buffer + ( 5 * width * height ) / 4; // V plane follows the quarter-size U plane
543 output.linesize[0] = width;
544 output.linesize[1] = width >> 1;
545 output.linesize[2] = width >> 1;
546 sws_scale( context, frame->data, frame->linesize, 0, height,
547 output.data, output.linesize);
548 sws_freeContext( context );
549 }
550 else if ( format == mlt_image_rgb24 )
551 {
552 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
553 width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
554 AVPicture output;
555 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
556 sws_scale( context, frame->data, frame->linesize, 0, height,
557 output.data, output.linesize);
558 sws_freeContext( context );
559 }
560 else
561 {
562 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
563 width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
564 AVPicture output;
565 avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
566 sws_scale( context, frame->data, frame->linesize, 0, height,
567 output.data, output.linesize);
568 sws_freeContext( context );
569 }
570 #else
571 if ( format == mlt_image_yuv420p )
572 {
573 AVPicture pict;
574 pict.data[0] = buffer;
575 pict.data[1] = buffer + width * height;
576 pict.data[2] = buffer + ( 5 * width * height ) / 4; // V plane follows the quarter-size U plane
577 pict.linesize[0] = width;
578 pict.linesize[1] = width >> 1;
579 pict.linesize[2] = width >> 1;
580 img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
581 }
582 else if ( format == mlt_image_rgb24 )
583 {
584 AVPicture output;
585 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
586 img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
587 }
588 else
589 {
590 AVPicture output;
591 avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height );
592 img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height );
593 }
594 #endif
595 }
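/* Plane offsets for the planar 4:2:0 case above follow the usual I420 layout
 * (Y, then U, then V): with width w and height h the Y plane occupies w*h
 * bytes and each chroma plane w*h/4, so U starts at w*h, V at 5*w*h/4, and
 * the whole image needs 3*w*h/2 bytes - matching allocate_buffer() below.
 */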
596
597 /** Allocate the image buffer and set it on the frame.
598 */
599
600 static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *codec_context, uint8_t **buffer, mlt_image_format *format, int *width, int *height )
601 {
602 int size = 0;
603
604 if ( codec_context->width == 0 || codec_context->height == 0 )
605 return size;
606
607 *width = codec_context->width;
608 *height = codec_context->height;
609 mlt_properties_set_int( frame_properties, "width", *width );
610 mlt_properties_set_int( frame_properties, "height", *height );
611
612 switch ( *format )
613 {
614 case mlt_image_yuv420p:
615 size = *width * 3 * ( *height + 1 ) / 2;
616 break;
617 case mlt_image_rgb24:
618 size = *width * ( *height + 1 ) * 3;
619 break;
620 default:
621 *format = mlt_image_yuv422;
622 size = *width * ( *height + 1 ) * 2;
623 break;
624 }
625
626 // Construct the output image
627 *buffer = mlt_pool_alloc( size );
628 if ( *buffer )
629 mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
630 else
631 size = 0;
632
633 return size;
634 }
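/* A quick check of the sizes above for a hypothetical 720x576 frame (the
 * extra row appears to be slack for chroma rounding):
 *     yuv420p: 720 * 3 * 577 / 2 = 623,160 bytes (~1.5 bytes per pixel)
 *     rgb24:   720 * 577 * 3     = 1,246,320 bytes (3 bytes per pixel)
 *     yuv422:  720 * 577 * 2     = 830,880 bytes (2 bytes per pixel)
 */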
635
636 /** Get an image from a frame.
637 */
638
639 static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
640 {
641 // Get the properties from the frame
642 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
643
644 // Obtain the frame number of this frame
645 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
646
647 // Get the producer
648 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
649
650 // Get the producer properties
651 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
652
653 // Fetch the video_context
654 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
655
656 // Get the video_index
657 int index = mlt_properties_get_int( properties, "video_index" );
658
659 // Obtain the expected frame number
660 mlt_position expected = mlt_properties_get_position( properties, "_video_expected" );
661
662 // Get the video stream
663 AVStream *stream = context->streams[ index ];
664
665 // Get codec context
666 AVCodecContext *codec_context = stream->codec;
667
668 // Packet
669 AVPacket pkt;
670
671 // Get the conversion frame
672 AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL );
673
674 // Special case pause handling flag
675 int paused = 0;
676
677 // Special case ffwd handling
678 int ignore = 0;
679
680 // We may want to use the source fps if available
681 double source_fps = mlt_properties_get_double( properties, "source_fps" );
682 double fps = mlt_producer_get_fps( this );
683
684 // This is the physical frame position in the source
685 int req_position = ( int )( position / fps * source_fps + 0.5 );
686
687 // Get the seekable status
688 int seekable = mlt_properties_get_int( properties, "seekable" );
689
690 // Hopefully provide better support for streams...
691 int av_bypass = mlt_properties_get_int( properties, "av_bypass" );
692
693 // Determines if we have to decode all frames in a sequence
694 int must_decode = 1;
695
696 // Temporary hack: intra-frame-only codecs do not require decoding of preceding frames
697 must_decode = strcmp( codec_context->codec->name, "mjpeg" ) &&
698 strcmp( codec_context->codec->name, "rawvideo" ) &&
699 strcmp( codec_context->codec->name, "dvvideo" );
700
701 // Seek if necessary
702 if ( position != expected )
703 {
704 if ( av_frame != NULL && position + 1 == expected )
705 {
706 // We're paused - use last image
707 paused = 1;
708 }
709 else if ( !seekable && position > expected && ( position - expected ) < 250 )
710 {
711 // Fast forward - seeking is inefficient for small distances - just ignore following frames
712 ignore = ( int )( ( position - expected ) / fps * source_fps );
713 }
714 else if ( seekable && ( position < expected || position - expected >= 12 ) )
715 {
716 // Calculate the timestamp for the requested frame
717 int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
718 if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
719 timestamp += context->start_time;
720 if ( must_decode )
721 timestamp -= AV_TIME_BASE;
722 if ( timestamp < 0 )
723 timestamp = 0;
724
725 // Set to the timestamp
726 av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
727
728 // Remove the cached info relating to the previous position
729 mlt_properties_set_int( properties, "_current_position", -1 );
730 mlt_properties_set_int( properties, "_last_position", -1 );
731 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
732 av_frame = NULL;
733 }
734 }
735
736 // Duplicate the last image if necessary (see comment on rawvideo below)
737 int current_position = mlt_properties_get_int( properties, "_current_position" );
738 int got_picture = mlt_properties_get_int( properties, "_got_picture" );
739 if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 )
740 {
741 // Duplicate it
742 if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
743 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
744 else
745 mlt_frame_get_image( frame, buffer, format, width, height, writable );
746 }
747 else
748 {
749 int ret = 0;
750 int int_position = 0;
751 got_picture = 0;
752
753 av_init_packet( &pkt );
754
755 // Construct an AVFrame for YUV422 conversion
756 if ( av_frame == NULL )
757 av_frame = avcodec_alloc_frame( );
758
759 while( ret >= 0 && !got_picture )
760 {
761 // Read a packet
762 ret = av_read_frame( context, &pkt );
763
764 // We only deal with video from the selected video_index
765 if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 )
766 {
767 // Determine time code of the packet
768 int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
769 if ( context->start_time != AV_NOPTS_VALUE )
770 int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
771 int last_position = mlt_properties_get_int( properties, "_last_position" );
772 if ( int_position == last_position )
773 int_position = last_position + 1;
774 mlt_properties_set_int( properties, "_last_position", int_position );
775
776 // Decode the image
777 if ( must_decode || int_position >= req_position )
778 ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size );
779
780 if ( got_picture )
781 {
782 // Handle ignore
783 if ( int_position < req_position )
784 {
785 ignore = 0;
786 got_picture = 0;
787 }
788 else if ( int_position >= req_position )
789 {
790 ignore = 0;
791 }
792 else if ( ignore -- )
793 {
794 got_picture = 0;
795 }
796 }
797 av_free_packet( &pkt );
798 }
799 else if ( ret >= 0 )
800 {
801 av_free_packet( &pkt );
802 }
803
804 // Now handle the picture if we have one
805 if ( got_picture )
806 {
807 if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
808 {
809 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
810 mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
811 mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
812 mlt_properties_set_int( properties, "_current_position", int_position );
813 mlt_properties_set_int( properties, "_got_picture", 1 );
814 mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
815 }
816 else
817 {
818 got_picture = 0;
819 }
820 }
821 }
822 if ( !got_picture )
823 mlt_frame_get_image( frame, buffer, format, width, height, writable );
824 }
825
826 // Very untidy - for rawvideo, the packet contains the frame, hence the free packet
827 // above will break the pause behaviour - so we wipe the frame now
828 if ( !strcmp( codec_context->codec->name, "rawvideo" ) )
829 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
830
831 // Set the field order property for this frame
832 mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) );
833
834 // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
835 mlt_properties_set_position( properties, "_video_expected", position + 1 );
836
837 return 0;
838 }
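/* A worked example of the seek arithmetic above, with made-up numbers: for a
 * 25 fps profile, a 29.97 fps source and MLT position 100, req_position is
 * (int)( 100 / 25.0 * 29.97 + 0.5 ) = 120, and the av_seek_frame() target is
 * 120 / 29.97 * AV_TIME_BASE = roughly 4,004,004 microseconds (plus the
 * stream start time, minus one second when the codec must decode from an
 * earlier keyframe).
 */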
839
840 /** Process properties as AVOptions and apply to AV context obj
841 */
842
843 static void apply_properties( void *obj, mlt_properties properties, int flags )
844 {
845 int i;
846 int count = mlt_properties_count( properties );
847 for ( i = 0; i < count; i++ )
848 {
849 const char *opt_name = mlt_properties_get_name( properties, i );
850 const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
851 if ( opt != NULL )
852 #if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0)
853 av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), 0, NULL );
854 #elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
855 av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
856 #else
857 av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
858 #endif
859 }
860 }
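/* Illustrative use (assuming the "skip_loop_filter" AVOption and its named
 * value "all" exist in the linked FFmpeg version): setting
 *
 *     mlt_properties_set( MLT_PRODUCER_PROPERTIES( producer ), "skip_loop_filter", "all" );
 *
 * on the producer is picked up here and forwarded to the decoder context,
 * because any producer property whose name matches an AVOption with the
 * requested flags is applied via av_set_string*().
 */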
861
862 /** Set up video handling.
863 */
864
865 static void producer_set_up_video( mlt_producer this, mlt_frame frame )
866 {
867 // Get the properties
868 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
869
870 // Fetch the video_context
871 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
872
873 // Get the video_index
874 int index = mlt_properties_get_int( properties, "video_index" );
875
876 // Reopen the file if necessary
877 if ( !context && index > -1 )
878 {
879 mlt_events_block( properties, this );
880 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
881 mlt_properties_get( properties, "resource" ) );
882 context = mlt_properties_get_data( properties, "video_context", NULL );
883 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
884 mlt_events_unblock( properties, this );
885
886 // Process properties as AVOptions
887 apply_properties( context, properties, AV_OPT_FLAG_DECODING_PARAM );
888 }
889
890 // Exception handling for video_index
891 if ( context && index >= (int) context->nb_streams )
892 {
893 // Get the last video stream
894 for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO; --index );
895 mlt_properties_set_int( properties, "video_index", index );
896 }
897 if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO )
898 {
899 // Invalidate the video stream
900 index = -1;
901 mlt_properties_set_int( properties, "video_index", index );
902 }
903
904 // Get the frame properties
905 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
906
907 if ( context && index > -1 )
908 {
909 // Get the video stream
910 AVStream *stream = context->streams[ index ];
911
912 // Get codec context
913 AVCodecContext *codec_context = stream->codec;
914
915 // Get the codec
916 AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );
917
918 // Update the video properties if the index changed
919 if ( index != mlt_properties_get_int( properties, "_video_index" ) )
920 {
921 // Reset the video properties if the index changed
922 mlt_properties_set_int( properties, "_video_index", index );
923 mlt_properties_set_data( properties, "video_codec", NULL, 0, NULL, NULL );
924 mlt_properties_set_int( properties, "width", codec_context->width );
925 mlt_properties_set_int( properties, "height", codec_context->height );
926 // TODO: get the first usable AVPacket and reset the stream position
927 mlt_properties_set_double( properties, "aspect_ratio",
928 get_aspect_ratio( context->streams[ index ], codec_context, NULL ) );
929 codec = NULL;
930 }
931
932 // Initialise the codec if necessary
933 if ( codec == NULL )
934 {
935 // Initialise multi-threading
936 int thread_count = mlt_properties_get_int( properties, "threads" );
937 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
938 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
939 if ( thread_count > 1 )
940 {
941 avcodec_thread_init( codec_context, thread_count );
942 codec_context->thread_count = thread_count;
943 }
944
945 // Find the codec
946 codec = avcodec_find_decoder( codec_context->codec_id );
947
948 // If we don't have a codec and we can't initialise it, we can't do much more...
949 avformat_lock( );
950 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
951 {
952 // Now store the codec with its destructor
953 mlt_properties_set_data( properties, "video_codec", codec_context, 0, producer_codec_close, NULL );
954 }
955 else
956 {
957 // Remember that we can't use this later
958 mlt_properties_set_int( properties, "video_index", -1 );
959 index = -1;
960 }
961 avformat_unlock( );
962
963 // Process properties as AVOptions
964 apply_properties( codec_context, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
965 }
966
967 // No codec, no show...
968 if ( codec && index > -1 )
969 {
970 double source_fps = 0;
971 double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
972 double aspect_ratio = ( force_aspect_ratio > 0.0 ) ?
973 force_aspect_ratio : mlt_properties_get_double( properties, "aspect_ratio" );
974
975 // Determine the fps
976 source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );
977
978 // We'll use fps if it's available
979 if ( source_fps > 0 )
980 mlt_properties_set_double( properties, "source_fps", source_fps );
981 else
982 mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this ) );
983 mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
984
985 // Set the width and height
986 mlt_properties_set_int( frame_properties, "width", codec_context->width );
987 mlt_properties_set_int( frame_properties, "height", codec_context->height );
988 mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
989
990 mlt_frame_push_get_image( frame, producer_get_image );
991 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
992 }
993 else
994 {
995 mlt_properties_set_int( frame_properties, "test_image", 1 );
996 }
997 }
998 else
999 {
1000 mlt_properties_set_int( frame_properties, "test_image", 1 );
1001 }
1002 }
1003
1004 /** Get the audio from a frame.
1005 */
1006
1007 static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
1008 {
1009 // Get the properties from the frame
1010 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
1011
1012 // Obtain the frame number of this frame
1013 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
1014
1015 // Get the producer
1016 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
1017
1018 // Get the producer properties
1019 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
1020
1021 // Fetch the audio_context
1022 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
1023
1024 // Get the audio_index
1025 int index = mlt_properties_get_int( properties, "audio_index" );
1026
1027 // Get the seekable status
1028 int seekable = mlt_properties_get_int( properties, "seekable" );
1029
1030 // Obtain the expected frame number
1031 mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" );
1032
1033 // Obtain the resample context if it exists (not always needed)
1034 ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
1035
1036 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1037 // Get the format converter context if it exists
1038 AVAudioConvert *convert = mlt_properties_get_data( properties, "audio_convert", NULL );
1039 #endif
1040
1041 // Obtain the audio buffers
1042 int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
1043 int16_t *decode_buffer = mlt_properties_get_data( properties, "decode_buffer", NULL );
1044 int16_t *convert_buffer = mlt_properties_get_data( properties, "convert_buffer", NULL );
1045
1046 // Get amount of audio used
1047 int audio_used = mlt_properties_get_int( properties, "_audio_used" );
1048
1049 // Calculate the real time code
1050 double real_timecode = producer_time_of_frame( this, position );
1051
1052 // Get the audio stream
1053 AVStream *stream = context->streams[ index ];
1054
1055 // Get codec context
1056 AVCodecContext *codec_context = stream->codec;
1057
1058 // Packet
1059 AVPacket pkt;
1060
1061 // Number of frames to ignore (for ffwd)
1062 int ignore = 0;
1063
1064 // Flag for paused (silence)
1065 int paused = 0;
1066
1067 // Check for resample and create if necessary
1068 if ( resample == NULL && codec_context->channels <= 2 )
1069 {
1070 // Create the resampler
1071 resample = audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate );
1072
1073 // And store it on properties
1074 mlt_properties_set_data( properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );
1075 }
1076 else if ( resample == NULL )
1077 {
1078 *channels = codec_context->channels;
1079 *frequency = codec_context->sample_rate;
1080 }
1081
1082 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1083 // Check for audio format converter and create if necessary
1084 // TODO: support higher resolutions than 16-bit.
1085 if ( convert == NULL && codec_context->sample_fmt != SAMPLE_FMT_S16 )
1086 {
1087 // Create single channel converter for interleaved with no mixing matrix
1088 convert = av_audio_convert_alloc( SAMPLE_FMT_S16, 1, codec_context->sample_fmt, 1, NULL, 0 );
1089 mlt_properties_set_data( properties, "audio_convert", convert, 0, ( mlt_destructor )av_audio_convert_free, NULL );
1090 }
1091 #endif
1092
1093 // Check for audio buffer and create if necessary
1094 if ( audio_buffer == NULL )
1095 {
1096 // Allocate the audio buffer
1097 audio_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1098
1099 // And store it on properties for reuse
1100 mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1101 }
1102
1103 // Check for decoder buffer and create if necessary
1104 if ( decode_buffer == NULL )
1105 {
1106 // Allocate the audio buffer
1107 decode_buffer = av_malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1108
1109 // And store it on properties for reuse
1110 mlt_properties_set_data( properties, "decode_buffer", decode_buffer, 0, ( mlt_destructor )av_free, NULL );
1111 }
1112
1113 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1114 // Check for format converter buffer and create if necessary
1115 if ( resample && convert && convert_buffer == NULL )
1116 {
1117 // Allocate the audio buffer
1118 convert_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1119
1120 // And store it on properties for reuse
1121 mlt_properties_set_data( properties, "convert_buffer", convert_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1122 }
1123 #endif
1124
1125 // Seek if necessary
1126 if ( position != expected )
1127 {
1128 if ( position + 1 == expected )
1129 {
1130 // We're paused - silence required
1131 paused = 1;
1132 }
1133 else if ( !seekable && position > expected && ( position - expected ) < 250 )
1134 {
1135 // Fast forward - seeking is inefficient for small distances - just ignore following frames
1136 ignore = position - expected;
1137 }
1138 else if ( position < expected || position - expected >= 12 )
1139 {
1140 // Set to the real timecode
1141 if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 )
1142 paused = 1;
1143
1144 // Clear the usage in the audio buffer
1145 audio_used = 0;
1146 }
1147 }
1148
1149 // Get the audio if required
1150 if ( !paused )
1151 {
1152 int ret = 0;
1153 int got_audio = 0;
1154
1155 av_init_packet( &pkt );
1156
1157 while( ret >= 0 && !got_audio )
1158 {
1159 // Check if the buffer already contains the samples required
1160 if ( audio_used >= *samples && ignore == 0 )
1161 {
1162 got_audio = 1;
1163 break;
1164 }
1165
1166 // Read a packet
1167 ret = av_read_frame( context, &pkt );
1168
1169 int len = pkt.size;
1170 uint8_t *ptr = pkt.data;
1171
1172 // We only deal with audio from the selected audio_index
1173 while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 )
1174 {
1175 int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE;
1176
1177 // Decode the audio
1178 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
1179 ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
1180 #else
1181 ret = avcodec_decode_audio( codec_context, decode_buffer, &data_size, ptr, len );
1182 #endif
1183 if ( ret < 0 )
1184 {
1185 ret = 0;
1186 break;
1187 }
1188
1189 len -= ret;
1190 ptr += ret;
1191
1192 if ( data_size > 0 )
1193 {
1194 int src_stride[6]= { av_get_bits_per_sample_format( codec_context->sample_fmt ) / 8 };
1195 int dst_stride[6]= { av_get_bits_per_sample_format( SAMPLE_FMT_S16 ) / 8 };
1196
1197 if ( resample )
1198 {
1199 int16_t *source = decode_buffer;
1200 int16_t *dest = &audio_buffer[ audio_used * *channels ];
1201 int convert_samples = data_size / src_stride[0];
1202
1203 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1204 if ( convert )
1205 {
1206 const void *src_buf[6] = { decode_buffer };
1207 void *dst_buf[6] = { convert_buffer };
1208 av_audio_convert( convert, dst_buf, dst_stride, src_buf, src_stride, convert_samples );
1209 source = convert_buffer;
1210 }
1211 #endif
1212 audio_used += audio_resample( resample, dest, source, convert_samples / codec_context->channels );
1213 }
1214 else
1215 {
1216 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1217 if ( convert )
1218 {
1219 const void *src_buf[6] = { decode_buffer };
1220 void *dst_buf[6] = { &audio_buffer[ audio_used * *channels ] };
1221 av_audio_convert( convert, dst_buf, dst_stride, src_buf, src_stride, data_size / src_stride[0] );
1222 }
1223 else
1224 #endif
1225 {
1226 memcpy( &audio_buffer[ audio_used * *channels ], decode_buffer, data_size );
1227 }
1228 audio_used += data_size / *channels / src_stride[0];
1229 }
1230
1231 // Handle ignore
1232 while ( ignore && audio_used > *samples )
1233 {
1234 ignore --;
1235 audio_used -= *samples;
1236 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * sizeof( int16_t ) );
1237 }
1238 }
1239
1240 // If we're behind, ignore this packet
1241 if ( pkt.pts >= 0 )
1242 {
1243 float current_pts = av_q2d( stream->time_base ) * pkt.pts;
1244 if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) )
1245 ignore = 1;
1246 }
1247 }
1248
1249 // We're finished with this packet regardless
1250 av_free_packet( &pkt );
1251 }
1252
1253 *buffer = mlt_pool_alloc( *samples * *channels * sizeof( int16_t ) );
1254 mlt_properties_set_data( frame_properties, "audio", *buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1255
1256 // Now handle the audio if we have enough
1257 if ( audio_used >= *samples )
1258 {
1259 memcpy( *buffer, audio_buffer, *samples * *channels * sizeof( int16_t ) );
1260 audio_used -= *samples;
1261 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
1262 }
1263 else
1264 {
1265 memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );
1266 }
1267
1268 // Store the number of audio samples still available
1269 mlt_properties_set_int( properties, "_audio_used", audio_used );
1270 }
1271 else
1272 {
1273 // Get silence and don't touch the context
1274 mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
1275 }
1276
1277 // Regardless of speed (other than paused), we expect to get the next frame
1278 if ( !paused )
1279 mlt_properties_set_position( properties, "_audio_expected", position + 1 );
1280
1281 return 0;
1282 }
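/* Rough arithmetic for the buffering above, with made-up numbers: at 48 kHz
 * and a 25 fps profile each frame needs 48000 / 25 = 1920 samples per
 * channel, while a decoded packet usually yields some other amount (for
 * example 1152 samples for an MP2 frame), so _audio_used tracks the surplus
 * carried over in audio_buffer from one frame to the next.
 */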
1283
1284 /** Set up audio handling.
1285 */
1286
1287 static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
1288 {
1289 // Get the properties
1290 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
1291
1292 // Fetch the audio_context
1293 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
1294
1295 // Get the audio_index
1296 int index = mlt_properties_get_int( properties, "audio_index" );
1297
1298 // Reopen the file if necessary
1299 if ( !context && index > -1 )
1300 {
1301 mlt_events_block( properties, this );
1302 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
1303 mlt_properties_get( properties, "resource" ) );
1304 context = mlt_properties_get_data( properties, "audio_context", NULL );
1305 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
1306 mlt_events_unblock( properties, this );
1307 }
1308
1309 // Exception handling for audio_index
1310 if ( context && index >= (int) context->nb_streams )
1311 {
1312 for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO; --index );
1313 mlt_properties_set_int( properties, "audio_index", index );
1314 }
1315 if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO )
1316 {
1317 index = -1;
1318 mlt_properties_set_int( properties, "audio_index", index );
1319 }
1320
1321 // Update the audio properties if the index changed
1322 if ( index > -1 && index != mlt_properties_get_int( properties, "_audio_index" ) )
1323 {
1324 mlt_properties_set_int( properties, "_audio_index", index );
1325 mlt_properties_set_data( properties, "audio_codec", NULL, 0, NULL, NULL );
1326 }
1327
1328 // Deal with audio context
1329 if ( context != NULL && index > -1 )
1330 {
1331 // Get the frame properties
1332 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
1333
1334 // Get the audio stream
1335 AVStream *stream = context->streams[ index ];
1336
1337 // Get codec context
1338 AVCodecContext *codec_context = stream->codec;
1339
1340 // Get the codec
1341 AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL );
1342
1343 // Initialise the codec if necessary
1344 if ( codec == NULL )
1345 {
1346 // Find the codec
1347 codec = avcodec_find_decoder( codec_context->codec_id );
1348
1349 // If we don't have a codec and we can't initialise it, we can't do much more...
1350 avformat_lock( );
1351 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
1352 {
1353 // Now store the codec with its destructor
1354 mlt_properties_set_data( properties, "audio_codec", codec_context, 0, producer_codec_close, NULL );
1355
1356 }
1357 else
1358 {
1359 // Remember that we can't use this later
1360 mlt_properties_set_int( properties, "audio_index", -1 );
1361 index = -1;
1362 }
1363 avformat_unlock( );
1364
1365 // Process properties as AVOptions
1366 apply_properties( codec_context, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
1367 }
1368
1369 // No codec, no show...
1370 if ( codec && index > -1 )
1371 {
1372 mlt_frame_push_audio( frame, producer_get_audio );
1373 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
1374 mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
1375 mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
1376 }
1377 }
1378 }
1379
1380 /** Our get frame implementation.
1381 */
1382
1383 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index )
1384 {
1385 // Create an empty frame
1386 *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( this ) );
1387
1388 // Update timecode on the frame we're creating
1389 mlt_frame_set_position( *frame, mlt_producer_position( this ) );
1390
1391 // Set the position of this producer
1392 mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );
1393
1394 // Set up the video
1395 producer_set_up_video( this, *frame );
1396
1397 // Set up the audio
1398 producer_set_up_audio( this, *frame );
1399
1400 // Set the aspect_ratio
1401 mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );
1402
1403 // Calculate the next timecode
1404 mlt_producer_prepare_next( this );
1405
1406 return 0;
1407 }
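/* Finally, a sketch of how a caller might pull a frame through the chain set
 * up above (hypothetical, error handling omitted):
 *
 *     mlt_frame frame = NULL;
 *     mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &frame, 0 );
 *
 *     mlt_image_format format = mlt_image_yuv422;
 *     uint8_t *image = NULL;
 *     int width = 0, height = 0;
 *     mlt_frame_get_image( frame, &image, &format, &width, &height, 0 );
 *
 *     int16_t *pcm = NULL;
 *     mlt_audio_format aformat = mlt_audio_pcm;
 *     int frequency = 48000, channels = 2, samples = 1920;
 *     mlt_frame_get_audio( frame, &pcm, &aformat, &frequency, &channels, &samples );
 *
 *     mlt_frame_close( frame );
 */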