producer_avformat.c: fix segfault when a file fails to open or read in init.
[melted] src/modules/avformat/producer_avformat.c
1 /*
2 * producer_avformat.c -- avformat producer
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 // MLT Header files
23 #include <framework/mlt_producer.h>
24 #include <framework/mlt_frame.h>
25
26 // ffmpeg Header files
27 #include <avformat.h>
28 #ifdef SWSCALE
29 #include <swscale.h>
30 #endif
31
32 // System header files
33 #include <stdlib.h>
34 #include <string.h>
35 #include <pthread.h>
36 #include <math.h>
37
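// Lock helpers provided elsewhere in this module - the libavformat/libavcodec calls below
// are not thread-safe, so they are serialised through these.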
38 void avformat_lock( );
39 void avformat_unlock( );
40
41 // Forward references.
42 static int producer_open( mlt_producer this, mlt_profile profile, char *file );
43 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );
44
45 /** Constructor for libavformat.
46 */
47
48 mlt_producer producer_avformat_init( mlt_profile profile, char *file )
49 {
50 mlt_producer this = NULL;
51
52 // Check that we have a non-NULL argument
53 if ( file != NULL )
54 {
55 // Construct the producer
56 this = calloc( 1, sizeof( struct mlt_producer_s ) );
57
58 // Initialise it
59 if ( this != NULL && mlt_producer_init( this, NULL ) == 0 )
60 {
61 // Get the properties
62 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
63
64 // Set the resource property (required for all producers)
65 mlt_properties_set( properties, "resource", file );
66
67 // Register our get_frame implementation
68 this->get_frame = producer_get_frame;
69
70 // Open the file
71 if ( producer_open( this, profile, file ) != 0 )
72 {
73 // Clean up
74 mlt_producer_close( this );
75 this = NULL;
76 }
77 else
78 {
79 // Close the file to release resources for large playlists - reopen later as needed
80 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
81 mlt_properties_set_data( properties, "audio_context", NULL, 0, NULL, NULL );
82 mlt_properties_set_data( properties, "video_context", NULL, 0, NULL, NULL );
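// Note: producer_set_up_video() and producer_set_up_audio() reopen the file on demand
// via producer_open() when the first frame is requested, so dropping the contexts here
// only defers the cost until the producer is actually used.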
83 }
84 }
85 }
86
87 return this;
88 }
89
90 /** Find the default streams.
91 */
92
93 static void find_default_streams( AVFormatContext *context, int *audio_index, int *video_index )
94 {
95 int i;
96
97 // Allow for multiple audio and video streams in the file and select first of each (if available)
98 for( i = 0; i < context->nb_streams; i++ )
99 {
100 // Get the codec context
101 AVCodecContext *codec_context = context->streams[ i ]->codec;
102
103 if ( avcodec_find_decoder( codec_context->codec_id ) == NULL )
104 continue;
105
106 // Determine the type and obtain the first index of each type
107 switch( codec_context->codec_type )
108 {
109 case CODEC_TYPE_VIDEO:
110 if ( *video_index < 0 )
111 *video_index = i;
112 break;
113 case CODEC_TYPE_AUDIO:
114 if ( *audio_index < 0 )
115 *audio_index = i;
116 break;
117 default:
118 break;
119 }
120 }
121 }
122
123 /** Producer file destructor.
124 */
125
126 static void producer_file_close( void *context )
127 {
128 if ( context != NULL )
129 {
130 // Lock the mutex now
131 avformat_lock( );
132
133 // Close the file
134 av_close_input_file( context );
135
136 // Unlock the mutex now
137 avformat_unlock( );
138 }
139 }
140
141 /** Producer codec destructor.
142 */
143
144 static void producer_codec_close( void *codec )
145 {
146 if ( codec != NULL )
147 {
148 // Lock the mutex now
149 avformat_lock( );
150
151 // Close the codec
152 avcodec_close( codec );
153
154 // Unlock the mutex now
155 avformat_unlock( );
156 }
157 }
158
159 /** Open the file.
160 */
161
162 static int producer_open( mlt_producer this, mlt_profile profile, char *file )
163 {
164 // Return an error code (0 == no error)
165 int error = 0;
166
167 // Context for avformat
168 AVFormatContext *context = NULL;
169
170 // Get the properties
171 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
172
173 // We will treat everything with the producer fps
174 double fps = mlt_profile_fps( profile );
175
176 // Lock the mutex now
177 avformat_lock( );
178
179 // If "MRL", then create AVInputFormat
180 AVInputFormat *format = NULL;
181 AVFormatParameters *params = NULL;
182 char *standard = NULL;
183 char *mrl = strchr( file, ':' );
184
185 // AV option (0 = both, 1 = video, 2 = audio)
186 int av = 0;
187
188 // Suppress av_log output
189 av_log_set_level( -1 );
190
191 // Only if there is not a protocol specification that avformat can handle
192 if ( mrl && !url_exist( file ) )
193 {
194 // 'file' becomes format abbreviation
195 mrl[0] = 0;
196
197 // Lookup the format
198 format = av_find_input_format( file );
199
200 // Eat the format designator
201 file = ++mrl;
202
203 if ( format )
204 {
205 // Allocate params
206 params = calloc( sizeof( AVFormatParameters ), 1 );
207
208 // These are required by video4linux (defaults)
209 params->width = 640;
210 params->height = 480;
211 params->time_base= (AVRational){1,25};
212 // params->device = file;
213 params->channels = 2;
214 params->sample_rate = 48000;
215 }
216
217 // XXX: this does not work anymore since the avdevice split
218 // TODO: make a producer_avdevice?
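// Illustrative MRL form parsed below (example only - any parameter may be omitted):
//   video4linux:/dev/video0?width:640&height:480&frame_rate:25
// i.e. <format>:<resource>?<name>:<value>&<name>:<value>...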
219 // Parse out params
220 mrl = strchr( file, '?' );
221 while ( mrl )
222 {
223 mrl[0] = 0;
224 char *name = strdup( ++mrl );
225 char *value = strchr( name, ':' );
226 if ( value )
227 {
228 value[0] = 0;
229 value++;
230 char *t = strchr( value, '&' );
231 if ( t )
232 t[0] = 0;
233 if ( !strcmp( name, "frame_rate" ) )
234 params->time_base.den = atoi( value );
235 else if ( !strcmp( name, "frame_rate_base" ) )
236 params->time_base.num = atoi( value );
237 else if ( !strcmp( name, "sample_rate" ) )
238 params->sample_rate = atoi( value );
239 else if ( !strcmp( name, "channels" ) )
240 params->channels = atoi( value );
241 else if ( !strcmp( name, "width" ) )
242 params->width = atoi( value );
243 else if ( !strcmp( name, "height" ) )
244 params->height = atoi( value );
245 else if ( !strcmp( name, "standard" ) )
246 {
247 standard = strdup( value );
248 params->standard = standard;
249 }
250 else if ( !strcmp( name, "av" ) )
251 av = atoi( value );
252 }
253 free( name );
254 mrl = strchr( mrl, '&' );
255 }
256 }
257
258 // Now attempt to open the file
259 error = av_open_input_file( &context, file, format, 0, params ) < 0;
260
261 // Cleanup AVFormatParameters
262 free( standard );
263 free( params );
264
265 // If successful, then try to get additional info
266 if ( error == 0 )
267 {
268 // Get the stream info
269 error = av_find_stream_info( context ) < 0;
270
271 // Continue if no error
272 if ( error == 0 )
273 {
274 // We will default to the first audio and video streams found
275 int audio_index = -1;
276 int video_index = -1;
277 int av_bypass = 0;
278
279 // Now set properties where we can (use default unknowns if required)
280 if ( context->duration != AV_NOPTS_VALUE )
281 {
282 // This isn't going to be accurate for all formats
283 mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
284 mlt_properties_set_position( properties, "out", frames - 1 );
285 mlt_properties_set_position( properties, "length", frames );
286 }
287
288 // Find default audio and video streams
289 find_default_streams( context, &audio_index, &video_index );
290
291 if ( context->start_time != AV_NOPTS_VALUE )
292 mlt_properties_set_double( properties, "_start_time", context->start_time );
293
294 // Check if we're seekable (something funny about mpeg here :-/)
295 if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 7 ) )
296 {
297 mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
298 mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
299 av_open_input_file( &context, file, NULL, 0, NULL );
300 av_find_stream_info( context );
301 }
302 else
303 av_bypass = 1;
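// Note: pipe and HTTP sources cannot be seeked reliably; av_bypass tells
// producer_get_image() to skip the pause/duplicate-last-image shortcut, which depends
// on accurate positions.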
304
305 // Store selected audio and video indexes on properties
306 mlt_properties_set_int( properties, "audio_index", audio_index );
307 mlt_properties_set_int( properties, "video_index", video_index );
308 mlt_properties_set_int( properties, "_last_position", -1 );
309
310 // Fetch the width, height and aspect ratio
311 if ( video_index != -1 )
312 {
313 AVCodecContext *codec_context = context->streams[ video_index ]->codec;
314 mlt_properties_set_int( properties, "width", codec_context->width );
315 mlt_properties_set_int( properties, "height", codec_context->height );
316 mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) );
317 }
318
319 // Read Metadata
320 if (context->title != NULL)
321 mlt_properties_set(properties, "meta.attr.title.markup", context->title );
322 if (context->author != NULL)
323 mlt_properties_set(properties, "meta.attr.author.markup", context->author );
324 if (context->copyright != NULL)
325 mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
326 if (context->comment != NULL)
327 mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
328 if (context->album != NULL)
329 mlt_properties_set(properties, "meta.attr.album.markup", context->album );
330 if (context->year != 0)
331 mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
332 if (context->track != 0)
333 mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
334
335 // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
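// Note: keeping two separate AVFormatContexts gives audio and video independent
// demux/seek state, so producer_get_image() and producer_get_audio() can read the
// file without disturbing each other's position.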
336 if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 )
337 {
338 // We'll use the open one as our video_context
339 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
340
341 // And open again for our audio context
342 av_open_input_file( &context, file, NULL, 0, NULL );
343 av_find_stream_info( context );
344
345 // Audio context
346 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
347 }
348 else if ( av != 2 && video_index != -1 )
349 {
350 // We only have a video context
351 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
352 }
353 else if ( audio_index != -1 )
354 {
355 // We only have an audio context
356 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
357 }
358 else
359 {
360 // Something has gone wrong
361 error = -1;
362 }
363
364 mlt_properties_set_int( properties, "av_bypass", av_bypass );
365 }
366 }
367
368 // Unlock the mutex now
369 avformat_unlock( );
370
371 return error;
372 }
373
374 /** Convert a frame position to a time code.
375 */
376
377 static double producer_time_of_frame( mlt_producer this, mlt_position position )
378 {
379 return ( double )position / mlt_producer_get_fps( this );
380 }
381
382 static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
383 {
384 #ifdef SWSCALE
385 if ( format == mlt_image_yuv420p )
386 {
387 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
388 width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
389 AVPicture output;
390 output.data[0] = buffer;
391 output.data[1] = buffer + width * height;
392 output.data[2] = buffer + ( 5 * width * height ) / 4;
393 output.linesize[0] = width;
394 output.linesize[1] = width >> 1;
395 output.linesize[2] = width >> 1;
396 sws_scale( context, frame->data, frame->linesize, 0, height,
397 output.data, output.linesize);
398 sws_freeContext( context );
399 }
400 else if ( format == mlt_image_rgb24 )
401 {
402 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
403 width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
404 AVPicture output;
405 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
406 sws_scale( context, frame->data, frame->linesize, 0, height,
407 output.data, output.linesize);
408 sws_freeContext( context );
409 }
410 else
411 {
412 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
413 width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
414 AVPicture output;
415 avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
416 sws_scale( context, frame->data, frame->linesize, 0, height,
417 output.data, output.linesize);
418 sws_freeContext( context );
419 }
420 #else
421 if ( format == mlt_image_yuv420p )
422 {
423 AVPicture pict;
424 pict.data[0] = buffer;
425 pict.data[1] = buffer + width * height;
426 pict.data[2] = buffer + ( 5 * width * height ) / 4;
427 pict.linesize[0] = width;
428 pict.linesize[1] = width >> 1;
429 pict.linesize[2] = width >> 1;
430 img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
431 }
432 else if ( format == mlt_image_rgb24 )
433 {
434 AVPicture output;
435 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
436 img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
437 }
438 else
439 {
440 AVPicture output;
441 avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height );
442 img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height );
443 }
444 #endif
445 }
446
447 /** Get an image from a frame.
448 */
449
450 static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
451 {
452 // Get the properties from the frame
453 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
454
455 // Obtain the frame number of this frame
456 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
457
458 // Get the producer
459 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
460
461 // Get the producer properties
462 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
463
464 // Fetch the video_context
465 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
466
467 // Get the video_index
468 int index = mlt_properties_get_int( properties, "video_index" );
469
470 // Obtain the expected frame number
471 mlt_position expected = mlt_properties_get_position( properties, "_video_expected" );
472
473 // Get the video stream
474 AVStream *stream = context->streams[ index ];
475
476 // Get codec context
477 AVCodecContext *codec_context = stream->codec;
478
479 // Packet
480 AVPacket pkt;
481
482 // Get the conversion frame
483 AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL );
484
485 // Special case pause handling flag
486 int paused = 0;
487
488 // Special case ffwd handling
489 int ignore = 0;
490
491 // We may want to use the source fps if available
492 double source_fps = mlt_properties_get_double( properties, "source_fps" );
493 double fps = mlt_producer_get_fps( this );
494
495 // This is the physical frame position in the source
496 int req_position = ( int )( position / fps * source_fps + 0.5 );
497
498 // Get the seekable status
499 int seekable = mlt_properties_get_int( properties, "seekable" );
500
501 // Generate the size in bytes
502 int size = 0;
503
504 // Hopefully provide better support for streams...
505 int av_bypass = mlt_properties_get_int( properties, "av_bypass" );
506
507 // Determines if we have to decode all frames in a sequence
508 int must_decode = 1;
509
510 // Set the result arguments that we know here (only *buffer is now required)
511 *width = codec_context->width;
512 *height = codec_context->height;
513
514 switch ( *format )
515 {
516 case mlt_image_yuv420p:
517 size = *width * 3 * ( *height + 1 ) / 2;
518 break;
519 case mlt_image_rgb24:
520 size = *width * ( *height + 1 ) * 3;
521 break;
522 default:
523 *format = mlt_image_yuv422;
524 size = *width * ( *height + 1 ) * 2;
525 break;
526 }
527
528 // Set this on the frame properties
529 mlt_properties_set_int( frame_properties, "width", *width );
530 mlt_properties_set_int( frame_properties, "height", *height );
531
532 // Construct the output image
533 *buffer = mlt_pool_alloc( size );
534
535 // Temporary hack to improve intra frame only
536 must_decode = strcmp( codec_context->codec->name, "mjpeg" ) &&
537 strcmp( codec_context->codec->name, "rawvideo" ) &&
538 strcmp( codec_context->codec->name, "dvvideo" );
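// (mjpeg, rawvideo and dvvideo are intra-frame-only codecs - every frame decodes
// independently, so packets before the requested position can be skipped rather than
// decoded when seeking forward.)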
539
540 // Seek if necessary
541 if ( position != expected )
542 {
543 if ( av_frame != NULL && position + 1 == expected )
544 {
545 // We're paused - use last image
546 paused = 1;
547 }
548 else if ( !seekable && position > expected && ( position - expected ) < 250 )
549 {
550 // Fast forward - seeking is inefficient for small distances - just ignore following frames
551 ignore = ( int )( ( position - expected ) / fps * source_fps );
552 }
553 else if ( seekable && ( position < expected || position - expected >= 12 ) )
554 {
555 // Calculate the timestamp for the requested frame
556 int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
557 if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
558 timestamp += context->start_time;
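// (When frames must be decoded in sequence, aim roughly one second - AV_TIME_BASE
// units - before the target so the backward seek lands on an earlier keyframe and
// decoding can run forward to the requested frame.)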
559 if ( must_decode )
560 timestamp -= AV_TIME_BASE;
561 if ( timestamp < 0 )
562 timestamp = 0;
563
564 // Set to the timestamp
565 av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
566
567 // Remove the cached info relating to the previous position
568 mlt_properties_set_int( properties, "_current_position", -1 );
569 mlt_properties_set_int( properties, "_last_position", -1 );
570 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
571 av_frame = NULL;
572 }
573 }
574
575 // Duplicate the last image if necessary (see comment on rawvideo below)
576 int current_position = mlt_properties_get_int( properties, "_current_position" );
577 int got_picture = mlt_properties_get_int( properties, "_got_picture" );
578 if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 )
579 {
580 // Duplicate it
581 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
582
583 // Set this on the frame properties
584 mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL );
585 }
586 else
587 {
588 int ret = 0;
589 int int_position = 0;
590 got_picture = 0;
591
592 av_init_packet( &pkt );
593
594 // Construct an AVFrame for YUV422 conversion
595 if ( av_frame == NULL )
596 {
597 av_frame = avcodec_alloc_frame( );
598 mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
599 }
600
601 while( ret >= 0 && !got_picture )
602 {
603 // Read a packet
604 ret = av_read_frame( context, &pkt );
605
606 // We only deal with video from the selected video_index
607 if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 )
608 {
609 // Determine time code of the packet
610 int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
611 if ( context->start_time != AV_NOPTS_VALUE )
612 int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
613 int last_position = mlt_properties_get_int( properties, "_last_position" );
614 if ( int_position == last_position )
615 int_position = last_position + 1;
616 mlt_properties_set_int( properties, "_last_position", int_position );
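// (Some streams repeat the same dts on consecutive packets; bumping the position here
// keeps the comparison against req_position moving forward.)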
617
618 // Decode the image
619 if ( must_decode || int_position >= req_position )
620 ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size );
621
622 if ( got_picture )
623 {
624 // Handle ignore
625 if ( int_position < req_position )
626 {
627 ignore = 0;
628 got_picture = 0;
629 }
630 else if ( int_position >= req_position )
631 {
632 ignore = 0;
633 }
634 else if ( ignore -- )
635 {
636 got_picture = 0;
637 }
638 }
639 av_free_packet( &pkt );
640 }
641 else if ( ret >= 0 )
642 {
643 av_free_packet( &pkt );
644 }
645
646 // Now handle the picture if we have one
647 if ( got_picture )
648 {
649 mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
650 mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
651 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
652 mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
653 mlt_properties_set_int( properties, "_current_position", int_position );
654 mlt_properties_set_int( properties, "_got_picture", 1 );
655 }
656 }
657 if ( !got_picture )
658 mlt_frame_get_image( frame, buffer, format, width, height, writable );
659 }
660
661 // Very untidy - for rawvideo, the packet contains the frame, hence the free packet
662 // above will break the pause behaviour - so we wipe the frame now
663 if ( !strcmp( codec_context->codec->name, "rawvideo" ) )
664 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
665
666 // Set the field order property for this frame
667 mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) );
668
669 // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
670 mlt_properties_set_position( properties, "_video_expected", position + 1 );
671
672 return 0;
673 }
674
675 /** Set up video handling.
676 */
677
678 static void producer_set_up_video( mlt_producer this, mlt_frame frame )
679 {
680 // Get the properties
681 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
682
683 // Fetch the video_context
684 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
685
686 // Get the video_index
687 int index = mlt_properties_get_int( properties, "video_index" );
688
689 // Reopen the file if necessary
690 if ( !context && index != -1 )
691 {
692 mlt_events_block( properties, this );
693 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
694 mlt_properties_get( properties, "resource" ) );
695 context = mlt_properties_get_data( properties, "video_context", NULL );
696 index = mlt_properties_get_int( properties, "video_index" );
697 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
698 mlt_events_unblock( properties, this );
699 }
700
701 // Get the frame properties
702 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
703
704 if ( context != NULL && index != -1 )
705 {
706 // Get the video stream
707 AVStream *stream = context->streams[ index ];
708
709 // Get codec context
710 AVCodecContext *codec_context = stream->codec;
711
712 // Get the codec
713 AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );
714
715 // Initialise the codec if necessary
716 if ( codec == NULL )
717 {
718 // Initialise multi-threading
719 int thread_count = mlt_properties_get_int( properties, "threads" );
720 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
721 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
722 if ( thread_count > 1 )
723 {
724 avcodec_thread_init( codec_context, thread_count );
725 codec_context->thread_count = thread_count;
726 }
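// (Usage note: set the "threads" producer property, or export MLT_AVFORMAT_THREADS, to a
// value greater than 1 to enable libavcodec's decoder threading here.)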
727
728 // Find the codec
729 codec = avcodec_find_decoder( codec_context->codec_id );
730
731 // If we don't have a codec and we can't initialise it, we can't do much more...
732 avformat_lock( );
733 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
734 {
735 // Now store the codec with its destructor
736 mlt_properties_set_data( properties, "video_codec", codec_context, 0, producer_codec_close, NULL );
737 }
738 else
739 {
740 // Remember that we can't use this later
741 mlt_properties_set_int( properties, "video_index", -1 );
742 }
743 avformat_unlock( );
744 }
745
746 // No codec, no show...
747 if ( codec != NULL )
748 {
749 double source_fps = 0;
750 int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" );
751 double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
752 double aspect_ratio;
753
754 if ( strcmp( codec_context->codec->name, "dvvideo" ) == 0 )
755 {
756 // Override FFmpeg's notion of DV aspect ratios, which are
757 // based upon a width of 704. Since we do not have a normaliser
758 // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
759 // we just coerce the values to facilitate a passive behaviour through
760 // the rescale normaliser when using equivalent producers and consumers.
761 // = display_aspect / (width * height)
762 if ( codec_context->sample_aspect_ratio.num == 10 &&
763 codec_context->sample_aspect_ratio.den == 11 )
764 force_aspect_ratio = 8.0/9.0; // 4:3 NTSC
765 else if ( codec_context->sample_aspect_ratio.num == 59 &&
766 codec_context->sample_aspect_ratio.den == 54 )
767 force_aspect_ratio = 16.0/15.0; // 4:3 PAL
768 else if ( codec_context->sample_aspect_ratio.num == 40 &&
769 codec_context->sample_aspect_ratio.den == 33 )
770 force_aspect_ratio = 32.0/27.0; // 16:9 NTSC
771 else if ( codec_context->sample_aspect_ratio.num == 118 &&
772 codec_context->sample_aspect_ratio.den == 81 )
773 force_aspect_ratio = 64.0/45.0; // 16:9 PAL
774 }
775
776 // XXX: We won't know the real aspect ratio until an image is decoded
777 // but we do need it now (to satisfy filter_resize) - take a guess based
778 // on pal/ntsc
779 if ( force_aspect_ratio > 0.0 )
780 {
781 aspect_ratio = force_aspect_ratio;
782 }
783 else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 )
784 {
785 aspect_ratio = av_q2d( codec_context->sample_aspect_ratio );
786 }
787 else
788 {
789 aspect_ratio = 1.0;
790 }
791
792 // Determine the fps
793 source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );
794
795 // We'll use fps if it's available
796 if ( source_fps > 0 )
797 mlt_properties_set_double( properties, "source_fps", source_fps );
798 else
799 mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this ) );
800 mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
801
802 // Set the width and height
803 mlt_properties_set_int( frame_properties, "width", codec_context->width );
804 mlt_properties_set_int( frame_properties, "height", codec_context->height );
805 mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
806
807 mlt_frame_push_get_image( frame, producer_get_image );
808 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
809 }
810 else
811 {
812 mlt_properties_set_int( frame_properties, "test_image", 1 );
813 }
814 }
815 else
816 {
817 mlt_properties_set_int( frame_properties, "test_image", 1 );
818 }
819 }
820
821 /** Get the audio from a frame.
822 */
823
824 static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
825 {
826 // Get the properties from the frame
827 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
828
829 // Obtain the frame number of this frame
830 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
831
832 // Get the producer
833 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
834
835 // Get the producer properties
836 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
837
838 // Fetch the audio_context
839 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
840
841 // Get the audio_index
842 int index = mlt_properties_get_int( properties, "audio_index" );
843
844 // Get the seekable status
845 int seekable = mlt_properties_get_int( properties, "seekable" );
846
848 // Obtain the expected frame number
848 mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" );
849
850 // Obtain the resample context if it exists (not always needed)
851 ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
852
853 // Obtain the audio buffer
854 int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
855
856 // Get amount of audio used
857 int audio_used = mlt_properties_get_int( properties, "_audio_used" );
858
859 // Calculate the real time code
860 double real_timecode = producer_time_of_frame( this, position );
861
862 // Get the audio stream
863 AVStream *stream = context->streams[ index ];
864
865 // Get codec context
866 AVCodecContext *codec_context = stream->codec;
867
868 // Packet
869 AVPacket pkt;
870
871 // Number of frames to ignore (for ffwd)
872 int ignore = 0;
873
874 // Flag for paused (silence)
875 int paused = 0;
876
877 // Check for resample and create if necessary
878 if ( resample == NULL && codec_context->channels <= 2 )
879 {
880 // Create the resampler
881 resample = audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate );
882
883 // And store it on properties
884 mlt_properties_set_data( properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );
885 }
886 else if ( resample == NULL )
887 {
888 *channels = codec_context->channels;
889 *frequency = codec_context->sample_rate;
890 }
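// Note: the resampler only handles mono/stereo, so sources with more than two channels
// are passed through untouched and the requested channel count and rate are overridden
// with the source values above.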
891
892 // Check for audio buffer and create if necessary
893 if ( audio_buffer == NULL )
894 {
895 // Allocate the audio buffer
896 audio_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
897
898 // And store it on properties for reuse
899 mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
900 }
901
902 // Seek if necessary
903 if ( position != expected )
904 {
905 if ( position + 1 == expected )
906 {
907 // We're paused - silence required
908 paused = 1;
909 }
910 else if ( !seekable && position > expected && ( position - expected ) < 250 )
911 {
912 // Fast forward - seeking is inefficient for small distances - just ignore following frames
913 ignore = position - expected;
914 }
915 else if ( position < expected || position - expected >= 12 )
916 {
917 // Set to the real timecode
918 if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 )
919 paused = 1;
920
921 // Clear the usage in the audio buffer
922 audio_used = 0;
923 }
924 }
925
926 // Get the audio if required
927 if ( !paused )
928 {
929 int ret = 0;
930 int got_audio = 0;
931 int16_t *temp = av_malloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE );
932
933 av_init_packet( &pkt );
934
935 while( ret >= 0 && !got_audio )
936 {
937 // Check if the buffer already contains the samples required
938 if ( audio_used >= *samples && ignore == 0 )
939 {
940 got_audio = 1;
941 break;
942 }
943
944 // Read a packet
945 ret = av_read_frame( context, &pkt );
946
947 int len = pkt.size;
948 uint8_t *ptr = pkt.data;
949
950 // We only deal with audio from the selected audio_index
951 while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 )
952 {
953 int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE;
954
955 // Decode the audio
956 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
957 ret = avcodec_decode_audio2( codec_context, temp, &data_size, ptr, len );
958 #else
959 ret = avcodec_decode_audio( codec_context, temp, &data_size, ptr, len );
960 #endif
961 if ( ret < 0 )
962 {
963 ret = 0;
964 break;
965 }
966
967 len -= ret;
968 ptr += ret;
969
970 if ( data_size > 0 )
971 {
972 if ( resample != NULL )
973 {
974 audio_used += audio_resample( resample, &audio_buffer[ audio_used * *channels ], temp, data_size / ( codec_context->channels * sizeof( int16_t ) ) );
975 }
976 else
977 {
978 memcpy( &audio_buffer[ audio_used * *channels ], temp, data_size );
979 audio_used += data_size / ( codec_context->channels * sizeof( int16_t ) );
980 }
981
982 // Handle ignore
983 while ( ignore && audio_used > *samples )
984 {
985 ignore --;
986 audio_used -= *samples;
987 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
988 }
989 }
990
991 // If we're behind, ignore this packet
992 float current_pts = av_q2d( stream->time_base ) * pkt.pts;
993 if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) )
994 ignore = 1;
995 }
996
997 // We're finished with this packet regardless
998 av_free_packet( &pkt );
999 }
1000
1001 *buffer = mlt_pool_alloc( *samples * *channels * sizeof( int16_t ) );
1002 mlt_properties_set_data( frame_properties, "audio", *buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1003
1004 // Now handle the audio if we have enough
1005 if ( audio_used >= *samples )
1006 {
1007 memcpy( *buffer, audio_buffer, *samples * *channels * sizeof( int16_t ) );
1008 audio_used -= *samples;
1009 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
1010 }
1011 else
1012 {
1013 memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );
1014 }
1015
1016 // Store the number of audio samples still available
1017 mlt_properties_set_int( properties, "_audio_used", audio_used );
1018
1019 // Release the temporary audio
1020 av_free( temp );
1021 }
1022 else
1023 {
1024 // Get silence and don't touch the context
1025 mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
1026 }
1027
1028 // Regardless of speed (other than paused), we expect to get the next frame
1029 if ( !paused )
1030 mlt_properties_set_position( properties, "_audio_expected", position + 1 );
1031
1032 return 0;
1033 }
1034
1035 /** Set up audio handling.
1036 */
1037
1038 static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
1039 {
1040 // Get the properties
1041 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
1042
1043 // Fetch the audio_context
1044 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
1045
1046 // Get the audio_index
1047 int index = mlt_properties_get_int( properties, "audio_index" );
1048
1049 // Reopen the file if necessary
1050 if ( !context && index != -1 )
1051 {
1052 mlt_events_block( properties, this );
1053 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
1054 mlt_properties_get( properties, "resource" ) );
1055 context = mlt_properties_get_data( properties, "audio_context", NULL );
1056 index = mlt_properties_get_int( properties, "audio_index" );
1057 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
1058 mlt_events_unblock( properties, this );
1059 }
1060
1061 // Deal with audio context
1062 if ( context != NULL && index != -1 )
1063 {
1064 // Get the frame properties
1065 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
1066
1067 // Get the audio stream
1068 AVStream *stream = context->streams[ index ];
1069
1070 // Get codec context
1071 AVCodecContext *codec_context = stream->codec;
1072
1073 // Get the codec
1074 AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL );
1075
1076 // Initialise the codec if necessary
1077 if ( codec == NULL )
1078 {
1079 // Find the codec
1080 codec = avcodec_find_decoder( codec_context->codec_id );
1081
1082 // If we don't have a codec and we can't initialise it, we can't do much more...
1083 avformat_lock( );
1084 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
1085 {
1086 // Now store the codec with its destructor
1087 mlt_properties_set_data( properties, "audio_codec", codec_context, 0, producer_codec_close, NULL );
1088
1089 }
1090 else
1091 {
1092 // Remember that we can't use this later
1093 mlt_properties_set_int( properties, "audio_index", -1 );
1094 }
1095 avformat_unlock( );
1096 }
1097
1098 // No codec, no show...
1099 if ( codec != NULL )
1100 {
1101 mlt_frame_push_audio( frame, producer_get_audio );
1102 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
1103 mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
1104 mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
1105 }
1106 }
1107 }
1108
1109 /** Our get frame implementation.
1110 */
1111
1112 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index )
1113 {
1114 // Create an empty frame
1115 *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( this ) );
1116
1117 // Update timecode on the frame we're creating
1118 mlt_frame_set_position( *frame, mlt_producer_position( this ) );
1119
1120 // Set the position of this producer
1121 mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );
1122
1123 // Set up the video
1124 producer_set_up_video( this, *frame );
1125
1126 // Set up the audio
1127 producer_set_up_audio( this, *frame );
1128
1129 // Set the aspect_ratio
1130 mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );
1131
1132 // Calculate the next timecode
1133 mlt_producer_prepare_next( this );
1134
1135 return 0;
1136 }