producer_avformat.c: remove multi-threaded audio decoding option. It does not provide...
[melted] src/modules/avformat/producer_avformat.c
1 /*
2 * producer_avformat.c -- avformat producer
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 // MLT Header files
23 #include <framework/mlt_producer.h>
24 #include <framework/mlt_frame.h>
25
26 // ffmpeg Header files
27 #include <avformat.h>
28 #ifdef SWSCALE
29 #include <swscale.h>
30 #endif
31
32 // System header files
33 #include <stdlib.h>
34 #include <string.h>
35 #include <pthread.h>
36 #include <math.h>
37
38 void avformat_lock( );
39 void avformat_unlock( );
40
41 // Forward references.
42 static int producer_open( mlt_producer this, mlt_profile profile, char *file );
43 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );
44
45 /** Constructor for libavformat.
46 */
47
48 mlt_producer producer_avformat_init( mlt_profile profile, char *file )
49 {
50 mlt_producer this = NULL;
51
52 // Check that we have a non-NULL argument
53 if ( file != NULL )
54 {
55 // Construct the producer
56 this = calloc( 1, sizeof( struct mlt_producer_s ) );
57
58 // Initialise it
59 if ( mlt_producer_init( this, NULL ) == 0 )
60 {
61 // Get the properties
62 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
63
64 // Set the resource property (required for all producers)
65 mlt_properties_set( properties, "resource", file );
66
67 // Register our get_frame implementation
68 this->get_frame = producer_get_frame;
69
70 // Open the file
71 if ( producer_open( this, profile, file ) != 0 )
72 {
73 // Clean up
74 mlt_producer_close( this );
75 this = NULL;
76 }
77 }
78 }
79
80 return this;
81 }
82
83 /** Find the default streams.
84 */
85
86 static void find_default_streams( AVFormatContext *context, int *audio_index, int *video_index )
87 {
88 int i;
89
90 // Allow for multiple audio and video streams in the file and select first of each (if available)
91 for( i = 0; i < context->nb_streams; i++ )
92 {
93 // Get the codec context
94 AVCodecContext *codec_context = context->streams[ i ]->codec;
95
96 if ( avcodec_find_decoder( codec_context->codec_id ) == NULL )
97 continue;
98
99 // Determine the type and obtain the first index of each type
100 switch( codec_context->codec_type )
101 {
102 case CODEC_TYPE_VIDEO:
103 if ( *video_index < 0 )
104 *video_index = i;
105 break;
106 case CODEC_TYPE_AUDIO:
107 if ( *audio_index < 0 )
108 *audio_index = i;
109 break;
110 default:
111 break;
112 }
113 }
114 }
115
116 /** Producer file destructor.
117 */
118
119 static void producer_file_close( void *context )
120 {
121 if ( context != NULL )
122 {
123 // Lock the mutex now
124 avformat_lock( );
125
126 // Close the file
127 av_close_input_file( context );
128
129 // Unlock the mutex now
130 avformat_unlock( );
131 }
132 }
133
134 /** Producer codec destructor.
135 */
136
137 static void producer_codec_close( void *codec )
138 {
139 if ( codec != NULL )
140 {
141 // Lock the mutex now
142 avformat_lock( );
143
144 // Close the codec
145 avcodec_close( codec );
146
147 // Unlock the mutex now
148 avformat_unlock( );
149 }
150 }
151
152 /** Open the file.
153 */
154
155 static int producer_open( mlt_producer this, mlt_profile profile, char *file )
156 {
157 // Return an error code (0 == no error)
158 int error = 0;
159
160 // Context for avformat
161 AVFormatContext *context = NULL;
162
163 // Get the properties
164 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
165
166 // We will treat everything at the producer (profile) frame rate
167 double fps = mlt_profile_fps( profile );
168
169 // Lock the mutex now
170 avformat_lock( );
171
172 // If "MRL", then create AVInputFormat
173 AVInputFormat *format = NULL;
174 AVFormatParameters *params = NULL;
175 char *standard = NULL;
176 char *mrl = strchr( file, ':' );
177
178 // AV option (0 = both, 1 = video, 2 = audio)
179 int av = 0;
180
181 // Setting lowest log level
182 av_log_set_level( -1 );
183
184 // Only if there is not a protocol specification that avformat can handle
185 if ( mrl && !url_exist( file ) )
186 {
187 // 'file' becomes format abbreviation
188 mrl[0] = 0;
189
190 // Lookup the format
191 format = av_find_input_format( file );
192
193 // Eat the format designator
194 file = ++mrl;
195
196 if ( format )
197 {
198 // Allocate params
199 params = calloc( sizeof( AVFormatParameters ), 1 );
200
201 // These are required by video4linux (defaults)
202 params->width = 640;
203 params->height = 480;
204 params->time_base= (AVRational){1,25};
205 // params->device = file;
206 params->channels = 2;
207 params->sample_rate = 48000;
208 }
209
210 // XXX: this does not work anymore since avdevice
211 // TODO: make producer_avddevice?
212 // Parse out params
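// Params are appended as ?name:value pairs separated by '&', e.g. "video4linux:/dev/video0?width:320&height:240" (illustrative example of the historic syntax)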
213 mrl = strchr( file, '?' );
214 while ( mrl )
215 {
216 mrl[0] = 0;
217 char *name = strdup( ++mrl );
218 char *value = strchr( name, ':' );
219 if ( value )
220 {
221 value[0] = 0;
222 value++;
223 char *t = strchr( value, '&' );
224 if ( t )
225 t[0] = 0;
226 if ( !strcmp( name, "frame_rate" ) )
227 params->time_base.den = atoi( value );
228 else if ( !strcmp( name, "frame_rate_base" ) )
229 params->time_base.num = atoi( value );
230 else if ( !strcmp( name, "sample_rate" ) )
231 params->sample_rate = atoi( value );
232 else if ( !strcmp( name, "channels" ) )
233 params->channels = atoi( value );
234 else if ( !strcmp( name, "width" ) )
235 params->width = atoi( value );
236 else if ( !strcmp( name, "height" ) )
237 params->height = atoi( value );
238 else if ( !strcmp( name, "standard" ) )
239 {
240 standard = strdup( value );
241 params->standard = standard;
242 }
243 else if ( !strcmp( name, "av" ) )
244 av = atoi( value );
245 }
246 free( name );
247 mrl = strchr( mrl, '&' );
248 }
249 }
250
251 // Now attempt to open the file
252 error = av_open_input_file( &context, file, format, 0, params ) < 0;
253
254 // Cleanup AVFormatParameters
255 free( standard );
256 free( params );
257
258 // If successful, then try to get additional info
259 if ( error == 0 )
260 {
261 // Get the stream info
262 error = av_find_stream_info( context ) < 0;
263
264 // Continue if no error
265 if ( error == 0 )
266 {
267 // We will default to the first audio and video streams found
268 int audio_index = -1;
269 int video_index = -1;
270 int av_bypass = 0;
271
272 // Now set properties where we can (use default unknowns if required)
273 if ( context->duration != AV_NOPTS_VALUE )
274 {
275 // This isn't going to be accurate for all formats
276 mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
277 mlt_properties_set_position( properties, "out", frames - 1 );
278 mlt_properties_set_position( properties, "length", frames );
279 }
280
281 // Find default audio and video streams
282 find_default_streams( context, &audio_index, &video_index );
283
284 if ( context->start_time != AV_NOPTS_VALUE )
285 mlt_properties_set_double( properties, "_start_time", context->start_time );
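// _start_time is kept in AV_TIME_BASE (microsecond) units and reused below as a seek offset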
286
287 // Check if we're seekable (something funny about mpeg here :-/)
288 if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 7 ) )
289 {
290 mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
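// The probe seek above may have disturbed the demuxer position, so keep this context only for cleanup and reopen a fresh one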
291 mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
292 av_open_input_file( &context, file, NULL, 0, NULL );
293 av_find_stream_info( context );
294 }
295 else
296 av_bypass = 1;
297
298 // Store selected audio and video indexes on properties
299 mlt_properties_set_int( properties, "audio_index", audio_index );
300 mlt_properties_set_int( properties, "video_index", video_index );
301 mlt_properties_set_int( properties, "_last_position", -1 );
302
303 // Fetch the width, height and aspect ratio
304 if ( video_index != -1 )
305 {
306 AVCodecContext *codec_context = context->streams[ video_index ]->codec;
307 mlt_properties_set_int( properties, "width", codec_context->width );
308 mlt_properties_set_int( properties, "height", codec_context->height );
309 mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) );
310 }
311
312 // Read Metadata
313 if (context->title != NULL)
314 mlt_properties_set(properties, "meta.attr.title.markup", context->title );
315 if (context->author != NULL)
316 mlt_properties_set(properties, "meta.attr.author.markup", context->author );
317 if (context->copyright != NULL)
318 mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
319 if (context->comment != NULL)
320 mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
321 if (context->album != NULL)
322 mlt_properties_set(properties, "meta.attr.album.markup", context->album );
323 if (context->year != 0)
324 mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
325 if (context->track != 0)
326 mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
327
328 // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
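// (separate contexts allow producer_get_image and producer_get_audio below to seek and read independently)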
329 if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 )
330 {
331 // We'll use the open one as our video_context
332 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
333
334 // And open again for our audio context
335 av_open_input_file( &context, file, NULL, 0, NULL );
336 av_find_stream_info( context );
337
338 // Audio context
339 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
340 }
341 else if ( av != 2 && video_index != -1 )
342 {
343 // We only have a video context
344 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
345 }
346 else if ( audio_index != -1 )
347 {
348 // We only have an audio context
349 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
350 }
351 else
352 {
353 // Something has gone wrong
354 error = -1;
355 }
356
357 mlt_properties_set_int( properties, "av_bypass", av_bypass );
358 }
359 }
360
361 // Unlock the mutex now
362 avformat_unlock( );
363
364 return error;
365 }
366
367 /** Convert a frame position to a time code.
368 */
369
370 static double producer_time_of_frame( mlt_producer this, mlt_position position )
371 {
372 return ( double )position / mlt_producer_get_fps( this );
373 }
374
375 static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
376 {
377 #ifdef SWSCALE
378 if ( format == mlt_image_yuv420p )
379 {
380 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
381 width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
382 AVPicture output;
383 output.data[0] = buffer;
384 output.data[1] = buffer + width * height;
385 output.data[2] = buffer + ( 3 * width * height ) / 2;
386 output.linesize[0] = width;
387 output.linesize[1] = width >> 1;
388 output.linesize[2] = width >> 1;
389 sws_scale( context, frame->data, frame->linesize, 0, height,
390 output.data, output.linesize);
391 sws_freeContext( context );
392 }
393 else if ( format == mlt_image_rgb24 )
394 {
395 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
396 width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
397 AVPicture output;
398 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
399 sws_scale( context, frame->data, frame->linesize, 0, height,
400 output.data, output.linesize);
401 sws_freeContext( context );
402 }
403 else
404 {
405 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
406 width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
407 AVPicture output;
408 avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
409 sws_scale( context, frame->data, frame->linesize, 0, height,
410 output.data, output.linesize);
411 sws_freeContext( context );
412 }
413 #else
414 if ( format == mlt_image_yuv420p )
415 {
416 AVPicture pict;
417 pict.data[0] = buffer;
418 pict.data[1] = buffer + width * height;
419 pict.data[2] = buffer + ( 3 * width * height ) / 2;
420 pict.linesize[0] = width;
421 pict.linesize[1] = width >> 1;
422 pict.linesize[2] = width >> 1;
423 img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
424 }
425 else if ( format == mlt_image_rgb24 )
426 {
427 AVPicture output;
428 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
429 img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
430 }
431 else
432 {
433 AVPicture output;
434 avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height );
435 img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height );
436 }
437 #endif
438 }
439
440 /** Get an image from a frame.
441 */
442
443 static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
444 {
445 // Get the properties from the frame
446 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
447
448 // Obtain the frame number of this frame
449 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
450
451 // Get the producer
452 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
453
454 // Get the producer properties
455 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
456
457 // Fetch the video_context
458 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
459
460 // Get the video_index
461 int index = mlt_properties_get_int( properties, "video_index" );
462
463 // Obtain the expected frame number
464 mlt_position expected = mlt_properties_get_position( properties, "_video_expected" );
465
466 // Get the video stream
467 AVStream *stream = context->streams[ index ];
468
469 // Get codec context
470 AVCodecContext *codec_context = stream->codec;
471
472 // Packet
473 AVPacket pkt;
474
475 // Get the conversion frame
476 AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL );
477
478 // Special case pause handling flag
479 int paused = 0;
480
481 // Special case ffwd handling
482 int ignore = 0;
483
484 // We may want to use the source fps if available
485 double source_fps = mlt_properties_get_double( properties, "source_fps" );
486 double fps = mlt_producer_get_fps( this );
487
488 // This is the physical frame position in the source
489 int req_position = ( int )( position / fps * source_fps + 0.5 );
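// e.g. with a 25 fps profile and a 50 fps source, frame 100 maps to source frame 200 (illustrative)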
490
491 // Get the seekable status
492 int seekable = mlt_properties_get_int( properties, "seekable" );
493
494 // Generate the size in bytes
495 int size = 0;
496
497 // Hopefully provide better support for streams...
498 int av_bypass = mlt_properties_get_int( properties, "av_bypass" );
499
500 // Determines if we have to decode all frames in a sequence
501 int must_decode = 1;
502
503 // Set the result arguments that we know here (only *buffer is now required)
504 *width = codec_context->width;
505 *height = codec_context->height;
506
507 switch ( *format )
508 {
509 case mlt_image_yuv420p:
510 size = *width * 3 * ( *height + 1 ) / 2;
511 break;
512 case mlt_image_rgb24:
513 size = *width * ( *height + 1 ) * 3;
514 break;
515 default:
516 *format = mlt_image_yuv422;
517 size = *width * ( *height + 1 ) * 2;
518 break;
519 }
520
521 // Set this on the frame properties
522 mlt_properties_set_int( frame_properties, "width", *width );
523 mlt_properties_set_int( frame_properties, "height", *height );
524
525 // Construct the output image
526 *buffer = mlt_pool_alloc( size );
527
528 // Temporary hack: intra-frame-only codecs (mjpeg, rawvideo, dvvideo) do not require intermediate frames to be decoded
529 must_decode = strcmp( codec_context->codec->name, "mjpeg" ) &&
530 strcmp( codec_context->codec->name, "rawvideo" ) &&
531 strcmp( codec_context->codec->name, "dvvideo" );
532
533 // Seek if necessary
534 if ( position != expected )
535 {
536 if ( av_frame != NULL && position + 1 == expected )
537 {
538 // We're paused - use last image
539 paused = 1;
540 }
541 else if ( !seekable && position > expected && ( position - expected ) < 250 )
542 {
543 // Fast forward - seeking is inefficient for small distances - just ignore following frames
544 ignore = ( int )( ( position - expected ) / fps * source_fps );
545 }
546 else if ( seekable && ( position < expected || position - expected >= 12 ) )
547 {
548 // Calculate the timestamp for the requested frame
549 int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
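// i.e. the requested source frame expressed in AV_TIME_BASE (microsecond) units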
550 if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
551 timestamp += context->start_time;
552 if ( must_decode )
553 timestamp -= AV_TIME_BASE;
554 if ( timestamp < 0 )
555 timestamp = 0;
556
557 // Set to the timestamp
558 av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
559
560 // Remove the cached info relating to the previous position
561 mlt_properties_set_int( properties, "_current_position", -1 );
562 mlt_properties_set_int( properties, "_last_position", -1 );
563 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
564 av_frame = NULL;
565 }
566 }
567
568 // Duplicate the last image if necessary (see comment on rawvideo below)
569 int current_position = mlt_properties_get_int( properties, "_current_position" );
570 int got_picture = mlt_properties_get_int( properties, "_got_picture" );
571 if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 )
572 {
573 // Duplicate it
574 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
575
576 // Set this on the frame properties
577 mlt_properties_set_data( frame_properties, "image", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL );
578 }
579 else
580 {
581 int ret = 0;
582 int int_position = 0;
583 got_picture = 0;
584
585 av_init_packet( &pkt );
586
587 // Construct an AVFrame for YUV422 conversion
588 if ( av_frame == NULL )
589 {
590 av_frame = avcodec_alloc_frame( );
591 mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
592 }
593
594 while( ret >= 0 && !got_picture )
595 {
596 // Read a packet
597 ret = av_read_frame( context, &pkt );
598
599 // We only deal with video from the selected video_index
600 if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 )
601 {
602 // Determine time code of the packet
603 int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
604 if ( context->start_time != AV_NOPTS_VALUE )
605 int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
606 int last_position = mlt_properties_get_int( properties, "_last_position" );
607 if ( int_position == last_position )
608 int_position = last_position + 1;
609 mlt_properties_set_int( properties, "_last_position", int_position );
610
611 // Decode the image
612 if ( must_decode || int_position >= req_position )
613 ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size );
614
615 if ( got_picture )
616 {
617 // Handle ignore
618 if ( int_position < req_position )
619 {
620 ignore = 0;
621 got_picture = 0;
622 }
623 else if ( int_position >= req_position )
624 {
625 ignore = 0;
626 }
627 else if ( ignore -- )
628 {
629 got_picture = 0;
630 }
631 }
632 av_free_packet( &pkt );
633 }
634 else if ( ret >= 0 )
635 {
636 av_free_packet( &pkt );
637 }
638
639 // Now handle the picture if we have one
640 if ( got_picture )
641 {
642 mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
643 mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
644 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
645 mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
646 mlt_properties_set_int( properties, "_current_position", int_position );
647 mlt_properties_set_int( properties, "_got_picture", 1 );
648 }
649 }
650 }
651
652 // Very untidy - for rawvideo, the packet contains the frame, hence the free packet
653 // above will break the pause behaviour - so we wipe the frame now
654 if ( !strcmp( codec_context->codec->name, "rawvideo" ) )
655 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
656
657 // Set the field order property for this frame
658 mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) );
659
660 // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
661 mlt_properties_set_position( properties, "_video_expected", position + 1 );
662
663 return 0;
664 }
665
666 /** Set up video handling.
667 */
668
669 static void producer_set_up_video( mlt_producer this, mlt_frame frame )
670 {
671 // Get the properties
672 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
673
674 // Fetch the video_context
675 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
676
677 // Get the video_index
678 int index = mlt_properties_get_int( properties, "video_index" );
679
680 // Get the frame properties
681 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
682
683 if ( context != NULL && index != -1 )
684 {
685 // Get the video stream
686 AVStream *stream = context->streams[ index ];
687
688 // Get codec context
689 AVCodecContext *codec_context = stream->codec;
690
691 // Get the codec
692 AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );
693
694 // Initialise the codec if necessary
695 if ( codec == NULL )
696 {
697 // Initialise multi-threading
698 int thread_count = mlt_properties_get_int( properties, "threads" );
699 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
700 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
701 if ( thread_count > 1 )
702 {
703 avcodec_thread_init( codec_context, thread_count );
704 codec_context->thread_count = thread_count;
705 }
706
707 // Find the codec
708 codec = avcodec_find_decoder( codec_context->codec_id );
709
710 // If we don't have a codec or we can't initialise it, we can't do much more...
711 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
712 {
713 // Now store the codec with its destructor
714 mlt_properties_set_data( properties, "video_codec", codec_context, 0, producer_codec_close, NULL );
715 }
716 else
717 {
718 // Remember that we can't use this later
719 mlt_properties_set_int( properties, "video_index", -1 );
720 }
721 }
722
723 // No codec, no show...
724 if ( codec != NULL )
725 {
726 double source_fps = 0;
727 int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" );
728 double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
729 double aspect_ratio;
730
731 if ( strcmp( codec_context->codec->name, "dvvideo" ) == 0 )
732 {
733 // Override FFmpeg's notion of DV aspect ratios, which are
734 // based upon a width of 704. Since we do not have a normaliser
735 // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
736 // we just coerce the values to facilitate a passive behaviour through
737 // the rescale normaliser when using equivalent producers and consumers.
738 // i.e. sample_aspect = display_aspect * height / width
739 if ( codec_context->sample_aspect_ratio.num == 10 &&
740 codec_context->sample_aspect_ratio.den == 11 )
741 force_aspect_ratio = 8.0/9.0; // 4:3 NTSC
742 else if ( codec_context->sample_aspect_ratio.num == 59 &&
743 codec_context->sample_aspect_ratio.den == 54 )
744 force_aspect_ratio = 16.0/15.0; // 4:3 PAL
745 else if ( codec_context->sample_aspect_ratio.num == 40 &&
746 codec_context->sample_aspect_ratio.den == 33 )
747 force_aspect_ratio = 32.0/27.0; // 16:9 NTSC
748 else if ( codec_context->sample_aspect_ratio.num == 118 &&
749 codec_context->sample_aspect_ratio.den == 81 )
750 force_aspect_ratio = 64.0/45.0; // 16:9 PAL
751 }
752
753 // XXX: We won't know the real aspect ratio until an image is decoded
754 // but we do need it now (to satisfy filter_resize) - take a guess based
755 // on pal/ntsc
756 if ( force_aspect_ratio > 0.0 )
757 {
758 aspect_ratio = force_aspect_ratio;
759 }
760 else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 )
761 {
762 aspect_ratio = av_q2d( codec_context->sample_aspect_ratio );
763 }
764 else
765 {
766 aspect_ratio = 1.0;
767 }
768
769 // Determine the fps
770 source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );
771
772 // We'll use fps if it's available
773 if ( source_fps > 0 )
774 mlt_properties_set_double( properties, "source_fps", source_fps );
775 else
776 mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this ) );
777 mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
778
779 // Set the width and height
780 mlt_properties_set_int( frame_properties, "width", codec_context->width );
781 mlt_properties_set_int( frame_properties, "height", codec_context->height );
782 mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
783
784 mlt_frame_push_get_image( frame, producer_get_image );
785 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
786 }
787 else
788 {
789 mlt_properties_set_int( frame_properties, "test_image", 1 );
790 }
791 }
792 else
793 {
794 mlt_properties_set_int( frame_properties, "test_image", 1 );
795 }
796 }
797
798 /** Get the audio from a frame.
799 */
800
801 static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
802 {
803 // Get the properties from the frame
804 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
805
806 // Obtain the frame number of this frame
807 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
808
809 // Get the producer
810 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
811
812 // Get the producer properties
813 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
814
815 // Fetch the audio_context
816 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
817
818 // Get the audio_index
819 int index = mlt_properties_get_int( properties, "audio_index" );
820
821 // Get the seekable status
822 int seekable = mlt_properties_get_int( properties, "seekable" );
823
824 // Obtain the expected frame number
825 mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" );
826
827 // Obtain the resample context if it exists (not always needed)
828 ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
829
830 // Obtain the audio buffer
831 int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
832
833 // Get amount of audio used
834 int audio_used = mlt_properties_get_int( properties, "_audio_used" );
835
836 // Calculate the real time code
837 double real_timecode = producer_time_of_frame( this, position );
838
839 // Get the audio stream
840 AVStream *stream = context->streams[ index ];
841
842 // Get codec context
843 AVCodecContext *codec_context = stream->codec;
844
845 // Packet
846 AVPacket pkt;
847
848 // Number of frames to ignore (for ffwd)
849 int ignore = 0;
850
851 // Flag for paused (silence)
852 int paused = 0;
853
854 // Check for resample and create if necessary
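// (resampling is only attempted for sources with one or two channels; otherwise the source rate and channel count are passed straight through)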
855 if ( resample == NULL && codec_context->channels <= 2 )
856 {
857 // Create the resampler
858 resample = audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate );
859
860 // And store it on properties
861 mlt_properties_set_data( properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );
862 }
863 else if ( resample == NULL )
864 {
865 *channels = codec_context->channels;
866 *frequency = codec_context->sample_rate;
867 }
868
869 // Check for audio buffer and create if necessary
870 if ( audio_buffer == NULL )
871 {
872 // Allocate the audio buffer
873 audio_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
874
875 // And store it on properties for reuse
876 mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
877 }
878
879 // Seek if necessary
880 if ( position != expected )
881 {
882 if ( position + 1 == expected )
883 {
884 // We're paused - silence required
885 paused = 1;
886 }
887 else if ( !seekable && position > expected && ( position - expected ) < 250 )
888 {
889 // Fast forward - seeking is inefficient for small distances - just ignore following frames
890 ignore = position - expected;
891 }
892 else if ( position < expected || position - expected >= 12 )
893 {
894 // Set to the real timecode
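// real_timecode is in seconds; the * 1000000.0 converts it to the AV_TIME_BASE units expected when seeking with stream_index -1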
895 if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 )
896 paused = 1;
897
898 // Clear the usage in the audio buffer
899 audio_used = 0;
900 }
901 }
902
903 // Get the audio if required
904 if ( !paused )
905 {
906 int ret = 0;
907 int got_audio = 0;
908 int16_t *temp = av_malloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE );
909
910 av_init_packet( &pkt );
911
912 while( ret >= 0 && !got_audio )
913 {
914 // Check if the buffer already contains the samples required
915 if ( audio_used >= *samples && ignore == 0 )
916 {
917 got_audio = 1;
918 break;
919 }
920
921 // Read a packet
922 ret = av_read_frame( context, &pkt );
923
924 int len = pkt.size;
925 uint8_t *ptr = pkt.data;
926
927 // We only deal with audio from the selected audio_index
928 while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 )
929 {
930 int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE;
931
932 // Decode the audio
933 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
934 ret = avcodec_decode_audio2( codec_context, temp, &data_size, ptr, len );
935 #else
936 ret = avcodec_decode_audio( codec_context, temp, &data_size, ptr, len );
937 #endif
938 if ( ret < 0 )
939 {
940 ret = 0;
941 break;
942 }
943
944 len -= ret;
945 ptr += ret;
946
947 if ( data_size > 0 )
948 {
949 if ( resample != NULL )
950 {
951 audio_used += audio_resample( resample, &audio_buffer[ audio_used * *channels ], temp, data_size / ( codec_context->channels * sizeof( int16_t ) ) );
952 }
953 else
954 {
955 memcpy( &audio_buffer[ audio_used * *channels ], temp, data_size );
956 audio_used += data_size / ( codec_context->channels * sizeof( int16_t ) );
957 }
958
959 // Handle ignore
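// (drop whole frame-sized blocks of samples until we have caught up with the requested position)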
960 while ( ignore && audio_used > *samples )
961 {
962 ignore --;
963 audio_used -= *samples;
964 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
965 }
966 }
967
968 // If we're behind, ignore this packet
969 float current_pts = av_q2d( stream->time_base ) * pkt.pts;
970 if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) )
971 ignore = 1;
972 }
973
974 // We're finished with this packet regardless
975 av_free_packet( &pkt );
976 }
977
978 *buffer = mlt_pool_alloc( *samples * *channels * sizeof( int16_t ) );
979 mlt_properties_set_data( frame_properties, "audio", *buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
980
981 // Now handle the audio if we have enough
982 if ( audio_used >= *samples )
983 {
984 memcpy( *buffer, audio_buffer, *samples * *channels * sizeof( int16_t ) );
985 audio_used -= *samples;
986 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
987 }
988 else
989 {
990 memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );
991 }
992
993 // Store the number of audio samples still available
994 mlt_properties_set_int( properties, "_audio_used", audio_used );
995
996 // Release the temporary audio
997 av_free( temp );
998 }
999 else
1000 {
1001 // Get silence and don't touch the context
1002 mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
1003 }
1004
1005 // Regardless of speed (other than paused), we expect to get the next frame
1006 if ( !paused )
1007 mlt_properties_set_position( properties, "_audio_expected", position + 1 );
1008
1009 return 0;
1010 }
1011
1012 /** Set up audio handling.
1013 */
1014
1015 static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
1016 {
1017 // Get the properties
1018 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
1019
1020 // Fetch the audio_context
1021 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
1022
1023 // Get the audio_index
1024 int index = mlt_properties_get_int( properties, "audio_index" );
1025
1026 // Deal with audio context
1027 if ( context != NULL && index != -1 )
1028 {
1029 // Get the frame properties
1030 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
1031
1032 // Get the audio stream
1033 AVStream *stream = context->streams[ index ];
1034
1035 // Get codec context
1036 AVCodecContext *codec_context = stream->codec;
1037
1038 // Get the codec
1039 AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL );
1040
1041 // Initialise the codec if necessary
1042 if ( codec == NULL )
1043 {
1044 // Find the codec
1045 codec = avcodec_find_decoder( codec_context->codec_id );
1046
1047 // If we don't have a codec or we can't initialise it, we can't do much more...
1048 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
1049 {
1050 // Now store the codec with its destructor
1051 mlt_properties_set_data( properties, "audio_codec", codec_context, 0, producer_codec_close, NULL );
1052
1053 }
1054 else
1055 {
1056 // Remember that we can't use this later
1057 mlt_properties_set_int( properties, "audio_index", -1 );
1058 }
1059 }
1060
1061 // No codec, no show...
1062 if ( codec != NULL )
1063 {
1064 mlt_frame_push_audio( frame, producer_get_audio );
1065 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
1066 mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
1067 mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
1068 }
1069 }
1070 }
1071
1072 /** Our get frame implementation.
1073 */
1074
1075 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index )
1076 {
1077 // Create an empty frame
1078 *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( this ) );
1079
1080 // Update timecode on the frame we're creating
1081 mlt_frame_set_position( *frame, mlt_producer_position( this ) );
1082
1083 // Set the position of this producer
1084 mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );
1085
1086 // Set up the video
1087 producer_set_up_video( this, *frame );
1088
1089 // Set up the audio
1090 producer_set_up_audio( this, *frame );
1091
1092 // Set the aspect_ratio
1093 mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );
1094
1095 // Calculate the next timecode
1096 mlt_producer_prepare_next( this );
1097
1098 return 0;
1099 }