3763bdb0ff7c08aa6f568fd0ed42bfbb7953a21d
[melted] src/modules/avformat/producer_avformat.c
1 /*
2 * producer_avformat.c -- avformat producer
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 // MLT Header files
23 #include <framework/mlt_producer.h>
24 #include <framework/mlt_frame.h>
25
26 // ffmpeg Header files
27 #include <avformat.h>
28 #include <opt.h>
29 #ifdef SWSCALE
30 # include <swscale.h>
31 #endif
32 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
33 # include "audioconvert.h"
34 #endif
35
36 // System header files
37 #include <stdlib.h>
38 #include <string.h>
39 #include <pthread.h>
40 #include <math.h>
41
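// These serialise all libavformat/libavcodec calls, which are not thread-safe;
// they are defined elsewhere in this module.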
42 void avformat_lock( );
43 void avformat_unlock( );
44
45 // Forward references.
46 static int producer_open( mlt_producer this, mlt_profile profile, char *file );
47 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );
48
49 /** Constructor for libavformat.
50 */
51
52 mlt_producer producer_avformat_init( mlt_profile profile, char *file )
53 {
54 mlt_producer this = NULL;
55
56 // Check that we have a non-NULL argument
57 if ( file != NULL )
58 {
59 // Construct the producer
60 this = calloc( 1, sizeof( struct mlt_producer_s ) );
61
62 // Initialise it
63 if ( mlt_producer_init( this, NULL ) == 0 )
64 {
65 // Get the properties
66 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
67
68 // Set the resource property (required for all producers)
69 mlt_properties_set( properties, "resource", file );
70
71 // Register our get_frame implementation
72 this->get_frame = producer_get_frame;
73
74 // Open the file
75 if ( producer_open( this, profile, file ) != 0 )
76 {
77 // Clean up
78 mlt_producer_close( this );
79 this = NULL;
80 }
81 else
82 {
83 // Close the file to release resources for large playlists - reopen later as needed
84 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
85 mlt_properties_set_data( properties, "audio_context", NULL, 0, NULL, NULL );
86 mlt_properties_set_data( properties, "video_context", NULL, 0, NULL, NULL );
87
88 // Default the user-selectable indices from the auto-detected indices
89 mlt_properties_set_int( properties, "audio_index", mlt_properties_get_int( properties, "_audio_index" ) );
90 mlt_properties_set_int( properties, "video_index", mlt_properties_get_int( properties, "_video_index" ) );
91 }
92 }
93 }
94
95 return this;
96 }
97
98 /** Find the default streams.
99 */
100
101 static void find_default_streams( AVFormatContext *context, int *audio_index, int *video_index )
102 {
103 int i;
104
105 // Allow for multiple audio and video streams in the file and select first of each (if available)
106 for( i = 0; i < context->nb_streams; i++ )
107 {
108 // Get the codec context
109 AVCodecContext *codec_context = context->streams[ i ]->codec;
110
111 if ( avcodec_find_decoder( codec_context->codec_id ) == NULL )
112 continue;
113
114 // Determine the type and obtain the first index of each type
115 switch( codec_context->codec_type )
116 {
117 case CODEC_TYPE_VIDEO:
118 if ( *video_index < 0 )
119 *video_index = i;
120 break;
121 case CODEC_TYPE_AUDIO:
122 if ( *audio_index < 0 )
123 *audio_index = i;
124 break;
125 default:
126 break;
127 }
128 }
129 }
130
131 /** Producer file destructor.
132 */
133
134 static void producer_file_close( void *context )
135 {
136 if ( context != NULL )
137 {
138 // Lock the mutex now
139 avformat_lock( );
140
141 // Close the file
142 av_close_input_file( context );
143
144 // Unlock the mutex now
145 avformat_unlock( );
146 }
147 }
148
149 /** Producer codec destructor.
150 */
151
152 static void producer_codec_close( void *codec )
153 {
154 if ( codec != NULL )
155 {
156 // Lock the mutex now
157 avformat_lock( );
158
159 // Close the codec
160 avcodec_close( codec );
161
162 // Unlock the mutex now
163 avformat_unlock( );
164 }
165 }
166
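/** Check whether a DV packet is PAL (625/50) by testing the DSF flag in the DIF header.
*/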
167 static inline int dv_is_pal( AVPacket *pkt )
168 {
169 return pkt->data[3] & 0x80;
170 }
171
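/** Check whether a DV packet is widescreen (16:9) by scanning the VAUX area for the
    source control pack (0x61) and testing its display-select bits.
*/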
172 static int dv_is_wide( AVPacket *pkt )
173 {
174 int i = 80 /* block size */ * 3 /* VAUX starts at block 3 */ + 3 /* skip block header */;
175
176 for ( ; i < pkt->size; i += 5 /* packet size */ )
177 {
178 if ( pkt->data[ i ] == 0x61 )
179 {
180 uint8_t x = pkt->data[ i + 2 ] & 0x7;
181 return ( x == 2 ) || ( x == 7 );
182 }
183 }
184 return 0;
185 }
186
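/** Determine the sample aspect ratio of the video stream, with special handling for DV.
*/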
187 static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
188 {
189 double aspect_ratio = 1.0;
190
191 if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
192 {
193 if ( pkt )
194 {
195 if ( dv_is_pal( pkt ) )
196 {
197 aspect_ratio = dv_is_wide( pkt )
198 ? 64.0/45.0 // 16:9 PAL
199 : 16.0/15.0; // 4:3 PAL
200 }
201 else
202 {
203 aspect_ratio = dv_is_wide( pkt )
204 ? 32.0/27.0 // 16:9 NTSC
205 : 8.0/9.0; // 4:3 NTSC
206 }
207 }
208 else
209 {
210 AVRational ar =
211 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
212 stream->sample_aspect_ratio;
213 #else
214 codec_context->sample_aspect_ratio;
215 #endif
216 // Override FFmpeg's notion of DV aspect ratios, which are
217 // based upon a width of 704. Since we do not have a normaliser
218 // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
219 // we just coerce the values to facilitate a passive behaviour through
220 // the rescale normaliser when using equivalent producers and consumers.
221 // i.e. sample_aspect = display_aspect * height / width
222 if ( ar.num == 10 && ar.den == 11 )
223 aspect_ratio = 8.0/9.0; // 4:3 NTSC
224 else if ( ar.num == 59 && ar.den == 54 )
225 aspect_ratio = 16.0/15.0; // 4:3 PAL
226 else if ( ar.num == 40 && ar.den == 33 )
227 aspect_ratio = 32.0/27.0; // 16:9 NTSC
228 else if ( ar.num == 118 && ar.den == 81 )
229 aspect_ratio = 64.0/45.0; // 16:9 PAL
230 }
231 }
232 else
233 {
234 AVRational codec_sar = codec_context->sample_aspect_ratio;
235 AVRational stream_sar =
236 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
237 stream->sample_aspect_ratio;
238 #else
239 { 0, 1 };
240 #endif
241 if ( codec_sar.num > 0 )
242 aspect_ratio = av_q2d( codec_sar );
243 else if ( stream_sar.num > 0 )
244 aspect_ratio = av_q2d( stream_sar );
245 }
246 return aspect_ratio;
247 }
248
249 /** Open the file.
250 */
251
252 static int producer_open( mlt_producer this, mlt_profile profile, char *file )
253 {
254 // Return an error code (0 == no error)
255 int error = 0;
256
257 // Context for avformat
258 AVFormatContext *context = NULL;
259
260 // Get the properties
261 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
262
263 // We will treat everything with the producer fps
264 double fps = mlt_profile_fps( profile );
265
266 // Lock the mutex now
267 avformat_lock( );
268
269 // If "MRL", then create AVInputFormat
270 AVInputFormat *format = NULL;
271 AVFormatParameters *params = NULL;
272 char *standard = NULL;
273 char *mrl = strchr( file, ':' );
274
275 // AV option (0 = both, 1 = video, 2 = audio)
276 int av = 0;
277
278 // Setting lowest log level
279 av_log_set_level( -1 );
280
281 // Only if there is not a protocol specification that avformat can handle
282 if ( mrl && !url_exist( file ) )
283 {
284 // 'file' becomes format abbreviation
285 mrl[0] = 0;
286
287 // Lookup the format
288 format = av_find_input_format( file );
289
290 // Eat the format designator
291 file = ++mrl;
292
293 if ( format )
294 {
295 // Allocate params
296 params = calloc( sizeof( AVFormatParameters ), 1 );
297
298 // These are required by video4linux (defaults)
299 params->width = 640;
300 params->height = 480;
301 params->time_base= (AVRational){1,25};
302 // params->device = file;
303 params->channels = 2;
304 params->sample_rate = 48000;
305 }
306
307 // XXX: this does not work anymore since avdevice
308 // TODO: make producer_avdevice?
309 // Parse out params
310 mrl = strchr( file, '?' );
311 while ( mrl )
312 {
313 mrl[0] = 0;
314 char *name = strdup( ++mrl );
315 char *value = strchr( name, ':' );
316 if ( value )
317 {
318 value[0] = 0;
319 value++;
320 char *t = strchr( value, '&' );
321 if ( t )
322 t[0] = 0;
323 if ( !strcmp( name, "frame_rate" ) )
324 params->time_base.den = atoi( value );
325 else if ( !strcmp( name, "frame_rate_base" ) )
326 params->time_base.num = atoi( value );
327 else if ( !strcmp( name, "sample_rate" ) )
328 params->sample_rate = atoi( value );
329 else if ( !strcmp( name, "channels" ) )
330 params->channels = atoi( value );
331 else if ( !strcmp( name, "width" ) )
332 params->width = atoi( value );
333 else if ( !strcmp( name, "height" ) )
334 params->height = atoi( value );
335 else if ( !strcmp( name, "standard" ) )
336 {
337 standard = strdup( value );
338 params->standard = standard;
339 }
340 else if ( !strcmp( name, "av" ) )
341 av = atoi( value );
342 }
343 free( name );
344 mrl = strchr( mrl, '&' );
345 }
346 }
347
348 // Now attempt to open the file
349 error = av_open_input_file( &context, file, format, 0, params ) < 0;
350
351 // Cleanup AVFormatParameters
352 free( standard );
353 free( params );
354
355 // If successful, then try to get additional info
356 if ( error == 0 )
357 {
358 // Get the stream info
359 error = av_find_stream_info( context ) < 0;
360
361 // Continue if no error
362 if ( error == 0 )
363 {
364 // We will default to the first audio and video streams found
365 int audio_index = -1;
366 int video_index = -1;
367 int av_bypass = 0;
368
369 // Now set properties where we can (use default unknowns if required)
370 if ( context->duration != AV_NOPTS_VALUE )
371 {
372 // This isn't going to be accurate for all formats
373 mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
374 mlt_properties_set_position( properties, "out", frames - 1 );
375 mlt_properties_set_position( properties, "length", frames );
376 }
377
378 // Find default audio and video streams
379 find_default_streams( context, &audio_index, &video_index );
380
381 if ( context->start_time != AV_NOPTS_VALUE )
382 mlt_properties_set_double( properties, "_start_time", context->start_time );
383
384 // Check if we're seekable (something funny about mpeg here :-/)
385 if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 7 ) && strncmp( file, "udp:", 4 ) && strncmp( file, "tcp:", 4 ) && strncmp( file, "rtsp:", 5 ) && strncmp( file, "rtp:", 4 ) )
386 {
387 mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
388 mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
389 av_open_input_file( &context, file, NULL, 0, NULL );
390 av_find_stream_info( context );
391 }
392 else
393 av_bypass = 1;
394
395 // Store selected audio and video indexes on properties
396 mlt_properties_set_int( properties, "_audio_index", audio_index );
397 mlt_properties_set_int( properties, "_video_index", video_index );
398 mlt_properties_set_int( properties, "_last_position", -1 );
399
400 // Fetch the width, height and aspect ratio
401 if ( video_index != -1 )
402 {
403 AVCodecContext *codec_context = context->streams[ video_index ]->codec;
404 mlt_properties_set_int( properties, "width", codec_context->width );
405 mlt_properties_set_int( properties, "height", codec_context->height );
406
407 if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
408 {
409 // Fetch the first frame of DV so we can read it directly
410 AVPacket pkt;
411 int ret = 0;
412 while ( ret >= 0 )
413 {
414 ret = av_read_frame( context, &pkt );
415 if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
416 {
417 mlt_properties_set_double( properties, "aspect_ratio",
418 get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
419 break;
420 }
421 }
422 }
423 else
424 {
425 mlt_properties_set_double( properties, "aspect_ratio",
426 get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
427 }
428 }
429
430 // Read Metadata
431 if (context->title != NULL)
432 mlt_properties_set(properties, "meta.attr.title.markup", context->title );
433 if (context->author != NULL)
434 mlt_properties_set(properties, "meta.attr.author.markup", context->author );
435 if (context->copyright != NULL)
436 mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
437 if (context->comment != NULL)
438 mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
439 if (context->album != NULL)
440 mlt_properties_set(properties, "meta.attr.album.markup", context->album );
441 if (context->year != 0)
442 mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
443 if (context->track != 0)
444 mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
445
446 // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
447 if ( av == 0 && audio_index != -1 && video_index != -1 )
448 {
449 // We'll use the open one as our video_context
450 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
451
452 // And open again for our audio context
453 av_open_input_file( &context, file, NULL, 0, NULL );
454 av_find_stream_info( context );
455
456 // Audio context
457 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
458 }
459 else if ( av != 2 && video_index != -1 )
460 {
461 // We only have a video context
462 mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
463 }
464 else if ( audio_index != -1 )
465 {
466 // We only have an audio context
467 mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
468 }
469 else
470 {
471 // Something has gone wrong
472 error = -1;
473 }
474
475 mlt_properties_set_int( properties, "av_bypass", av_bypass );
476 }
477 }
478
479 // Unlock the mutex now
480 avformat_unlock( );
481
482 return error;
483 }
484
485 /** Convert a frame position to a time code.
486 */
487
488 static double producer_time_of_frame( mlt_producer this, mlt_position position )
489 {
490 return ( double )position / mlt_producer_get_fps( this );
491 }
492
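/** Convert a decoded AVFrame to the requested mlt_image_format, via swscale when available
    and img_convert otherwise.
*/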
493 static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
494 {
495 #ifdef SWSCALE
496 if ( format == mlt_image_yuv420p )
497 {
498 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
499 width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
500 AVPicture output;
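  // Lay out the three 4:2:0 planes contiguously in the caller's buffer: Y, then U, then V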
501 output.data[0] = buffer;
502 output.data[1] = buffer + width * height;
503 output.data[2] = buffer + ( 3 * width * height ) / 2;
504 output.linesize[0] = width;
505 output.linesize[1] = width >> 1;
506 output.linesize[2] = width >> 1;
507 sws_scale( context, frame->data, frame->linesize, 0, height,
508 output.data, output.linesize);
509 sws_freeContext( context );
510 }
511 else if ( format == mlt_image_rgb24 )
512 {
513 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
514 width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
515 AVPicture output;
516 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
517 sws_scale( context, frame->data, frame->linesize, 0, height,
518 output.data, output.linesize);
519 sws_freeContext( context );
520 }
521 else
522 {
523 struct SwsContext *context = sws_getContext( width, height, pix_fmt,
524 width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
525 AVPicture output;
526 avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
527 sws_scale( context, frame->data, frame->linesize, 0, height,
528 output.data, output.linesize);
529 sws_freeContext( context );
530 }
531 #else
532 if ( format == mlt_image_yuv420p )
533 {
534 AVPicture pict;
535 pict.data[0] = buffer;
536 pict.data[1] = buffer + width * height;
537 pict.data[2] = buffer + ( 3 * width * height ) / 2;
538 pict.linesize[0] = width;
539 pict.linesize[1] = width >> 1;
540 pict.linesize[2] = width >> 1;
541 img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
542 }
543 else if ( format == mlt_image_rgb24 )
544 {
545 AVPicture output;
546 avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
547 img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
548 }
549 else
550 {
551 AVPicture output;
552 avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height );
553 img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height );
554 }
555 #endif
556 }
557
558 /** Allocate the image buffer and set it on the frame.
559 */
560
561 static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *codec_context, uint8_t **buffer, mlt_image_format *format, int *width, int *height )
562 {
563 int size = 0;
564
565 if ( codec_context->width == 0 || codec_context->height == 0 )
566 return size;
567
568 *width = codec_context->width;
569 *height = codec_context->height;
570 mlt_properties_set_int( frame_properties, "width", *width );
571 mlt_properties_set_int( frame_properties, "height", *height );
572
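  // Note: each size below reserves roughly one extra line beyond width * height,
  // presumably as headroom for the image conversion step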
573 switch ( *format )
574 {
575 case mlt_image_yuv420p:
576 size = *width * 3 * ( *height + 1 ) / 2;
577 break;
578 case mlt_image_rgb24:
579 size = *width * ( *height + 1 ) * 3;
580 break;
581 default:
582 *format = mlt_image_yuv422;
583 size = *width * ( *height + 1 ) * 2;
584 break;
585 }
586
587 // Construct the output image
588 *buffer = mlt_pool_alloc( size );
589 if ( *buffer )
590 mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
591 else
592 size = 0;
593
594 return size;
595 }
596
597 /** Get an image from a frame.
598 */
599
600 static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
601 {
602 // Get the properties from the frame
603 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
604
605 // Obtain the frame number of this frame
606 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
607
608 // Get the producer
609 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
610
611 // Get the producer properties
612 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
613
614 // Fetch the video_context
615 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
616
617 // Get the video_index
618 int index = mlt_properties_get_int( properties, "video_index" );
619
620 // Obtain the expected frame number
621 mlt_position expected = mlt_properties_get_position( properties, "_video_expected" );
622
623 // Get the video stream
624 AVStream *stream = context->streams[ index ];
625
626 // Get codec context
627 AVCodecContext *codec_context = stream->codec;
628
629 // Packet
630 AVPacket pkt;
631
632 // Get the conversion frame
633 AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL );
634
635 // Special case pause handling flag
636 int paused = 0;
637
638 // Special case ffwd handling
639 int ignore = 0;
640
641 // We may want to use the source fps if available
642 double source_fps = mlt_properties_get_double( properties, "source_fps" );
643 double fps = mlt_producer_get_fps( this );
644
645 // This is the physical frame position in the source
646 int req_position = ( int )( position / fps * source_fps + 0.5 );
647
648 // Get the seekable status
649 int seekable = mlt_properties_get_int( properties, "seekable" );
650
651 // Hopefully provide better support for streams...
652 int av_bypass = mlt_properties_get_int( properties, "av_bypass" );
653
654 // Determines if we have to decode all frames in a sequence
655 int must_decode = 1;
656
657 // Temporary hack to improve intra frame only
658 must_decode = strcmp( codec_context->codec->name, "mjpeg" ) &&
659 strcmp( codec_context->codec->name, "rawvideo" ) &&
660 strcmp( codec_context->codec->name, "dvvideo" );
661
662 // Seek if necessary
663 if ( position != expected )
664 {
665 if ( av_frame != NULL && position + 1 == expected )
666 {
667 // We're paused - use last image
668 paused = 1;
669 }
670 else if ( !seekable && position > expected && ( position - expected ) < 250 )
671 {
672 // Fast forward - seeking is inefficient for small distances - just ignore following frames
673 ignore = ( int )( ( position - expected ) / fps * source_fps );
674 }
675 else if ( seekable && ( position < expected || position - expected >= 12 ) )
676 {
677 // Calculate the timestamp for the requested frame
678 int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
679 if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
680 timestamp += context->start_time;
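    // For codecs that must decode from a keyframe, seek one second early so the
    // decoder can catch up to the requested frame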
681 if ( must_decode )
682 timestamp -= AV_TIME_BASE;
683 if ( timestamp < 0 )
684 timestamp = 0;
685
686 // Set to the timestamp
687 av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
688
689 // Remove the cached info relating to the previous position
690 mlt_properties_set_int( properties, "_current_position", -1 );
691 mlt_properties_set_int( properties, "_last_position", -1 );
692 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
693 av_frame = NULL;
694 }
695 }
696
697 // Duplicate the last image if necessary (see comment on rawvideo below)
698 int current_position = mlt_properties_get_int( properties, "_current_position" );
699 int got_picture = mlt_properties_get_int( properties, "_got_picture" );
700 if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 )
701 {
702 // Duplicate it
703 if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
704 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
705 else
706 mlt_frame_get_image( frame, buffer, format, width, height, writable );
707 }
708 else
709 {
710 int ret = 0;
711 int int_position = 0;
712 got_picture = 0;
713
714 av_init_packet( &pkt );
715
716 // Construct an AVFrame for YUV422 conversion
717 if ( av_frame == NULL )
718 av_frame = avcodec_alloc_frame( );
719
720 while( ret >= 0 && !got_picture )
721 {
722 // Read a packet
723 ret = av_read_frame( context, &pkt );
724
725 // We only deal with video from the selected video_index
726 if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 )
727 {
728 // Determine time code of the packet
729 int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
730 if ( context->start_time != AV_NOPTS_VALUE )
731 int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
732 int last_position = mlt_properties_get_int( properties, "_last_position" );
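      // If this packet maps to the same position as the previous one, treat it as the next frame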
733 if ( int_position == last_position )
734 int_position = last_position + 1;
735 mlt_properties_set_int( properties, "_last_position", int_position );
736
737 // Decode the image
738 if ( must_decode || int_position >= req_position )
739 ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size );
740
741 if ( got_picture )
742 {
743 // Handle ignore
744 if ( int_position < req_position )
745 {
746 ignore = 0;
747 got_picture = 0;
748 }
749 else if ( int_position >= req_position )
750 {
751 ignore = 0;
752 }
753 else if ( ignore -- )
754 {
755 got_picture = 0;
756 }
757 }
758 av_free_packet( &pkt );
759 }
760 else if ( ret >= 0 )
761 {
762 av_free_packet( &pkt );
763 }
764
765 // Now handle the picture if we have one
766 if ( got_picture )
767 {
768 if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
769 {
770 convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height );
771 mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
772 mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
773 mlt_properties_set_int( properties, "_current_position", int_position );
774 mlt_properties_set_int( properties, "_got_picture", 1 );
775 mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
776 }
777 else
778 {
779 got_picture = 0;
780 }
781 }
782 }
783 if ( !got_picture )
784 mlt_frame_get_image( frame, buffer, format, width, height, writable );
785 }
786
787 // Very untidy - for rawvideo, the packet contains the frame, hence the free packet
788 // above will break the pause behaviour - so we wipe the frame now
789 if ( !strcmp( codec_context->codec->name, "rawvideo" ) )
790 mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
791
792 // Set the field order property for this frame
793 mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) );
794
795 // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
796 mlt_properties_set_position( properties, "_video_expected", position + 1 );
797
798 return 0;
799 }
800
801 /** Process properties as AVOptions and apply to AV context obj
802 */
803
804 static void apply_properties( void *obj, mlt_properties properties, int flags )
805 {
806 int i;
807 int count = mlt_properties_count( properties );
808 for ( i = 0; i < count; i++ )
809 {
810 const char *opt_name = mlt_properties_get_name( properties, i );
811 const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
812 if ( opt != NULL )
813 #if LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
814 av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
815 #else
816 av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
817 #endif
818 }
819 }
820
821 /** Set up video handling.
822 */
823
824 static void producer_set_up_video( mlt_producer this, mlt_frame frame )
825 {
826 // Get the properties
827 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
828
829 // Fetch the video_context
830 AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );
831
832 // Get the video_index
833 int index = mlt_properties_get_int( properties, "video_index" );
834
835 // Reopen the file if necessary
836 if ( !context && index > -1 )
837 {
838 mlt_events_block( properties, this );
839 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
840 mlt_properties_get( properties, "resource" ) );
841 context = mlt_properties_get_data( properties, "video_context", NULL );
842 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
843 mlt_events_unblock( properties, this );
844
845 // Process properties as AVOptions
846 apply_properties( context, properties, AV_OPT_FLAG_DECODING_PARAM );
847 }
848
849 // Exception handling for video_index
850 if ( context && index >= (int) context->nb_streams )
851 {
852 // Get the last video stream
853 for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO; --index );
854 mlt_properties_set_int( properties, "video_index", index );
855 }
856 if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO )
857 {
858 // Invalidate the video stream
859 index = -1;
860 mlt_properties_set_int( properties, "video_index", index );
861 }
862
863 // Get the frame properties
864 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
865
866 if ( context && index > -1 )
867 {
868 // Get the video stream
869 AVStream *stream = context->streams[ index ];
870
871 // Get codec context
872 AVCodecContext *codec_context = stream->codec;
873
874 // Get the codec
875 AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );
876
877 // Update the video properties if the index changed
878 if ( index != mlt_properties_get_int( properties, "_video_index" ) )
879 {
880 // Reset the video properties if the index changed
881 mlt_properties_set_int( properties, "_video_index", index );
882 mlt_properties_set_data( properties, "video_codec", NULL, 0, NULL, NULL );
883 mlt_properties_set_int( properties, "width", codec_context->width );
884 mlt_properties_set_int( properties, "height", codec_context->height );
885 // TODO: get the first usable AVPacket and reset the stream position
886 mlt_properties_set_double( properties, "aspect_ratio",
887 get_aspect_ratio( context->streams[ index ], codec_context, NULL ) );
888 codec = NULL;
889 }
890
891 // Initialise the codec if necessary
892 if ( codec == NULL )
893 {
894 // Initialise multi-threading
895 int thread_count = mlt_properties_get_int( properties, "threads" );
896 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
897 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
898 if ( thread_count > 1 )
899 {
900 avcodec_thread_init( codec_context, thread_count );
901 codec_context->thread_count = thread_count;
902 }
903
904 // Find the codec
905 codec = avcodec_find_decoder( codec_context->codec_id );
906
907 // If we don't have a codec and we can't initialise it, we can't do much more...
908 avformat_lock( );
909 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
910 {
911 // Now store the codec with its destructor
912 mlt_properties_set_data( properties, "video_codec", codec_context, 0, producer_codec_close, NULL );
913 }
914 else
915 {
916 // Remember that we can't use this later
917 mlt_properties_set_int( properties, "video_index", -1 );
918 index = -1;
919 }
920 avformat_unlock( );
921
922 // Process properties as AVOptions
923 apply_properties( codec_context, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
924 }
925
926 // No codec, no show...
927 if ( codec && index > -1 )
928 {
929 double source_fps = 0;
930 double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
931 double aspect_ratio = ( force_aspect_ratio > 0.0 ) ?
932 force_aspect_ratio : mlt_properties_get_double( properties, "aspect_ratio" );
933
934 // Determine the fps
935 source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );
936
937 // We'll use fps if it's available
938 if ( source_fps > 0 )
939 mlt_properties_set_double( properties, "source_fps", source_fps );
940 else
941 mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this ) );
942 mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
943
944 // Set the width and height
945 mlt_properties_set_int( frame_properties, "width", codec_context->width );
946 mlt_properties_set_int( frame_properties, "height", codec_context->height );
947 mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
948
949 mlt_frame_push_get_image( frame, producer_get_image );
950 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
951 }
952 else
953 {
954 mlt_properties_set_int( frame_properties, "test_image", 1 );
955 }
956 }
957 else
958 {
959 mlt_properties_set_int( frame_properties, "test_image", 1 );
960 }
961 }
962
963 /** Get the audio from a frame.
964 */
965
966 static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
967 {
968 // Get the properties from the frame
969 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
970
971 // Obtain the frame number of this frame
972 mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
973
974 // Get the producer
975 mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );
976
977 // Get the producer properties
978 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
979
980 // Fetch the audio_context
981 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
982
983 // Get the audio_index
984 int index = mlt_properties_get_int( properties, "audio_index" );
985
986 // Get the seekable status
987 int seekable = mlt_properties_get_int( properties, "seekable" );
988
989 // Obtain the expected frame number
990 mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" );
991
992 // Obtain the resample context if it exists (not always needed)
993 ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
994
995 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
996 // Get the format converter context if it exists
997 AVAudioConvert *convert = mlt_properties_get_data( properties, "audio_convert", NULL );
998 #endif
999
1000 // Obtain the audio buffers
1001 int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
1002 int16_t *decode_buffer = mlt_properties_get_data( properties, "decode_buffer", NULL );
1003 int16_t *convert_buffer = mlt_properties_get_data( properties, "convert_buffer", NULL );
1004
1005 // Get amount of audio used
1006 int audio_used = mlt_properties_get_int( properties, "_audio_used" );
1007
1008 // Calculate the real time code
1009 double real_timecode = producer_time_of_frame( this, position );
1010
1011 // Get the audio stream
1012 AVStream *stream = context->streams[ index ];
1013
1014 // Get codec context
1015 AVCodecContext *codec_context = stream->codec;
1016
1017 // Packet
1018 AVPacket pkt;
1019
1020 // Number of frames to ignore (for ffwd)
1021 int ignore = 0;
1022
1023 // Flag for paused (silence)
1024 int paused = 0;
1025
1026 // Check for resample and create if necessary
1027 if ( resample == NULL && codec_context->channels <= 2 )
1028 {
1029 // Create the resampler
1030 resample = audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate );
1031
1032 // And store it on properties
1033 mlt_properties_set_data( properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );
1034 }
1035 else if ( resample == NULL )
1036 {
1037 *channels = codec_context->channels;
1038 *frequency = codec_context->sample_rate;
1039 }
1040
1041 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1042 // Check for audio format converter and create if necessary
1043 // TODO: support higher resolutions than 16-bit.
1044 if ( convert == NULL && codec_context->sample_fmt != SAMPLE_FMT_S16 )
1045 {
1046 // Create single channel converter for interleaved with no mixing matrix
1047 convert = av_audio_convert_alloc( SAMPLE_FMT_S16, 1, codec_context->sample_fmt, 1, NULL, 0 );
1048 mlt_properties_set_data( properties, "audio_convert", convert, 0, ( mlt_destructor )av_audio_convert_free, NULL );
1049 }
1050 #endif
1051
1052 // Check for audio buffer and create if necessary
1053 if ( audio_buffer == NULL )
1054 {
1055 // Allocate the audio buffer
1056 audio_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1057
1058 // And store it on properties for reuse
1059 mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1060 }
1061
1062 // Check for decoder buffer and create if necessary
1063 if ( decode_buffer == NULL )
1064 {
1065 // Allocate the audio buffer
1066 decode_buffer = av_malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1067
1068 // And store it on properties for reuse
1069 mlt_properties_set_data( properties, "decode_buffer", decode_buffer, 0, ( mlt_destructor )av_free, NULL );
1070 }
1071
1072 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1073 // Check for format converter buffer and create if necessary
1074 if ( resample && convert && convert_buffer == NULL )
1075 {
1076 // Allocate the audio buffer
1077 convert_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1078
1079 // And store it on properties for reuse
1080 mlt_properties_set_data( properties, "convert_buffer", convert_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1081 }
1082 #endif
1083
1084 // Seek if necessary
1085 if ( position != expected )
1086 {
1087 if ( position + 1 == expected )
1088 {
1089 // We're paused - silence required
1090 paused = 1;
1091 }
1092 else if ( !seekable && position > expected && ( position - expected ) < 250 )
1093 {
1094 // Fast forward - seeking is inefficient for small distances - just ignore following frames
1095 ignore = position - expected;
1096 }
1097 else if ( position < expected || position - expected >= 12 )
1098 {
1099 // Set to the real timecode
1100 if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 )
1101 paused = 1;
1102
1103 // Clear the usage in the audio buffer
1104 audio_used = 0;
1105 }
1106 }
1107
1108 // Get the audio if required
1109 if ( !paused )
1110 {
1111 int ret = 0;
1112 int got_audio = 0;
1113
1114 av_init_packet( &pkt );
1115
1116 while( ret >= 0 && !got_audio )
1117 {
1118 // Check if the buffer already contains the samples required
1119 if ( audio_used >= *samples && ignore == 0 )
1120 {
1121 got_audio = 1;
1122 break;
1123 }
1124
1125 // Read a packet
1126 ret = av_read_frame( context, &pkt );
1127
1128 int len = pkt.size;
1129 uint8_t *ptr = pkt.data;
1130
1131 // We only deal with audio from the selected audio_index
1132 while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 )
1133 {
1134 int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE;
1135
1136 // Decode the audio
1137 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
1138 ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
1139 #else
1140 ret = avcodec_decode_audio( codec_context, decode_buffer, &data_size, ptr, len );
1141 #endif
1142 if ( ret < 0 )
1143 {
1144 ret = 0;
1145 break;
1146 }
1147
1148 len -= ret;
1149 ptr += ret;
1150
1151 if ( data_size > 0 )
1152 {
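        // Bytes per sample of the decoder output format and of the 16-bit target format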
1153 int src_stride[6]= { av_get_bits_per_sample_format( codec_context->sample_fmt ) / 8 };
1154 int dst_stride[6]= { av_get_bits_per_sample_format( SAMPLE_FMT_S16 ) / 8 };
1155
1156 if ( resample )
1157 {
1158 int16_t *source = decode_buffer;
1159 int16_t *dest = &audio_buffer[ audio_used * *channels ];
1160 int convert_samples = data_size / src_stride[0];
1161
1162 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1163 if ( convert )
1164 {
1165 const void *src_buf[6] = { decode_buffer };
1166 void *dst_buf[6] = { convert_buffer };
1167 av_audio_convert( convert, dst_buf, dst_stride, src_buf, src_stride, convert_samples );
1168 source = convert_buffer;
1169 }
1170 #endif
1171 audio_used += audio_resample( resample, dest, source, convert_samples / codec_context->channels );
1172 }
1173 else
1174 {
1175 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
1176 if ( convert )
1177 {
1178 const void *src_buf[6] = { decode_buffer };
1179 void *dst_buf[6] = { &audio_buffer[ audio_used * *channels ] };
1180 av_audio_convert( convert, dst_buf, dst_stride, src_buf, src_stride, data_size / src_stride[0] );
1181 }
1182 else
1183 #endif
1184 {
1185 memcpy( &audio_buffer[ audio_used * *channels ], decode_buffer, data_size );
1186 }
1187 audio_used += data_size / *channels / src_stride[0];
1188 }
1189
1190 // Handle ignore
1191 while ( ignore && audio_used > *samples )
1192 {
1193 ignore --;
1194 audio_used -= *samples;
1195 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
1196 }
1197 }
1198
1199 // If we're behind, ignore this packet
1200 if ( pkt.pts >= 0 )
1201 {
1202 float current_pts = av_q2d( stream->time_base ) * pkt.pts;
1203 if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) )
1204 ignore = 1;
1205 }
1206 }
1207
1208 // We're finished with this packet regardless
1209 av_free_packet( &pkt );
1210 }
1211
1212 *buffer = mlt_pool_alloc( *samples * *channels * sizeof( int16_t ) );
1213 mlt_properties_set_data( frame_properties, "audio", *buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1214
1215 // Now handle the audio if we have enough
1216 if ( audio_used >= *samples )
1217 {
1218 memcpy( *buffer, audio_buffer, *samples * *channels * sizeof( int16_t ) );
1219 audio_used -= *samples;
1220 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
1221 }
1222 else
1223 {
1224 memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );
1225 }
1226
1227 // Store the number of audio samples still available
1228 mlt_properties_set_int( properties, "_audio_used", audio_used );
1229 }
1230 else
1231 {
1232 // Get silence and don't touch the context
1233 mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
1234 }
1235
1236 // Regardless of speed (other than paused), we expect to get the next frame
1237 if ( !paused )
1238 mlt_properties_set_position( properties, "_audio_expected", position + 1 );
1239
1240 return 0;
1241 }
1242
1243 /** Set up audio handling.
1244 */
1245
1246 static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
1247 {
1248 // Get the properties
1249 mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
1250
1251 // Fetch the audio_context
1252 AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
1253
1254 // Get the audio_index
1255 int index = mlt_properties_get_int( properties, "audio_index" );
1256
1257 // Reopen the file if necessary
1258 if ( !context && index > -1 )
1259 {
1260 mlt_events_block( properties, this );
1261 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
1262 mlt_properties_get( properties, "resource" ) );
1263 context = mlt_properties_get_data( properties, "audio_context", NULL );
1264 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
1265 mlt_events_unblock( properties, this );
1266 }
1267
1268 // Exception handling for audio_index
1269 if ( context && index >= (int) context->nb_streams )
1270 {
1271 for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO; --index );
1272 mlt_properties_set_int( properties, "audio_index", index );
1273 }
1274 if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO )
1275 {
1276 index = -1;
1277 mlt_properties_set_int( properties, "audio_index", index );
1278 }
1279
1280 // Update the audio properties if the index changed
1281 if ( index > -1 && index != mlt_properties_get_int( properties, "_audio_index" ) )
1282 {
1283 mlt_properties_set_int( properties, "_audio_index", index );
1284 mlt_properties_set_data( properties, "audio_codec", NULL, 0, NULL, NULL );
1285 }
1286
1287 // Deal with audio context
1288 if ( context != NULL && index > -1 )
1289 {
1290 // Get the frame properties
1291 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
1292
1293 // Get the audio stream
1294 AVStream *stream = context->streams[ index ];
1295
1296 // Get codec context
1297 AVCodecContext *codec_context = stream->codec;
1298
1299 // Get the codec
1300 AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL );
1301
1302 // Initialise the codec if necessary
1303 if ( codec == NULL )
1304 {
1305 // Find the codec
1306 codec = avcodec_find_decoder( codec_context->codec_id );
1307
1308 // If we don't have a codec and we can't initialise it, we can't do much more...
1309 avformat_lock( );
1310 if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
1311 {
1312 // Now store the codec with its destructor
1313 mlt_properties_set_data( properties, "audio_codec", codec_context, 0, producer_codec_close, NULL );
1314
1315 }
1316 else
1317 {
1318 // Remember that we can't use this later
1319 mlt_properties_set_int( properties, "audio_index", -1 );
1320 index = -1;
1321 }
1322 avformat_unlock( );
1323
1324 // Process properties as AVOptions
1325 apply_properties( codec_context, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
1326 }
1327
1328 // No codec, no show...
1329 if ( codec && index > -1 )
1330 {
1331 mlt_frame_push_audio( frame, producer_get_audio );
1332 mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
1333 mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
1334 mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
1335 }
1336 }
1337 }
1338
1339 /** Our get frame implementation.
1340 */
1341
1342 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index )
1343 {
1344 // Create an empty frame
1345 *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( this ) );
1346
1347 // Update timecode on the frame we're creating
1348 mlt_frame_set_position( *frame, mlt_producer_position( this ) );
1349
1350 // Set the position of this producer
1351 mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );
1352
1353 // Set up the video
1354 producer_set_up_video( this, *frame );
1355
1356 // Set up the audio
1357 producer_set_up_audio( this, *frame );
1358
1359 // Set the aspect_ratio
1360 mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );
1361
1362 // Calculate the next timecode
1363 mlt_producer_prepare_next( this );
1364
1365 return 0;
1366 }