avformat/configure: fix compiling against shared ffmpeg due to changes in ffmpeg...
[melted] / src / modules / avformat / consumer_avformat.c
1 /*
2 * consumer_avformat.c -- an encoder based on avformat
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 // mlt Header files
23 #include <framework/mlt_consumer.h>
24 #include <framework/mlt_frame.h>
25
26 // System header files
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <limits.h>
31 #include <pthread.h>
32 #include <sys/time.h>
33 #include <math.h>
34
35 // avformat header files
36 #include <avformat.h>
37 #ifdef SWSCALE
38 #include <swscale.h>
39 #endif
40 #include <opt.h>
41
42 //
43 // This structure should be extended and made globally available in mlt
44 //
45
46 typedef struct
47 {
48 int16_t *buffer;
49 int size;
50 int used;
51 double time;
52 int frequency;
53 int channels;
54 }
55 *sample_fifo, sample_fifo_s;
56
57 sample_fifo sample_fifo_init( int frequency, int channels )
58 {
59 sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
60 this->frequency = frequency;
61 this->channels = channels;
62 return this;
63 }
64
65 // sample_fifo_clear and sample_fifo_check are temporarily disabled (not working as intended)
66
67 void sample_fifo_clear( sample_fifo this, double time )
68 {
69 int words = ( float )( time - this->time ) * this->frequency * this->channels;
70 if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 )
71 {
72 memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) );
73 this->used -= words;
74 this->time = time;
75 }
76 else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) )
77 {
78 this->used = 0;
79 this->time = time;
80 }
81 }
82
83 void sample_fifo_check( sample_fifo this, double time )
84 {
85 if ( this->used == 0 )
86 {
87 if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) )
88 this->time = time;
89 }
90 }
91
92 void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
93 {
94 if ( ( this->size - this->used ) < count )
95 {
96 this->size += count * 5;
97 this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
98 }
99
100 memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
101 this->used += count;
102 }
103
104 int sample_fifo_used( sample_fifo this )
105 {
106 return this->used;
107 }
108
109 int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
110 {
111 if ( count > this->used )
112 count = this->used;
113
114 memcpy( samples, this->buffer, count * sizeof( int16_t ) );
115 this->used -= count;
116 memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );
117
118 this->time += ( double )count / this->channels / this->frequency;
119
120 return count;
121 }
122
123 void sample_fifo_close( sample_fifo this )
124 {
125 free( this->buffer );
126 free( this );
127 }
128
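//
// Illustrative sketch only (kept out of the build with #if 0): how the fifo
// above is driven by the consumer thread further down - samples are appended
// per frame, then fetched back in encoder-sized blocks. All figures below
// (48 kHz, stereo, 1152-sample frames) are hypothetical.
//
#if 0
static void sample_fifo_example( int16_t *pcm, int samples )
{
	// Hypothetical figures: 48 kHz stereo, 1152-sample encoder frames
	int channels = 2;
	sample_fifo fifo = sample_fifo_init( 48000, channels );
	int16_t block[ 1152 * 2 ];

	// Append whatever one mlt_frame delivered...
	sample_fifo_append( fifo, pcm, samples * channels );

	// ...then drain complete encoder-sized blocks
	while ( sample_fifo_used( fifo ) >= 1152 * channels )
		sample_fifo_fetch( fifo, block, 1152 * channels );

	sample_fifo_close( fifo );
}
#endif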
129 // Forward references.
130 static int consumer_start( mlt_consumer this );
131 static int consumer_stop( mlt_consumer this );
132 static int consumer_is_stopped( mlt_consumer this );
133 static void *consumer_thread( void *arg );
134 static void consumer_close( mlt_consumer this );
135
136 /** Initialise the avformat consumer.
137 */
138
139 mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
140 {
141 // Allocate the consumer
142 mlt_consumer this = mlt_consumer_new( profile );
143
144 // If memory was allocated and initialised without error
145 if ( this != NULL )
146 {
147 // Get properties from the consumer
148 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
149
150 // Assign close callback
151 this->close = consumer_close;
152
153 // Interpret the argument
154 if ( arg != NULL )
155 mlt_properties_set( properties, "target", arg );
156
157 // sample and frame queue
158 mlt_properties_set_data( properties, "frame_queue", mlt_deque_init( ), 0, ( mlt_destructor )mlt_deque_close, NULL );
159
160 // Audio options not fully handled by AVOptions
161 #define QSCALE_NONE (-99999)
162 mlt_properties_set_int( properties, "aq", QSCALE_NONE );
163
164 // Video options not fully handled by AVOptions
165 mlt_properties_set_int( properties, "dc", 8 );
166
167 // Muxer options not fully handled by AVOptions
168 mlt_properties_set_double( properties, "muxdelay", 0.7 );
169 mlt_properties_set_double( properties, "muxpreload", 0.5 );
170
171 // Ensure termination at end of the stream
172 mlt_properties_set_int( properties, "terminate_on_pause", 1 );
173
174 // Default to separate processing threads for producer and consumer with no frame dropping!
175 mlt_properties_set_int( properties, "real_time", -1 );
176
177 // Set up start/stop/terminated callbacks
178 this->start = consumer_start;
179 this->stop = consumer_stop;
180 this->is_stopped = consumer_is_stopped;
181 }
182
183 // Return this
184 return this;
185 }
186
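// Illustrative usage (names and values are hypothetical): the consumer is
// normally created through the MLT factory, e.g. something like
//   melt clip.dv -consumer avformat:out.mpg vcodec=mpeg2video acodec=mp2
// where each name=value pair becomes a consumer property; codec and muxer
// options are forwarded to libavcodec/libavformat by apply_properties() below.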
187 /** Start the consumer.
188 */
189
190 static int consumer_start( mlt_consumer this )
191 {
192 // Get the properties
193 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
194
195 // Check that we're not already running
196 if ( !mlt_properties_get_int( properties, "running" ) )
197 {
198 // Allocate a thread
199 pthread_t *thread = calloc( 1, sizeof( pthread_t ) );
200
201 // Get the width and height
202 int width = mlt_properties_get_int( properties, "width" );
203 int height = mlt_properties_get_int( properties, "height" );
204
205 // Obtain the size property
206 char *size = mlt_properties_get( properties, "s" );
207
208 // Interpret it
209 if ( size != NULL )
210 {
211 int tw, th;
212 if ( sscanf( size, "%dx%d", &tw, &th ) == 2 && tw > 0 && th > 0 )
213 {
214 width = tw;
215 height = th;
216 }
217 else
218 {
219 fprintf( stderr, "%s: Invalid size property %s - ignoring.\n", __FILE__, size );
220 }
221 }
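// e.g. (hypothetical) s=720x576 overrides the profile dimensions; odd values
// are rounded down to even just below, as libavformat requires.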
222
223 // Now ensure we honour the multiple of two requested by libavformat
224 mlt_properties_set_int( properties, "width", ( width / 2 ) * 2 );
225 mlt_properties_set_int( properties, "height", ( height / 2 ) * 2 );
226
227 // Apply AVOptions that are synonyms for standard mlt_consumer options
228 if ( mlt_properties_get( properties, "ac" ) )
229 mlt_properties_set_int( properties, "channels", mlt_properties_get_int( properties, "ac" ) );
230 if ( mlt_properties_get( properties, "ar" ) )
231 mlt_properties_set_int( properties, "frequency", mlt_properties_get_int( properties, "ar" ) );
232
233 // Assign the thread to properties
234 mlt_properties_set_data( properties, "thread", thread, sizeof( pthread_t ), free, NULL );
235
236 // Set the running state
237 mlt_properties_set_int( properties, "running", 1 );
238
239 // Create the thread
240 pthread_create( thread, NULL, consumer_thread, this );
241 }
242 return 0;
243 }
244
245 /** Stop the consumer.
246 */
247
248 static int consumer_stop( mlt_consumer this )
249 {
250 // Get the properties
251 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
252
253 // Check that we're running
254 if ( mlt_properties_get_int( properties, "running" ) )
255 {
256 // Get the thread
257 pthread_t *thread = mlt_properties_get_data( properties, "thread", NULL );
258
259 // Stop the thread
260 mlt_properties_set_int( properties, "running", 0 );
261
262 // Wait for termination
263 pthread_join( *thread, NULL );
264 }
265
266 return 0;
267 }
268
269 /** Determine if the consumer is stopped.
270 */
271
272 static int consumer_is_stopped( mlt_consumer this )
273 {
274 // Get the properties
275 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
276 return !mlt_properties_get_int( properties, "running" );
277 }
278
279 /** Process properties as AVOptions and apply them to the AV context object
280 */
281
282 static void apply_properties( void *obj, mlt_properties properties, int flags )
283 {
284 int i;
285 int count = mlt_properties_count( properties );
286 for ( i = 0; i < count; i++ )
287 {
288 const char *opt_name = mlt_properties_get_name( properties, i );
289 const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
290 if ( opt != NULL )
291 av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
292 }
293 }
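// For example (hypothetical values): a consumer property such as b=2000000 or
// g=25 is matched by av_find_opt() against the AVOptions of the passed context
// and applied with av_set_string(), so most libavcodec/libavformat encoding
// parameters can be passed straight through as consumer properties.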
294
295 /** Add an audio output stream
296 */
297
298 static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
299 {
300 // Get the properties
301 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
302
303 // Create a new stream
304 AVStream *st = av_new_stream( oc, 1 );
305
306 // If created, then initialise from properties
307 if ( st != NULL )
308 {
309 AVCodecContext *c = st->codec;
310
311 // Establish defaults from AVOptions
312 avcodec_get_context_defaults2( c, CODEC_TYPE_AUDIO );
313
314 c->codec_id = codec_id;
315 c->codec_type = CODEC_TYPE_AUDIO;
316
317 // Setup multi-threading
318 int thread_count = mlt_properties_get_int( properties, "threads" );
319 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
320 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
321 if ( thread_count > 1 )
322 avcodec_thread_init( c, thread_count );
323
324 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
325 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
326
327 // Allow the user to override the audio fourcc
328 if ( mlt_properties_get( properties, "atag" ) )
329 {
330 char *tail = NULL;
331 char *arg = mlt_properties_get( properties, "atag" );
332 int tag = strtol( arg, &tail, 0);
333 if( !tail || *tail )
334 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
335 c->codec_tag = tag;
336 }
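// For example (hypothetical values): atag=0x55 sets the tag numerically, while
// atag=mp4a packs the four characters least-significant byte first, matching
// libavformat's fourcc convention.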
337
338 // Process properties as AVOptions
339 apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
340
341 int audio_qscale = mlt_properties_get_int( properties, "aq" );
342 if ( audio_qscale > QSCALE_NONE )
343 {
344 c->flags |= CODEC_FLAG_QSCALE;
345 c->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale;
346 }
347
348 // Set parameters controlled by MLT
349 c->sample_rate = mlt_properties_get_int( properties, "frequency" );
350 c->channels = mlt_properties_get_int( properties, "channels" );
351
352 if ( mlt_properties_get( properties, "alang" ) != NULL )
353 strncpy( st->language, mlt_properties_get( properties, "alang" ), sizeof( st->language ) );
354 }
355 else
356 {
357 fprintf( stderr, "%s: Could not allocate a stream for audio\n", __FILE__ );
358 }
359
360 return st;
361 }
362
363 static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size )
364 {
365 // We will return the audio input size from here
366 int audio_input_frame_size = 0;
367
368 // Get the context
369 AVCodecContext *c = st->codec;
370
371 // Find the encoder
372 AVCodec *codec = avcodec_find_encoder( c->codec_id );
373
374 // Continue if codec found and we can open it
375 if ( codec != NULL && avcodec_open( c, codec ) >= 0 )
376 {
377 // Ugly hack for PCM codecs (will be removed ASAP with new PCM support)
378 // to compute the input frame size in samples
379 if ( c->frame_size <= 1 )
380 {
381 audio_input_frame_size = audio_outbuf_size / c->channels;
382 switch(st->codec->codec_id)
383 {
384 case CODEC_ID_PCM_S16LE:
385 case CODEC_ID_PCM_S16BE:
386 case CODEC_ID_PCM_U16LE:
387 case CODEC_ID_PCM_U16BE:
388 audio_input_frame_size >>= 1;
389 break;
390 default:
391 break;
392 }
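// Worked example (assuming the 10000-byte output buffer allocated in
// consumer_thread and 2 channels of 16-bit PCM): 10000 / 2 = 5000, then
// >> 1 for 2-byte samples = 2500 samples per channel per packet.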
393 }
394 else
395 {
396 audio_input_frame_size = c->frame_size;
397 }
398
399 // Some formats want stream headers to be separate
400 if( !strcmp( oc->oformat->name, "mp4" ) ||
401 !strcmp( oc->oformat->name, "mov" ) ||
402 !strcmp( oc->oformat->name, "3gp" ) )
403 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
404 }
405 else
406 {
407 fprintf( stderr, "%s: Unable to encode audio - disabling audio output.\n", __FILE__ );
408 }
409
410 return audio_input_frame_size;
411 }
412
413 static void close_audio( AVFormatContext *oc, AVStream *st )
414 {
415 avcodec_close( st->codec );
416 }
417
418 /** Add a video output stream
419 */
420
421 static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
422 {
423 // Get the properties
424 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
425
426 // Create a new stream
427 AVStream *st = av_new_stream( oc, 0 );
428
429 if ( st != NULL )
430 {
431 char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
432 double ar = mlt_properties_get_double( properties, "aspect_ratio" );
433 AVCodecContext *c = st->codec;
434
435 // Establish defaults from AVOptions
436 avcodec_get_context_defaults2( c, CODEC_TYPE_VIDEO );
437
438 c->codec_id = codec_id;
439 c->codec_type = CODEC_TYPE_VIDEO;
440
441 // Setup multi-threading
442 int thread_count = mlt_properties_get_int( properties, "threads" );
443 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
444 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
445 if ( thread_count > 1 )
446 avcodec_thread_init( c, thread_count );
447
448 // Process properties as AVOptions
449 apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
450
451 // Set options controlled by MLT
452 c->width = mlt_properties_get_int( properties, "width" );
453 c->height = mlt_properties_get_int( properties, "height" );
454 c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
455 c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
456 c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
457 c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
458
459 if ( mlt_properties_get_double( properties, "qscale" ) > 0 )
460 {
461 c->flags |= CODEC_FLAG_QSCALE;
462 st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
463 }
464
465 // Allow the user to override the video fourcc
466 if ( mlt_properties_get( properties, "vtag" ) )
467 {
468 char *tail = NULL;
469 const char *arg = mlt_properties_get( properties, "vtag" );
470 int tag = strtol( arg, &tail, 0);
471 if( !tail || *tail )
472 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
473 c->codec_tag = tag;
474 }
475
476 // Some formats want stream headers to be separate
477 if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
478 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
479
480 // Translate these standard mlt consumer properties to ffmpeg
481 if ( mlt_properties_get_int( properties, "progressive" ) == 0 &&
482 mlt_properties_get_int( properties, "deinterlace" ) == 0 )
483 {
484 if ( mlt_properties_get_int( properties, "ildct" ) )
485 c->flags |= CODEC_FLAG_INTERLACED_DCT;
486 if ( mlt_properties_get_int( properties, "ilme" ) )
487 c->flags |= CODEC_FLAG_INTERLACED_ME;
488 }
489
490 // parse the ratecontrol override string
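// The property is a '/'-separated list of "start,end,quality" triples, e.g.
// (hypothetical) rc_override=0,49,8/50,99,-80 - a positive third value is a
// fixed qscale, a negative one a quality factor in percent.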
491 int i;
492 char *rc_override = mlt_properties_get( properties, "rc_override" );
493 for ( i = 0; rc_override; i++ )
494 {
495 int start, end, q;
496 int e = sscanf( rc_override, "%d,%d,%d", &start, &end, &q );
497 if ( e != 3 )
498 fprintf( stderr, "%s: Error parsing rc_override\n", __FILE__ );
499 c->rc_override = av_realloc( c->rc_override, sizeof( RcOverride ) * ( i + 1 ) );
500 c->rc_override[i].start_frame = start;
501 c->rc_override[i].end_frame = end;
502 if ( q > 0 )
503 {
504 c->rc_override[i].qscale = q;
505 c->rc_override[i].quality_factor = 1.0;
506 }
507 else
508 {
509 c->rc_override[i].qscale = 0;
510 c->rc_override[i].quality_factor = -q / 100.0;
511 }
512 rc_override = strchr( rc_override, '/' );
513 if ( rc_override )
514 rc_override++;
515 }
516 c->rc_override_count = i;
517 if ( !c->rc_initial_buffer_occupancy )
518 c->rc_initial_buffer_occupancy = c->rc_buffer_size * 3/4;
519 c->intra_dc_precision = mlt_properties_get_int( properties, "dc" ) - 8;
520
521 // Setup dual-pass
522 i = mlt_properties_get_int( properties, "pass" );
523 if ( i == 1 )
524 c->flags |= CODEC_FLAG_PASS1;
525 else if ( i == 2 )
526 c->flags |= CODEC_FLAG_PASS2;
527 if ( c->flags & ( CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2 ) )
528 {
529 char logfilename[1024];
530 FILE *f;
531 int size;
532 char *logbuffer;
533
534 snprintf( logfilename, sizeof(logfilename), "%s_2pass.log",
535 mlt_properties_get( properties, "passlogfile" ) ? mlt_properties_get( properties, "passlogfile" ) : mlt_properties_get( properties, "target" ) );
536 if ( c->flags & CODEC_FLAG_PASS1 )
537 {
538 f = fopen( logfilename, "w" );
539 if ( !f )
540 perror( logfilename );
541 else
542 mlt_properties_set_data( properties, "_logfile", f, 0, ( mlt_destructor )fclose, NULL );
543 }
544 else
545 {
546 /* read the log file */
547 f = fopen( logfilename, "r" );
548 if ( !f )
549 {
550 perror(logfilename);
551 }
552 else
553 {
554 fseek( f, 0, SEEK_END );
555 size = ftell( f );
556 fseek( f, 0, SEEK_SET );
557 logbuffer = av_malloc( size + 1 );
558 if ( !logbuffer )
559 fprintf( stderr, "%s: Could not allocate log buffer\n", __FILE__ );
560 else
561 {
562 size = fread( logbuffer, 1, size, f );
563 fclose( f );
564 logbuffer[size] = '\0';
565 c->stats_in = logbuffer;
566 mlt_properties_set_data( properties, "_logbuffer", logbuffer, 0, ( mlt_destructor )av_free, NULL );
567 }
568 }
569 }
570 }
571 }
572 else
573 {
574 fprintf( stderr, "%s: Could not allocate a stream for video\n", __FILE__ );
575 }
576
577 return st;
578 }
579
580 static AVFrame *alloc_picture( int pix_fmt, int width, int height )
581 {
582 // Allocate a frame
583 AVFrame *picture = avcodec_alloc_frame();
584
585 // Determine the size of the picture buffer
586 int size = avpicture_get_size(pix_fmt, width, height);
587
588 // Allocate the picture buf
589 uint8_t *picture_buf = av_malloc(size);
590
591 // If we have both, then fill the image
592 if ( picture != NULL && picture_buf != NULL )
593 {
594 // Fill the frame with the allocated buffer
595 avpicture_fill( (AVPicture *)picture, picture_buf, pix_fmt, width, height);
596 }
597 else
598 {
599 // Something failed - clean up what we can
600 av_free( picture );
601 av_free( picture_buf );
602 picture = NULL;
603 }
604
605 return picture;
606 }
607
608 static int open_video(AVFormatContext *oc, AVStream *st)
609 {
610 // Get the codec
611 AVCodecContext *video_enc = st->codec;
612
613 // find the video encoder
614 AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
615
616 if( codec && codec->pix_fmts )
617 {
618 const enum PixelFormat *p = codec->pix_fmts;
619 for( ; *p!=-1; p++ )
620 {
621 if( *p == video_enc->pix_fmt )
622 break;
623 }
624 if( *p == -1 )
625 video_enc->pix_fmt = codec->pix_fmts[ 0 ];
626 }
627
628 // Open the codec safely
629 return codec != NULL && avcodec_open( video_enc, codec ) >= 0;
630 }
631
632 void close_video(AVFormatContext *oc, AVStream *st)
633 {
634 avcodec_close(st->codec);
635 }
636
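// Returns the wall-clock time elapsed since *time1, in microseconds.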
637 static inline long time_difference( struct timeval *time1 )
638 {
639 struct timeval time2;
640 gettimeofday( &time2, NULL );
641 return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec;
642 }
643
644 /** The main thread - the argument is simply the consumer.
645 */
646
647 static void *consumer_thread( void *arg )
648 {
649 // Map the argument to the object
650 mlt_consumer this = arg;
651
652 // Get the properties
653 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
654
655 // Get the terminate on pause property
656 int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
657 int terminated = 0;
658
659 // Determine if feed is slow (for realtime stuff)
660 int real_time_output = mlt_properties_get_int( properties, "real_time" );
661
662 // Time structures
663 struct timeval ante;
664
665 // Get the frame rate
666 double fps = mlt_properties_get_double( properties, "fps" );
667
668 // Get width and height
669 int width = mlt_properties_get_int( properties, "width" );
670 int height = mlt_properties_get_int( properties, "height" );
671 int img_width = width;
672 int img_height = height;
673
674 // Get default audio properties
675 mlt_audio_format aud_fmt = mlt_audio_pcm;
676 int channels = mlt_properties_get_int( properties, "channels" );
677 int frequency = mlt_properties_get_int( properties, "frequency" );
678 int16_t *pcm = NULL;
679 int samples = 0;
680
681 // AVFormat audio buffer and frame size
682 int audio_outbuf_size = 10000;
683 uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
684 int audio_input_frame_size = 0;
685
686 // AVFormat video buffer and frame count
687 int frame_count = 0;
688 int video_outbuf_size = ( 1024 * 1024 );
689 uint8_t *video_outbuf = av_malloc( video_outbuf_size );
690
691 // Used for the frame properties
692 mlt_frame frame = NULL;
693 mlt_properties frame_properties = NULL;
694
695 // Get the queues
696 mlt_deque queue = mlt_properties_get_data( properties, "frame_queue", NULL );
697 sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );
698
699 // Need two av pictures for converting
700 AVFrame *output = NULL;
701 AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height );
702
703 // For receiving images from an mlt_frame
704 uint8_t *image;
705 mlt_image_format img_fmt = mlt_image_yuv422;
706
707 // For receiving audio samples back from the fifo
708 int16_t *buffer = av_malloc( 48000 * 2 );
709 int count = 0;
710
711 // Allocate the context
712 AVFormatContext *oc = av_alloc_format_context( );
713
714 // Streams
715 AVStream *audio_st = NULL;
716 AVStream *video_st = NULL;
717
718 // Time stamps
719 double audio_pts = 0;
720 double video_pts = 0;
721
722 // Loop variable
723 int i;
724
725 // Frames despatched
726 long int frames = 0;
727 long int total_time = 0;
728
729 // Determine the format
730 AVOutputFormat *fmt = NULL;
731 char *filename = mlt_properties_get( properties, "target" );
732 char *format = mlt_properties_get( properties, "f" );
733 char *vcodec = mlt_properties_get( properties, "vcodec" );
734 char *acodec = mlt_properties_get( properties, "acodec" );
735
736 // Used to store and override codec ids
737 int audio_codec_id;
738 int video_codec_id;
739
740 // Check for user selected format first
741 if ( format != NULL )
742 fmt = guess_format( format, NULL, NULL );
743
744 // Otherwise check on the filename
745 if ( fmt == NULL && filename != NULL )
746 fmt = guess_format( NULL, filename, NULL );
747
748 // Otherwise default to mpeg
749 if ( fmt == NULL )
750 fmt = guess_format( "mpeg", NULL, NULL );
751
752 // We need a filename - default to stdout?
753 if ( filename == NULL || !strcmp( filename, "" ) )
754 filename = "pipe:";
755
756 // Get the codec ids selected
757 audio_codec_id = fmt->audio_codec;
758 video_codec_id = fmt->video_codec;
759
760 // Check for audio codec overrides
761 if ( acodec != NULL )
762 {
763 AVCodec *p = first_avcodec;
764 while( p != NULL )
765 {
766 if ( !strcmp( p->name, acodec ) && p->type == CODEC_TYPE_AUDIO )
767 break;
768 p = p->next;
769 }
770 if ( p != NULL )
771 audio_codec_id = p->id;
772 else
773 fprintf( stderr, "%s: audio codec %s unrecognised - ignoring\n", __FILE__, acodec );
774 }
775
776 // Check for video codec overrides
777 if ( vcodec != NULL )
778 {
779 AVCodec *p = first_avcodec;
780 while( p != NULL )
781 {
782 if ( !strcmp( p->name, vcodec ) && p->type == CODEC_TYPE_VIDEO )
783 break;
784 p = p->next;
785 }
786 if ( p != NULL )
787 video_codec_id = p->id;
788 else
789 fprintf( stderr, "%s: video codec %s unrecognised - ignoring\n", __FILE__, vcodec );
790 }
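// For example (hypothetical names): vcodec=mpeg4 acodec=mp2 selects encoders by
// their libavcodec short names; which names exist depends on how the linked
// libavcodec was configured.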
791
792 // Write metadata
793 char *tmp = NULL;
794 int metavalue;
795
796 tmp = mlt_properties_get( properties, "meta.attr.title.markup");
797 if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );
798
799 tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
800 if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );
801
802 tmp = mlt_properties_get( properties, "meta.attr.author.markup");
803 if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );
804
805 tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
806 if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );
807
808 tmp = mlt_properties_get( properties, "meta.attr.album.markup");
809 if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );
810
811 metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
812 if (metavalue != 0) oc->year = metavalue;
813
814 metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
815 if (metavalue != 0) oc->track = metavalue;
816
817 oc->oformat = fmt;
818 snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
819
820 // Add audio and video streams
821 if ( fmt->video_codec != CODEC_ID_NONE )
822 video_st = add_video_stream( this, oc, video_codec_id );
823 if ( fmt->audio_codec != CODEC_ID_NONE )
824 audio_st = add_audio_stream( this, oc, audio_codec_id );
825
826 // Set the parameters (even though we have none...)
827 if ( av_set_parameters(oc, NULL) >= 0 )
828 {
829 oc->preload = ( int )( mlt_properties_get_double( properties, "muxpreload" ) * AV_TIME_BASE );
830 oc->max_delay= ( int )( mlt_properties_get_double( properties, "muxdelay" ) * AV_TIME_BASE );
831
832 // Process properties as AVOptions
833 apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM );
834
835 if ( video_st && !open_video( oc, video_st ) )
836 video_st = NULL;
837 if ( audio_st )
838 audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size );
839
840 // Open the output file, if needed
841 if ( !( fmt->flags & AVFMT_NOFILE ) )
842 {
843 if ( url_fopen( &oc->pb, filename, URL_WRONLY ) < 0 )
844 {
845 fprintf( stderr, "%s: Could not open '%s'\n", __FILE__, filename );
846 mlt_properties_set_int( properties, "running", 0 );
847 }
848 }
849
850 // Write the stream header, if any
851 if ( mlt_properties_get_int( properties, "running" ) )
852 av_write_header( oc );
853 }
854 else
855 {
856 fprintf( stderr, "%s: Invalid output format parameters\n", __FILE__ );
857 mlt_properties_set_int( properties, "running", 0 );
858 }
859
860 // Allocate picture
861 if ( video_st )
862 output = alloc_picture( video_st->codec->pix_fmt, width, height );
863
864 // Last check - need at least one stream
865 if ( audio_st == NULL && video_st == NULL )
866 mlt_properties_set_int( properties, "running", 0 );
867
868 // Get the starting time (can ignore the times above)
869 gettimeofday( &ante, NULL );
870
871 // Loop while running
872 while( mlt_properties_get_int( properties, "running" ) && !terminated )
873 {
874 // Get the frame
875 frame = mlt_consumer_rt_frame( this );
876
877 // Check that we have a frame to work with
878 if ( frame != NULL )
879 {
880 // Increment frames despatched
881 frames ++;
882
883 // Get the frame properties
884 frame_properties = MLT_FRAME_PROPERTIES( frame );
885
886 // Check for the terminated condition
887 terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;
888
889 // Get audio and append to the fifo
890 if ( !terminated && audio_st )
891 {
892 samples = mlt_sample_calculator( fps, frequency, count ++ );
893 mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
894
895 // Create the fifo if we don't have one
896 if ( fifo == NULL )
897 {
898 fifo = sample_fifo_init( frequency, channels );
899 mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
900 }
901
902 if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
903 memset( pcm, 0, samples * channels * 2 );
904
905 // Append the samples
906 sample_fifo_append( fifo, pcm, samples * channels );
907 total_time += ( samples * 1000000 ) / frequency;
908 }
909
910 // Encode the image
911 if ( !terminated && video_st )
912 mlt_deque_push_back( queue, frame );
913 else
914 mlt_frame_close( frame );
915 }
916
917 // While we have stuff to process, process...
918 while ( 1 )
919 {
920 if (audio_st)
921 audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
922 else
923 audio_pts = 0.0;
924
925 if (video_st)
926 video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
927 else
928 video_pts = 0.0;
929
930 // Write interleaved audio and video frames
931 if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
932 {
933 if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) )
934 {
935 AVCodecContext *c;
936 AVPacket pkt;
937 av_init_packet( &pkt );
938
939 c = audio_st->codec;
940
941 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
942
943 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
944 // Write the compressed frame in the media file
945 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
946 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
947 pkt.flags |= PKT_FLAG_KEY;
948 pkt.stream_index= audio_st->index;
949 pkt.data= audio_outbuf;
950
951 if ( pkt.size )
952 if ( av_interleaved_write_frame( oc, &pkt ) != 0)
953 fprintf( stderr, "%s: Error while writing audio frame\n", __FILE__ );
954
955 audio_pts += c->frame_size;
956 }
957 else
958 {
959 break;
960 }
961 }
962 else if ( video_st )
963 {
964 if ( mlt_deque_count( queue ) )
965 {
966 int out_size, ret;
967 AVCodecContext *c;
968
969 frame = mlt_deque_pop_front( queue );
970 frame_properties = MLT_FRAME_PROPERTIES( frame );
971
972 c = video_st->codec;
973
974 if ( mlt_properties_get_int( frame_properties, "rendered" ) )
975 {
976 int i = 0;
977 int j = 0;
978 uint8_t *p;
979 uint8_t *q;
980
981 mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
982
983 mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
984
985 q = image;
986
987 // Convert the mlt frame to an AVPicture
988 for ( i = 0; i < height; i ++ )
989 {
990 p = input->data[ 0 ] + i * input->linesize[ 0 ];
991 j = width;
992 while( j -- )
993 {
994 *p ++ = *q ++;
995 *p ++ = *q ++;
996 }
997 }
998
999 // Do the colour space conversion
1000 #ifdef SWSCALE
1001 struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
1002 width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
1003 sws_scale( context, input->data, input->linesize, 0, height,
1004 output->data, output->linesize);
1005 sws_freeContext( context );
1006 #else
1007 img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
1008 #endif
1009
1010 // Apply the alpha if applicable
1011 if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
1012 {
1013 uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
1014 register int n;
1015
1016 for ( i = 0; i < height; i ++ )
1017 {
1018 n = ( width + 7 ) / 8;
1019 p = output->data[ 0 ] + i * output->linesize[ 0 ];
1020
1021 #ifndef __DARWIN__
1022 p += 3;
1023 #endif
1024
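// Duff's device: the switch jumps into the unrolled do/while so rows whose
// width is not a multiple of 8 are handled without a separate remainder loop,
// writing one alpha byte into every fourth byte of the RGBA row.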
1025 switch( width % 8 )
1026 {
1027 case 0: do { *p = *alpha++; p += 4;
1028 case 7: *p = *alpha++; p += 4;
1029 case 6: *p = *alpha++; p += 4;
1030 case 5: *p = *alpha++; p += 4;
1031 case 4: *p = *alpha++; p += 4;
1032 case 3: *p = *alpha++; p += 4;
1033 case 2: *p = *alpha++; p += 4;
1034 case 1: *p = *alpha++; p += 4;
1035 }
1036 while( --n );
1037 }
1038 }
1039 }
1040 }
1041
1042 if (oc->oformat->flags & AVFMT_RAWPICTURE)
1043 {
1044 // raw video case. The API will change slightly in the near future for that
1045 AVPacket pkt;
1046 av_init_packet(&pkt);
1047
1048 pkt.flags |= PKT_FLAG_KEY;
1049 pkt.stream_index= video_st->index;
1050 pkt.data= (uint8_t *)output;
1051 pkt.size= sizeof(AVPicture);
1052
1053 ret = av_write_frame(oc, &pkt);
1054 video_pts += c->frame_size;
1055 }
1056 else
1057 {
1058 // Set the quality
1059 output->quality = video_st->quality;
1060
1061 // Set frame interlace hints
1062 output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
1063 output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
1064
1065 // Encode the image
1066 out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
1067
1068 // If zero size, it means the image was buffered
1069 if (out_size > 0)
1070 {
1071 AVPacket pkt;
1072 av_init_packet( &pkt );
1073
1074 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1075 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1076 if( c->coded_frame && c->coded_frame->key_frame )
1077 pkt.flags |= PKT_FLAG_KEY;
1078 pkt.stream_index= video_st->index;
1079 pkt.data= video_outbuf;
1080 pkt.size= out_size;
1081
1082 // write the compressed frame in the media file
1083 ret = av_interleaved_write_frame(oc, &pkt);
1084 video_pts += c->frame_size;
1085
1086 // Dual pass logging
1087 if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out)
1088 fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out );
1089 }
1090 else
1091 {
1092 fprintf( stderr, "%s: error with video encode\n", __FILE__ );
1093 }
1094 }
1095 frame_count++;
1096 mlt_frame_close( frame );
1097 }
1098 else
1099 {
1100 break;
1101 }
1102 }
1103 }
1104
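// Pace real-time output: every 12 frames, compare wall-clock time consumed
// (minus the audio still queued in the fifo) against the audio time appended
// so far and sleep off any lead.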
1105 if ( real_time_output == 1 && frames % 12 == 0 )
1106 {
1107 long passed = time_difference( &ante );
1108 if ( fifo != NULL )
1109 {
1110 long pending = ( ( ( long )sample_fifo_used( fifo ) * 1000 ) / frequency ) * 1000;
1111 passed -= pending;
1112 }
1113 if ( passed < total_time )
1114 {
1115 long total = ( total_time - passed );
1116 struct timespec t = { total / 1000000, ( total % 1000000 ) * 1000 };
1117 nanosleep( &t, NULL );
1118 }
1119 }
1120 }
1121
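// Optional end-of-stream draining, compiled only when FLUSH is defined: any
// samples left in the fifo and any frames still buffered inside the encoders
// are encoded and written before the trailer.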
1122 #ifdef FLUSH
1123 if ( ! real_time_output )
1124 {
1125 // Flush audio fifo
1126 if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
1127 {
1128 AVCodecContext *c = audio_st->codec;
1129 AVPacket pkt;
1130 av_init_packet( &pkt );
1131 pkt.size = 0;
1132
1133 if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
1134 ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
1135 {
1136 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
1137 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
1138 }
1139 if ( pkt.size <= 0 )
1140 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
1141 if ( pkt.size <= 0 )
1142 break;
1143
1144 // Write the compressed frame in the media file
1145 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1146 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
1147 pkt.flags |= PKT_FLAG_KEY;
1148 pkt.stream_index = audio_st->index;
1149 pkt.data = audio_outbuf;
1150 if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
1151 {
1152 fprintf( stderr, "%s: Error while writing flushed audio frame\n", __FILE__ );
1153 break;
1154 }
1155 }
1156
1157 // Flush video
1158 if ( video_st && !( oc->oformat->flags & AVFMT_RAWPICTURE ) ) for (;;)
1159 {
1160 AVCodecContext *c = video_st->codec;
1161 AVPacket pkt;
1162 av_init_packet( &pkt );
1163
1164 // Encode the image
1165 pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
1166 if ( pkt.size <= 0 )
1167 break;
1168
1169 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1170 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1171 if( c->coded_frame && c->coded_frame->key_frame )
1172 pkt.flags |= PKT_FLAG_KEY;
1173 pkt.stream_index = video_st->index;
1174 pkt.data = video_outbuf;
1175
1176 // write the compressed frame in the media file
1177 if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
1178 {
1179 fprintf( stderr, "%s: Error while writing flushed video frame\n", __FILE__ );
1180 break;
1181 }
1182 }
1183 }
1184 #endif
1185
1186 // close each codec
1187 if (video_st)
1188 close_video(oc, video_st);
1189 if (audio_st)
1190 close_audio(oc, audio_st);
1191
1192 // Write the trailer, if any
1193 av_write_trailer(oc);
1194
1195 // Free the streams
1196 for(i = 0; i < oc->nb_streams; i++)
1197 av_freep(&oc->streams[i]);
1198
1199 // Close the output file
1200 if (!(fmt->flags & AVFMT_NOFILE))
1201 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
1202 url_fclose(oc->pb);
1203 #else
1204 url_fclose(&oc->pb);
1205 #endif
1206
1207 // Clean up input and output frames
1208 if ( output )
1209 av_free( output->data[0] );
1210 av_free( output );
1211 av_free( input->data[0] );
1212 av_free( input );
1213 av_free( video_outbuf );
1214 av_free( buffer );
1215
1216 // Free the stream
1217 av_free(oc);
1218
1219 // Just in case we terminated on pause
1220 mlt_properties_set_int( properties, "running", 0 );
1221
1222 mlt_consumer_stopped( this );
1223
1224 return NULL;
1225 }
1226
1227 /** Close the consumer.
1228 */
1229
1230 static void consumer_close( mlt_consumer this )
1231 {
1232 // Stop the consumer
1233 mlt_consumer_stop( this );
1234
1235 // Close the parent
1236 mlt_consumer_close( this );
1237
1238 // Free the memory
1239 free( this );
1240 }