consumer_avformat.c (melted/src/modules/avformat/consumer_avformat.c):
1 /*
2 * consumer_avformat.c -- an encoder based on avformat
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 // mlt Header files
23 #include <framework/mlt_consumer.h>
24 #include <framework/mlt_frame.h>
25
26 // System header files
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <limits.h>
31 #include <pthread.h>
32 #include <sys/time.h>
33 #include <math.h>
34
35 // avformat header files
36 #include <avformat.h>
37 #ifdef SWSCALE
38 #include <swscale.h>
39 #endif
40 #include <opt.h>
41
42 //
43 // This structure should be extended and made globally available in mlt
44 //
45
46 typedef struct
47 {
48 int16_t *buffer;
49 int size;
50 int used;
51 double time;
52 int frequency;
53 int channels;
54 }
55 *sample_fifo, sample_fifo_s;
56
57 sample_fifo sample_fifo_init( int frequency, int channels )
58 {
59 sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
60 this->frequency = frequency;
61 this->channels = channels;
62 return this;
63 }
64
65 // sample_fifo_clear and sample_fifo_check are temporarily disabled (not working as intended)
66
67 void sample_fifo_clear( sample_fifo this, double time )
68 {
69 int words = ( float )( time - this->time ) * this->frequency * this->channels;
70 if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 )
71 {
72 memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) );
73 this->used -= words;
74 this->time = time;
75 }
76 else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) )
77 {
78 this->used = 0;
79 this->time = time;
80 }
81 }
82
83 void sample_fifo_check( sample_fifo this, double time )
84 {
85 if ( this->used == 0 )
86 {
87 if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) )
88 this->time = time;
89 }
90 }
91
92 void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
93 {
94 if ( ( this->size - this->used ) < count )
95 {
96 this->size += count * 5;
97 this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
98 }
99
100 memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
101 this->used += count;
102 }
103
104 int sample_fifo_used( sample_fifo this )
105 {
106 return this->used;
107 }
108
109 int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
110 {
111 if ( count > this->used )
112 count = this->used;
113
114 memcpy( samples, this->buffer, count * sizeof( int16_t ) );
115 this->used -= count;
116 memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );
117
118 this->time += ( double )count / this->channels / this->frequency;
119
120 return count;
121 }
122
123 void sample_fifo_close( sample_fifo this )
124 {
125 free( this->buffer );
126 free( this );
127 }
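// Illustrative use of the sample_fifo API above (a minimal sketch, not part
// of the consumer logic; "block" and "frame_samples" are hypothetical names).
// Interleaved samples are appended as they arrive and fetched in fixed-size
// blocks sized for the audio encoder:
//
//     sample_fifo fifo = sample_fifo_init( 48000, 2 );
//     sample_fifo_append( fifo, pcm, samples * channels );
//     while ( sample_fifo_used( fifo ) >= channels * frame_samples )
//         sample_fifo_fetch( fifo, block, channels * frame_samples );
//     sample_fifo_close( fifo );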
128
129 // Forward references.
130 static int consumer_start( mlt_consumer this );
131 static int consumer_stop( mlt_consumer this );
132 static int consumer_is_stopped( mlt_consumer this );
133 static void *consumer_thread( void *arg );
134 static void consumer_close( mlt_consumer this );
135
136 /** Initialise the avformat consumer.
137 */
138
139 mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
140 {
141 // Allocate the consumer
142 mlt_consumer this = mlt_consumer_new( profile );
143
144 // If memory was allocated and initialised without error
145 if ( this != NULL )
146 {
147 // Get properties from the consumer
148 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
149
150 // Assign close callback
151 this->close = consumer_close;
152
153 // Interpret the argument
154 if ( arg != NULL )
155 mlt_properties_set( properties, "target", arg );
156
157 // sample and frame queue
158 mlt_properties_set_data( properties, "frame_queue", mlt_deque_init( ), 0, ( mlt_destructor )mlt_deque_close, NULL );
159
160 // Audio options not fully handled by AVOptions
161 #define QSCALE_NONE (-99999)
162 mlt_properties_set_int( properties, "aq", QSCALE_NONE );
163
164 // Video options not fully handled by AVOptions
165 mlt_properties_set_int( properties, "dc", 8 );
166
167 // Muxer options not fully handled by AVOptions
168 mlt_properties_set_double( properties, "muxdelay", 0.7 );
169 mlt_properties_set_double( properties, "muxpreload", 0.5 );
170
171 // Ensure termination at end of the stream
172 mlt_properties_set_int( properties, "terminate_on_pause", 1 );
173
174 // Default to separate processing threads for producer and consumer with no frame dropping!
175 mlt_properties_set_int( properties, "real_time", -1 );
176
177 // Set up start/stop/terminated callbacks
178 this->start = consumer_start;
179 this->stop = consumer_stop;
180 this->is_stopped = consumer_is_stopped;
181 }
182
183 // Return this
184 return this;
185 }
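// A minimal usage sketch (assuming the standard MLT factory API; the file
// name and property values here are examples only): an application would
// normally obtain this consumer through the factory, override encoding
// properties, then connect a producer and start it.
//
//     mlt_profile profile = mlt_profile_init( "dv_pal" );
//     mlt_consumer consumer = mlt_factory_consumer( profile, "avformat", "out.mpg" );
//     mlt_properties_set( MLT_CONSUMER_PROPERTIES( consumer ), "vcodec", "mpeg2video" );
//     mlt_consumer_connect( consumer, MLT_PRODUCER_SERVICE( producer ) );
//     mlt_consumer_start( consumer );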
186
187 /** Start the consumer.
188 */
189
190 static int consumer_start( mlt_consumer this )
191 {
192 // Get the properties
193 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
194
195 // Check that we're not already running
196 if ( !mlt_properties_get_int( properties, "running" ) )
197 {
198 // Allocate a thread
199 pthread_t *thread = calloc( 1, sizeof( pthread_t ) );
200
201 // Get the width and height
202 int width = mlt_properties_get_int( properties, "width" );
203 int height = mlt_properties_get_int( properties, "height" );
204
205 // Obtain the size property
206 char *size = mlt_properties_get( properties, "s" );
207
208 // Interpret it
209 if ( size != NULL )
210 {
211 int tw, th;
212 if ( sscanf( size, "%dx%d", &tw, &th ) == 2 && tw > 0 && th > 0 )
213 {
214 width = tw;
215 height = th;
216 }
217 else
218 {
219 fprintf( stderr, "%s: Invalid size property %s - ignoring.\n", __FILE__, size );
220 }
221 }
222
223 // Now ensure we honour the multiple of two requested by libavformat
224 width = ( width / 2 ) * 2;
225 height = ( height / 2 ) * 2;
226 mlt_properties_set_int( properties, "width", width );
227 mlt_properties_set_int( properties, "height", height );
228
229 // We need to set these on the profile as well because the s property is
230 // an alias to mlt properties that correspond to profile settings.
231 mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( this ) );
232 if ( profile )
233 {
234 profile->width = width;
235 profile->height = height;
236 }
237
238 // Handle the ffmpeg command line "-r" property for frame rate
239 if ( mlt_properties_get( properties, "r" ) )
240 {
241 double frame_rate = mlt_properties_get_double( properties, "r" );
242 AVRational rational = av_d2q( frame_rate, 255 );
243 mlt_properties_set_int( properties, "frame_rate_num", rational.num );
244 mlt_properties_set_int( properties, "frame_rate_den", rational.den );
245 if ( profile )
246 {
247 profile->frame_rate_num = rational.num;
248 profile->frame_rate_den = rational.den;
249 mlt_properties_set_double( properties, "fps", mlt_profile_fps( profile ) );
250 }
251 }
252
253 // Apply AVOptions that are synonyms for standard mlt_consumer options
254 if ( mlt_properties_get( properties, "ac" ) )
255 mlt_properties_set_int( properties, "channels", mlt_properties_get_int( properties, "ac" ) );
256 if ( mlt_properties_get( properties, "ar" ) )
257 mlt_properties_set_int( properties, "frequency", mlt_properties_get_int( properties, "ar" ) );
258
259 // Assign the thread to properties
260 mlt_properties_set_data( properties, "thread", thread, sizeof( pthread_t ), free, NULL );
261
262 // Set the running state
263 mlt_properties_set_int( properties, "running", 1 );
264
265 // Create the thread
266 pthread_create( thread, NULL, consumer_thread, this );
267 }
268 return 0;
269 }
270
271 /** Stop the consumer.
272 */
273
274 static int consumer_stop( mlt_consumer this )
275 {
276 // Get the properties
277 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
278
279 // Check that we're running
280 if ( mlt_properties_get_int( properties, "running" ) )
281 {
282 // Get the thread
283 pthread_t *thread = mlt_properties_get_data( properties, "thread", NULL );
284
285 // Stop the thread
286 mlt_properties_set_int( properties, "running", 0 );
287
288 // Wait for termination
289 pthread_join( *thread, NULL );
290 }
291
292 return 0;
293 }
294
295 /** Determine if the consumer is stopped.
296 */
297
298 static int consumer_is_stopped( mlt_consumer this )
299 {
300 // Get the properties
301 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
302 return !mlt_properties_get_int( properties, "running" );
303 }
304
305 /** Process properties as AVOptions and apply them to the AV context object.
306 */
307
308 static void apply_properties( void *obj, mlt_properties properties, int flags )
309 {
310 int i;
311 int count = mlt_properties_count( properties );
312 for ( i = 0; i < count; i++ )
313 {
314 const char *opt_name = mlt_properties_get_name( properties, i );
315 const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
316 if ( opt != NULL )
317 #if LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
318 av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
319 #else
320 av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
321 #endif
322 }
323 }
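// Example of the mapping performed above (a sketch using the old AVOption
// string API; the property names follow the ffmpeg CLI): a consumer property
// is looked up with av_find_opt() on the context and applied with
// av_set_string(), so most ffmpeg flags pass straight through, e.g.
//
//     mlt_properties_set( properties, "b", "5000k" );  // video bitrate
//     mlt_properties_set( properties, "g", "25" );     // GOP size
//     apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );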
324
325 /** Add an audio output stream
326 */
327
328 static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
329 {
330 // Get the properties
331 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
332
333 // Create a new stream
334 AVStream *st = av_new_stream( oc, 1 );
335
336 // If created, then initialise from properties
337 if ( st != NULL )
338 {
339 AVCodecContext *c = st->codec;
340
341 // Establish defaults from AVOptions
342 avcodec_get_context_defaults2( c, CODEC_TYPE_AUDIO );
343
344 c->codec_id = codec_id;
345 c->codec_type = CODEC_TYPE_AUDIO;
346
347 // Setup multi-threading
348 int thread_count = mlt_properties_get_int( properties, "threads" );
349 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
350 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
351 if ( thread_count > 1 )
352 avcodec_thread_init( c, thread_count );
353
354 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
355 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
356
357 // Allow the user to override the audio fourcc
358 if ( mlt_properties_get( properties, "atag" ) )
359 {
360 char *tail = NULL;
361 char *arg = mlt_properties_get( properties, "atag" );
362 int tag = strtol( arg, &tail, 0);
363 if( !tail || *tail )
364 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
365 c->codec_tag = tag;
366 }
367
368 // Process properties as AVOptions
369 apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
370
371 int audio_qscale = mlt_properties_get_int( properties, "aq" );
372 if ( audio_qscale > QSCALE_NONE )
373 {
374 c->flags |= CODEC_FLAG_QSCALE;
375 c->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale;
376 }
377
378 // Set parameters controlled by MLT
379 c->sample_rate = mlt_properties_get_int( properties, "frequency" );
380 c->channels = mlt_properties_get_int( properties, "channels" );
381
382 if ( mlt_properties_get( properties, "alang" ) != NULL )
383 strncpy( st->language, mlt_properties_get( properties, "alang" ), sizeof( st->language ) );
384 }
385 else
386 {
387 fprintf( stderr, "%s: Could not allocate a stream for audio\n", __FILE__ );
388 }
389
390 return st;
391 }
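// Worked example of the fourcc packing used for "atag" (and "vtag" below):
// a tag that does not parse as a number is packed little-endian from its
// four characters, so "mp4a" becomes
//
//     'm' + ( 'p' << 8 ) + ( '4' << 16 ) + ( 'a' << 24 )
//   = 0x6D + 0x7000 + 0x340000 + 0x61000000
//   = 0x6134706D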
392
393 static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size )
394 {
395 // We will return the audio input size from here
396 int audio_input_frame_size = 0;
397
398 // Get the context
399 AVCodecContext *c = st->codec;
400
401 // Find the encoder
402 AVCodec *codec = avcodec_find_encoder( c->codec_id );
403
404 // Continue if codec found and we can open it
405 if ( codec != NULL && avcodec_open( c, codec ) >= 0 )
406 {
407 // Ugly hack for PCM codecs (will be removed ASAP with new PCM support)
408 // to compute the input frame size in samples
409 if ( c->frame_size <= 1 )
410 {
411 audio_input_frame_size = audio_outbuf_size / c->channels;
412 switch(st->codec->codec_id)
413 {
414 case CODEC_ID_PCM_S16LE:
415 case CODEC_ID_PCM_S16BE:
416 case CODEC_ID_PCM_U16LE:
417 case CODEC_ID_PCM_U16BE:
418 audio_input_frame_size >>= 1;
419 break;
420 default:
421 break;
422 }
423 }
424 else
425 {
426 audio_input_frame_size = c->frame_size;
427 }
428
429 // Some formats want stream headers to be separate (hmm)
430 if( !strcmp( oc->oformat->name, "mp4" ) ||
431 !strcmp( oc->oformat->name, "mov" ) ||
432 !strcmp( oc->oformat->name, "3gp" ) )
433 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
434 }
435 else
436 {
437 fprintf( stderr, "%s: Unable to encode audio - disabling audio output.\n", __FILE__ );
438 }
439
440 return audio_input_frame_size;
441 }
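// Worked example of the PCM sizing above: with the 10000 byte output buffer
// allocated in consumer_thread below, 2 channels and CODEC_ID_PCM_S16LE, the
// input frame size becomes ( 10000 / 2 ) >> 1 = 2500 samples per channel,
// i.e. 5000 interleaved int16_t samples are fetched from the fifo per packet.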
442
443 static void close_audio( AVFormatContext *oc, AVStream *st )
444 {
445 avcodec_close( st->codec );
446 }
447
448 /** Add a video output stream
449 */
450
451 static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
452 {
453 // Get the properties
454 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
455
456 // Create a new stream
457 AVStream *st = av_new_stream( oc, 0 );
458
459 if ( st != NULL )
460 {
461 char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
462 AVCodecContext *c = st->codec;
463
464 // Establish defaults from AVOptions
465 avcodec_get_context_defaults2( c, CODEC_TYPE_VIDEO );
466
467 c->codec_id = codec_id;
468 c->codec_type = CODEC_TYPE_VIDEO;
469
470 // Setup multi-threading
471 int thread_count = mlt_properties_get_int( properties, "threads" );
472 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
473 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
474 if ( thread_count > 1 )
475 avcodec_thread_init( c, thread_count );
476
477 // Process properties as AVOptions
478 apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
479
480 // Set options controlled by MLT
481 c->width = mlt_properties_get_int( properties, "width" );
482 c->height = mlt_properties_get_int( properties, "height" );
483 c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
484 c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
485 c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
486
487 if ( codec_id == CODEC_ID_DVVIDEO )
488 {
489 // Compensate for FFmpeg's notion of DV aspect ratios, which are
490 // based upon a width of 704. Since we do not have a normaliser
491 // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
492 // we just coerce the values to facilitate a passive behaviour through
493 // the rescale normaliser when using equivalent producers and consumers.
494 // sample_aspect = display_aspect * height / width
495 double ar = mlt_properties_get_double( properties, "aspect_ratio" );
496 if ( ar == 8.0/9.0 ) // 4:3 NTSC
497 {
498 c->sample_aspect_ratio.num = 10;
499 c->sample_aspect_ratio.den = 11;
500 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
501 st->sample_aspect_ratio = c->sample_aspect_ratio;
502 #endif
503 }
504 else if ( ar == 16.0/15.0 ) // 4:3 PAL
505 {
506 c->sample_aspect_ratio.num = 159;
507 c->sample_aspect_ratio.den = 54;
508 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
509 st->sample_aspect_ratio = c->sample_aspect_ratio;
510 #endif
511 }
512 else if ( ar == 32.0/27.0 ) // 16:9 NTSC
513 {
514 c->sample_aspect_ratio.num = 40;
515 c->sample_aspect_ratio.den = 33;
516 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
517 st->sample_aspect_ratio = c->sample_aspect_ratio;
518 #endif
519 }
520 else // 16:9 PAL
521 {
522 c->sample_aspect_ratio.num = 118;
523 c->sample_aspect_ratio.den = 81;
524 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
525 st->sample_aspect_ratio = c->sample_aspect_ratio;
526 #endif
527 }
528 }
529 else if ( mlt_properties_get( properties, "aspect" ) )
530 {
531 // "-aspect" on ffmpeg command line is display aspect ratio
532 double ar = mlt_properties_get_double( properties, "aspect" );
533 AVRational rational = av_d2q( ar, 255 );
534
535 // Update the profile and properties as well since this is an alias
536 // for mlt properties that correspond to profile settings
537 mlt_properties_set_int( properties, "display_aspect_num", rational.num );
538 mlt_properties_set_int( properties, "display_aspect_den", rational.den );
539 mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( this ) );
540 if ( profile )
541 {
542 profile->display_aspect_num = rational.num;
543 profile->display_aspect_den = rational.den;
544 mlt_properties_set_double( properties, "display_ratio", mlt_profile_dar( profile ) );
545 }
546
547 // Now compute the sample aspect ratio
548 rational = av_d2q( ar * c->height / c->width, 255 );
549 c->sample_aspect_ratio = rational;
550 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
551 st->sample_aspect_ratio = c->sample_aspect_ratio;
552 #endif
553 // Update the profile and properties as well since this is an alias
554 // for mlt properties that correspond to profile settings
555 mlt_properties_set_int( properties, "sample_aspect_num", rational.num );
556 mlt_properties_set_int( properties, "sample_aspect_den", rational.den );
557 if ( profile )
558 {
559 profile->sample_aspect_num = rational.num;
560 profile->sample_aspect_den = rational.den;
561 mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile ) );
562 }
563
564 }
565 else
566 {
567 c->sample_aspect_ratio.num = mlt_properties_get_int( properties, "sample_aspect_num" );
568 c->sample_aspect_ratio.den = mlt_properties_get_int( properties, "sample_aspect_den" );
569 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
570 st->sample_aspect_ratio = c->sample_aspect_ratio;
571 #endif
572 }
573
574 if ( mlt_properties_get_double( properties, "qscale" ) > 0 )
575 {
576 c->flags |= CODEC_FLAG_QSCALE;
577 st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
578 }
579
580 // Allow the user to override the video fourcc
581 if ( mlt_properties_get( properties, "vtag" ) )
582 {
583 char *tail = NULL;
584 const char *arg = mlt_properties_get( properties, "vtag" );
585 int tag = strtol( arg, &tail, 0);
586 if( !tail || *tail )
587 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
588 c->codec_tag = tag;
589 }
590
591 // Some formats want stream headers to be separate
592 if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
593 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
594
595 // Translate these standard mlt consumer properties to ffmpeg
596 if ( mlt_properties_get_int( properties, "progressive" ) == 0 &&
597 mlt_properties_get_int( properties, "deinterlace" ) == 0 )
598 {
599 if ( mlt_properties_get_int( properties, "ildct" ) )
600 c->flags |= CODEC_FLAG_INTERLACED_DCT;
601 if ( mlt_properties_get_int( properties, "ilme" ) )
602 c->flags |= CODEC_FLAG_INTERLACED_ME;
603 }
604
605 // parse the ratecontrol override string
606 int i;
607 char *rc_override = mlt_properties_get( properties, "rc_override" );
608 for ( i = 0; rc_override; i++ )
609 {
610 int start, end, q;
611 int e = sscanf( rc_override, "%d,%d,%d", &start, &end, &q );
612 if ( e != 3 )
613 fprintf( stderr, "%s: Error parsing rc_override\n", __FILE__ );
614 c->rc_override = av_realloc( c->rc_override, sizeof( RcOverride ) * ( i + 1 ) );
615 c->rc_override[i].start_frame = start;
616 c->rc_override[i].end_frame = end;
617 if ( q > 0 )
618 {
619 c->rc_override[i].qscale = q;
620 c->rc_override[i].quality_factor = 1.0;
621 }
622 else
623 {
624 c->rc_override[i].qscale = 0;
625 c->rc_override[i].quality_factor = -q / 100.0;
626 }
627 rc_override = strchr( rc_override, '/' );
628 if ( rc_override )
629 rc_override++;
630 }
631 c->rc_override_count = i;
632 if ( !c->rc_initial_buffer_occupancy )
633 c->rc_initial_buffer_occupancy = c->rc_buffer_size * 3/4;
634 c->intra_dc_precision = mlt_properties_get_int( properties, "dc" ) - 8;
635
636 // Setup dual-pass
637 i = mlt_properties_get_int( properties, "pass" );
638 if ( i == 1 )
639 c->flags |= CODEC_FLAG_PASS1;
640 else if ( i == 2 )
641 c->flags |= CODEC_FLAG_PASS2;
642 if ( c->flags & ( CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2 ) )
643 {
644 char logfilename[1024];
645 FILE *f;
646 int size;
647 char *logbuffer;
648
649 snprintf( logfilename, sizeof(logfilename), "%s_2pass.log",
650 mlt_properties_get( properties, "passlogfile" ) ? mlt_properties_get( properties, "passlogfile" ) : mlt_properties_get( properties, "target" ) );
651 if ( c->flags & CODEC_FLAG_PASS1 )
652 {
653 f = fopen( logfilename, "w" );
654 if ( !f )
655 perror( logfilename );
656 else
657 mlt_properties_set_data( properties, "_logfile", f, 0, ( mlt_destructor )fclose, NULL );
658 }
659 else
660 {
661 /* read the log file */
662 f = fopen( logfilename, "r" );
663 if ( !f )
664 {
665 perror(logfilename);
666 }
667 else
668 {
669 fseek( f, 0, SEEK_END );
670 size = ftell( f );
671 fseek( f, 0, SEEK_SET );
672 logbuffer = av_malloc( size + 1 );
673 if ( !logbuffer )
674 fprintf( stderr, "%s: Could not allocate log buffer\n", __FILE__ );
675 else
676 {
677 size = fread( logbuffer, 1, size, f );
678 fclose( f );
679 logbuffer[size] = '\0';
680 c->stats_in = logbuffer;
681 mlt_properties_set_data( properties, "_logbuffer", logbuffer, 0, ( mlt_destructor )av_free, NULL );
682 }
683 }
684 }
685 }
686 }
687 else
688 {
689 fprintf( stderr, "%s: Could not allocate a stream for video\n", __FILE__ );
690 }
691
692 return st;
693 }
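// Worked example of the "aspect" handling above: with aspect=16:9 (1.7778)
// and a 720x576 frame, av_d2q( 1.7778 * 576 / 720, 255 ) yields 64/45, so
// the stream is tagged with a sample aspect ratio of 64:45 (anamorphic PAL
// widescreen) while the stored picture remains 720x576.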
694
695 static AVFrame *alloc_picture( int pix_fmt, int width, int height )
696 {
697 // Allocate a frame
698 AVFrame *picture = avcodec_alloc_frame();
699
700 // Determine the size of the picture buffer
701 int size = avpicture_get_size(pix_fmt, width, height);
702
703 // Allocate the picture buf
704 uint8_t *picture_buf = av_malloc(size);
705
706 // If we have both, then fill the image
707 if ( picture != NULL && picture_buf != NULL )
708 {
709 // Fill the frame with the allocated buffer
710 avpicture_fill( (AVPicture *)picture, picture_buf, pix_fmt, width, height);
711 }
712 else
713 {
714 // Something failed - clean up what we can
715 av_free( picture );
716 av_free( picture_buf );
717 picture = NULL;
718 }
719
720 return picture;
721 }
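// Worked example for alloc_picture(): the packed PIX_FMT_YUV422 input frame
// used below needs avpicture_get_size( PIX_FMT_YUV422, 720, 576 ) =
// 720 * 576 * 2 = 829440 bytes (2 bytes per pixel), and avpicture_fill()
// points data[ 0 ] at that single plane with linesize[ 0 ] = 1440.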
722
723 static int open_video(AVFormatContext *oc, AVStream *st)
724 {
725 // Get the codec
726 AVCodecContext *video_enc = st->codec;
727
728 // find the video encoder
729 AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
730
731 if( codec && codec->pix_fmts )
732 {
733 const enum PixelFormat *p = codec->pix_fmts;
734 for( ; *p!=-1; p++ )
735 {
736 if( *p == video_enc->pix_fmt )
737 break;
738 }
739 if( *p == -1 )
740 video_enc->pix_fmt = codec->pix_fmts[ 0 ];
741 }
742
743 // Open the codec safely
744 return codec != NULL && avcodec_open( video_enc, codec ) >= 0;
745 }
746
747 void close_video(AVFormatContext *oc, AVStream *st)
748 {
749 avcodec_close(st->codec);
750 }
751
752 static inline long time_difference( struct timeval *time1 )
753 {
754 struct timeval time2;
755 gettimeofday( &time2, NULL );
756 return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec;
757 }
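// time_difference() returns the wall-clock time elapsed since *time1 in
// microseconds. The real-time pacing block near the end of consumer_thread
// compares it against the amount of audio delivered so far (total_time) and
// sleeps off any surplus: for example, if roughly 480000 us of audio have
// been queued but only 400000 us have passed, it nanosleep()s for about the
// remaining 80000 us (less whatever is still pending in the fifo).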
758
759 /** The main thread - the argument is simply the consumer.
760 */
761
762 static void *consumer_thread( void *arg )
763 {
764 // Map the argument to the object
765 mlt_consumer this = arg;
766
767 // Get the properties
768 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
769
770 // Get the terminate on pause property
771 int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
772 int terminated = 0;
773
774 // Determine if feed is slow (for realtime stuff)
775 int real_time_output = mlt_properties_get_int( properties, "real_time" );
776
777 // Time structures
778 struct timeval ante;
779
780 // Get the frame rate
781 double fps = mlt_properties_get_double( properties, "fps" );
782
783 // Get width and height
784 int width = mlt_properties_get_int( properties, "width" );
785 int height = mlt_properties_get_int( properties, "height" );
786 int img_width = width;
787 int img_height = height;
788
789 // Get default audio properties
790 mlt_audio_format aud_fmt = mlt_audio_pcm;
791 int channels = mlt_properties_get_int( properties, "channels" );
792 int frequency = mlt_properties_get_int( properties, "frequency" );
793 int16_t *pcm = NULL;
794 int samples = 0;
795
796 // AVFormat audio buffer and frame size
797 int audio_outbuf_size = 10000;
798 uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
799 int audio_input_frame_size = 0;
800
801 // AVFormat video buffer and frame count
802 int frame_count = 0;
803 int video_outbuf_size = ( 1024 * 1024 );
804 uint8_t *video_outbuf = av_malloc( video_outbuf_size );
805
806 // Used for the frame properties
807 mlt_frame frame = NULL;
808 mlt_properties frame_properties = NULL;
809
810 // Get the queues
811 mlt_deque queue = mlt_properties_get_data( properties, "frame_queue", NULL );
812 sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );
813
814 // Need two av pictures for converting
815 AVFrame *output = NULL;
816 AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height );
817
818 // For receiving images from an mlt_frame
819 uint8_t *image;
820 mlt_image_format img_fmt = mlt_image_yuv422;
821
822 // For receiving audio samples back from the fifo
823 int16_t *buffer = av_malloc( 48000 * 2 );
824 int count = 0;
825
826 // Allocate the context
827 AVFormatContext *oc = av_alloc_format_context( );
828
829 // Streams
830 AVStream *audio_st = NULL;
831 AVStream *video_st = NULL;
832
833 // Time stamps
834 double audio_pts = 0;
835 double video_pts = 0;
836
837 // Loop variable
838 int i;
839
840 // Frames despatched
841 long int frames = 0;
842 long int total_time = 0;
843
844 // Determine the format
845 AVOutputFormat *fmt = NULL;
846 char *filename = mlt_properties_get( properties, "target" );
847 char *format = mlt_properties_get( properties, "f" );
848 char *vcodec = mlt_properties_get( properties, "vcodec" );
849 char *acodec = mlt_properties_get( properties, "acodec" );
850
851 // Used to store and override codec ids
852 int audio_codec_id;
853 int video_codec_id;
854
855 // Check for user selected format first
856 if ( format != NULL )
857 fmt = guess_format( format, NULL, NULL );
858
859 // Otherwise check on the filename
860 if ( fmt == NULL && filename != NULL )
861 fmt = guess_format( NULL, filename, NULL );
862
863 // Otherwise default to mpeg
864 if ( fmt == NULL )
865 fmt = guess_format( "mpeg", NULL, NULL );
866
867 // We need a filename - default to stdout?
868 if ( filename == NULL || !strcmp( filename, "" ) )
869 filename = "pipe:";
870
871 // Get the codec ids selected
872 audio_codec_id = fmt->audio_codec;
873 video_codec_id = fmt->video_codec;
874
875 // Check for audio codec overrides
876 if ( acodec != NULL )
877 {
878 AVCodec *p = avcodec_find_encoder_by_name( acodec );
879 if ( p != NULL )
880 audio_codec_id = p->id;
881 else
882 fprintf( stderr, "%s: audio codec %s unrecognised - ignoring\n", __FILE__, acodec );
883 }
884
885 // Check for video codec overrides
886 if ( vcodec != NULL )
887 {
888 AVCodec *p = avcodec_find_encoder_by_name( vcodec );
889 if ( p != NULL )
890 video_codec_id = p->id;
891 else
892 fprintf( stderr, "%s: video codec %s unrecognised - ignoring\n", __FILE__, vcodec );
893 }
894
895 // Write metadata
896 char *tmp = NULL;
897 int metavalue;
898
899 tmp = mlt_properties_get( properties, "meta.attr.title.markup");
900 if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );
901
902 tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
903 if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );
904
905 tmp = mlt_properties_get( properties, "meta.attr.author.markup");
906 if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );
907
908 tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
909 if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );
910
911 tmp = mlt_properties_get( properties, "meta.attr.album.markup");
912 if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );
913
914 metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
915 if (metavalue != 0) oc->year = metavalue;
916
917 metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
918 if (metavalue != 0) oc->track = metavalue;
919
920 oc->oformat = fmt;
921 snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
922
923 // Add audio and video streams
924 if ( fmt->video_codec != CODEC_ID_NONE )
925 video_st = add_video_stream( this, oc, video_codec_id );
926 if ( fmt->audio_codec != CODEC_ID_NONE )
927 audio_st = add_audio_stream( this, oc, audio_codec_id );
928
929 // Set the parameters (even though we have none...)
930 if ( av_set_parameters(oc, NULL) >= 0 )
931 {
932 oc->preload = ( int )( mlt_properties_get_double( properties, "muxpreload" ) * AV_TIME_BASE );
933 oc->max_delay= ( int )( mlt_properties_get_double( properties, "muxdelay" ) * AV_TIME_BASE );
934
935 // Process properties as AVOptions
936 apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM );
937
938 if ( video_st && !open_video( oc, video_st ) )
939 video_st = NULL;
940 if ( audio_st )
941 audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size );
942
943 // Open the output file, if needed
944 if ( !( fmt->flags & AVFMT_NOFILE ) )
945 {
946 if ( url_fopen( &oc->pb, filename, URL_WRONLY ) < 0 )
947 {
948 fprintf( stderr, "%s: Could not open '%s'\n", __FILE__, filename );
949 mlt_properties_set_int( properties, "running", 0 );
950 }
951 }
952
953 // Write the stream header, if any
954 if ( mlt_properties_get_int( properties, "running" ) )
955 av_write_header( oc );
956 }
957 else
958 {
959 fprintf( stderr, "%s: Invalid output format parameters\n", __FILE__ );
960 mlt_properties_set_int( properties, "running", 0 );
961 }
962
963 // Allocate picture
964 if ( video_st )
965 output = alloc_picture( video_st->codec->pix_fmt, width, height );
966
967 // Last check - need at least one stream
968 if ( audio_st == NULL && video_st == NULL )
969 mlt_properties_set_int( properties, "running", 0 );
970
971 // Get the starting time (can ignore the times above)
972 gettimeofday( &ante, NULL );
973
974 // Loop while running
975 while( mlt_properties_get_int( properties, "running" ) && !terminated )
976 {
977 // Get the frame
978 frame = mlt_consumer_rt_frame( this );
979
980 // Check that we have a frame to work with
981 if ( frame != NULL )
982 {
983 // Increment frames despatched
984 frames ++;
985
986 // Default audio args
987 frame_properties = MLT_FRAME_PROPERTIES( frame );
988
989 // Check for the terminated condition
990 terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;
991
992 // Get audio and append to the fifo
993 if ( !terminated && audio_st )
994 {
995 samples = mlt_sample_calculator( fps, frequency, count ++ );
996 mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
997
998 // Create the fifo if we don't have one
999 if ( fifo == NULL )
1000 {
1001 fifo = sample_fifo_init( frequency, channels );
1002 mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
1003 }
1004
1005 if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
1006 memset( pcm, 0, samples * channels * 2 );
1007
1008 // Append the samples
1009 sample_fifo_append( fifo, pcm, samples * channels );
1010 total_time += ( samples * 1000000 ) / frequency;
1011 }
1012
1013 // Encode the image
1014 if ( !terminated && video_st )
1015 mlt_deque_push_back( queue, frame );
1016 else
1017 mlt_frame_close( frame );
1018 }
1019
1020 // While we have stuff to process, process...
1021 while ( 1 )
1022 {
1023 if (audio_st)
1024 audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
1025 else
1026 audio_pts = 0.0;
1027
1028 if (video_st)
1029 video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
1030 else
1031 video_pts = 0.0;
1032
1033 // Write interleaved audio and video frames
1034 if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
1035 {
1036 if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) )
1037 {
1038 AVCodecContext *c;
1039 AVPacket pkt;
1040 av_init_packet( &pkt );
1041
1042 c = audio_st->codec;
1043
1044 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
1045
1046 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
1047 // Write the compressed frame in the media file
1048 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1049 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
1050 pkt.flags |= PKT_FLAG_KEY;
1051 pkt.stream_index= audio_st->index;
1052 pkt.data= audio_outbuf;
1053
1054 if ( pkt.size )
1055 if ( av_interleaved_write_frame( oc, &pkt ) != 0)
1056 fprintf( stderr, "%s: Error while writing audio frame\n", __FILE__ );
1057
1058 audio_pts += c->frame_size;
1059 }
1060 else
1061 {
1062 break;
1063 }
1064 }
1065 else if ( video_st )
1066 {
1067 if ( mlt_deque_count( queue ) )
1068 {
1069 int out_size, ret;
1070 AVCodecContext *c;
1071
1072 frame = mlt_deque_pop_front( queue );
1073 frame_properties = MLT_FRAME_PROPERTIES( frame );
1074
1075 c = video_st->codec;
1076
1077 if ( mlt_properties_get_int( frame_properties, "rendered" ) )
1078 {
1079 int i = 0;
1080 int j = 0;
1081 uint8_t *p;
1082 uint8_t *q;
1083
1084 mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
1085
1086 mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
1087
1088 q = image;
1089
1090 // Convert the mlt frame to an AVPicture
1091 for ( i = 0; i < height; i ++ )
1092 {
1093 p = input->data[ 0 ] + i * input->linesize[ 0 ];
1094 j = width;
1095 while( j -- )
1096 {
1097 *p ++ = *q ++;
1098 *p ++ = *q ++;
1099 }
1100 }
1101
1102 // Do the colour space conversion
1103 #ifdef SWSCALE
1104 struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
1105 width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
1106 sws_scale( context, input->data, input->linesize, 0, height,
1107 output->data, output->linesize);
1108 sws_freeContext( context );
1109 #else
1110 img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
1111 #endif
1112
1113 // Apply the alpha if applicable
1114 if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
1115 {
1116 uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
1117 register int n;
1118
1119 for ( i = 0; i < height; i ++ )
1120 {
1121 n = ( width + 7 ) / 8;
1122 p = output->data[ 0 ] + i * output->linesize[ 0 ];
1123
1124 #ifndef __DARWIN__
1125 p += 3;
1126 #endif
1127
1128 switch( width % 8 )
1129 {
1130 case 0: do { *p = *alpha++; p += 4;
1131 case 7: *p = *alpha++; p += 4;
1132 case 6: *p = *alpha++; p += 4;
1133 case 5: *p = *alpha++; p += 4;
1134 case 4: *p = *alpha++; p += 4;
1135 case 3: *p = *alpha++; p += 4;
1136 case 2: *p = *alpha++; p += 4;
1137 case 1: *p = *alpha++; p += 4;
1138 }
1139 while( --n );
1140 }
1141 }
1142 }
1143 }
1144
1145 if (oc->oformat->flags & AVFMT_RAWPICTURE)
1146 {
1147 // raw video case. The API will change slightly in the near future for that
1148 AVPacket pkt;
1149 av_init_packet(&pkt);
1150
1151 pkt.flags |= PKT_FLAG_KEY;
1152 pkt.stream_index= video_st->index;
1153 pkt.data= (uint8_t *)output;
1154 pkt.size= sizeof(AVPicture);
1155
1156 ret = av_write_frame(oc, &pkt);
1157 video_pts += c->frame_size;
1158 }
1159 else
1160 {
1161 // Set the quality
1162 output->quality = video_st->quality;
1163
1164 // Set frame interlace hints
1165 output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
1166 output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
1167
1168 // Encode the image
1169 out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
1170
1171 // If zero size, it means the image was buffered
1172 if (out_size > 0)
1173 {
1174 AVPacket pkt;
1175 av_init_packet( &pkt );
1176
1177 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1178 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1179 if( c->coded_frame && c->coded_frame->key_frame )
1180 pkt.flags |= PKT_FLAG_KEY;
1181 pkt.stream_index= video_st->index;
1182 pkt.data= video_outbuf;
1183 pkt.size= out_size;
1184
1185 // write the compressed frame in the media file
1186 ret = av_interleaved_write_frame(oc, &pkt);
1187 video_pts += c->frame_size;
1188
1189 // Dual pass logging
1190 if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out)
1191 fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out );
1192 }
1193 else
1194 {
1195 fprintf( stderr, "%s: error with video encode\n", __FILE__ );
1196 }
1197 }
1198 frame_count++;
1199 mlt_frame_close( frame );
1200 }
1201 else
1202 {
1203 break;
1204 }
1205 }
1206 }
1207
1208 if ( real_time_output == 1 && frames % 12 == 0 )
1209 {
1210 long passed = time_difference( &ante );
1211 if ( fifo != NULL )
1212 {
1213 long pending = ( ( ( long )sample_fifo_used( fifo ) * 1000 ) / frequency ) * 1000;
1214 passed -= pending;
1215 }
1216 if ( passed < total_time )
1217 {
1218 long total = ( total_time - passed );
1219 struct timespec t = { total / 1000000, ( total % 1000000 ) * 1000 };
1220 nanosleep( &t, NULL );
1221 }
1222 }
1223 }
1224
1225 #ifdef FLUSH
1226 if ( ! real_time_output )
1227 {
1228 // Flush audio fifo
1229 if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
1230 {
1231 AVCodecContext *c = audio_st->codec;
1232 AVPacket pkt;
1233 av_init_packet( &pkt );
1234 pkt.size = 0;
1235
1236 if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
1237 ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
1238 {
1239 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
1240 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
1241 }
1242 if ( pkt.size <= 0 )
1243 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
1244 if ( pkt.size <= 0 )
1245 break;
1246
1247 // Write the compressed frame in the media file
1248 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1249 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
1250 pkt.flags |= PKT_FLAG_KEY;
1251 pkt.stream_index = audio_st->index;
1252 pkt.data = audio_outbuf;
1253 if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
1254 {
1255 fprintf( stderr, "%s: Error while writing flushed audio frame\n", __FILE__ );
1256 break;
1257 }
1258 }
1259
1260 // Flush video
1261 if ( video_st && !( oc->oformat->flags & AVFMT_RAWPICTURE ) ) for (;;)
1262 {
1263 AVCodecContext *c = video_st->codec;
1264 AVPacket pkt;
1265 av_init_packet( &pkt );
1266
1267 // Encode the image
1268 pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
1269 if ( pkt.size <= 0 )
1270 break;
1271
1272 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1273 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1274 if( c->coded_frame && c->coded_frame->key_frame )
1275 pkt.flags |= PKT_FLAG_KEY;
1276 pkt.stream_index = video_st->index;
1277 pkt.data = video_outbuf;
1278
1279 // write the compressed frame in the media file
1280 if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
1281 {
1282 fprintf( stderr, "%s: Error while writing flushed video frame\n", __FILE__ );
1283 break;
1284 }
1285 }
1286 }
1287 #endif
1288
1289 // close each codec
1290 if (video_st)
1291 close_video(oc, video_st);
1292 if (audio_st)
1293 close_audio(oc, audio_st);
1294
1295 // Write the trailer, if any
1296 av_write_trailer(oc);
1297
1298 // Free the streams
1299 for(i = 0; i < oc->nb_streams; i++)
1300 av_freep(&oc->streams[i]);
1301
1302 // Close the output file
1303 if (!(fmt->flags & AVFMT_NOFILE))
1304 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
1305 url_fclose(oc->pb);
1306 #else
1307 url_fclose(&oc->pb);
1308 #endif
1309
1310 // Clean up input and output frames
1311 if ( output )
1312 av_free( output->data[0] );
1313 av_free( output );
1314 av_free( input->data[0] );
1315 av_free( input );
1316 av_free( video_outbuf );
1317 av_free( buffer );
1318
1319 // Free the stream
1320 av_free(oc);
1321
1322 // Just in case we terminated on pause
1323 mlt_properties_set_int( properties, "running", 0 );
1324
1325 mlt_consumer_stopped( this );
1326
1327 return NULL;
1328 }
1329
1330 /** Close the consumer.
1331 */
1332
1333 static void consumer_close( mlt_consumer this )
1334 {
1335 // Stop the consumer
1336 mlt_consumer_stop( this );
1337
1338 // Close the parent
1339 mlt_consumer_close( this );
1340
1341 // Free the memory
1342 free( this );
1343 }