add support for ffmpeg libswscale
[melted] src/modules/avformat/consumer_avformat.c
1 /*
2 * consumer_avformat.c -- an encoder based on avformat
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21 // Local header files
22 #include "consumer_avformat.h"
23
24 // mlt Header files
25 #include <framework/mlt_frame.h>
26
27 // System header files
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <limits.h>
32 #include <pthread.h>
33 #include <sys/time.h>
34 #include <math.h>
35
36 // avformat header files
37 #include <avformat.h>
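// SWSCALE is assumed to be defined by the build system (for example via -DSWSCALE in the
// module Makefile) when ffmpeg has been built with libswscale; otherwise the older
// img_convert() path further below is used.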
38 #ifdef SWSCALE
39 #include <swscale.h>
40 #endif
41
42 //
43 // This structure should be extended and made globally available in mlt
44 //
45
46 typedef struct
47 {
48 int16_t *buffer;
49 int size;
50 int used;
51 double time;
52 int frequency;
53 int channels;
54 }
55 *sample_fifo, sample_fifo_s;
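// A minimal interleaved-sample FIFO: 'buffer' holds int16_t samples interleaved across
// channels, 'used' counts individual samples (not frames), and 'time' tracks the
// presentation time of the first sample still held in the buffer.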
56
57 sample_fifo sample_fifo_init( int frequency, int channels )
58 {
59 sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
60 this->frequency = frequency;
61 this->channels = channels;
62 return this;
63 }
64
65 // Note: sample_fifo_clear and sample_fifo_check are not currently used - they do not work as intended
66
67 void sample_fifo_clear( sample_fifo this, double time )
68 {
69 int words = ( float )( time - this->time ) * this->frequency * this->channels;
70 if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 )
71 {
72 memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) );
73 this->used -= words;
74 this->time = time;
75 }
76 else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) )
77 {
78 this->used = 0;
79 this->time = time;
80 }
81 }
82
83 void sample_fifo_check( sample_fifo this, double time )
84 {
85 if ( this->used == 0 )
86 {
87 if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) )
88 this->time = time;
89 }
90 }
91
92 void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
93 {
94 if ( ( this->size - this->used ) < count )
95 {
96 this->size += count * 5;
97 this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
98 }
99
100 memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
101 this->used += count;
102 }
103
104 int sample_fifo_used( sample_fifo this )
105 {
106 return this->used;
107 }
108
109 int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
110 {
111 if ( count > this->used )
112 count = this->used;
113
114 memcpy( samples, this->buffer, count * sizeof( int16_t ) );
115 this->used -= count;
116 memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );
117
118 this->time += ( double )count / this->channels / this->frequency;
119
120 return count;
121 }
122
123 void sample_fifo_close( sample_fifo this )
124 {
125 free( this->buffer );
126 free( this );
127 }
128
129 // Forward references.
130 static int consumer_start( mlt_consumer this );
131 static int consumer_stop( mlt_consumer this );
132 static int consumer_is_stopped( mlt_consumer this );
133 static void *consumer_thread( void *arg );
134 static void consumer_close( mlt_consumer this );
135
136 /** Initialise the avformat consumer.
137 */
138
139 mlt_consumer consumer_avformat_init( char *arg )
140 {
141 // Allocate the consumer
142 mlt_consumer this = mlt_consumer_new( );
143
144 // If memory was allocated and initialised without error
145 if ( this != NULL )
146 {
147 // Get properties from the consumer
148 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
149
150 // Assign close callback
151 this->close = consumer_close;
152
153 // Interpret the argument
154 if ( arg != NULL )
155 mlt_properties_set( properties, "target", arg );
156
157 // Frame queue (the sample fifo is created on demand in the consumer thread)
158 mlt_properties_set_data( properties, "frame_queue", mlt_deque_init( ), 0, ( mlt_destructor )mlt_deque_close, NULL );
159
160 // Set avformat defaults (all lifted from ffmpeg.c)
161 mlt_properties_set_int( properties, "audio_bit_rate", 128000 );
162 mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 );
163 mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 );
164 mlt_properties_set_int( properties, "gop_size", 12 );
165 mlt_properties_set_int( properties, "b_frames", 0 );
166 mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
167 mlt_properties_set_double( properties, "qscale", 1 );
168 mlt_properties_set_int( properties, "me_method", ME_EPZS );
169 mlt_properties_set_int( properties, "mb_cmp", FF_CMP_SAD );
170 mlt_properties_set_int( properties, "ildct_cmp", FF_CMP_VSAD );
171 mlt_properties_set_int( properties, "sub_cmp", FF_CMP_SAD );
172 mlt_properties_set_int( properties, "cmp", FF_CMP_SAD );
173 mlt_properties_set_int( properties, "pre_cmp", FF_CMP_SAD );
174 mlt_properties_set_int( properties, "pre_me", 0 );
175 mlt_properties_set_double( properties, "lumi_mask", 0 );
176 mlt_properties_set_double( properties, "dark_mask", 0 );
177 mlt_properties_set_double( properties, "scplx_mask", 0 );
178 mlt_properties_set_double( properties, "tcplx_mask", 0 );
179 mlt_properties_set_double( properties, "p_mask", 0 );
180 mlt_properties_set_int( properties, "qns", 0 );
181 mlt_properties_set_int( properties, "video_qmin", 2 );
182 mlt_properties_set_int( properties, "video_qmax", 31 );
183 mlt_properties_set_int( properties, "video_lmin", 2*FF_QP2LAMBDA );
184 mlt_properties_set_int( properties, "video_lmax", 31*FF_QP2LAMBDA );
185 mlt_properties_set_int( properties, "video_mb_qmin", 2 );
186 mlt_properties_set_int( properties, "video_mb_qmax", 31 );
187 mlt_properties_set_int( properties, "video_qdiff", 3 );
188 mlt_properties_set_double( properties, "video_qblur", 0.5 );
189 mlt_properties_set_double( properties, "video_qcomp", 0.5 );
190 mlt_properties_set_int( properties, "video_rc_max_rate", 0 );
191 mlt_properties_set_int( properties, "video_rc_min_rate", 0 );
192 mlt_properties_set_int( properties, "video_rc_buffer_size", 0 );
193 mlt_properties_set_double( properties, "video_rc_buffer_aggressivity", 1.0 );
194 mlt_properties_set_double( properties, "video_rc_initial_cplx", 0 );
195 mlt_properties_set_double( properties, "video_i_qfactor", 1.25 );
196 mlt_properties_set_double( properties, "video_b_qfactor", 1.25 );
197 mlt_properties_set_double( properties, "video_i_qoffset", -0.8 );
198 mlt_properties_set_double( properties, "video_b_qoffset", 0 );
199 mlt_properties_set_int( properties, "video_intra_quant_bias", FF_DEFAULT_QUANT_BIAS );
200 mlt_properties_set_int( properties, "video_inter_quant_bias", FF_DEFAULT_QUANT_BIAS );
201 mlt_properties_set_int( properties, "dct_algo", 0 );
202 mlt_properties_set_int( properties, "idct_algo", 0 );
203 mlt_properties_set_int( properties, "me_threshold", 0 );
204 mlt_properties_set_int( properties, "mb_threshold", 0 );
205 mlt_properties_set_int( properties, "intra_dc_precision", 0 );
206 mlt_properties_set_int( properties, "strict", 0 );
207 mlt_properties_set_int( properties, "error_rate", 0 );
208 mlt_properties_set_int( properties, "noise_reduction", 0 );
209 mlt_properties_set_int( properties, "sc_threshold", 0 );
210 mlt_properties_set_int( properties, "me_range", 0 );
211 mlt_properties_set_int( properties, "coder", 0 );
212 mlt_properties_set_int( properties, "context", 0 );
213 mlt_properties_set_int( properties, "predictor", 0 );
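// All of the defaults above can be overridden per-instance as consumer properties; with
// the inigo front end this would typically look something like
//   inigo clip.dv -consumer avformat:out.mpg video_bit_rate=2000000 gop_size=15
// (illustrative only - the exact front end syntax is outside the scope of this file).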
214
215 // Ensure termination at end of the stream
216 mlt_properties_set_int( properties, "terminate_on_pause", 1 );
217
218 // Set up start/stop/terminated callbacks
219 this->start = consumer_start;
220 this->stop = consumer_stop;
221 this->is_stopped = consumer_is_stopped;
222 }
223
224 // Return this
225 return this;
226 }
227
228 /** Start the consumer.
229 */
230
231 static int consumer_start( mlt_consumer this )
232 {
233 // Get the properties
234 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
235
236 // Check that we're not already running
237 if ( !mlt_properties_get_int( properties, "running" ) )
238 {
239 // Allocate a thread
240 pthread_t *thread = calloc( 1, sizeof( pthread_t ) );
241
242 // Get the width and height
243 int width = mlt_properties_get_int( properties, "width" );
244 int height = mlt_properties_get_int( properties, "height" );
245
246 // Obtain the size property
247 char *size = mlt_properties_get( properties, "size" );
248
249 // Interpret it
250 if ( size != NULL )
251 {
252 int tw, th;
253 if ( sscanf( size, "%dx%d", &tw, &th ) == 2 && tw > 0 && th > 0 )
254 {
255 width = tw;
256 height = th;
257 }
258 else
259 {
260 fprintf( stderr, "consumer_avformat: Invalid size property %s - ignoring.\n", size );
261 }
262 }
263
264 // Now ensure we honour the multiple of two requested by libavformat
265 mlt_properties_set_int( properties, "width", ( width / 2 ) * 2 );
266 mlt_properties_set_int( properties, "height", ( height / 2 ) * 2 );
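// Rounding down is done because most codecs use 4:2:0 (or 4:2:2) chroma subsampling and
// reject odd dimensions; e.g. a requested 361x289 becomes 360x288.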
267
268 // Assign the thread to properties
269 mlt_properties_set_data( properties, "thread", thread, sizeof( pthread_t ), free, NULL );
270
271 // Set the running state
272 mlt_properties_set_int( properties, "running", 1 );
273
274 // Create the thread
275 pthread_create( thread, NULL, consumer_thread, this );
276 }
277 return 0;
278 }
279
280 /** Stop the consumer.
281 */
282
283 static int consumer_stop( mlt_consumer this )
284 {
285 // Get the properties
286 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
287
288 // Check that we're running
289 if ( mlt_properties_get_int( properties, "running" ) )
290 {
291 // Get the thread
292 pthread_t *thread = mlt_properties_get_data( properties, "thread", NULL );
293
294 // Stop the thread
295 mlt_properties_set_int( properties, "running", 0 );
296
297 // Wait for termination
298 pthread_join( *thread, NULL );
299 }
300
301 return 0;
302 }
303
304 /** Determine if the consumer is stopped.
305 */
306
307 static int consumer_is_stopped( mlt_consumer this )
308 {
309 // Get the properties
310 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
311 return !mlt_properties_get_int( properties, "running" );
312 }
313
314 /** Add an audio output stream
315 */
316
317 static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
318 {
319 // Get the properties
320 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
321
322 // Create a new stream
323 AVStream *st = av_new_stream( oc, 1 );
324
325 // If created, then initialise from properties
326 if ( st != NULL )
327 {
328 AVCodecContext *c = st->codec;
329 c->codec_id = codec_id;
330 c->codec_type = CODEC_TYPE_AUDIO;
331
332 // Put sample parameters
333 c->bit_rate = mlt_properties_get_int( properties, "audio_bit_rate" );
334 c->sample_rate = mlt_properties_get_int( properties, "frequency" );
335 c->channels = mlt_properties_get_int( properties, "channels" );
336
337 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
338 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
339
340 // Allow the user to override the audio fourcc
341 if ( mlt_properties_get( properties, "afourcc" ) )
342 {
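// The fourcc may be given either as a number or as a four character code such as
// "mp4a", which is packed little-endian below (e.g. "mp4a" -> 0x6134706d).
// Note: a string shorter than four characters would be read past its end here.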
343 char *tail = NULL;
344 char *arg = mlt_properties_get( properties, "afourcc" );
345 int tag = strtol( arg, &tail, 0);
346 if( !tail || *tail )
347 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
348 c->codec_tag = tag;
349 }
350 }
351 else
352 {
353 fprintf( stderr, "Could not allocate a stream for audio\n" );
354 }
355
356 return st;
357 }
358
359 static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size )
360 {
361 // We will return the audio input size from here
362 int audio_input_frame_size = 0;
363
364 // Get the context
365 AVCodecContext *c = st->codec;
366
367 // Find the encoder
368 AVCodec *codec = avcodec_find_encoder( c->codec_id );
369
370 // Continue if codec found and we can open it
371 if ( codec != NULL && avcodec_open(c, codec) >= 0 )
372 {
373 // Ugly hack for PCM codecs (will be removed ASAP with new PCM
374 // support) to compute the input frame size in samples
375 if ( c->frame_size <= 1 )
376 {
377 audio_input_frame_size = audio_outbuf_size / c->channels;
378 switch(st->codec->codec_id)
379 {
380 case CODEC_ID_PCM_S16LE:
381 case CODEC_ID_PCM_S16BE:
382 case CODEC_ID_PCM_U16LE:
383 case CODEC_ID_PCM_U16BE:
384 audio_input_frame_size >>= 1;
385 break;
386 default:
387 break;
388 }
389 }
390 else
391 {
392 audio_input_frame_size = c->frame_size;
393 }
394
395 // Some formats want stream headers to be separate
396 if( !strcmp( oc->oformat->name, "mp4" ) ||
397 !strcmp( oc->oformat->name, "mov" ) ||
398 !strcmp( oc->oformat->name, "3gp" ) )
399 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
400 }
401 else
402 {
403 fprintf( stderr, "Unable to encode audio - disabling audio output.\n" );
404 }
405
406 return audio_input_frame_size;
407 }
408
409 static void close_audio( AVFormatContext *oc, AVStream *st )
410 {
411 avcodec_close( st->codec );
412 }
413
414 /** Add a video output stream
415 */
416
417 static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
418 {
419 // Get the properties
420 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
421
422 // Create a new stream
423 AVStream *st = av_new_stream( oc, 0 );
424
425 if ( st != NULL )
426 {
427 char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
428 double ar = mlt_properties_get_double( properties, "display_ratio" );
429 AVCodecContext *c = st->codec;
430 c->codec_id = codec_id;
431 c->codec_type = CODEC_TYPE_VIDEO;
432
433 // put sample parameters
434 c->bit_rate = mlt_properties_get_int( properties, "video_bit_rate" );
435 c->bit_rate_tolerance = mlt_properties_get_int( properties, "video_bit_rate_tolerance" );
436 c->width = mlt_properties_get_int( properties, "width" );
437 c->height = mlt_properties_get_int( properties, "height" );
438 c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
439 c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
440 c->gop_size = mlt_properties_get_int( properties, "gop_size" );
441 c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
442
443 if ( mlt_properties_get_int( properties, "b_frames" ) )
444 {
445 c->max_b_frames = mlt_properties_get_int( properties, "b_frames" );
446 c->b_frame_strategy = 0;
447 c->b_quant_factor = 2.0;
448 }
449
450 c->mb_decision = mlt_properties_get_int( properties, "mb_decision" );
451 c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
452 c->mb_cmp = mlt_properties_get_int( properties, "mb_cmp" );
453 c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" );
454 c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" );
455 c->me_cmp = mlt_properties_get_int( properties, "cmp" );
456 c->me_pre_cmp = mlt_properties_get_int( properties, "pre_cmp" );
457 c->pre_me = mlt_properties_get_int( properties, "pre_me" );
458 c->lumi_masking = mlt_properties_get_double( properties, "lumi_mask" );
459 c->dark_masking = mlt_properties_get_double( properties, "dark_mask" );
460 c->spatial_cplx_masking = mlt_properties_get_double( properties, "scplx_mask" );
461 c->temporal_cplx_masking = mlt_properties_get_double( properties, "tcplx_mask" );
462 c->p_masking = mlt_properties_get_double( properties, "p_mask" );
463 c->quantizer_noise_shaping= mlt_properties_get_int( properties, "qns" );
464 c->qmin = mlt_properties_get_int( properties, "video_qmin" );
465 c->qmax = mlt_properties_get_int( properties, "video_qmax" );
466 c->lmin = mlt_properties_get_int( properties, "video_lmin" );
467 c->lmax = mlt_properties_get_int( properties, "video_lmax" );
468 c->mb_qmin = mlt_properties_get_int( properties, "video_mb_qmin" );
469 c->mb_qmax = mlt_properties_get_int( properties, "video_mb_qmax" );
470 c->max_qdiff = mlt_properties_get_int( properties, "video_qdiff" );
471 c->qblur = mlt_properties_get_double( properties, "video_qblur" );
472 c->qcompress = mlt_properties_get_double( properties, "video_qcomp" );
473
474 if ( mlt_properties_get_double( properties, "qscale" ) > 0 )
475 {
476 c->flags |= CODEC_FLAG_QSCALE;
477 st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
478 }
479
480 // Allow the user to override the video fourcc
481 if ( mlt_properties_get( properties, "vfourcc" ) )
482 {
483 char *tail = NULL;
484 const char *arg = mlt_properties_get( properties, "vfourcc" );
485 int tag = strtol( arg, &tail, 0);
486 if( !tail || *tail )
487 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
488 c->codec_tag = tag;
489 }
490
491 // Some formats want stream headers to be separate
492 if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
493 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
494
495 c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
496 c->rc_min_rate = mlt_properties_get_int( properties, "video_rc_min_rate" );
497 c->rc_buffer_size = mlt_properties_get_int( properties, "video_rc_buffer_size" );
498 c->rc_buffer_aggressivity= mlt_properties_get_double( properties, "video_rc_buffer_aggressivity" );
499 c->rc_initial_cplx= mlt_properties_get_double( properties, "video_rc_initial_cplx" );
500 c->i_quant_factor = mlt_properties_get_double( properties, "video_i_qfactor" );
501 c->b_quant_factor = mlt_properties_get_double( properties, "video_b_qfactor" );
502 c->i_quant_offset = mlt_properties_get_double( properties, "video_i_qoffset" );
503 c->b_quant_offset = mlt_properties_get_double( properties, "video_b_qoffset" );
504 c->intra_quant_bias = mlt_properties_get_int( properties, "video_intra_quant_bias" );
505 c->inter_quant_bias = mlt_properties_get_int( properties, "video_inter_quant_bias" );
506 c->dct_algo = mlt_properties_get_int( properties, "dct_algo" );
507 c->idct_algo = mlt_properties_get_int( properties, "idct_algo" );
508 c->me_threshold= mlt_properties_get_int( properties, "me_threshold" );
509 c->mb_threshold= mlt_properties_get_int( properties, "mb_threshold" );
510 c->intra_dc_precision= mlt_properties_get_int( properties, "intra_dc_precision" );
511 c->strict_std_compliance = mlt_properties_get_int( properties, "strict" );
512 c->error_rate = mlt_properties_get_int( properties, "error_rate" );
513 c->noise_reduction= mlt_properties_get_int( properties, "noise_reduction" );
514 c->scenechange_threshold= mlt_properties_get_int( properties, "sc_threshold" );
515 c->me_range = mlt_properties_get_int( properties, "me_range" );
516 c->coder_type= mlt_properties_get_int( properties, "coder" );
517 c->context_model= mlt_properties_get_int( properties, "context" );
518 c->prediction_method= mlt_properties_get_int( properties, "predictor" );
519 c->me_method = mlt_properties_get_int( properties, "me_method" );
520 }
521 else
522 {
523 fprintf( stderr, "Could not allocate a stream for video\n" );
524 }
525
526 return st;
527 }
528
529 static AVFrame *alloc_picture( int pix_fmt, int width, int height )
530 {
531 // Allocate a frame
532 AVFrame *picture = avcodec_alloc_frame();
533
534 // Determine the size of the picture buffer
535 int size = avpicture_get_size(pix_fmt, width, height);
536
537 // Allocate the picture buf
538 uint8_t *picture_buf = av_malloc(size);
539
540 // If we have both, then fill the image
541 if ( picture != NULL && picture_buf != NULL )
542 {
543 // Fill the frame with the allocated buffer
544 avpicture_fill( (AVPicture *)picture, picture_buf, pix_fmt, width, height);
545 }
546 else
547 {
548 // Something failed - clean up what we can
549 av_free( picture );
550 av_free( picture_buf );
551 picture = NULL;
552 }
553
554 return picture;
555 }
556
557 static int open_video(AVFormatContext *oc, AVStream *st)
558 {
559 // Get the codec
560 AVCodecContext *video_enc = st->codec;
561
562 // find the video encoder
563 AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
564
565 if( codec && codec->pix_fmts )
566 {
567 const enum PixelFormat *p = codec->pix_fmts;
568 for( ; *p!=-1; p++ )
569 {
570 if( *p == video_enc->pix_fmt )
571 break;
572 }
573 if( *p == -1 )
574 video_enc->pix_fmt = codec->pix_fmts[ 0 ];
575 }
576
577 // Open the codec safely
578 return codec != NULL && avcodec_open( video_enc, codec ) >= 0;
579 }
580
581 static void close_video(AVFormatContext *oc, AVStream *st)
582 {
583 avcodec_close(st->codec);
584 }
585
586 static inline long time_difference( struct timeval *time1 )
587 {
588 struct timeval time2;
589 gettimeofday( &time2, NULL );
590 return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec;
591 }
592
593 /** The main thread - the argument is simply the consumer.
594 */
595
596 static void *consumer_thread( void *arg )
597 {
598 // Map the argument to the object
599 mlt_consumer this = arg;
600
601 // Get the properties
602 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
603
604 // Get the terminate on pause property
605 int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
606 int terminated = 0;
607
608 // Determine whether output should be paced to real time
609 int real_time_output = mlt_properties_get_int( properties, "real_time" );
610
611 // Time structures
612 struct timeval ante;
613
614 // Get the frame rate (kept as a double so fractional rates such as 29.97 are not truncated)
615 double fps = mlt_properties_get_double( properties, "fps" );
616
617 // Get width and height
618 int width = mlt_properties_get_int( properties, "width" );
619 int height = mlt_properties_get_int( properties, "height" );
620 int img_width = width;
621 int img_height = height;
622
623 // Get default audio properties
624 mlt_audio_format aud_fmt = mlt_audio_pcm;
625 int channels = mlt_properties_get_int( properties, "channels" );
626 int frequency = mlt_properties_get_int( properties, "frequency" );
627 int16_t *pcm = NULL;
628 int samples = 0;
629
630 // AVFormat audio buffer and frame size
631 int audio_outbuf_size = 10000;
632 uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
633 int audio_input_frame_size = 0;
634
635 // AVFormat video buffer and frame count
636 int frame_count = 0;
637 int video_outbuf_size = ( 1024 * 1024 );
638 uint8_t *video_outbuf = av_malloc( video_outbuf_size );
639
640 // Used for the frame properties
641 mlt_frame frame = NULL;
642 mlt_properties frame_properties = NULL;
643
644 // Get the queues
645 mlt_deque queue = mlt_properties_get_data( properties, "frame_queue", NULL );
646 sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );
647
648 // Need two av pictures for converting
649 AVFrame *output = NULL;
650 AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height );
651
652 // For receiving images from an mlt_frame
653 uint8_t *image;
654 mlt_image_format img_fmt = mlt_image_yuv422;
655
656 // For receiving audio samples back from the fifo
657 int16_t *buffer = av_malloc( 48000 * 2 );
658 int count = 0;
659
660 // Allocate the context
661 AVFormatContext *oc = av_alloc_format_context( );
662
663 // Streams
664 AVStream *audio_st = NULL;
665 AVStream *video_st = NULL;
666
667 // Time stamps
668 double audio_pts = 0;
669 double video_pts = 0;
670
671 // Loop variable
672 int i;
673
674 // Frames despatched
675 long int frames = 0;
676 long int total_time = 0;
677
678 // Determine the format
679 AVOutputFormat *fmt = NULL;
680 char *filename = mlt_properties_get( properties, "target" );
681 char *format = mlt_properties_get( properties, "format" );
682 char *vcodec = mlt_properties_get( properties, "vcodec" );
683 char *acodec = mlt_properties_get( properties, "acodec" );
684
685 // Used to store and override codec ids
686 int audio_codec_id;
687 int video_codec_id;
688
689 // Check for user selected format first
690 if ( format != NULL )
691 fmt = guess_format( format, NULL, NULL );
692
693 // Otherwise check on the filename
694 if ( fmt == NULL && filename != NULL )
695 fmt = guess_format( NULL, filename, NULL );
696
697 // Otherwise default to mpeg
698 if ( fmt == NULL )
699 fmt = guess_format( "mpeg", NULL, NULL );
700
701 // We need a filename - default to stdout via the pipe: protocol
702 if ( filename == NULL || !strcmp( filename, "" ) )
703 filename = "pipe:";
704
705 // Get the codec ids selected
706 audio_codec_id = fmt->audio_codec;
707 video_codec_id = fmt->video_codec;
708
709 // Check for audio codec overrides
710 if ( acodec != NULL )
711 {
712 AVCodec *p = first_avcodec;
713 while( p != NULL )
714 {
715 if ( !strcmp( p->name, acodec ) && p->type == CODEC_TYPE_AUDIO )
716 break;
717 p = p->next;
718 }
719 if ( p != NULL )
720 audio_codec_id = p->id;
721 else
722 fprintf( stderr, "consumer_avcodec: audio codec %s unrecognised - ignoring\n", acodec );
723 }
724
725 // Check for video codec overrides
726 if ( vcodec != NULL )
727 {
728 AVCodec *p = first_avcodec;
729 while( p != NULL )
730 {
731 if ( !strcmp( p->name, vcodec ) && p->type == CODEC_TYPE_VIDEO )
732 break;
733 p = p->next;
734 }
735 if ( p != NULL )
736 video_codec_id = p->id;
737 else
738 fprintf( stderr, "consumer_avcodec: video codec %s unrecognised - ignoring\n", vcodec );
739 }
740
741 // Update the output context
742
743 // Write metadata
744 char *tmp = NULL;
745 int metavalue;
746
747 tmp = mlt_properties_get( properties, "meta.attr.title.markup");
748 if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );
749
750 tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
751 if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );
752
753 tmp = mlt_properties_get( properties, "meta.attr.author.markup");
754 if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );
755
756 tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
757 if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );
758
759 tmp = mlt_properties_get( properties, "meta.attr.album.markup");
760 if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );
761
762 metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
763 if (metavalue != 0) oc->year = metavalue;
764
765 metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
766 if (metavalue != 0) oc->track = metavalue;
767
768 oc->oformat = fmt;
769 snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
770
771 // Add audio and video streams
772 if ( fmt->video_codec != CODEC_ID_NONE )
773 video_st = add_video_stream( this, oc, video_codec_id );
774 if ( fmt->audio_codec != CODEC_ID_NONE )
775 audio_st = add_audio_stream( this, oc, audio_codec_id );
776
777 // Set the parameters (even though we have none...)
778 if ( av_set_parameters(oc, NULL) >= 0 )
779 {
780 if ( video_st && !open_video( oc, video_st ) )
781 video_st = NULL;
782 if ( audio_st )
783 audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size );
784
785 // Open the output file, if needed
786 if ( !( fmt->flags & AVFMT_NOFILE ) )
787 {
788 if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
789 {
790 fprintf(stderr, "Could not open '%s'\n", filename);
791 mlt_properties_set_int( properties, "running", 0 );
792 }
793 }
794
795 // Write the stream header, if any
796 if ( mlt_properties_get_int( properties, "running" ) )
797 av_write_header( oc );
798 }
799 else
800 {
801 fprintf(stderr, "Invalid output format parameters\n");
802 mlt_properties_set_int( properties, "running", 0 );
803 }
804
805 // Allocate picture
806 if ( video_st )
807 output = alloc_picture( video_st->codec->pix_fmt, width, height );
808
809 // Last check - need at least one stream
810 if ( audio_st == NULL && video_st == NULL )
811 mlt_properties_set_int( properties, "running", 0 );
812
813 // Get the starting time (can ignore the times above)
814 gettimeofday( &ante, NULL );
815
816 // Loop while running
817 while( mlt_properties_get_int( properties, "running" ) && !terminated )
818 {
819 // Get the frame
820 frame = mlt_consumer_rt_frame( this );
821
822 // Check that we have a frame to work with
823 if ( frame != NULL )
824 {
825 // Increment frames despatched
826 frames ++;
827
828 // Get the frame properties
829 frame_properties = MLT_FRAME_PROPERTIES( frame );
830
831 // Check for the terminated condition
832 terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;
833
834 // Get audio and append to the fifo
835 if ( !terminated && audio_st )
836 {
837 samples = mlt_sample_calculator( fps, frequency, count ++ );
838 mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
839
840 // Create the fifo if we don't have one
841 if ( fifo == NULL )
842 {
843 fifo = sample_fifo_init( frequency, channels );
844 mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
845 }
846
847 if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
848 memset( pcm, 0, samples * channels * 2 );
849
850 // Append the samples
851 sample_fifo_append( fifo, pcm, samples * channels );
852 total_time += ( samples * 1000000 ) / frequency;
853 }
854
855 // Encode the image
856 if ( !terminated && video_st )
857 mlt_deque_push_back( queue, frame );
858 else
859 mlt_frame_close( frame );
860 }
861
862 // While we have stuff to process, process...
863 while ( 1 )
864 {
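// Interleaving strategy: on each pass compare the current audio and video stream times
// and encode a packet for whichever stream is lagging; stop when the sample fifo holds
// less than one full audio frame and the video frame queue is empty.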
865 if (audio_st)
866 audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
867 else
868 audio_pts = 0.0;
869
870 if (video_st)
871 video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
872 else
873 video_pts = 0.0;
874
875 // Write interleaved audio and video frames
876 if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
877 {
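// channels * audio_input_frame_size is one codec frame of interleaved samples;
// e.g. for MP2 (1152 samples per channel) in stereo that is 2304 int16_t values.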
878 if ( fifo != NULL && channels * audio_input_frame_size < sample_fifo_used( fifo ) )
879 {
880 AVCodecContext *c;
881 AVPacket pkt;
882 av_init_packet( &pkt );
883
884 c = audio_st->codec;
885
886 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
887
888 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
889 // Write the compressed frame in the media file
890 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
891 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
892 pkt.flags |= PKT_FLAG_KEY;
893 pkt.stream_index= audio_st->index;
894 pkt.data= audio_outbuf;
895
896 if ( pkt.size )
897 if ( av_interleaved_write_frame( oc, &pkt ) != 0)
898 fprintf(stderr, "Error while writing audio frame\n");
899
900 audio_pts += c->frame_size;
901 }
902 else
903 {
904 break;
905 }
906 }
907 else if ( video_st )
908 {
909 if ( mlt_deque_count( queue ) )
910 {
911 int out_size, ret;
912 AVCodecContext *c;
913
914 frame = mlt_deque_pop_front( queue );
915 frame_properties = MLT_FRAME_PROPERTIES( frame );
916
917 c = video_st->codec;
918
919 if ( mlt_properties_get_int( frame_properties, "rendered" ) )
920 {
921 int i = 0;
922 int j = 0;
923 uint8_t *p;
924 uint8_t *q;
925
926 mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
927
928 mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
929
930 q = image;
931
932 // Convert the mlt frame to an AVPicture
933 for ( i = 0; i < height; i ++ )
934 {
935 p = input->data[ 0 ] + i * input->linesize[ 0 ];
936 j = width;
937 while( j -- )
938 {
939 *p ++ = *q ++;
940 *p ++ = *q ++;
941 }
942 }
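// (Both mlt_image_yuv422 and PIX_FMT_YUV422 are packed 2-bytes-per-pixel formats, so the
// loop above copies width * 2 bytes per row while honouring the destination linesize.)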
943
944 // Do the colour space conversion
945 #ifdef SWSCALE
946 struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
947 width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
948 sws_scale( context, input->data, input->linesize, 0, height,
949 output->data, output->linesize);
950 sws_freeContext( context );
951 #else
952 img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
953 #endif
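// Note that a new SwsContext is created and freed for every frame here; if the installed
// libswscale provides sws_getCachedContext(), reusing a cached context would avoid the
// per-frame setup cost.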
954
955 // Apply the alpha if applicable
956 if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
957 {
958 uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
959 register int n;
960
961 for ( i = 0; i < height; i ++ )
962 {
963 n = ( width + 7 ) / 8;
964 p = output->data[ 0 ] + i * output->linesize[ 0 ];
965
966 #ifndef __DARWIN__
967 p += 3;
968 #endif
969
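// Duff's-device style unrolled loop that writes the frame's alpha mask into every fourth
// byte of the packed RGBA32 picture; the alpha byte offset differs on Darwin, hence the
// #ifndef above.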
970 switch( width % 8 )
971 {
972 case 0: do { *p = *alpha++; p += 4;
973 case 7: *p = *alpha++; p += 4;
974 case 6: *p = *alpha++; p += 4;
975 case 5: *p = *alpha++; p += 4;
976 case 4: *p = *alpha++; p += 4;
977 case 3: *p = *alpha++; p += 4;
978 case 2: *p = *alpha++; p += 4;
979 case 1: *p = *alpha++; p += 4;
980 }
981 while( --n );
982 }
983 }
984 }
985 }
986
987 if (oc->oformat->flags & AVFMT_RAWPICTURE)
988 {
989 // Raw video case - the AVPicture is passed through directly (this part of the libavformat API may change)
990 AVPacket pkt;
991 av_init_packet(&pkt);
992
993 pkt.flags |= PKT_FLAG_KEY;
994 pkt.stream_index= video_st->index;
995 pkt.data= (uint8_t *)output;
996 pkt.size= sizeof(AVPicture);
997
998 ret = av_write_frame(oc, &pkt);
999 video_pts += c->frame_size;
1000 }
1001 else
1002 {
1003 // Set the quality
1004 output->quality = video_st->quality;
1005
1006 // Encode the image
1007 out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
1008
1009 // If zero size, it means the image was buffered
1010 if (out_size > 0)
1011 {
1012 AVPacket pkt;
1013 av_init_packet( &pkt );
1014
1015 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1016 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1017 if( c->coded_frame && c->coded_frame->key_frame )
1018 pkt.flags |= PKT_FLAG_KEY;
1019 pkt.stream_index= video_st->index;
1020 pkt.data= video_outbuf;
1021 pkt.size= out_size;
1022
1023 // write the compressed frame in the media file
1024 ret = av_interleaved_write_frame(oc, &pkt);
1025 video_pts += c->frame_size;
1026 }
1027 else if ( out_size < 0 )
1028 {
1029 fprintf( stderr, "Error with video encode\n" );
1030 }
1031 }
1032 frame_count++;
1033 mlt_frame_close( frame );
1034 }
1035 else
1036 {
1037 break;
1038 }
1039 }
1040 }
1041
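// Simple real time throttle: every 12 frames compare the wall clock time elapsed (less
// the audio still queued in the fifo) with the media time produced so far, and sleep off
// any difference so a real_time consumer does not run ahead.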
1042 if ( real_time_output && frames % 12 == 0 )
1043 {
1044 long passed = time_difference( &ante );
1045 if ( fifo != NULL )
1046 {
1047 long pending = ( ( ( long )sample_fifo_used( fifo ) * 1000 ) / frequency ) * 1000;
1048 passed -= pending;
1049 }
1050 if ( passed < total_time )
1051 {
1052 long total = ( total_time - passed );
1053 struct timespec t = { total / 1000000, ( total % 1000000 ) * 1000 };
1054 nanosleep( &t, NULL );
1055 }
1056 }
1057 }
1058
1059 // close each codec
1060 if (video_st)
1061 close_video(oc, video_st);
1062 if (audio_st)
1063 close_audio(oc, audio_st);
1064
1065 // Write the trailer, if any
1066 av_write_trailer(oc);
1067
1068 // Free the streams
1069 for(i = 0; i < oc->nb_streams; i++)
1070 av_freep(&oc->streams[i]);
1071
1072 // Close the output file
1073 if (!(fmt->flags & AVFMT_NOFILE))
1074 url_fclose(&oc->pb);
1075
1076 // Clean up input and output frames
1077 if ( output )
1078 av_free( output->data[0] );
1079 av_free( output );
1080 av_free( input->data[0] );
1081 av_free( input );
1082 av_free( video_outbuf );
1083 av_free( buffer );
1084
1085 // Free the stream
1086 av_free(oc);
1087
1088 // Just in case we terminated on pause
1089 mlt_properties_set_int( properties, "running", 0 );
1090
1091 mlt_consumer_stopped( this );
1092
1093 return NULL;
1094 }
1095
1096 /** Close the consumer.
1097 */
1098
1099 static void consumer_close( mlt_consumer this )
1100 {
1101 // Stop the consumer
1102 mlt_consumer_stop( this );
1103
1104 // Close the parent
1105 mlt_consumer_close( this );
1106
1107 // Free the memory
1108 free( this );
1109 }