framework: remove global profile, rather share one mlt_profile across a service netwo...
[melted] / src / modules / avformat / consumer_avformat.c
/*
 * consumer_avformat.c -- an encoder based on avformat
 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
 * Author: Charles Yates <charles.yates@pandora.be>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

// mlt Header files
#include <framework/mlt_consumer.h>
#include <framework/mlt_frame.h>

// System header files
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <pthread.h>
#include <sys/time.h>
#include <math.h>

// avformat header files
#include <avformat.h>
#ifdef SWSCALE
#include <swscale.h>
#endif

//
// This structure should be extended and made globally available in mlt
//

typedef struct
{
    int16_t *buffer;
    int size;
    int used;
    double time;
    int frequency;
    int channels;
}
*sample_fifo, sample_fifo_s;

sample_fifo sample_fifo_init( int frequency, int channels )
{
    sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
    this->frequency = frequency;
    this->channels = channels;
    return this;
}

// sample_fifo_clear and sample_fifo_check are currently unused (not working as intended)

void sample_fifo_clear( sample_fifo this, double time )
{
    int words = ( float )( time - this->time ) * this->frequency * this->channels;
    if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 )
    {
        memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) );
        this->used -= words;
        this->time = time;
    }
    else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) )
    {
        this->used = 0;
        this->time = time;
    }
}

void sample_fifo_check( sample_fifo this, double time )
{
    if ( this->used == 0 )
    {
        if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) )
            this->time = time;
    }
}

void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
{
    if ( ( this->size - this->used ) < count )
    {
        this->size += count * 5;
        this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
    }

    memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
    this->used += count;
}

int sample_fifo_used( sample_fifo this )
{
    return this->used;
}

int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
{
    if ( count > this->used )
        count = this->used;

    memcpy( samples, this->buffer, count * sizeof( int16_t ) );
    this->used -= count;
    memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );

    this->time += ( double )count / this->channels / this->frequency;

    return count;
}

void sample_fifo_close( sample_fifo this )
{
    free( this->buffer );
    free( this );
}
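
/*
 * Usage sketch (illustrative only, not part of the original source): the fifo
 * stores interleaved 16-bit samples and is drained in encoder-sized chunks.
 * The values below (48000 Hz, stereo, 1152 samples per coded frame) are
 * example figures, not requirements.
 *
 *   sample_fifo fifo = sample_fifo_init( 48000, 2 );
 *   sample_fifo_append( fifo, pcm, samples * channels );
 *   while ( sample_fifo_used( fifo ) >= 2 * 1152 )
 *       sample_fifo_fetch( fifo, buffer, 2 * 1152 );
 *   sample_fifo_close( fifo );
 */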

// Forward references.
static int consumer_start( mlt_consumer this );
static int consumer_stop( mlt_consumer this );
static int consumer_is_stopped( mlt_consumer this );
static void *consumer_thread( void *arg );
static void consumer_close( mlt_consumer this );

/** Initialise the avformat consumer.
*/

mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
{
    // Allocate the consumer
    mlt_consumer this = mlt_consumer_new( profile );

    // If memory was allocated and initialised without error
    if ( this != NULL )
    {
        // Get properties from the consumer
        mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

        // Assign close callback
        this->close = consumer_close;

        // Interpret the argument
        if ( arg != NULL )
            mlt_properties_set( properties, "target", arg );

        // sample and frame queue
        mlt_properties_set_data( properties, "frame_queue", mlt_deque_init( ), 0, ( mlt_destructor )mlt_deque_close, NULL );

        // Set avformat defaults (all lifted from ffmpeg.c)
        mlt_properties_set_int( properties, "audio_bit_rate", 128000 );
        mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 );
        mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 );
        mlt_properties_set_int( properties, "gop_size", 12 );
        mlt_properties_set_int( properties, "b_frames", 0 );
        mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
        mlt_properties_set_double( properties, "qscale", 0 );
        mlt_properties_set_int( properties, "me_method", ME_EPZS );
        mlt_properties_set_int( properties, "mb_cmp", FF_CMP_SAD );
        mlt_properties_set_int( properties, "ildct_cmp", FF_CMP_VSAD );
        mlt_properties_set_int( properties, "sub_cmp", FF_CMP_SAD );
        mlt_properties_set_int( properties, "cmp", FF_CMP_SAD );
        mlt_properties_set_int( properties, "pre_cmp", FF_CMP_SAD );
        mlt_properties_set_int( properties, "pre_me", 0 );
        mlt_properties_set_double( properties, "lumi_mask", 0 );
        mlt_properties_set_double( properties, "dark_mask", 0 );
        mlt_properties_set_double( properties, "scplx_mask", 0 );
        mlt_properties_set_double( properties, "tcplx_mask", 0 );
        mlt_properties_set_double( properties, "p_mask", 0 );
        mlt_properties_set_int( properties, "qns", 0 );
        mlt_properties_set_int( properties, "video_qmin", 2 );
        mlt_properties_set_int( properties, "video_qmax", 31 );
        mlt_properties_set_int( properties, "video_lmin", 2*FF_QP2LAMBDA );
        mlt_properties_set_int( properties, "video_lmax", 31*FF_QP2LAMBDA );
        mlt_properties_set_int( properties, "video_mb_qmin", 2 );
        mlt_properties_set_int( properties, "video_mb_qmax", 31 );
        mlt_properties_set_int( properties, "video_qdiff", 3 );
        mlt_properties_set_double( properties, "video_qblur", 0.5 );
        mlt_properties_set_double( properties, "video_qcomp", 0.5 );
        mlt_properties_set_int( properties, "video_rc_max_rate", 0 );
        mlt_properties_set_int( properties, "video_rc_min_rate", 0 );
        mlt_properties_set_int( properties, "video_rc_buffer_size", 0 );
        mlt_properties_set_double( properties, "video_rc_buffer_aggressivity", 1.0 );
        mlt_properties_set_double( properties, "video_rc_initial_cplx", 0 );
        mlt_properties_set_double( properties, "video_i_qfactor", -0.8 );
        mlt_properties_set_double( properties, "video_b_qfactor", 1.25 );
        mlt_properties_set_double( properties, "video_i_qoffset", 0 );
        mlt_properties_set_double( properties, "video_b_qoffset", 1.25 );
        mlt_properties_set_int( properties, "video_intra_quant_bias", FF_DEFAULT_QUANT_BIAS );
        mlt_properties_set_int( properties, "video_inter_quant_bias", FF_DEFAULT_QUANT_BIAS );
        mlt_properties_set_int( properties, "dct_algo", 0 );
        mlt_properties_set_int( properties, "idct_algo", 0 );
        mlt_properties_set_int( properties, "me_threshold", 0 );
        mlt_properties_set_int( properties, "mb_threshold", 0 );
        mlt_properties_set_int( properties, "intra_dc_precision", 0 );
        mlt_properties_set_int( properties, "strict", 0 );
        mlt_properties_set_int( properties, "error_rate", 0 );
        mlt_properties_set_int( properties, "noise_reduction", 0 );
        mlt_properties_set_int( properties, "sc_threshold", 0 );
        mlt_properties_set_int( properties, "me_range", 0 );
        mlt_properties_set_int( properties, "coder", 0 );
        mlt_properties_set_int( properties, "context", 0 );
        mlt_properties_set_int( properties, "predictor", 0 );
        mlt_properties_set_int( properties, "ildct", 0 );
        mlt_properties_set_int( properties, "ilme", 0 );

        // Ensure termination at end of the stream
        mlt_properties_set_int( properties, "terminate_on_pause", 1 );

        // Set up start/stop/terminated callbacks
        this->start = consumer_start;
        this->stop = consumer_stop;
        this->is_stopped = consumer_is_stopped;
    }

    // Return this
    return this;
}
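
/*
 * Construction sketch (illustrative only, not part of the original source):
 * how a caller might wire this consumer to a producer and override a few of
 * the defaults set above. "profile" and "producer" are assumed to already
 * exist; the output path, format name and bit rate are hypothetical values.
 *
 *   mlt_consumer consumer = consumer_avformat_init( profile, "output.mpg" );
 *   mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
 *   mlt_properties_set( properties, "format", "dvd" );
 *   mlt_properties_set_int( properties, "video_bit_rate", 6000000 );
 *   mlt_consumer_connect( consumer, MLT_PRODUCER_SERVICE( producer ) );
 *   mlt_consumer_start( consumer );
 *   while ( !mlt_consumer_is_stopped( consumer ) )
 *       ; // wait (a real caller would sleep or do other work here)
 *   mlt_consumer_close( consumer );
 */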

/** Start the consumer.
*/

static int consumer_start( mlt_consumer this )
{
    // Get the properties
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

    // Check that we're not already running
    if ( !mlt_properties_get_int( properties, "running" ) )
    {
        // Allocate a thread
        pthread_t *thread = calloc( 1, sizeof( pthread_t ) );

        // Get the width and height
        int width = mlt_properties_get_int( properties, "width" );
        int height = mlt_properties_get_int( properties, "height" );

        // Obtain the size property
        char *size = mlt_properties_get( properties, "size" );

        // Interpret it
        if ( size != NULL )
        {
            int tw, th;
            if ( sscanf( size, "%dx%d", &tw, &th ) == 2 && tw > 0 && th > 0 )
            {
                width = tw;
                height = th;
            }
            else
            {
                fprintf( stderr, "consumer_avformat: Invalid size property %s - ignoring.\n", size );
            }
        }

        // Now ensure we honour the multiple of two requested by libavformat
        mlt_properties_set_int( properties, "width", ( width / 2 ) * 2 );
        mlt_properties_set_int( properties, "height", ( height / 2 ) * 2 );

        // Assign the thread to properties
        mlt_properties_set_data( properties, "thread", thread, sizeof( pthread_t ), free, NULL );

        // Set the running state
        mlt_properties_set_int( properties, "running", 1 );

        // Create the thread
        pthread_create( thread, NULL, consumer_thread, this );
    }
    return 0;
}

/** Stop the consumer.
*/

static int consumer_stop( mlt_consumer this )
{
    // Get the properties
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

    // Check that we're running
    if ( mlt_properties_get_int( properties, "running" ) )
    {
        // Get the thread
        pthread_t *thread = mlt_properties_get_data( properties, "thread", NULL );

        // Stop the thread
        mlt_properties_set_int( properties, "running", 0 );

        // Wait for termination
        pthread_join( *thread, NULL );
    }

    return 0;
}

/** Determine if the consumer is stopped.
*/

static int consumer_is_stopped( mlt_consumer this )
{
    // Get the properties
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
    return !mlt_properties_get_int( properties, "running" );
}

/** Add an audio output stream
*/

static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
{
    // Get the properties
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

    // Create a new stream
    AVStream *st = av_new_stream( oc, 1 );

    // If created, then initialise from properties
    if ( st != NULL )
    {
        AVCodecContext *c = st->codec;
        c->codec_id = codec_id;
        c->codec_type = CODEC_TYPE_AUDIO;

        // Put sample parameters
        c->bit_rate = mlt_properties_get_int( properties, "audio_bit_rate" );
        c->sample_rate = mlt_properties_get_int( properties, "frequency" );
        c->channels = mlt_properties_get_int( properties, "channels" );

        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;

        // Allow the user to override the audio fourcc
        if ( mlt_properties_get( properties, "afourcc" ) )
        {
            char *tail = NULL;
            char *arg = mlt_properties_get( properties, "afourcc" );
            int tag = strtol( arg, &tail, 0);
            if( !tail || *tail )
                tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
            c->codec_tag = tag;
        }
    }
    else
    {
        fprintf( stderr, "Could not allocate a stream for audio\n" );
    }

    return st;
}
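
/*
 * Worked example (illustrative): with a four-character value such as "DIVX"
 * (used here only to show the packing), strtol() fails to parse a number, so
 * the tag is packed little-endian from the characters:
 * 'D' + ('I'<<8) + ('V'<<16) + ('X'<<24) = 0x44 + 0x4900 + 0x560000 + 0x58000000
 * = 0x58564944. A numeric string such as "0x58564944" is taken directly from
 * strtol(). The same logic is applied to vfourcc in add_video_stream below.
 */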

static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size )
{
    // We will return the audio input size from here
    int audio_input_frame_size = 0;

    // Get the context
    AVCodecContext *c = st->codec;

    // Find the encoder
    AVCodec *codec = avcodec_find_encoder( c->codec_id );

    // Continue if codec found and we can open it
    if ( codec != NULL && avcodec_open(c, codec) >= 0 )
    {
        // Ugly hack for PCM codecs (will be removed ASAP with new PCM support)
        // to compute the input frame size in samples
        if ( c->frame_size <= 1 )
        {
            audio_input_frame_size = audio_outbuf_size / c->channels;
            switch(st->codec->codec_id)
            {
                case CODEC_ID_PCM_S16LE:
                case CODEC_ID_PCM_S16BE:
                case CODEC_ID_PCM_U16LE:
                case CODEC_ID_PCM_U16BE:
                    audio_input_frame_size >>= 1;
                    break;
                default:
                    break;
            }
        }
        else
        {
            audio_input_frame_size = c->frame_size;
        }

        // Some formats want stream headers to be separate (hmm)
        if( !strcmp( oc->oformat->name, "mp4" ) ||
            !strcmp( oc->oformat->name, "mov" ) ||
            !strcmp( oc->oformat->name, "3gp" ) )
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    else
    {
        fprintf( stderr, "Unable to encode audio - disabling audio output.\n" );
    }

    return audio_input_frame_size;
}
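
/*
 * Worked example (illustrative): consumer_thread below passes
 * audio_outbuf_size = 10000. For pcm_s16le with 2 channels the code above
 * yields 10000 / 2 = 5000, then >>= 1 gives 2500 samples per channel per
 * encode call. For a frame-based codec such as MP2, c->frame_size (1152
 * samples) is used directly.
 */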

static void close_audio( AVFormatContext *oc, AVStream *st )
{
    avcodec_close( st->codec );
}

/** Add a video output stream
*/

static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
{
    // Get the properties
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

    // Create a new stream
    AVStream *st = av_new_stream( oc, 0 );

    if ( st != NULL )
    {
        char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
        double ar = mlt_properties_get_double( properties, "display_ratio" );
        AVCodecContext *c = st->codec;
        c->codec_id = codec_id;
        c->codec_type = CODEC_TYPE_VIDEO;

        // put sample parameters
        c->bit_rate = mlt_properties_get_int( properties, "video_bit_rate" );
        c->bit_rate_tolerance = mlt_properties_get_int( properties, "video_bit_rate_tolerance" );
        c->width = mlt_properties_get_int( properties, "width" );
        c->height = mlt_properties_get_int( properties, "height" );
        c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
        c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
        c->gop_size = mlt_properties_get_int( properties, "gop_size" );
        c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;

        if ( mlt_properties_get_int( properties, "b_frames" ) )
        {
            c->max_b_frames = mlt_properties_get_int( properties, "b_frames" );
            c->b_frame_strategy = 0;
            c->b_quant_factor = 2.0;
        }

        c->mb_decision = mlt_properties_get_int( properties, "mb_decision" );
        c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
        c->mb_cmp = mlt_properties_get_int( properties, "mb_cmp" );
        c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" );
        c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" );
        c->me_cmp = mlt_properties_get_int( properties, "cmp" );
        c->me_pre_cmp = mlt_properties_get_int( properties, "pre_cmp" );
        c->pre_me = mlt_properties_get_int( properties, "pre_me" );
        c->lumi_masking = mlt_properties_get_double( properties, "lumi_mask" );
        c->dark_masking = mlt_properties_get_double( properties, "dark_mask" );
        c->spatial_cplx_masking = mlt_properties_get_double( properties, "scplx_mask" );
        c->temporal_cplx_masking = mlt_properties_get_double( properties, "tcplx_mask" );
        c->p_masking = mlt_properties_get_double( properties, "p_mask" );
        c->quantizer_noise_shaping= mlt_properties_get_int( properties, "qns" );
        c->qmin = mlt_properties_get_int( properties, "video_qmin" );
        c->qmax = mlt_properties_get_int( properties, "video_qmax" );
        c->lmin = mlt_properties_get_int( properties, "video_lmin" );
        c->lmax = mlt_properties_get_int( properties, "video_lmax" );
        c->mb_qmin = mlt_properties_get_int( properties, "video_mb_qmin" );
        c->mb_qmax = mlt_properties_get_int( properties, "video_mb_qmax" );
        c->max_qdiff = mlt_properties_get_int( properties, "video_qdiff" );
        c->qblur = mlt_properties_get_double( properties, "video_qblur" );
        c->qcompress = mlt_properties_get_double( properties, "video_qcomp" );

        if ( mlt_properties_get_double( properties, "qscale" ) > 0 )
        {
            c->flags |= CODEC_FLAG_QSCALE;
            st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
        }

        // Allow the user to override the video fourcc
        if ( mlt_properties_get( properties, "vfourcc" ) )
        {
            char *tail = NULL;
            const char *arg = mlt_properties_get( properties, "vfourcc" );
            int tag = strtol( arg, &tail, 0);
            if( !tail || *tail )
                tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
            c->codec_tag = tag;
        }

        // Some formats want stream headers to be separate
        if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;

        c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
        c->rc_min_rate = mlt_properties_get_int( properties, "video_rc_min_rate" );
        c->rc_buffer_size = mlt_properties_get_int( properties, "video_rc_buffer_size" );
        c->rc_initial_buffer_occupancy = c->rc_buffer_size*3/4;
        c->rc_buffer_aggressivity= mlt_properties_get_double( properties, "video_rc_buffer_aggressivity" );
        c->rc_initial_cplx= mlt_properties_get_double( properties, "video_rc_initial_cplx" );
        c->i_quant_factor = mlt_properties_get_double( properties, "video_i_qfactor" );
        c->b_quant_factor = mlt_properties_get_double( properties, "video_b_qfactor" );
        c->i_quant_offset = mlt_properties_get_double( properties, "video_i_qoffset" );
        c->b_quant_offset = mlt_properties_get_double( properties, "video_b_qoffset" );
        c->intra_quant_bias = mlt_properties_get_int( properties, "video_intra_quant_bias" );
        c->inter_quant_bias = mlt_properties_get_int( properties, "video_inter_quant_bias" );
        c->dct_algo = mlt_properties_get_int( properties, "dct_algo" );
        c->idct_algo = mlt_properties_get_int( properties, "idct_algo" );
        c->me_threshold= mlt_properties_get_int( properties, "me_threshold" );
        c->mb_threshold= mlt_properties_get_int( properties, "mb_threshold" );
        c->intra_dc_precision= mlt_properties_get_int( properties, "intra_dc_precision" );
        c->strict_std_compliance = mlt_properties_get_int( properties, "strict" );
        c->error_rate = mlt_properties_get_int( properties, "error_rate" );
        c->noise_reduction= mlt_properties_get_int( properties, "noise_reduction" );
        c->scenechange_threshold= mlt_properties_get_int( properties, "sc_threshold" );
        c->me_range = mlt_properties_get_int( properties, "me_range" );
        c->coder_type= mlt_properties_get_int( properties, "coder" );
        c->context_model= mlt_properties_get_int( properties, "context" );
        c->prediction_method= mlt_properties_get_int( properties, "predictor" );
        c->me_method = mlt_properties_get_int( properties, "me_method" );
        if ( mlt_properties_get_int( properties, "progressive" ) == 0 &&
             mlt_properties_get_int( properties, "deinterlace" ) == 0 )
        {
            if ( mlt_properties_get_int( properties, "ildct" ) )
                c->flags |= CODEC_FLAG_INTERLACED_DCT;
            if ( mlt_properties_get_int( properties, "ilme" ) )
                c->flags |= CODEC_FLAG_INTERLACED_ME;
        }
    }
    else
    {
        fprintf( stderr, "Could not allocate a stream for video\n" );
    }

    return st;
}
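
/*
 * Worked example (illustrative): for a 720x576 frame with display_ratio 4/3,
 * av_d2q( ar * c->height / c->width, 255 ) above evaluates
 * (4/3) * 576 / 720 = 1.0666..., which av_d2q() returns as 16/15 - the
 * sample aspect ratio implied by a 4:3 display of 720x576 pixels.
 */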

static AVFrame *alloc_picture( int pix_fmt, int width, int height )
{
    // Allocate a frame
    AVFrame *picture = avcodec_alloc_frame();

    // Determine the size of the picture buffer
    int size = avpicture_get_size(pix_fmt, width, height);

    // Allocate the picture buf
    uint8_t *picture_buf = av_malloc(size);

    // If we have both, then fill the image
    if ( picture != NULL && picture_buf != NULL )
    {
        // Fill the frame with the allocated buffer
        avpicture_fill( (AVPicture *)picture, picture_buf, pix_fmt, width, height);
    }
    else
    {
        // Something failed - clean up what we can
        av_free( picture );
        av_free( picture_buf );
        picture = NULL;
    }

    return picture;
}

static int open_video(AVFormatContext *oc, AVStream *st)
{
    // Get the codec
    AVCodecContext *video_enc = st->codec;

    // find the video encoder
    AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );

    if( codec && codec->pix_fmts )
    {
        const enum PixelFormat *p = codec->pix_fmts;
        for( ; *p!=-1; p++ )
        {
            if( *p == video_enc->pix_fmt )
                break;
        }
        if( *p == -1 )
            video_enc->pix_fmt = codec->pix_fmts[ 0 ];
    }

    // Open the codec safely
    return codec != NULL && avcodec_open( video_enc, codec ) >= 0;
}

void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
}

static inline long time_difference( struct timeval *time1 )
{
    struct timeval time2;
    gettimeofday( &time2, NULL );
    return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec;
}

/** The main thread - the argument is simply the consumer.
*/

static void *consumer_thread( void *arg )
{
    // Map the argument to the object
    mlt_consumer this = arg;

    // Get the properties
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

    // Get the terminate on pause property
    int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
    int terminated = 0;

    // Determine if feed is slow (for realtime stuff)
    int real_time_output = mlt_properties_get_int( properties, "real_time" );

    // Time structures
    struct timeval ante;

    // Get the frame rate
    double fps = mlt_properties_get_double( properties, "fps" );

    // Get width and height
    int width = mlt_properties_get_int( properties, "width" );
    int height = mlt_properties_get_int( properties, "height" );
    int img_width = width;
    int img_height = height;

    // Get default audio properties
    mlt_audio_format aud_fmt = mlt_audio_pcm;
    int channels = mlt_properties_get_int( properties, "channels" );
    int frequency = mlt_properties_get_int( properties, "frequency" );
    int16_t *pcm = NULL;
    int samples = 0;

    // AVFormat audio buffer and frame size
    int audio_outbuf_size = 10000;
    uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
    int audio_input_frame_size = 0;

    // AVFormat video buffer and frame count
    int frame_count = 0;
    int video_outbuf_size = ( 1024 * 1024 );
    uint8_t *video_outbuf = av_malloc( video_outbuf_size );

    // Used for the frame properties
    mlt_frame frame = NULL;
    mlt_properties frame_properties = NULL;

    // Get the queues
    mlt_deque queue = mlt_properties_get_data( properties, "frame_queue", NULL );
    sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );

    // Need two av pictures for converting
    AVFrame *output = NULL;
    AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height );

    // For receiving images from an mlt_frame
    uint8_t *image;
    mlt_image_format img_fmt = mlt_image_yuv422;

    // For receiving audio samples back from the fifo
    int16_t *buffer = av_malloc( 48000 * 2 );
    int count = 0;

    // Allocate the context
    AVFormatContext *oc = av_alloc_format_context( );

    // Streams
    AVStream *audio_st = NULL;
    AVStream *video_st = NULL;

    // Time stamps
    double audio_pts = 0;
    double video_pts = 0;

    // Loop variable
    int i;

    // Frames despatched
    long int frames = 0;
    long int total_time = 0;

    // Determine the format
    AVOutputFormat *fmt = NULL;
    char *filename = mlt_properties_get( properties, "target" );
    char *format = mlt_properties_get( properties, "format" );
    char *vcodec = mlt_properties_get( properties, "vcodec" );
    char *acodec = mlt_properties_get( properties, "acodec" );

    // Used to store and override codec ids
    int audio_codec_id;
    int video_codec_id;

    // Check for user selected format first
    if ( format != NULL )
        fmt = guess_format( format, NULL, NULL );

    // Otherwise check on the filename
    if ( fmt == NULL && filename != NULL )
        fmt = guess_format( NULL, filename, NULL );

    // Otherwise default to mpeg
    if ( fmt == NULL )
        fmt = guess_format( "mpeg", NULL, NULL );

    // We need a filename - default to stdout?
    if ( filename == NULL || !strcmp( filename, "" ) )
        filename = "pipe:";

    // Get the codec ids selected
    audio_codec_id = fmt->audio_codec;
    video_codec_id = fmt->video_codec;

    // Check for audio codec overrides
    if ( acodec != NULL )
    {
        AVCodec *p = first_avcodec;
        while( p != NULL )
        {
            if ( !strcmp( p->name, acodec ) && p->type == CODEC_TYPE_AUDIO )
                break;
            p = p->next;
        }
        if ( p != NULL )
            audio_codec_id = p->id;
        else
            fprintf( stderr, "consumer_avformat: audio codec %s unrecognised - ignoring\n", acodec );
    }

    // Check for video codec overrides
    if ( vcodec != NULL )
    {
        AVCodec *p = first_avcodec;
        while( p != NULL )
        {
            if ( !strcmp( p->name, vcodec ) && p->type == CODEC_TYPE_VIDEO )
                break;
            p = p->next;
        }
        if ( p != NULL )
            video_codec_id = p->id;
        else
            fprintf( stderr, "consumer_avformat: video codec %s unrecognised - ignoring\n", vcodec );
    }

    // Update the output context

    // Write metadata
    char *tmp = NULL;
    int metavalue;

    tmp = mlt_properties_get( properties, "meta.attr.title.markup");
    if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );

    tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
    if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );

    tmp = mlt_properties_get( properties, "meta.attr.author.markup");
    if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );

    tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
    if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );

    tmp = mlt_properties_get( properties, "meta.attr.album.markup");
    if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );

    metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
    if (metavalue != 0) oc->year = metavalue;

    metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
    if (metavalue != 0) oc->track = metavalue;

    oc->oformat = fmt;
    snprintf( oc->filename, sizeof(oc->filename), "%s", filename );

    // Add audio and video streams
    if ( fmt->video_codec != CODEC_ID_NONE )
        video_st = add_video_stream( this, oc, video_codec_id );
    if ( fmt->audio_codec != CODEC_ID_NONE )
        audio_st = add_audio_stream( this, oc, audio_codec_id );

    // Set the parameters (even though we have none...)
    if ( av_set_parameters(oc, NULL) >= 0 )
    {
        if ( video_st && !open_video( oc, video_st ) )
            video_st = NULL;
        if ( audio_st )
            audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size );

        // Open the output file, if needed
        if ( !( fmt->flags & AVFMT_NOFILE ) )
        {
            if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
            {
                fprintf(stderr, "Could not open '%s'\n", filename);
                mlt_properties_set_int( properties, "running", 0 );
            }
        }

        // Write the stream header, if any
        if ( mlt_properties_get_int( properties, "running" ) )
            av_write_header( oc );
    }
    else
    {
        fprintf(stderr, "Invalid output format parameters\n");
        mlt_properties_set_int( properties, "running", 0 );
    }

    // Allocate picture
    if ( video_st )
        output = alloc_picture( video_st->codec->pix_fmt, width, height );

    // Last check - need at least one stream
    if ( audio_st == NULL && video_st == NULL )
        mlt_properties_set_int( properties, "running", 0 );

    // Get the starting time (can ignore the times above)
    gettimeofday( &ante, NULL );

    // Loop while running
    while( mlt_properties_get_int( properties, "running" ) && !terminated )
    {
        // Get the frame
        frame = mlt_consumer_rt_frame( this );

        // Check that we have a frame to work with
        if ( frame != NULL )
        {
            // Increment frames despatched
            frames ++;

            // Default audio args
            frame_properties = MLT_FRAME_PROPERTIES( frame );

            // Check for the terminated condition
            terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;

            // Get audio and append to the fifo
            if ( !terminated && audio_st )
            {
                samples = mlt_sample_calculator( fps, frequency, count ++ );
                mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );

                // Create the fifo if we don't have one
                if ( fifo == NULL )
                {
                    fifo = sample_fifo_init( frequency, channels );
                    mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
                }

                if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
                    memset( pcm, 0, samples * channels * 2 );

                // Append the samples
                sample_fifo_append( fifo, pcm, samples * channels );
                total_time += ( samples * 1000000 ) / frequency;
            }

            // Encode the image
            if ( !terminated && video_st )
                mlt_deque_push_back( queue, frame );
            else
                mlt_frame_close( frame );
        }

        // While we have stuff to process, process...
        while ( 1 )
        {
            if (audio_st)
                audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
            else
                audio_pts = 0.0;

            if (video_st)
                video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
            else
                video_pts = 0.0;

            // Write interleaved audio and video frames
            if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
            {
                if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) )
                {
                    AVCodecContext *c;
                    AVPacket pkt;
                    av_init_packet( &pkt );

                    c = audio_st->codec;

                    sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );

                    pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
                    // Write the compressed frame in the media file
                    if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
                        pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
                    pkt.flags |= PKT_FLAG_KEY;
                    pkt.stream_index= audio_st->index;
                    pkt.data= audio_outbuf;

                    if ( pkt.size )
                        if ( av_interleaved_write_frame( oc, &pkt ) != 0)
                            fprintf(stderr, "Error while writing audio frame\n");

                    audio_pts += c->frame_size;
                }
                else
                {
                    break;
                }
            }
            else if ( video_st )
            {
                if ( mlt_deque_count( queue ) )
                {
                    int out_size, ret;
                    AVCodecContext *c;

                    frame = mlt_deque_pop_front( queue );
                    frame_properties = MLT_FRAME_PROPERTIES( frame );

                    c = video_st->codec;

                    if ( mlt_properties_get_int( frame_properties, "rendered" ) )
                    {
                        int i = 0;
                        int j = 0;
                        uint8_t *p;
                        uint8_t *q;

                        mlt_events_fire( properties, "consumer-frame-show", frame, NULL );

                        mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );

                        q = image;

                        // Convert the mlt frame to an AVPicture
                        for ( i = 0; i < height; i ++ )
                        {
                            p = input->data[ 0 ] + i * input->linesize[ 0 ];
                            j = width;
                            while( j -- )
                            {
                                *p ++ = *q ++;
                                *p ++ = *q ++;
                            }
                        }

                        // Do the colour space conversion
#ifdef SWSCALE
                        struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
                            width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
                        sws_scale( context, input->data, input->linesize, 0, height,
                            output->data, output->linesize);
                        sws_freeContext( context );
#else
                        img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
#endif

                        // Apply the alpha if applicable
                        if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
                        {
                            uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
                            register int n;

                            for ( i = 0; i < height; i ++ )
                            {
                                n = ( width + 7 ) / 8;
                                p = output->data[ 0 ] + i * output->linesize[ 0 ];

#ifndef __DARWIN__
                                p += 3;
#endif

                                switch( width % 8 )
                                {
                                    case 0: do { *p = *alpha++; p += 4;
                                    case 7:      *p = *alpha++; p += 4;
                                    case 6:      *p = *alpha++; p += 4;
                                    case 5:      *p = *alpha++; p += 4;
                                    case 4:      *p = *alpha++; p += 4;
                                    case 3:      *p = *alpha++; p += 4;
                                    case 2:      *p = *alpha++; p += 4;
                                    case 1:      *p = *alpha++; p += 4;
                                    }
                                    while( --n );
                                }
                            }
                        }
                    }

                    if (oc->oformat->flags & AVFMT_RAWPICTURE)
                    {
                        // raw video case. The API will change slightly in the near future for that
                        AVPacket pkt;
                        av_init_packet(&pkt);

                        pkt.flags |= PKT_FLAG_KEY;
                        pkt.stream_index= video_st->index;
                        pkt.data= (uint8_t *)output;
                        pkt.size= sizeof(AVPicture);

                        ret = av_write_frame(oc, &pkt);
                        video_pts += c->frame_size;
                    }
                    else
                    {
                        // Set the quality
                        output->quality = video_st->quality;

                        // Set frame interlace hints
                        output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
                        output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );

                        // Encode the image
                        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );

                        // If zero size, it means the image was buffered
                        if (out_size > 0)
                        {
                            AVPacket pkt;
                            av_init_packet( &pkt );

                            if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
                                pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
                            if( c->coded_frame && c->coded_frame->key_frame )
                                pkt.flags |= PKT_FLAG_KEY;
                            pkt.stream_index= video_st->index;
                            pkt.data= video_outbuf;
                            pkt.size= out_size;

                            // write the compressed frame in the media file
                            ret = av_interleaved_write_frame(oc, &pkt);
                            video_pts += c->frame_size;
                        }
                        else
                        {
                            fprintf( stderr, "Error with video encode\n" );
                        }
                    }
                    frame_count++;
                    mlt_frame_close( frame );
                }
                else
                {
                    break;
                }
            }
        }

        if ( real_time_output && frames % 12 == 0 )
        {
            long passed = time_difference( &ante );
            if ( fifo != NULL )
            {
                long pending = ( ( ( long )sample_fifo_used( fifo ) * 1000 ) / frequency ) * 1000;
                passed -= pending;
            }
            if ( passed < total_time )
            {
                long total = ( total_time - passed );
                struct timespec t = { total / 1000000, ( total % 1000000 ) * 1000 };
                nanosleep( &t, NULL );
            }
        }
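
        /*
         * Pacing sketch (illustrative): total_time grows by samples * 1000000 / frequency
         * microseconds per frame, e.g. 1920 samples at 48000 Hz is 40000 us for a 25 fps
         * frame. Every 12 frames the elapsed wall-clock time (minus the audio still
         * queued in the fifo) is compared with that running total, and the thread sleeps
         * for the difference so that real_time output does not run ahead of the clock.
         */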
    }

#ifdef FLUSH
    if ( ! real_time_output )
    {
        // Flush audio fifo
        if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
        {
            AVCodecContext *c = audio_st->codec;
            AVPacket pkt;
            av_init_packet( &pkt );
            pkt.size = 0;

            if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
                ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
            {
                sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
                pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
            }
            if ( pkt.size <= 0 )
                pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
            if ( pkt.size <= 0 )
                break;

            // Write the compressed frame in the media file
            if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
                pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
            pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index = audio_st->index;
            pkt.data = audio_outbuf;
            if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
            {
                fprintf(stderr, "Error while writing flushed audio frame\n");
                break;
            }
        }

        // Flush video
        if ( video_st && !( oc->oformat->flags & AVFMT_RAWPICTURE ) ) for (;;)
        {
            AVCodecContext *c = video_st->codec;
            AVPacket pkt;
            av_init_packet( &pkt );

            // Encode the image
            pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
            if ( pkt.size <= 0 )
                break;

            if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
                pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
            if( c->coded_frame && c->coded_frame->key_frame )
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index = video_st->index;
            pkt.data = video_outbuf;

            // write the compressed frame in the media file
            if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
            {
                fprintf(stderr, "Error while writing flushed video frame\n");
                break;
            }
        }
    }
#endif

    // close each codec
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    // Write the trailer, if any
    av_write_trailer(oc);

    // Free the streams
    for(i = 0; i < oc->nb_streams; i++)
        av_freep(&oc->streams[i]);

    // Close the output file
    if (!(fmt->flags & AVFMT_NOFILE))
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
        url_fclose(oc->pb);
#else
        url_fclose(&oc->pb);
#endif

    // Clean up input and output frames
    if ( output )
        av_free( output->data[0] );
    av_free( output );
    av_free( input->data[0] );
    av_free( input );
    av_free( video_outbuf );
    av_free( buffer );

    // Free the stream
    av_free(oc);

    // Just in case we terminated on pause
    mlt_properties_set_int( properties, "running", 0 );

    mlt_consumer_stopped( this );

    return NULL;
}

/** Close the consumer.
*/

static void consumer_close( mlt_consumer this )
{
    // Stop the consumer
    mlt_consumer_stop( this );

    // Close the parent
    mlt_consumer_close( this );

    // Free the memory
    free( this );
}