consumer_avformat.c, producer_avformat.c: add FFmpeg multi-thread support via avcodec_thread_init(), driven by the "threads" property or the MLT_AVFORMAT_THREADS environment variable.
[melted] / src / modules / avformat / consumer_avformat.c
1 /*
2 * consumer_avformat.c -- an encoder based on avformat
3 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
4 * Author: Charles Yates <charles.yates@pandora.be>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 // mlt Header files
22 #include <framework/mlt_consumer.h>
23 #include <framework/mlt_frame.h>
24
25 // System header files
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <limits.h>
30 #include <pthread.h>
31 #include <sys/time.h>
32 #include <math.h>
33
34 // avformat header files
35 #include <avformat.h>
36 #ifdef SWSCALE
37 #include <swscale.h>
38 #endif
39
40 //
41 // This structure should be extended and made globally available in mlt
42 //
43
typedef struct
{
	int16_t *buffer;     // interleaved 16-bit PCM samples
	int size;            // allocated capacity of buffer, in samples
	int used;            // number of samples currently queued
	double time;         // time (seconds) associated with the head of the queue
	int frequency;       // sample rate in Hz
	int channels;        // number of interleaved channels
}
*sample_fifo, sample_fifo_s;
54
55 sample_fifo sample_fifo_init( int frequency, int channels )
56 {
57 sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
58 this->frequency = frequency;
59 this->channels = channels;
60 return this;
61 }
62
63 // sample_fifo_clear and check are temporarily aborted (not working as intended)
64
// Drop queued samples so the fifo's clock matches the requested time.
// NOTE(review): the author marked this function as "temporarily aborted
// (not working as intended)" - treat the behaviour below as suspect.
void sample_fifo_clear( sample_fifo this, double time )
{
	// Number of samples spanning the gap between the stored time and the
	// requested time (negative when time precedes this->time)
	int words = ( float )( time - this->time ) * this->frequency * this->channels;
	// Times are compared at 10ms resolution throughout.
	// NOTE(review): when time < this->time, 'words' computed above is
	// negative, so 'words > 0' makes this branch look unreachable -
	// the comparison may have been intended as '>'; confirm before fixing.
	if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 )
	{
		memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) );
		this->used -= words;
		this->time = time;
	}
	// Any other time mismatch discards the queued samples entirely
	else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) )
	{
		this->used = 0;
		this->time = time;
	}
}
80
// Resynchronise the fifo's clock when it is empty.
// NOTE(review): the author marked this function as "temporarily aborted
// (not working as intended)" - confirm whether '<' below should be '!='.
void sample_fifo_check( sample_fifo this, double time )
{
	if ( this->used == 0 )
	{
		// Compare at 10ms resolution; only ever moves the clock backwards
		if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) )
			this->time = time;
	}
}
89
90 void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
91 {
92 if ( ( this->size - this->used ) < count )
93 {
94 this->size += count * 5;
95 this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
96 }
97
98 memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
99 this->used += count;
100 }
101
102 int sample_fifo_used( sample_fifo this )
103 {
104 return this->used;
105 }
106
107 int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
108 {
109 if ( count > this->used )
110 count = this->used;
111
112 memcpy( samples, this->buffer, count * sizeof( int16_t ) );
113 this->used -= count;
114 memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );
115
116 this->time += ( double )count / this->channels / this->frequency;
117
118 return count;
119 }
120
121 void sample_fifo_close( sample_fifo this )
122 {
123 free( this->buffer );
124 free( this );
125 }
126
127 // Forward references.
128 static int consumer_start( mlt_consumer this );
129 static int consumer_stop( mlt_consumer this );
130 static int consumer_is_stopped( mlt_consumer this );
131 static void *consumer_thread( void *arg );
132 static void consumer_close( mlt_consumer this );
133
/** Initialise the avformat consumer.
 *
 * profile - the MLT profile this consumer renders against
 * arg     - optional target filename/URL, stored as the "target" property
 *
 * Returns the new consumer, or NULL if allocation failed.
 */

mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
{
	// Allocate the consumer
	mlt_consumer this = mlt_consumer_new( profile );

	// If memory allocated and initialises without error
	if ( this != NULL )
	{
		// Get properties from the consumer
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );

		// Assign close callback
		this->close = consumer_close;

		// Interpret the argument
		if ( arg != NULL )
			mlt_properties_set( properties, "target", arg );

		// Queue of rendered frames awaiting video encoding
		mlt_properties_set_data( properties, "frame_queue", mlt_deque_init( ), 0, ( mlt_destructor )mlt_deque_close, NULL );

		// Set avformat defaults (all lifted from ffmpeg.c)
		mlt_properties_set_int( properties, "audio_bit_rate", 128000 );
		mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 );
		mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 );
		mlt_properties_set_int( properties, "gop_size", 12 );
		mlt_properties_set_int( properties, "b_frames", 0 );
		mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
		mlt_properties_set_double( properties, "qscale", 0 );
		mlt_properties_set_int( properties, "me_method", ME_EPZS );
		// Motion estimation / comparison function defaults
		mlt_properties_set_int( properties, "mb_cmp", FF_CMP_SAD );
		mlt_properties_set_int( properties, "ildct_cmp", FF_CMP_VSAD );
		mlt_properties_set_int( properties, "sub_cmp", FF_CMP_SAD );
		mlt_properties_set_int( properties, "cmp", FF_CMP_SAD );
		mlt_properties_set_int( properties, "pre_cmp", FF_CMP_SAD );
		mlt_properties_set_int( properties, "pre_me", 0 );
		// Psychovisual masking defaults (all disabled)
		mlt_properties_set_double( properties, "lumi_mask", 0 );
		mlt_properties_set_double( properties, "dark_mask", 0 );
		mlt_properties_set_double( properties, "scplx_mask", 0 );
		mlt_properties_set_double( properties, "tcplx_mask", 0 );
		mlt_properties_set_double( properties, "p_mask", 0 );
		mlt_properties_set_int( properties, "qns", 0 );
		// Quantiser ranges and rate-control defaults
		mlt_properties_set_int( properties, "video_qmin", 2 );
		mlt_properties_set_int( properties, "video_qmax", 31 );
		mlt_properties_set_int( properties, "video_lmin", 2*FF_QP2LAMBDA );
		mlt_properties_set_int( properties, "video_lmax", 31*FF_QP2LAMBDA );
		mlt_properties_set_int( properties, "video_mb_qmin", 2 );
		mlt_properties_set_int( properties, "video_mb_qmax", 31 );
		mlt_properties_set_int( properties, "video_qdiff", 3 );
		mlt_properties_set_double( properties, "video_qblur", 0.5 );
		mlt_properties_set_double( properties, "video_qcomp", 0.5 );
		mlt_properties_set_int( properties, "video_rc_max_rate", 0 );
		mlt_properties_set_int( properties, "video_rc_min_rate", 0 );
		mlt_properties_set_int( properties, "video_rc_buffer_size", 0 );
		mlt_properties_set_double( properties, "video_rc_buffer_aggressivity", 1.0 );
		mlt_properties_set_double( properties, "video_rc_initial_cplx", 0 );
		mlt_properties_set_double( properties, "video_i_qfactor", -0.8 );
		mlt_properties_set_double( properties, "video_b_qfactor", 1.25 );
		mlt_properties_set_double( properties, "video_i_qoffset", 0 );
		mlt_properties_set_double( properties, "video_b_qoffset", 1.25 );
		mlt_properties_set_int( properties, "video_intra_quant_bias", FF_DEFAULT_QUANT_BIAS );
		mlt_properties_set_int( properties, "video_inter_quant_bias", FF_DEFAULT_QUANT_BIAS );
		mlt_properties_set_int( properties, "dct_algo", 0 );
		mlt_properties_set_int( properties, "idct_algo", 0 );
		mlt_properties_set_int( properties, "me_threshold", 0 );
		mlt_properties_set_int( properties, "mb_threshold", 0 );
		mlt_properties_set_int( properties, "intra_dc_precision", 0 );
		mlt_properties_set_int( properties, "strict", 0 );
		mlt_properties_set_int( properties, "error_rate", 0 );
		mlt_properties_set_int( properties, "noise_reduction", 0 );
		mlt_properties_set_int( properties, "sc_threshold", 0 );
		mlt_properties_set_int( properties, "me_range", 0 );
		mlt_properties_set_int( properties, "coder", 0 );
		mlt_properties_set_int( properties, "context", 0 );
		mlt_properties_set_int( properties, "predictor", 0 );
		// Interlaced DCT / motion estimation (off by default)
		mlt_properties_set_int( properties, "ildct", 0 );
		mlt_properties_set_int( properties, "ilme", 0 );

		// Ensure termination at end of the stream
		mlt_properties_set_int( properties, "terminate_on_pause", 1 );

		// Set up start/stop/terminated callbacks
		this->start = consumer_start;
		this->stop = consumer_stop;
		this->is_stopped = consumer_is_stopped;
	}

	// Return this
	return this;
}
227
228 /** Start the consumer.
229 */
230
231 static int consumer_start( mlt_consumer this )
232 {
233 // Get the properties
234 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
235
236 // Check that we're not already running
237 if ( !mlt_properties_get_int( properties, "running" ) )
238 {
239 // Allocate a thread
240 pthread_t *thread = calloc( 1, sizeof( pthread_t ) );
241
242 // Get the width and height
243 int width = mlt_properties_get_int( properties, "width" );
244 int height = mlt_properties_get_int( properties, "height" );
245
246 // Obtain the size property
247 char *size = mlt_properties_get( properties, "size" );
248
249 // Interpret it
250 if ( size != NULL )
251 {
252 int tw, th;
253 if ( sscanf( size, "%dx%d", &tw, &th ) == 2 && tw > 0 && th > 0 )
254 {
255 width = tw;
256 height = th;
257 }
258 else
259 {
260 fprintf( stderr, "consumer_avformat: Invalid size property %s - ignoring.\n", size );
261 }
262 }
263
264 // Now ensure we honour the multiple of two requested by libavformat
265 mlt_properties_set_int( properties, "width", ( width / 2 ) * 2 );
266 mlt_properties_set_int( properties, "height", ( height / 2 ) * 2 );
267
268 // Assign the thread to properties
269 mlt_properties_set_data( properties, "thread", thread, sizeof( pthread_t ), free, NULL );
270
271 // Set the running state
272 mlt_properties_set_int( properties, "running", 1 );
273
274 // Create the thread
275 pthread_create( thread, NULL, consumer_thread, this );
276 }
277 return 0;
278 }
279
280 /** Stop the consumer.
281 */
282
283 static int consumer_stop( mlt_consumer this )
284 {
285 // Get the properties
286 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
287
288 // Check that we're running
289 if ( mlt_properties_get_int( properties, "running" ) )
290 {
291 // Get the thread
292 pthread_t *thread = mlt_properties_get_data( properties, "thread", NULL );
293
294 // Stop the thread
295 mlt_properties_set_int( properties, "running", 0 );
296
297 // Wait for termination
298 pthread_join( *thread, NULL );
299 }
300
301 return 0;
302 }
303
304 /** Determine if the consumer is stopped.
305 */
306
307 static int consumer_is_stopped( mlt_consumer this )
308 {
309 // Get the properties
310 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
311 return !mlt_properties_get_int( properties, "running" );
312 }
313
314 /** Add an audio output stream
315 */
316
317 static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
318 {
319 // Get the properties
320 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
321
322 // Create a new stream
323 AVStream *st = av_new_stream( oc, 1 );
324
325 // If created, then initialise from properties
326 if ( st != NULL )
327 {
328 AVCodecContext *c = st->codec;
329 int thread_count = mlt_properties_get_int( properties, "threads" );
330 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
331 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
332
333 c->codec_id = codec_id;
334 c->codec_type = CODEC_TYPE_AUDIO;
335
336 // Put sample parameters
337 c->bit_rate = mlt_properties_get_int( properties, "audio_bit_rate" );
338 c->sample_rate = mlt_properties_get_int( properties, "frequency" );
339 c->channels = mlt_properties_get_int( properties, "channels" );
340
341 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
342 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
343
344 // Allow the user to override the audio fourcc
345 if ( mlt_properties_get( properties, "afourcc" ) )
346 {
347 char *tail = NULL;
348 char *arg = mlt_properties_get( properties, "afourcc" );
349 int tag = strtol( arg, &tail, 0);
350 if( !tail || *tail )
351 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
352 c->codec_tag = tag;
353 }
354 if ( thread_count > 1 )
355 {
356 avcodec_thread_init( c, thread_count );
357 c->thread_count = thread_count;
358 }
359 }
360 else
361 {
362 fprintf( stderr, "Could not allocate a stream for audio\n" );
363 }
364
365 return st;
366 }
367
368 static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size )
369 {
370 // We will return the audio input size from here
371 int audio_input_frame_size = 0;
372
373 // Get the context
374 AVCodecContext *c = st->codec;
375
376 // Find the encoder
377 AVCodec *codec = avcodec_find_encoder( c->codec_id );
378
379 // Continue if codec found and we can open it
380 if ( codec != NULL && avcodec_open(c, codec) >= 0 )
381 {
382 // ugly hack for PCM codecs (will be removed ASAP with new PCM
383 // support to compute the input frame size in samples
384 if ( c->frame_size <= 1 )
385 {
386 audio_input_frame_size = audio_outbuf_size / c->channels;
387 switch(st->codec->codec_id)
388 {
389 case CODEC_ID_PCM_S16LE:
390 case CODEC_ID_PCM_S16BE:
391 case CODEC_ID_PCM_U16LE:
392 case CODEC_ID_PCM_U16BE:
393 audio_input_frame_size >>= 1;
394 break;
395 default:
396 break;
397 }
398 }
399 else
400 {
401 audio_input_frame_size = c->frame_size;
402 }
403
404 // Some formats want stream headers to be seperate (hmm)
405 if( !strcmp( oc->oformat->name, "mp4" ) ||
406 !strcmp( oc->oformat->name, "mov" ) ||
407 !strcmp( oc->oformat->name, "3gp" ) )
408 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
409 }
410 else
411 {
412 fprintf( stderr, "Unable to encode audio - disabling audio output.\n" );
413 }
414
415 return audio_input_frame_size;
416 }
417
418 static void close_audio( AVFormatContext *oc, AVStream *st )
419 {
420 avcodec_close( st->codec );
421 }
422
423 /** Add a video output stream
424 */
425
426 static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
427 {
428 // Get the properties
429 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
430
431 // Create a new stream
432 AVStream *st = av_new_stream( oc, 0 );
433
434 if ( st != NULL )
435 {
436 char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
437 double ar = mlt_properties_get_double( properties, "display_ratio" );
438 AVCodecContext *c = st->codec;
439 int thread_count = mlt_properties_get_int( properties, "threads" );
440 if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
441 thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
442
443 c->codec_id = codec_id;
444 c->codec_type = CODEC_TYPE_VIDEO;
445
446 // put sample parameters
447 c->bit_rate = mlt_properties_get_int( properties, "video_bit_rate" );
448 c->bit_rate_tolerance = mlt_properties_get_int( properties, "video_bit_rate_tolerance" );
449 c->width = mlt_properties_get_int( properties, "width" );
450 c->height = mlt_properties_get_int( properties, "height" );
451 c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
452 c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
453 c->gop_size = mlt_properties_get_int( properties, "gop_size" );
454 c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
455
456 if ( mlt_properties_get_int( properties, "b_frames" ) )
457 {
458 c->max_b_frames = mlt_properties_get_int( properties, "b_frames" );
459 c->b_frame_strategy = 0;
460 c->b_quant_factor = 2.0;
461 }
462
463 c->mb_decision = mlt_properties_get_int( properties, "mb_decision" );
464 c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
465 c->mb_cmp = mlt_properties_get_int( properties, "mb_cmp" );
466 c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" );
467 c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" );
468 c->me_cmp = mlt_properties_get_int( properties, "cmp" );
469 c->me_pre_cmp = mlt_properties_get_int( properties, "pre_cmp" );
470 c->pre_me = mlt_properties_get_int( properties, "pre_me" );
471 c->lumi_masking = mlt_properties_get_double( properties, "lumi_mask" );
472 c->dark_masking = mlt_properties_get_double( properties, "dark_mask" );
473 c->spatial_cplx_masking = mlt_properties_get_double( properties, "scplx_mask" );
474 c->temporal_cplx_masking = mlt_properties_get_double( properties, "tcplx_mask" );
475 c->p_masking = mlt_properties_get_double( properties, "p_mask" );
476 c->quantizer_noise_shaping= mlt_properties_get_int( properties, "qns" );
477 c->qmin = mlt_properties_get_int( properties, "video_qmin" );
478 c->qmax = mlt_properties_get_int( properties, "video_qmax" );
479 c->lmin = mlt_properties_get_int( properties, "video_lmin" );
480 c->lmax = mlt_properties_get_int( properties, "video_lmax" );
481 c->mb_qmin = mlt_properties_get_int( properties, "video_mb_qmin" );
482 c->mb_qmax = mlt_properties_get_int( properties, "video_mb_qmax" );
483 c->max_qdiff = mlt_properties_get_int( properties, "video_qdiff" );
484 c->qblur = mlt_properties_get_double( properties, "video_qblur" );
485 c->qcompress = mlt_properties_get_double( properties, "video_qcomp" );
486
487 if ( mlt_properties_get_double( properties, "qscale" ) > 0 )
488 {
489 c->flags |= CODEC_FLAG_QSCALE;
490 st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
491 }
492
493 // Allow the user to override the video fourcc
494 if ( mlt_properties_get( properties, "vfourcc" ) )
495 {
496 char *tail = NULL;
497 const char *arg = mlt_properties_get( properties, "vfourcc" );
498 int tag = strtol( arg, &tail, 0);
499 if( !tail || *tail )
500 tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
501 c->codec_tag = tag;
502 }
503
504 // Some formats want stream headers to be seperate
505 if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
506 c->flags |= CODEC_FLAG_GLOBAL_HEADER;
507
508 c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
509 c->rc_min_rate = mlt_properties_get_int( properties, "video_rc_min_rate" );
510 c->rc_buffer_size = mlt_properties_get_int( properties, "video_rc_buffer_size" );
511 c->rc_initial_buffer_occupancy = c->rc_buffer_size*3/4;
512 c->rc_buffer_aggressivity= mlt_properties_get_double( properties, "video_rc_buffer_aggressivity" );
513 c->rc_initial_cplx= mlt_properties_get_double( properties, "video_rc_initial_cplx" );
514 c->i_quant_factor = mlt_properties_get_double( properties, "video_i_qfactor" );
515 c->b_quant_factor = mlt_properties_get_double( properties, "video_b_qfactor" );
516 c->i_quant_offset = mlt_properties_get_double( properties, "video_i_qoffset" );
517 c->b_quant_offset = mlt_properties_get_double( properties, "video_b_qoffset" );
518 c->intra_quant_bias = mlt_properties_get_int( properties, "video_intra_quant_bias" );
519 c->inter_quant_bias = mlt_properties_get_int( properties, "video_inter_quant_bias" );
520 c->dct_algo = mlt_properties_get_int( properties, "dct_algo" );
521 c->idct_algo = mlt_properties_get_int( properties, "idct_algo" );
522 c->me_threshold= mlt_properties_get_int( properties, "me_threshold" );
523 c->mb_threshold= mlt_properties_get_int( properties, "mb_threshold" );
524 c->intra_dc_precision= mlt_properties_get_int( properties, "intra_dc_precision" );
525 c->strict_std_compliance = mlt_properties_get_int( properties, "strict" );
526 c->error_rate = mlt_properties_get_int( properties, "error_rate" );
527 c->noise_reduction= mlt_properties_get_int( properties, "noise_reduction" );
528 c->scenechange_threshold= mlt_properties_get_int( properties, "sc_threshold" );
529 c->me_range = mlt_properties_get_int( properties, "me_range" );
530 c->coder_type= mlt_properties_get_int( properties, "coder" );
531 c->context_model= mlt_properties_get_int( properties, "context" );
532 c->prediction_method= mlt_properties_get_int( properties, "predictor" );
533 c->me_method = mlt_properties_get_int( properties, "me_method" );
534 if ( mlt_properties_get_int( properties, "progressive" ) == 0 &&
535 mlt_properties_get_int( properties, "deinterlace" ) == 0 )
536 {
537 if ( mlt_properties_get_int( properties, "ildct" ) )
538 c->flags |= CODEC_FLAG_INTERLACED_DCT;
539 if ( mlt_properties_get_int( properties, "ilme" ) )
540 c->flags |= CODEC_FLAG_INTERLACED_ME;
541 }
542 if ( thread_count > 1 )
543 {
544 avcodec_thread_init( c, thread_count );
545 c->thread_count = thread_count;
546 }
547 }
548 else
549 {
550 fprintf( stderr, "Could not allocate a stream for video\n" );
551 }
552
553 return st;
554 }
555
556 static AVFrame *alloc_picture( int pix_fmt, int width, int height )
557 {
558 // Allocate a frame
559 AVFrame *picture = avcodec_alloc_frame();
560
561 // Determine size of the
562 int size = avpicture_get_size(pix_fmt, width, height);
563
564 // Allocate the picture buf
565 uint8_t *picture_buf = av_malloc(size);
566
567 // If we have both, then fill the image
568 if ( picture != NULL && picture_buf != NULL )
569 {
570 // Fill the frame with the allocated buffer
571 avpicture_fill( (AVPicture *)picture, picture_buf, pix_fmt, width, height);
572 }
573 else
574 {
575 // Something failed - clean up what we can
576 av_free( picture );
577 av_free( picture_buf );
578 picture = NULL;
579 }
580
581 return picture;
582 }
583
584 static int open_video(AVFormatContext *oc, AVStream *st)
585 {
586 // Get the codec
587 AVCodecContext *video_enc = st->codec;
588
589 // find the video encoder
590 AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
591
592 if( codec && codec->pix_fmts )
593 {
594 const enum PixelFormat *p = codec->pix_fmts;
595 for( ; *p!=-1; p++ )
596 {
597 if( *p == video_enc->pix_fmt )
598 break;
599 }
600 if( *p == -1 )
601 video_enc->pix_fmt = codec->pix_fmts[ 0 ];
602 }
603
604 // Open the codec safely
605 return codec != NULL && avcodec_open( video_enc, codec ) >= 0;
606 }
607
608 void close_video(AVFormatContext *oc, AVStream *st)
609 {
610 avcodec_close(st->codec);
611 }
612
613 static inline long time_difference( struct timeval *time1 )
614 {
615 struct timeval time2;
616 gettimeofday( &time2, NULL );
617 return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec;
618 }
619
620 /** The main thread - the argument is simply the consumer.
621 */
622
623 static void *consumer_thread( void *arg )
624 {
625 // Map the argument to the object
626 mlt_consumer this = arg;
627
628 // Get the properties
629 mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
630
631 // Get the terminate on pause property
632 int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
633 int terminated = 0;
634
635 // Determine if feed is slow (for realtime stuff)
636 int real_time_output = mlt_properties_get_int( properties, "real_time" );
637
638 // Time structures
639 struct timeval ante;
640
641 // Get the frame rate
642 double fps = mlt_properties_get_double( properties, "fps" );
643
644 // Get width and height
645 int width = mlt_properties_get_int( properties, "width" );
646 int height = mlt_properties_get_int( properties, "height" );
647 int img_width = width;
648 int img_height = height;
649
650 // Get default audio properties
651 mlt_audio_format aud_fmt = mlt_audio_pcm;
652 int channels = mlt_properties_get_int( properties, "channels" );
653 int frequency = mlt_properties_get_int( properties, "frequency" );
654 int16_t *pcm = NULL;
655 int samples = 0;
656
657 // AVFormat audio buffer and frame size
658 int audio_outbuf_size = 10000;
659 uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
660 int audio_input_frame_size = 0;
661
662 // AVFormat video buffer and frame count
663 int frame_count = 0;
664 int video_outbuf_size = ( 1024 * 1024 );
665 uint8_t *video_outbuf = av_malloc( video_outbuf_size );
666
667 // Used for the frame properties
668 mlt_frame frame = NULL;
669 mlt_properties frame_properties = NULL;
670
671 // Get the queues
672 mlt_deque queue = mlt_properties_get_data( properties, "frame_queue", NULL );
673 sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );
674
675 // Need two av pictures for converting
676 AVFrame *output = NULL;
677 AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height );
678
679 // For receiving images from an mlt_frame
680 uint8_t *image;
681 mlt_image_format img_fmt = mlt_image_yuv422;
682
683 // For receiving audio samples back from the fifo
684 int16_t *buffer = av_malloc( 48000 * 2 );
685 int count = 0;
686
687 // Allocate the context
688 AVFormatContext *oc = av_alloc_format_context( );
689
690 // Streams
691 AVStream *audio_st = NULL;
692 AVStream *video_st = NULL;
693
694 // Time stamps
695 double audio_pts = 0;
696 double video_pts = 0;
697
698 // Loop variable
699 int i;
700
701 // Frames despatched
702 long int frames = 0;
703 long int total_time = 0;
704
705 // Determine the format
706 AVOutputFormat *fmt = NULL;
707 char *filename = mlt_properties_get( properties, "target" );
708 char *format = mlt_properties_get( properties, "format" );
709 char *vcodec = mlt_properties_get( properties, "vcodec" );
710 char *acodec = mlt_properties_get( properties, "acodec" );
711
712 // Used to store and override codec ids
713 int audio_codec_id;
714 int video_codec_id;
715
716 // Check for user selected format first
717 if ( format != NULL )
718 fmt = guess_format( format, NULL, NULL );
719
720 // Otherwise check on the filename
721 if ( fmt == NULL && filename != NULL )
722 fmt = guess_format( NULL, filename, NULL );
723
724 // Otherwise default to mpeg
725 if ( fmt == NULL )
726 fmt = guess_format( "mpeg", NULL, NULL );
727
728 // We need a filename - default to stdout?
729 if ( filename == NULL || !strcmp( filename, "" ) )
730 filename = "pipe:";
731
732 // Get the codec ids selected
733 audio_codec_id = fmt->audio_codec;
734 video_codec_id = fmt->video_codec;
735
736 // Check for audio codec overides
737 if ( acodec != NULL )
738 {
739 AVCodec *p = first_avcodec;
740 while( p != NULL )
741 {
742 if ( !strcmp( p->name, acodec ) && p->type == CODEC_TYPE_AUDIO )
743 break;
744 p = p->next;
745 }
746 if ( p != NULL )
747 audio_codec_id = p->id;
748 else
749 fprintf( stderr, "consumer_avcodec: audio codec %s unrecognised - ignoring\n", acodec );
750 }
751
752 // Check for video codec overides
753 if ( vcodec != NULL )
754 {
755 AVCodec *p = first_avcodec;
756 while( p != NULL )
757 {
758 if ( !strcmp( p->name, vcodec ) && p->type == CODEC_TYPE_VIDEO )
759 break;
760 p = p->next;
761 }
762 if ( p != NULL )
763 video_codec_id = p->id;
764 else
765 fprintf( stderr, "consumer_avcodec: video codec %s unrecognised - ignoring\n", vcodec );
766 }
767
768 // Update the output context
769
770 // Write metadata
771 char *tmp = NULL;
772 int metavalue;
773
774 tmp = mlt_properties_get( properties, "meta.attr.title.markup");
775 if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );
776
777 tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
778 if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );
779
780 tmp = mlt_properties_get( properties, "meta.attr.author.markup");
781 if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );
782
783 tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
784 if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );
785
786 tmp = mlt_properties_get( properties, "meta.attr.album.markup");
787 if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );
788
789 metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
790 if (metavalue != 0) oc->year = metavalue;
791
792 metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
793 if (metavalue != 0) oc->track = metavalue;
794
795 oc->oformat = fmt;
796 snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
797
798 // Add audio and video streams
799 if ( fmt->video_codec != CODEC_ID_NONE )
800 video_st = add_video_stream( this, oc, video_codec_id );
801 if ( fmt->audio_codec != CODEC_ID_NONE )
802 audio_st = add_audio_stream( this, oc, audio_codec_id );
803
804 // Set the parameters (even though we have none...)
805 if ( av_set_parameters(oc, NULL) >= 0 )
806 {
807 if ( video_st && !open_video( oc, video_st ) )
808 video_st = NULL;
809 if ( audio_st )
810 audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size );
811
812 // Open the output file, if needed
813 if ( !( fmt->flags & AVFMT_NOFILE ) )
814 {
815 if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
816 {
817 fprintf(stderr, "Could not open '%s'\n", filename);
818 mlt_properties_set_int( properties, "running", 0 );
819 }
820 }
821
822 // Write the stream header, if any
823 if ( mlt_properties_get_int( properties, "running" ) )
824 av_write_header( oc );
825 }
826 else
827 {
828 fprintf(stderr, "Invalid output format parameters\n");
829 mlt_properties_set_int( properties, "running", 0 );
830 }
831
832 // Allocate picture
833 if ( video_st )
834 output = alloc_picture( video_st->codec->pix_fmt, width, height );
835
836 // Last check - need at least one stream
837 if ( audio_st == NULL && video_st == NULL )
838 mlt_properties_set_int( properties, "running", 0 );
839
840 // Get the starting time (can ignore the times above)
841 gettimeofday( &ante, NULL );
842
843 // Loop while running
844 while( mlt_properties_get_int( properties, "running" ) && !terminated )
845 {
846 // Get the frame
847 frame = mlt_consumer_rt_frame( this );
848
849 // Check that we have a frame to work with
850 if ( frame != NULL )
851 {
852 // Increment frames despatched
853 frames ++;
854
855 // Default audio args
856 frame_properties = MLT_FRAME_PROPERTIES( frame );
857
858 // Check for the terminated condition
859 terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;
860
861 // Get audio and append to the fifo
862 if ( !terminated && audio_st )
863 {
864 samples = mlt_sample_calculator( fps, frequency, count ++ );
865 mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
866
867 // Create the fifo if we don't have one
868 if ( fifo == NULL )
869 {
870 fifo = sample_fifo_init( frequency, channels );
871 mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
872 }
873
874 if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
875 memset( pcm, 0, samples * channels * 2 );
876
877 // Append the samples
878 sample_fifo_append( fifo, pcm, samples * channels );
879 total_time += ( samples * 1000000 ) / frequency;
880 }
881
882 // Encode the image
883 if ( !terminated && video_st )
884 mlt_deque_push_back( queue, frame );
885 else
886 mlt_frame_close( frame );
887 }
888
889 // While we have stuff to process, process...
890 while ( 1 )
891 {
892 if (audio_st)
893 audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
894 else
895 audio_pts = 0.0;
896
897 if (video_st)
898 video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
899 else
900 video_pts = 0.0;
901
902 // Write interleaved audio and video frames
903 if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
904 {
905 if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) )
906 {
907 AVCodecContext *c;
908 AVPacket pkt;
909 av_init_packet( &pkt );
910
911 c = audio_st->codec;
912
913 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
914
915 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
916 // Write the compressed frame in the media file
917 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
918 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
919 pkt.flags |= PKT_FLAG_KEY;
920 pkt.stream_index= audio_st->index;
921 pkt.data= audio_outbuf;
922
923 if ( pkt.size )
924 if ( av_interleaved_write_frame( oc, &pkt ) != 0)
925 fprintf(stderr, "Error while writing audio frame\n");
926
927 audio_pts += c->frame_size;
928 }
929 else
930 {
931 break;
932 }
933 }
934 else if ( video_st )
935 {
936 if ( mlt_deque_count( queue ) )
937 {
938 int out_size, ret;
939 AVCodecContext *c;
940
941 frame = mlt_deque_pop_front( queue );
942 frame_properties = MLT_FRAME_PROPERTIES( frame );
943
944 c = video_st->codec;
945
946 if ( mlt_properties_get_int( frame_properties, "rendered" ) )
947 {
948 int i = 0;
949 int j = 0;
950 uint8_t *p;
951 uint8_t *q;
952
953 mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
954
955 mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
956
957 q = image;
958
959 // Convert the mlt frame to an AVPicture
960 for ( i = 0; i < height; i ++ )
961 {
962 p = input->data[ 0 ] + i * input->linesize[ 0 ];
963 j = width;
964 while( j -- )
965 {
966 *p ++ = *q ++;
967 *p ++ = *q ++;
968 }
969 }
970
971 // Do the colour space conversion
972 #ifdef SWSCALE
973 struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
974 width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
975 sws_scale( context, input->data, input->linesize, 0, height,
976 output->data, output->linesize);
977 sws_freeContext( context );
978 #else
979 img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
980 #endif
981
982 // Apply the alpha if applicable
983 if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
984 {
985 uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
986 register int n;
987
988 for ( i = 0; i < height; i ++ )
989 {
990 n = ( width + 7 ) / 8;
991 p = output->data[ 0 ] + i * output->linesize[ 0 ];
992
993 #ifndef __DARWIN__
994 p += 3;
995 #endif
996
997 switch( width % 8 )
998 {
999 case 0: do { *p = *alpha++; p += 4;
1000 case 7: *p = *alpha++; p += 4;
1001 case 6: *p = *alpha++; p += 4;
1002 case 5: *p = *alpha++; p += 4;
1003 case 4: *p = *alpha++; p += 4;
1004 case 3: *p = *alpha++; p += 4;
1005 case 2: *p = *alpha++; p += 4;
1006 case 1: *p = *alpha++; p += 4;
1007 }
1008 while( --n );
1009 }
1010 }
1011 }
1012 }
1013
1014 if (oc->oformat->flags & AVFMT_RAWPICTURE)
1015 {
1016 // raw video case. The API will change slightly in the near future for that
1017 AVPacket pkt;
1018 av_init_packet(&pkt);
1019
1020 pkt.flags |= PKT_FLAG_KEY;
1021 pkt.stream_index= video_st->index;
1022 pkt.data= (uint8_t *)output;
1023 pkt.size= sizeof(AVPicture);
1024
1025 ret = av_write_frame(oc, &pkt);
1026 video_pts += c->frame_size;
1027 }
1028 else
1029 {
1030 // Set the quality
1031 output->quality = video_st->quality;
1032
1033 // Set frame interlace hints
1034 output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
1035 output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
1036
1037 // Encode the image
1038 out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
1039
1040 // If zero size, it means the image was buffered
1041 if (out_size > 0)
1042 {
1043 AVPacket pkt;
1044 av_init_packet( &pkt );
1045
1046 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1047 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1048 if( c->coded_frame && c->coded_frame->key_frame )
1049 pkt.flags |= PKT_FLAG_KEY;
1050 pkt.stream_index= video_st->index;
1051 pkt.data= video_outbuf;
1052 pkt.size= out_size;
1053
1054 // write the compressed frame in the media file
1055 ret = av_interleaved_write_frame(oc, &pkt);
1056 video_pts += c->frame_size;
1057 }
1058 else
1059 {
1060 fprintf( stderr, "Error with video encode\n" );
1061 }
1062 }
1063 frame_count++;
1064 mlt_frame_close( frame );
1065 }
1066 else
1067 {
1068 break;
1069 }
1070 }
1071 }
1072
1073 if ( real_time_output && frames % 12 == 0 )
1074 {
1075 long passed = time_difference( &ante );
1076 if ( fifo != NULL )
1077 {
1078 long pending = ( ( ( long )sample_fifo_used( fifo ) * 1000 ) / frequency ) * 1000;
1079 passed -= pending;
1080 }
1081 if ( passed < total_time )
1082 {
1083 long total = ( total_time - passed );
1084 struct timespec t = { total / 1000000, ( total % 1000000 ) * 1000 };
1085 nanosleep( &t, NULL );
1086 }
1087 }
1088 }
1089
1090 #ifdef FLUSH
1091 if ( ! real_time_output )
1092 {
1093 // Flush audio fifo
1094 if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
1095 {
1096 AVCodecContext *c = audio_st->codec;
1097 AVPacket pkt;
1098 av_init_packet( &pkt );
1099 pkt.size = 0;
1100
1101 if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
1102 ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
1103 {
1104 sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
1105 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
1106 }
1107 if ( pkt.size <= 0 )
1108 pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
1109 if ( pkt.size <= 0 )
1110 break;
1111
1112 // Write the compressed frame in the media file
1113 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1114 pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
1115 pkt.flags |= PKT_FLAG_KEY;
1116 pkt.stream_index = audio_st->index;
1117 pkt.data = audio_outbuf;
1118 if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
1119 {
1120 fprintf(stderr, "Error while writing flushed audio frame\n");
1121 break;
1122 }
1123 }
1124
1125 // Flush video
1126 if ( video_st && !( oc->oformat->flags & AVFMT_RAWPICTURE ) ) for (;;)
1127 {
1128 AVCodecContext *c = video_st->codec;
1129 AVPacket pkt;
1130 av_init_packet( &pkt );
1131
1132 // Encode the image
1133 pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
1134 if ( pkt.size <= 0 )
1135 break;
1136
1137 if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
1138 pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
1139 if( c->coded_frame && c->coded_frame->key_frame )
1140 pkt.flags |= PKT_FLAG_KEY;
1141 pkt.stream_index = video_st->index;
1142 pkt.data = video_outbuf;
1143
1144 // write the compressed frame in the media file
1145 if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
1146 {
1147 fprintf(stderr, "Error while writing flushed video frame\n");
1148 break;
1149 }
1150 }
1151 }
1152 #endif
1153
1154 // close each codec
1155 if (video_st)
1156 close_video(oc, video_st);
1157 if (audio_st)
1158 close_audio(oc, audio_st);
1159
1160 // Write the trailer, if any
1161 av_write_trailer(oc);
1162
1163 // Free the streams
1164 for(i = 0; i < oc->nb_streams; i++)
1165 av_freep(&oc->streams[i]);
1166
1167 // Close the output file
1168 if (!(fmt->flags & AVFMT_NOFILE))
1169 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
1170 url_fclose(oc->pb);
1171 #else
1172 url_fclose(&oc->pb);
1173 #endif
1174
1175 // Clean up input and output frames
1176 if ( output )
1177 av_free( output->data[0] );
1178 av_free( output );
1179 av_free( input->data[0] );
1180 av_free( input );
1181 av_free( video_outbuf );
1182 av_free( buffer );
1183
1184 // Free the stream
1185 av_free(oc);
1186
1187 // Just in case we terminated on pause
1188 mlt_properties_set_int( properties, "running", 0 );
1189
1190 mlt_consumer_stopped( this );
1191
1192 return NULL;
1193 }
1194
/** Close the consumer: stop the worker thread, release the parent
    consumer's resources and free the allocation.
*/
1197
1198 static void consumer_close( mlt_consumer this )
1199 {
1200 // Stop the consumer
1201 mlt_consumer_stop( this );
1202
1203 // Close the parent
1204 mlt_consumer_close( this );
1205
1206 // Free the memory
1207 free( this );
1208 }