X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fconsumer_avformat.c;h=9b431259fe7503f011a6dd9b69b6e40cd8af60fa;hb=59456c0f8d288d6bfa6df60d692b9209741ab02e;hp=cf31d58cfe372c6f5b70b3351abc77812b4d3e16;hpb=ccb141370c67b8c328244dc8931692f5d3722266;p=melted diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c index cf31d58..9b43125 100644 --- a/src/modules/avformat/consumer_avformat.c +++ b/src/modules/avformat/consumer_avformat.c @@ -29,23 +29,60 @@ #include #include #include - +#include #include // avformat header files -#include +#include + +// +// This structure should be extended and made globally available in mlt +// typedef struct { int16_t *buffer; int size; int used; + double time; + int frequency; + int channels; } *sample_fifo, sample_fifo_s; -sample_fifo sample_fifo_init( ) +sample_fifo sample_fifo_init( int frequency, int channels ) +{ + sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) ); + this->frequency = frequency; + this->channels = channels; + return this; +} + +// sample_fifo_clear and check are temporarily aborted (not working as intended) + +void sample_fifo_clear( sample_fifo this, double time ) +{ + int words = ( float )( time - this->time ) * this->frequency * this->channels; + if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 ) + { + memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) ); + this->used -= words; + this->time = time; + } + else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) ) + { + this->used = 0; + this->time = time; + } +} + +void sample_fifo_check( sample_fifo this, double time ) { - return calloc( 1, sizeof( sample_fifo_s ) ); + if ( this->used == 0 ) + { + if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) ) + this->time = time; + } } void sample_fifo_append( sample_fifo this, int16_t *samples, int count ) @@ -74,6 +111,8 @@ int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count ) this->used -= count; memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) ); + this->time += ( double )count / this->channels / this->frequency; + return count; } @@ -112,19 +151,63 @@ mlt_consumer consumer_avformat_init( char *arg ) mlt_properties_set( properties, "target", arg ); // sample and frame queue - mlt_properties_set_data( properties, "sample_fifo", sample_fifo_init( ), 0, ( mlt_destructor )sample_fifo_close, NULL ); mlt_properties_set_data( properties, "frame_queue", mlt_deque_init( ), 0, ( mlt_destructor )mlt_deque_close, NULL ); - // Set avformat defaults + // Set avformat defaults (all lifted from ffmpeg.c) mlt_properties_set_int( properties, "audio_bit_rate", 128000 ); mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 ); mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 ); mlt_properties_set_int( properties, "frame_rate_base", 1 ); mlt_properties_set_int( properties, "gop_size", 12 ); - mlt_properties_set_int( properties, "max_b_frames", 0 ); - mlt_properties_set_int( properties, "mb_decision", 0 ); + mlt_properties_set_int( properties, "b_frames", 0 ); + mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE ); mlt_properties_set_double( properties, "qscale", 0 ); mlt_properties_set_int( properties, "me_method", ME_EPZS ); + mlt_properties_set_int( properties, "mb_cmp", FF_CMP_SAD ); + mlt_properties_set_int( properties, 
"ildct_cmp", FF_CMP_VSAD ); + mlt_properties_set_int( properties, "sub_cmp", FF_CMP_SAD ); + mlt_properties_set_int( properties, "cmp", FF_CMP_SAD ); + mlt_properties_set_int( properties, "pre_cmp", FF_CMP_SAD ); + mlt_properties_set_int( properties, "pre_me", 0 ); + mlt_properties_set_double( properties, "lumi_mask", 0 ); + mlt_properties_set_double( properties, "dark_mask", 0 ); + mlt_properties_set_double( properties, "scplx_mask", 0 ); + mlt_properties_set_double( properties, "tcplx_mask", 0 ); + mlt_properties_set_double( properties, "p_mask", 0 ); + mlt_properties_set_int( properties, "qns", 0 ); + mlt_properties_set_int( properties, "video_qmin", 2 ); + mlt_properties_set_int( properties, "video_qmax", 31 ); + mlt_properties_set_int( properties, "video_lmin", 2*FF_QP2LAMBDA ); + mlt_properties_set_int( properties, "video_lmax", 31*FF_QP2LAMBDA ); + mlt_properties_set_int( properties, "video_mb_qmin", 2 ); + mlt_properties_set_int( properties, "video_mb_qmax", 31 ); + mlt_properties_set_int( properties, "video_qdiff", 3 ); + mlt_properties_set_double( properties, "video_qblur", 0.5 ); + mlt_properties_set_double( properties, "video_qcomp", 0.5 ); + mlt_properties_set_int( properties, "video_rc_max_rate", 0 ); + mlt_properties_set_int( properties, "video_rc_min_rate", 0 ); + mlt_properties_set_int( properties, "video_rc_buffer_size", 0 ); + mlt_properties_set_double( properties, "video_rc_buffer_aggressivity", 1.0 ); + mlt_properties_set_double( properties, "video_rc_initial_cplx", 0 ); + mlt_properties_set_double( properties, "video_i_qfactor", 1.25 ); + mlt_properties_set_double( properties, "video_b_qfactor", 1.25 ); + mlt_properties_set_double( properties, "video_i_qoffset", -0.8 ); + mlt_properties_set_double( properties, "video_b_qoffset", 0 ); + mlt_properties_set_int( properties, "video_intra_quant_bias", FF_DEFAULT_QUANT_BIAS ); + mlt_properties_set_int( properties, "video_inter_quant_bias", FF_DEFAULT_QUANT_BIAS ); + mlt_properties_set_int( properties, "dct_algo", 0 ); + mlt_properties_set_int( properties, "idct_algo", 0 ); + mlt_properties_set_int( properties, "me_threshold", 0 ); + mlt_properties_set_int( properties, "mb_threshold", 0 ); + mlt_properties_set_int( properties, "intra_dc_precision", 0 ); + mlt_properties_set_int( properties, "strict", 0 ); + mlt_properties_set_int( properties, "error_rate", 0 ); + mlt_properties_set_int( properties, "noise_reduction", 0 ); + mlt_properties_set_int( properties, "sc_threshold", 0 ); + mlt_properties_set_int( properties, "me_range", 0 ); + mlt_properties_set_int( properties, "coder", 0 ); + mlt_properties_set_int( properties, "context", 0 ); + mlt_properties_set_int( properties, "predictor", 0 ); // Ensure termination at end of the stream mlt_properties_set_int( properties, "terminate_on_pause", 1 ); @@ -342,16 +425,37 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c c->frame_rate_base = mlt_properties_get_double( properties, "frame_rate_base" ); c->frame_rate_base = 1; c->gop_size = mlt_properties_get_int( properties, "gop_size" ); - c->max_b_frames = mlt_properties_get_int( properties, "max_b_frames" ); - if ( c->max_b_frames ) + + if ( mlt_properties_get_int( properties, "b_frames" ) ) { + c->max_b_frames = mlt_properties_get_int( properties, "b_frames" ); c->b_frame_strategy = 0; c->b_quant_factor = 2.0; } c->mb_decision = mlt_properties_get_int( properties, "mb_decision" ); c->sample_aspect_ratio = av_d2q( mlt_properties_get_double( properties, "aspect_ratio" ), 255 ); - + c->mb_cmp = 
mlt_properties_get_int( properties, "mb_cmp" ); + c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" ); + c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" ); + c->me_cmp = mlt_properties_get_int( properties, "cmp" ); + c->me_pre_cmp = mlt_properties_get_int( properties, "pre_cmp" ); + c->pre_me = mlt_properties_get_int( properties, "pre_me" ); + c->lumi_masking = mlt_properties_get_double( properties, "lumi_mask" ); + c->dark_masking = mlt_properties_get_double( properties, "dark_mask" ); + c->spatial_cplx_masking = mlt_properties_get_double( properties, "scplx_mask" ); + c->temporal_cplx_masking = mlt_properties_get_double( properties, "tcplx_mask" ); + c->p_masking = mlt_properties_get_double( properties, "p_mask" ); + c->quantizer_noise_shaping= mlt_properties_get_int( properties, "qns" ); + c->qmin = mlt_properties_get_int( properties, "video_qmin" ); + c->qmax = mlt_properties_get_int( properties, "video_qmax" ); + c->lmin = mlt_properties_get_int( properties, "video_lmin" ); + c->lmax = mlt_properties_get_int( properties, "video_lmax" ); + c->mb_qmin = mlt_properties_get_int( properties, "video_mb_qmin" ); + c->mb_qmax = mlt_properties_get_int( properties, "video_mb_qmax" ); + c->max_qdiff = mlt_properties_get_int( properties, "video_qdiff" ); + c->qblur = mlt_properties_get_double( properties, "video_qblur" ); + c->qcompress = mlt_properties_get_double( properties, "video_qcomp" ); if ( mlt_properties_get_double( properties, "qscale" ) > 0 ) { @@ -365,6 +469,30 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c !strcmp( oc->oformat->name, "3gp" ) ) c->flags |= CODEC_FLAG_GLOBAL_HEADER; + c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" ); + c->rc_min_rate = mlt_properties_get_int( properties, "video_rc_min_rate" ); + c->rc_buffer_size = mlt_properties_get_int( properties, "video_rc_buffer_size" ); + c->rc_buffer_aggressivity= mlt_properties_get_double( properties, "video_rc_buffer_aggressivity" ); + c->rc_initial_cplx= mlt_properties_get_double( properties, "video_rc_initial_cplx" ); + c->i_quant_factor = mlt_properties_get_double( properties, "video_i_qfactor" ); + c->b_quant_factor = mlt_properties_get_double( properties, "video_b_qfactor" ); + c->i_quant_offset = mlt_properties_get_double( properties, "video_i_qoffset" ); + c->b_quant_offset = mlt_properties_get_double( properties, "video_b_qoffset" ); + c->intra_quant_bias = mlt_properties_get_int( properties, "video_intra_quant_bias" ); + c->inter_quant_bias = mlt_properties_get_int( properties, "video_inter_quant_bias" ); + c->dct_algo = mlt_properties_get_int( properties, "dct_algo" ); + c->idct_algo = mlt_properties_get_int( properties, "idct_algo" ); + c->me_threshold= mlt_properties_get_int( properties, "me_threshold" ); + c->mb_threshold= mlt_properties_get_int( properties, "mb_threshold" ); + c->intra_dc_precision= mlt_properties_get_int( properties, "intra_dc_precision" ); + c->strict_std_compliance = mlt_properties_get_int( properties, "strict" ); + c->error_rate = mlt_properties_get_int( properties, "error_rate" ); + c->noise_reduction= mlt_properties_get_int( properties, "noise_reduction" ); + c->scenechange_threshold= mlt_properties_get_int( properties, "sc_threshold" ); + c->me_range = mlt_properties_get_int( properties, "me_range" ); + c->coder_type= mlt_properties_get_int( properties, "coder" ); + c->context_model= mlt_properties_get_int( properties, "context" ); + c->prediction_method= mlt_properties_get_int( properties, 
"predictor" ); c->me_method = mlt_properties_get_int( properties, "me_method" ); } else @@ -420,6 +548,13 @@ void close_video(AVFormatContext *oc, AVStream *st) avcodec_close(&st->codec); } +static inline long time_difference( struct timeval *time1 ) +{ + struct timeval time2; + gettimeofday( &time2, NULL ); + return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec; +} + /** The main thread - the argument is simply the consumer. */ @@ -433,6 +568,13 @@ static void *consumer_thread( void *arg ) // Get the terminate on pause property int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" ); + int terminated = 0; + + // Determine if feed is slow (for realtime stuff) + int real_time_output = mlt_properties_get_int( properties, "real_time" ); + + // Time structures + struct timeval ante; // Get the frame rate int fps = mlt_properties_get_double( properties, "fps" ); @@ -451,7 +593,7 @@ static void *consumer_thread( void *arg ) int samples = 0; // AVFormat audio buffer and frame size - int audio_outbuf_size = 2 * 128 * 1024; + int audio_outbuf_size = 10000; uint8_t *audio_outbuf = av_malloc( audio_outbuf_size ); int audio_input_frame_size = 0; @@ -476,8 +618,8 @@ static void *consumer_thread( void *arg ) uint8_t *image; mlt_image_format img_fmt = mlt_image_yuv422; - // Fo receiving audio samples back from the fifo - int16_t buffer[ 48000 * 2 ]; + // For receiving audio samples back from the fifo + int16_t *buffer = av_malloc( 48000 * 2 ); int count = 0; // Allocate the context @@ -493,6 +635,10 @@ static void *consumer_thread( void *arg ) // Loop variable int i; + // Frames despatched + long int frames = 0; + long int total_time = 0; + // Determine the format AVOutputFormat *fmt = NULL; char *filename = mlt_properties_get( properties, "target" ); @@ -517,7 +663,7 @@ static void *consumer_thread( void *arg ) fmt = guess_format( "mpeg", NULL, NULL ); // We need a filename - default to stdout? 
- if ( filename == NULL ) + if ( filename == NULL || !strcmp( filename, "" ) ) filename = "pipe:"; // Get the codec ids selected @@ -577,16 +723,13 @@ static void *consumer_thread( void *arg ) // Open the output file, if needed if ( !( fmt->flags & AVFMT_NOFILE ) ) { - if (url_fopen(&oc->pb, filename, URL_RDWR) < 0) + if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) { fprintf(stderr, "Could not open '%s'\n", filename); mlt_properties_set_int( properties, "running", 0 ); } } - if ( url_is_streamed( &oc->pb ) ) - fprintf( stderr, "FUCK!\n" ); - // Write the stream header, if any if ( mlt_properties_get_int( properties, "running" ) ) av_write_header( oc ); @@ -601,8 +744,11 @@ static void *consumer_thread( void *arg ) if ( audio_st == NULL && video_st == NULL ) mlt_properties_set_int( properties, "running", 0 ); + // Get the starting time (can ignore the times above) + gettimeofday( &ante, NULL ); + // Loop while running - while( mlt_properties_get_int( properties, "running" ) ) + while( mlt_properties_get_int( properties, "running" ) && !terminated ) { // Get the frame frame = mlt_consumer_rt_frame( this ); @@ -610,15 +756,31 @@ static void *consumer_thread( void *arg ) // Check that we have a frame to work with if ( frame != NULL ) { + // Increment frames despatched + frames ++; + // Default audio args frame_properties = mlt_frame_properties( frame ); + // Check for the terminated condition + terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0; + // Get audio and append to the fifo if ( audio_st ) { samples = mlt_sample_calculator( fps, frequency, count ); mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples ); + + // Create the fifo if we don't have one + if ( fifo == NULL ) + { + fifo = sample_fifo_init( frequency, channels ); + mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL ); + } + + // Append the samples sample_fifo_append( fifo, pcm, samples * channels ); + total_time += ( samples * 1000000 ) / frequency; } // Encode the image @@ -626,115 +788,161 @@ static void *consumer_thread( void *arg ) mlt_deque_push_back( queue, frame ); else mlt_frame_close( frame ); + } - // While we have stuff to process, process... - while ( 1 ) + // While we have stuff to process, process... 
+ while ( 1 ) + { + // Compute current audio and video time + if (audio_st) + audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; + else + audio_pts = 0.0; + + if (video_st) + video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den; + else + video_pts = 0.0; + + // Write interleaved audio and video frames + if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) ) { - // Compute current audio and video time - if (audio_st) - audio_pts = (double)audio_st->pts.val * oc->pts_num / oc->pts_den; - else - audio_pts = 0.0; - - if (video_st) - video_pts = (double)video_st->pts.val * oc->pts_num / oc->pts_den; - else - video_pts = 0.0; - - // Write interleaved audio and video frames - if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) ) + if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) { - if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) - { - int out_size; - AVCodecContext *c; + AVCodecContext *c; + AVPacket pkt; + av_init_packet( &pkt ); - c = &audio_st->codec; + c = &audio_st->codec; - sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); + sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); - out_size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); + pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); + // Write the compressed frame in the media file + if ( c->coded_frame ) + pkt.pts= c->coded_frame->pts; + pkt.flags |= PKT_FLAG_KEY; + pkt.stream_index= audio_st->index; + pkt.data= audio_outbuf; - // Write the compressed frame in the media file - if (av_write_frame(oc, audio_st->index, audio_outbuf, out_size) != 0) - fprintf(stderr, "Error while writing audio frame\n"); - } - else - { - break; - } + if ( av_interleaved_write_frame( oc, &pkt ) != 0) + fprintf(stderr, "Error while writing audio frame\n"); + } + else + { + break; } - else if ( video_st ) + } + else if ( video_st ) + { + if ( mlt_deque_count( queue ) ) { - if ( mlt_deque_count( queue ) ) + int out_size, ret; + AVCodecContext *c; + + frame = mlt_deque_pop_front( queue ); + frame_properties = mlt_frame_properties( frame ); + + c = &video_st->codec; + + if ( mlt_properties_get_int( frame_properties, "rendered" ) ) { - int out_size, ret; - AVCodecContext *c; - - frame = mlt_deque_pop_front( queue ); - frame_properties = mlt_frame_properties( frame ); + int i = 0; + int j = 0; + uint8_t *p; + uint8_t *q; - if ( terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0 ) - { - mlt_properties_set_int( properties, "running", 0 ); - break; - } + mlt_events_fire( properties, "consumer-frame-show", frame, NULL ); - c = &video_st->codec; - - if ( mlt_properties_get_int( frame_properties, "rendered" ) ) + // This will cause some fx to go awry.... 
+ if ( mlt_properties_get_int( properties, "transcode" ) ) { - int i = 0; - int j = 0; - uint8_t *p; - uint8_t *q; + mlt_properties_set_int( mlt_frame_properties( frame ), "normalised_width", img_height * 4.0 / 3.0 ); + mlt_properties_set_int( mlt_frame_properties( frame ), "normalised_height", img_height ); + } - mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 ); + mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 ); - q = image; + q = image; - for ( i = 0; i < height; i ++ ) + for ( i = 0; i < height; i ++ ) + { + p = input->data[ 0 ] + i * input->linesize[ 0 ]; + j = width; + while( j -- ) { - p = input->data[ 0 ] + i * input->linesize[ 0 ]; - j = width; - while( j -- ) - { - *p ++ = *q ++; - *p ++ = *q ++; - } + *p ++ = *q ++; + *p ++ = *q ++; } - - img_convert( ( AVPicture * )output, PIX_FMT_YUV420P, ( AVPicture * )input, PIX_FMT_YUV422, width, height ); } - - if (oc->oformat->flags & AVFMT_RAWPICTURE) - { - // raw video case. The API will change slightly in the near future for that - ret = av_write_frame(oc, video_st->index, (uint8_t *)output, sizeof(AVPicture)); - } - else - { - // Encode the image - out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output ); - // If zero size, it means the image was buffered - if (out_size != 0) - { - // write the compressed frame in the media file - // XXX: in case of B frames, the pts is not yet valid - ret = av_write_frame( oc, video_st->index, video_outbuf, out_size ); - } - } - frame_count++; - mlt_frame_close( frame ); + img_convert( ( AVPicture * )output, PIX_FMT_YUV420P, ( AVPicture * )input, PIX_FMT_YUV422, width, height ); } - else + + if (oc->oformat->flags & AVFMT_RAWPICTURE) { - break; - } + // raw video case. The API will change slightly in the near future for that + AVPacket pkt; + av_init_packet(&pkt); + + pkt.flags |= PKT_FLAG_KEY; + pkt.stream_index= video_st->index; + pkt.data= (uint8_t *)output; + pkt.size= sizeof(AVPicture); + + ret = av_write_frame(oc, &pkt); + } + else + { + // Set the quality + output->quality = video_st->quality; + + // Encode the image + out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output ); + + // If zero size, it means the image was buffered + if (out_size != 0) + { + AVPacket pkt; + av_init_packet( &pkt ); + + if ( c->coded_frame ) + pkt.pts= c->coded_frame->pts; + if(c->coded_frame->key_frame) + pkt.flags |= PKT_FLAG_KEY; + pkt.stream_index= video_st->index; + pkt.data= video_outbuf; + pkt.size= out_size; + + // write the compressed frame in the media file + ret = av_interleaved_write_frame(oc, &pkt); + } + } + frame_count++; + mlt_frame_close( frame ); + } + else + { + break; } } } + + if ( real_time_output && frames % 25 == 0 ) + { + long passed = time_difference( &ante ); + if ( fifo != NULL ) + { + long pending = ( ( ( long )sample_fifo_used( fifo ) * 1000 ) / frequency ) * 1000; + passed -= pending; + } + if ( passed < total_time ) + { + long total = ( total_time - passed ); + struct timespec t = { total / 1000000, ( total % 1000000 ) * 1000 }; + nanosleep( &t, NULL ); + } + } } // close each codec @@ -760,10 +968,16 @@ static void *consumer_thread( void *arg ) av_free( input->data[0] ); av_free( input ); av_free( video_outbuf ); + av_free( buffer ); // Free the stream av_free(oc); + // Just in case we terminated on pause + mlt_properties_set_int( properties, "running", 0 ); + + mlt_consumer_stopped( this ); + return NULL; }
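
For reference, the sample_fifo added at the top of the patch is a plain queue of interleaved int16_t samples that also tracks the presentation time of its head: init records the frequency and channel count, and every fetch advances time by count / channels / frequency seconds, which is what the real-time pacing added further down uses to estimate how much audio is still pending. The stand-alone sketch below mirrors those names and that behaviour; the append/fetch bodies are illustrative (error handling and the clear/check helpers are omitted), not a copy of the MLT sources.

/* Minimal stand-alone sketch of the sample FIFO used by the consumer.
   Names mirror the patch; bodies are illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

typedef struct
{
	int16_t *buffer;
	int size;        /* allocated capacity in samples */
	int used;        /* interleaved samples currently queued */
	double time;     /* presentation time of the head of the queue */
	int frequency;
	int channels;
}
*sample_fifo, sample_fifo_s;

static sample_fifo sample_fifo_init( int frequency, int channels )
{
	sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
	this->frequency = frequency;
	this->channels = channels;
	return this;
}

static void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
{
	/* grow on demand, then queue the interleaved samples */
	if ( this->used + count > this->size )
	{
		this->size = this->used + count;
		this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
	}
	memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
	this->used += count;
}

static int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
{
	if ( count > this->used )
		count = this->used;
	memcpy( samples, this->buffer, count * sizeof( int16_t ) );
	this->used -= count;
	memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );
	/* advancing the head time is what lets the consumer estimate pending audio */
	this->time += ( double )count / this->channels / this->frequency;
	return count;
}

int main( void )
{
	/* 25 fps at 48000 Hz stereo -> 1920 samples per channel per frame */
	sample_fifo fifo = sample_fifo_init( 48000, 2 );
	int16_t pcm[ 1920 * 2 ] = { 0 };
	sample_fifo_append( fifo, pcm, 1920 * 2 );
	sample_fifo_fetch( fifo, pcm, 1920 * 2 );
	printf( "head time is now %f seconds\n", fifo->time );  /* 0.040000 */
	free( fifo->buffer );
	free( fifo );
	return 0;
}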
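
The real_time branch at the bottom of the encode loop paces a live feed the same way every 25 frames: it measures wall-clock microseconds since the consumer started (time_difference), discounts the audio still sitting in the fifo, and if the media time already appended (total_time) is ahead of the wall clock it sleeps off the difference with nanosleep. A compact POSIX-only sketch of that calculation follows; pace_output and pending_samples are illustrative names, and the pending audio is expressed per channel here to keep the arithmetic obvious.

/* Sketch of the real-time pacing used by the consumer thread: sleep until
   wall-clock time catches up with the media time already delivered. */
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* microseconds elapsed since *time1, as in the patch's time_difference() */
static long time_difference( struct timeval *time1 )
{
	struct timeval time2;
	gettimeofday( &time2, NULL );
	return time2.tv_sec * 1000000 + time2.tv_usec
	     - time1->tv_sec * 1000000 - time1->tv_usec;
}

/* pending_samples: audio samples (per channel) still queued, not yet encoded */
static void pace_output( struct timeval *start, long total_time_us,
                         int pending_samples, int frequency )
{
	long passed = time_difference( start );

	/* audio still buffered has not really been "spent" yet */
	passed -= ( ( long )pending_samples * 1000000 ) / frequency;

	if ( passed < total_time_us )
	{
		long wait = total_time_us - passed;
		struct timespec t = { wait / 1000000, ( wait % 1000000 ) * 1000 };
		nanosleep( &t, NULL );
	}
}

int main( void )
{
	struct timeval start;
	gettimeofday( &start, NULL );

	/* pretend one second of audio has already been handed to the encoder
	   while almost no wall-clock time has passed */
	pace_output( &start, 1000000, 0, 48000 );

	printf( "woke up after ~%ld us\n", time_difference( &start ) );
	return 0;
}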