X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fconsumer_avformat.c;h=33487555d8028eba84d573c6230ab547ab1b5e06;hb=cef70011d08af33924ab3b97d628deefce47b953;hp=febd134741dc87a02bacf4ac1c485f06f93ead2a;hpb=72815200c65ff4587df5e8da908b9281e044f1ca;p=melted

diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c
index febd134..3348755 100644
--- a/src/modules/avformat/consumer_avformat.c
+++ b/src/modules/avformat/consumer_avformat.c
@@ -35,6 +35,9 @@
 // avformat header files
 #include <avformat.h>
+#ifdef SWSCALE
+#include <swscale.h>
+#endif
 
 //
 // This structure should be extended and made globally available in mlt
@@ -158,11 +161,10 @@ mlt_consumer consumer_avformat_init( char *arg )
 	mlt_properties_set_int( properties, "audio_bit_rate", 128000 );
 	mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 );
 	mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 );
-	mlt_properties_set_int( properties, "frame_rate_base", 1 );
 	mlt_properties_set_int( properties, "gop_size", 12 );
 	mlt_properties_set_int( properties, "b_frames", 0 );
 	mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
-	mlt_properties_set_double( properties, "qscale", 0 );
+	mlt_properties_set_double( properties, "qscale", 1 );
 	mlt_properties_set_int( properties, "me_method", ME_EPZS );
 	mlt_properties_set_int( properties, "mb_cmp", FF_CMP_SAD );
 	mlt_properties_set_int( properties, "ildct_cmp", FF_CMP_VSAD );
@@ -323,7 +325,7 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c
 	// If created, then initialise from properties
 	if ( st != NULL )
 	{
-		AVCodecContext *c = &st->codec;
+		AVCodecContext *c = st->codec;
 
 		c->codec_id = codec_id;
 		c->codec_type = CODEC_TYPE_AUDIO;
@@ -331,6 +333,20 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c
 		c->bit_rate = mlt_properties_get_int( properties, "audio_bit_rate" );
 		c->sample_rate = mlt_properties_get_int( properties, "frequency" );
 		c->channels = mlt_properties_get_int( properties, "channels" );
+
+		if (oc->oformat->flags & AVFMT_GLOBALHEADER)
+			c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+		// Allow the user to override the audio fourcc
+		if ( mlt_properties_get( properties, "afourcc" ) )
+		{
+			char *tail = NULL;
+			char *arg = mlt_properties_get( properties, "afourcc" );
+			int tag = strtol( arg, &tail, 0);
+			if( !tail || *tail )
+				tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
+			c->codec_tag = tag;
+		}
 	}
 	else
 	{
@@ -346,7 +362,7 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size
 	int audio_input_frame_size = 0;
 
 	// Get the context
-	AVCodecContext *c = &st->codec;
+	AVCodecContext *c = st->codec;
 
 	// Find the encoder
 	AVCodec *codec = avcodec_find_encoder( c->codec_id );
@@ -359,7 +375,7 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size
 	if ( c->frame_size <= 1 )
 	{
 		audio_input_frame_size = audio_outbuf_size / c->channels;
-		switch(st->codec.codec_id)
+		switch(st->codec->codec_id)
 		{
 			case CODEC_ID_PCM_S16LE:
 			case CODEC_ID_PCM_S16BE:
@@ -392,7 +408,7 @@ static void close_audio( AVFormatContext *oc, AVStream *st )
 {
-	avcodec_close( &st->codec );
+	avcodec_close( st->codec );
 }
 
 /** Add a video output stream
@@ -408,7 +424,9 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
 
 	if ( st != NULL )
 	{
-		AVCodecContext *c = &st->codec;
+		char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
+		double ar = mlt_properties_get_double( properties, "display_ratio" );
+		AVCodecContext *c = st->codec;
 
 		c->codec_id = codec_id;
 		c->codec_type = CODEC_TYPE_VIDEO;
@@ -417,10 +435,10 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
 		c->bit_rate_tolerance = mlt_properties_get_int( properties, "video_bit_rate_tolerance" );
 		c->width = mlt_properties_get_int( properties, "width" );
 		c->height = mlt_properties_get_int( properties, "height" );
-		c->time_base.den = mlt_properties_get_int( properties, "frame_rate_den" );
-		c->time_base.num = mlt_properties_get_int( properties, "frame_rate_num" );
+		c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
+		c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
 		c->gop_size = mlt_properties_get_int( properties, "gop_size" );
-		c->pix_fmt = PIX_FMT_YUV420P;
+		c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
 
 		if ( mlt_properties_get_int( properties, "b_frames" ) )
 		{
@@ -430,7 +448,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
 		}
 
 		c->mb_decision = mlt_properties_get_int( properties, "mb_decision" );
-		c->sample_aspect_ratio = av_d2q( mlt_properties_get_double( properties, "aspect_ratio" ), 255 );
+		c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
 		c->mb_cmp = mlt_properties_get_int( properties, "mb_cmp" );
 		c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" );
 		c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" );
@@ -459,10 +477,19 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
 			st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
 		}
 
-		// Some formats want stream headers to be seperate (hmm)
-		if( !strcmp( oc->oformat->name, "mp4" ) ||
-			!strcmp( oc->oformat->name, "mov" ) ||
-			!strcmp( oc->oformat->name, "3gp" ) )
+		// Allow the user to override the video fourcc
+		if ( mlt_properties_get( properties, "vfourcc" ) )
+		{
+			char *tail = NULL;
+			const char *arg = mlt_properties_get( properties, "vfourcc" );
+			int tag = strtol( arg, &tail, 0);
+			if( !tail || *tail )
+				tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
+			c->codec_tag = tag;
+		}
+
+		// Some formats want stream headers to be separate
+		if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
 			c->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
 		c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
@@ -530,32 +557,11 @@ static AVFrame *alloc_picture( int pix_fmt, int width, int height )
 static int open_video(AVFormatContext *oc, AVStream *st)
 {
 	// Get the codec
-	AVCodecContext *video_enc = &st->codec;
+	AVCodecContext *video_enc = st->codec;
 
 	// find the video encoder
 	AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
 
-	if( codec && codec->supported_framerates )
-	{
-		const AVRational *p = codec->supported_framerates;
-		AVRational req = ( AVRational ){ video_enc->time_base.den, video_enc->time_base.num };
-		const AVRational *best = NULL;
-		AVRational best_error = (AVRational){ INT_MAX, 1 };
-		for( ; p->den!=0; p++ )
-		{
-			AVRational error= av_sub_q( req, *p );
-			if( error.num < 0 )
-				error.num *= -1;
-			if( av_cmp_q( error, best_error ) < 0 )
-			{
-				best_error = error;
-				best = p;
-			}
-		}
-		video_enc->time_base.den = best->num;
-		video_enc->time_base.num = best->den;
-	}
-
 	if( codec && codec->pix_fmts )
 	{
 		const enum PixelFormat *p = codec->pix_fmts;
@@ -574,7 +580,7 @@ static int open_video(AVFormatContext *oc, AVStream *st)
 void close_video(AVFormatContext *oc, AVStream *st)
 {
-	avcodec_close(&st->codec);
+	avcodec_close(st->codec);
 }
 
 static inline long time_difference( struct timeval *time1 )
 {
@@ -733,6 +739,32 @@ static void *consumer_thread( void *arg )
 	}
 
 	// Update the output context
+
+	// Write metadata
+	char *tmp = NULL;
+	int metavalue;
+
+	tmp = mlt_properties_get( properties, "meta.attr.title.markup");
+	if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );
+
+	tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
+	if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );
+
+	tmp = mlt_properties_get( properties, "meta.attr.author.markup");
+	if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );
+
+	tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
+	if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );
+
+	tmp = mlt_properties_get( properties, "meta.attr.album.markup");
+	if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );
+
+	metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
+	if (metavalue != 0) oc->year = metavalue;
+
+	metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
+	if (metavalue != 0) oc->track = metavalue;
+
 	oc->oformat = fmt;
 	snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
@@ -772,7 +804,7 @@ static void *consumer_thread( void *arg )
 
 	// Allocate picture
 	if ( video_st )
-		output = alloc_picture( video_st->codec.pix_fmt, width, height );
+		output = alloc_picture( video_st->codec->pix_fmt, width, height );
 
 	// Last check - need at least one stream
 	if ( audio_st == NULL && video_st == NULL )
@@ -802,7 +834,7 @@ static void *consumer_thread( void *arg )
 			// Get audio and append to the fifo
 			if ( !terminated && audio_st )
 			{
-				samples = mlt_sample_calculator( fps, frequency, count );
+				samples = mlt_sample_calculator( fps, frequency, count ++ );
 				mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
 
 				// Create the fifo if we don't have one
@@ -849,20 +881,21 @@ static void *consumer_thread( void *arg )
 				AVPacket pkt;
 				av_init_packet( &pkt );
 
-				c = &audio_st->codec;
+				c = audio_st->codec;
 
 				sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
 
 				pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
 
 				// Write the compressed frame in the media file
-				if ( c->coded_frame )
+				if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
 					pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
 				pkt.flags |= PKT_FLAG_KEY;
 				pkt.stream_index= audio_st->index;
 				pkt.data= audio_outbuf;
 
-				if ( av_interleaved_write_frame( oc, &pkt ) != 0)
-					fprintf(stderr, "Error while writing audio frame\n");
+				if ( pkt.size )
+					if ( av_interleaved_write_frame( oc, &pkt ) != 0)
+						fprintf(stderr, "Error while writing audio frame\n");
 
 				audio_pts += c->frame_size;
@@ -881,7 +914,7 @@ static void *consumer_thread( void *arg )
 				frame = mlt_deque_pop_front( queue );
 				frame_properties = MLT_FRAME_PROPERTIES( frame );
 
-				c = &video_st->codec;
+				c = video_st->codec;
 
 				if ( mlt_properties_get_int( frame_properties, "rendered" ) )
 				{
@@ -892,17 +925,11 @@ static void *consumer_thread( void *arg )
 
 					mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
 
-					// This will cause some fx to go awry....
-					if ( mlt_properties_get_int( properties, "transcode" ) )
-					{
-						mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "normalised_width", img_height * 4.0 / 3.0 );
-						mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "normalised_height", img_height );
-					}
-
 					mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
 
 					q = image;
 
+					// Convert the mlt frame to an AVPicture
 					for ( i = 0; i < height; i ++ )
 					{
 						p = input->data[ 0 ] + i * input->linesize[ 0 ];
@@ -914,7 +941,47 @@ static void *consumer_thread( void *arg )
 						}
 					}
 
-					img_convert( ( AVPicture * )output, video_st->codec.pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
+					// Do the colour space conversion
+#ifdef SWSCALE
+					struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
+						width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+					sws_scale( context, input->data, input->linesize, 0, height,
+						output->data, output->linesize);
+					sws_freeContext( context );
+#else
+					img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
+#endif
+
+					// Apply the alpha if applicable
+					if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
+					{
+						uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
+						register int n;
+
+						for ( i = 0; i < height; i ++ )
+						{
+							n = ( width + 7 ) / 8;
+							p = output->data[ 0 ] + i * output->linesize[ 0 ];
+
+							#ifndef __DARWIN__
+							p += 3;
+							#endif
+
+							switch( width % 8 )
+							{
+								case 0: do { *p = *alpha++; p += 4;
+								case 7:      *p = *alpha++; p += 4;
+								case 6:      *p = *alpha++; p += 4;
+								case 5:      *p = *alpha++; p += 4;
+								case 4:      *p = *alpha++; p += 4;
+								case 3:      *p = *alpha++; p += 4;
+								case 2:      *p = *alpha++; p += 4;
+								case 1:      *p = *alpha++; p += 4;
+										}
+										while( --n );
+							}
+						}
+					}
 				}
 
 				if (oc->oformat->flags & AVFMT_RAWPICTURE)
@@ -940,14 +1007,14 @@ static void *consumer_thread( void *arg )
 				out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
 
 				// If zero size, it means the image was buffered
-				if (out_size != 0)
+				if (out_size > 0)
 				{
 					AVPacket pkt;
 					av_init_packet( &pkt );
 
-					if ( c->coded_frame )
+					if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
 						pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
-					if(c->coded_frame->key_frame)
+					if( c->coded_frame && c->coded_frame->key_frame )
 						pkt.flags |= PKT_FLAG_KEY;
 					pkt.stream_index= video_st->index;
 					pkt.data= video_outbuf;
@@ -957,6 +1024,10 @@ static void *consumer_thread( void *arg )
 					ret = av_interleaved_write_frame(oc, &pkt);
 					video_pts += c->frame_size;
 				}
+				else
+				{
+					fprintf( stderr, "Error with video encode\n" );
+				}
 			}
 			frame_count++;
 			mlt_frame_close( frame );
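
For reference, the "vfourcc"/"afourcc" handling added in the hunks above reduces to the small, self-contained sketch below. It is illustrative only: the helper name parse_fourcc and the sample tags are not part of the patch, which simply assigns the resulting value to c->codec_tag on the audio or video codec context.

/*
 * Illustrative sketch only (not part of the patch): how the new
 * "vfourcc"/"afourcc" property values are interpreted.  A value that
 * parses as a number is used directly as the codec tag; anything else
 * is treated as a four-character code and packed low byte first.
 */
#include <stdio.h>
#include <stdlib.h>

static int parse_fourcc( const char *arg )
{
	char *tail = NULL;
	int tag = strtol( arg, &tail, 0 );

	/* Not a plain number - pack the four characters instead */
	if ( !tail || *tail )
		tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );

	return tag;
}

int main( void )
{
	/* "DX50" packs to 0x30355844; a numeric string is taken verbatim */
	printf( "DX50       -> 0x%08x\n", parse_fourcc( "DX50" ) );
	printf( "0x31637661 -> 0x%08x\n", parse_fourcc( "0x31637661" ) );
	return 0;
}

A numeric value (decimal, octal or hex, as accepted by strtol with base 0) lets the caller force an exact codec tag, while a four-character string is packed in the same byte order as FFmpeg's MKTAG macro.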