mlt_properties_set_int( properties, "audio_bit_rate", 128000 );
mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 );
mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 );
- mlt_properties_set_int( properties, "frame_rate_base", 1 );
mlt_properties_set_int( properties, "gop_size", 12 );
mlt_properties_set_int( properties, "b_frames", 0 );
mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
// If created, then initialise from properties
if ( st != NULL )
{
- AVCodecContext *c = &st->codec;
+ AVCodecContext *c = st->codec;
c->codec_id = codec_id;
c->codec_type = CODEC_TYPE_AUDIO;
c->bit_rate = mlt_properties_get_int( properties, "audio_bit_rate" );
c->sample_rate = mlt_properties_get_int( properties, "frequency" );
c->channels = mlt_properties_get_int( properties, "channels" );
+
+ if (oc->oformat->flags & AVFMT_GLOBALHEADER)
+ c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+ // Allow the user to override the audio fourcc
+ if ( mlt_properties_get( properties, "afourcc" ) )
+ {
+ char *tail = NULL;
+ char *arg = mlt_properties_get( properties, "afourcc" );
+ int tag = strtol( arg, &tail, 0);
+ if( !tail || *tail )
+ tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
+ c->codec_tag = tag;
+ }
}
else
{
int audio_input_frame_size = 0;
// Get the context
- AVCodecContext *c = &st->codec;
+ AVCodecContext *c = st->codec;
// Find the encoder
AVCodec *codec = avcodec_find_encoder( c->codec_id );
if ( c->frame_size <= 1 )
{
audio_input_frame_size = audio_outbuf_size / c->channels;
- switch(st->codec.codec_id)
+ switch(st->codec->codec_id)
{
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
/** Close the audio encoder attached to a stream.
 *
 * \param oc the output format context that owns the stream (unused; kept for
 *           symmetry with the open_/close_ pairs in this file)
 * \param st the audio stream whose codec context is to be closed
 */
static void close_audio( AVFormatContext *oc, AVStream *st )
{
	// AVStream::codec is an AVCodecContext pointer in current ffmpeg,
	// so it is passed directly rather than via address-of.
	avcodec_close( st->codec );
}
/** Add a video output stream
if ( st != NULL )
{
- AVCodecContext *c = &st->codec;
+ char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
+ double ar = mlt_properties_get_double( properties, "display_ratio" );
+ AVCodecContext *c = st->codec;
c->codec_id = codec_id;
c->codec_type = CODEC_TYPE_VIDEO;
c->bit_rate_tolerance = mlt_properties_get_int( properties, "video_bit_rate_tolerance" );
c->width = mlt_properties_get_int( properties, "width" );
c->height = mlt_properties_get_int( properties, "height" );
- c->time_base.den = mlt_properties_get_int( properties, "frame_rate_den" );
- c->time_base.num = mlt_properties_get_int( properties, "frame_rate_num" );
+ c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
+ c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
c->gop_size = mlt_properties_get_int( properties, "gop_size" );
- c->pix_fmt = PIX_FMT_YUV420P;
+ c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
if ( mlt_properties_get_int( properties, "b_frames" ) )
{
}
c->mb_decision = mlt_properties_get_int( properties, "mb_decision" );
- c->sample_aspect_ratio = av_d2q( mlt_properties_get_double( properties, "aspect_ratio" ), 255 );
+ c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
c->mb_cmp = mlt_properties_get_int( properties, "mb_cmp" );
c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" );
c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" );
st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
}
- // Some formats want stream headers to be seperate (hmm)
- if( !strcmp( oc->oformat->name, "mp4" ) ||
- !strcmp( oc->oformat->name, "mov" ) ||
- !strcmp( oc->oformat->name, "3gp" ) )
+ // Allow the user to override the video fourcc
+ if ( mlt_properties_get( properties, "vfourcc" ) )
+ {
+ char *tail = NULL;
+ const char *arg = mlt_properties_get( properties, "vfourcc" );
+ int tag = strtol( arg, &tail, 0);
+ if( !tail || *tail )
+ tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
+ c->codec_tag = tag;
+ }
+
+ // Some formats want stream headers to be separate
+ if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
static int open_video(AVFormatContext *oc, AVStream *st)
{
// Get the codec
- AVCodecContext *video_enc = &st->codec;
+ AVCodecContext *video_enc = st->codec;
// find the video encoder
AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
- if( codec && codec->supported_framerates )
- {
- const AVRational *p = codec->supported_framerates;
- AVRational req = ( AVRational ){ video_enc->time_base.den, video_enc->time_base.num };
- const AVRational *best = NULL;
- AVRational best_error = (AVRational){ INT_MAX, 1 };
- for( ; p->den!=0; p++ )
- {
- AVRational error= av_sub_q( req, *p );
- if( error.num < 0 )
- error.num *= -1;
- if( av_cmp_q( error, best_error ) < 0 )
- {
- best_error = error;
- best = p;
- }
- }
- video_enc->time_base.den = best->num;
- video_enc->time_base.num = best->den;
- }
-
if( codec && codec->pix_fmts )
{
const enum PixelFormat *p = codec->pix_fmts;
/** Close the video encoder attached to a stream.
 *
 * \param oc the output format context that owns the stream (unused; kept for
 *           symmetry with the open_/close_ pairs in this file)
 * \param st the video stream whose codec context is to be closed
 */
void close_video(AVFormatContext *oc, AVStream *st)
{
	// AVStream::codec is an AVCodecContext pointer in current ffmpeg,
	// so it is passed directly rather than via address-of.
	avcodec_close(st->codec);
}
static inline long time_difference( struct timeval *time1 )
// Allocate picture
if ( video_st )
- output = alloc_picture( video_st->codec.pix_fmt, width, height );
+ output = alloc_picture( video_st->codec->pix_fmt, width, height );
// Last check - need at least one stream
if ( audio_st == NULL && video_st == NULL )
// Get audio and append to the fifo
if ( !terminated && audio_st )
{
- samples = mlt_sample_calculator( fps, frequency, count );
+ samples = mlt_sample_calculator( fps, frequency, count ++ );
mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
// Create the fifo if we don't have one
AVPacket pkt;
av_init_packet( &pkt );
- c = &audio_st->codec;
+ c = audio_st->codec;
sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
// Write the compressed frame in the media file
- if ( c->coded_frame )
+ if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= audio_st->index;
pkt.data= audio_outbuf;
- if ( av_interleaved_write_frame( oc, &pkt ) != 0)
- fprintf(stderr, "Error while writing audio frame\n");
+ if ( pkt.size )
+ if ( av_interleaved_write_frame( oc, &pkt ) != 0)
+ fprintf(stderr, "Error while writing audio frame\n");
audio_pts += c->frame_size;
}
frame = mlt_deque_pop_front( queue );
frame_properties = MLT_FRAME_PROPERTIES( frame );
- c = &video_st->codec;
+ c = video_st->codec;
if ( mlt_properties_get_int( frame_properties, "rendered" ) )
{
mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
- // This will cause some fx to go awry....
- if ( mlt_properties_get_int( properties, "transcode" ) )
- {
- mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "normalised_width", img_height * 4.0 / 3.0 );
- mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "normalised_height", img_height );
- }
-
mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
q = image;
+ // Convert the mlt frame to an AVPicture
for ( i = 0; i < height; i ++ )
{
p = input->data[ 0 ] + i * input->linesize[ 0 ];
}
}
- img_convert( ( AVPicture * )output, video_st->codec.pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
+ // Do the colour space conversion
+ img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
+
+ // Apply the alpha if applicable
+ if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
+ {
+ uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
+ register int n;
+
+ for ( i = 0; i < height; i ++ )
+ {
+ n = ( width + 7 ) / 8;
+ p = output->data[ 0 ] + i * output->linesize[ 0 ];
+
+ #ifndef __DARWIN__
+ p += 3;
+ #endif
+
+ switch( width % 8 )
+ {
+ case 0: do { *p = *alpha++; p += 4;
+ case 7: *p = *alpha++; p += 4;
+ case 6: *p = *alpha++; p += 4;
+ case 5: *p = *alpha++; p += 4;
+ case 4: *p = *alpha++; p += 4;
+ case 3: *p = *alpha++; p += 4;
+ case 2: *p = *alpha++; p += 4;
+ case 1: *p = *alpha++; p += 4;
+ }
+ while( --n );
+ }
+ }
+ }
}
if (oc->oformat->flags & AVFMT_RAWPICTURE)
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
// If zero size, it means the image was buffered
- if (out_size != 0)
+ if (out_size > 0)
{
AVPacket pkt;
av_init_packet( &pkt );
- if ( c->coded_frame )
+ if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
- if(c->coded_frame->key_frame)
+ if( c->coded_frame && c->coded_frame->key_frame )
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= video_outbuf;
ret = av_interleaved_write_frame(oc, &pkt);
video_pts += c->frame_size;
}
+ else
+ {
+ fprintf( stderr, "Error with video encode\n" );
+ }
}
frame_count++;
mlt_frame_close( frame );