if ( output_rate == 0 )
output_rate = *frequency;
- // Restore the original get_audio
- frame->get_audio = mlt_frame_pop_audio( frame );
-
// Get the producer's audio
mlt_frame_get_audio( frame, buffer, format, frequency, &channels_avail, samples );
static mlt_frame filter_process( mlt_filter this, mlt_frame frame )
{
// Only call this if we have a means to get audio
- if ( frame->get_audio != NULL )
+ if ( mlt_frame_is_test_audio( frame ) == 0 )
{
- // Push the original method on to the stack
- mlt_frame_push_audio( frame, frame->get_audio );
-
// Push the filter on to the stack
mlt_frame_push_audio( frame, this );
// Assign our get_audio method
- frame->get_audio = resample_get_audio;
+ mlt_frame_push_audio( frame, resample_get_audio );
}
return frame;
else
{
// Get silence and don't touch the context
- frame->get_audio = NULL;
mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
}
// No codec, no show...
if ( codec != NULL )
{
- frame->get_audio = producer_get_audio;
+ mlt_frame_push_audio( frame, producer_get_audio );
mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
}
}
int from = mlt_properties_get_int( properties, "channelcopy.from" );
int to = mlt_properties_get_int( properties, "channelcopy.to" );
- // Restore the original get_audio
- frame->get_audio = mlt_properties_get_data( properties, "channelcopy.get_audio", NULL );
-
// Get the producer's audio
mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
mlt_properties_set_int( frame_props, "channelcopy.to", mlt_properties_get_int( properties, "to" ) );
mlt_properties_set_int( frame_props, "channelcopy.from", mlt_properties_get_int( properties, "from" ) );
- // Backup the original get_audio (it's still needed)
- mlt_properties_set_data( frame_props, "channelcopy.get_audio", frame->get_audio, 0, NULL, NULL );
-
// Override the get_audio method
- frame->get_audio = filter_get_audio;
+ mlt_frame_push_audio( frame, filter_get_audio );
return frame;
}
if ( rescale == NULL || !strcmp( rescale, "none" ) )
rescale = "hyper";
mlt_transition_process( composite, b_frame, frame );
- mlt_properties_set_double( b_props, "consumer_aspect_ratio", mlt_properties_get_int( a_props, "consumer_aspect_ratio" ) );
mlt_properties_set_int( a_props, "consumer_deinterlace", 1 );
mlt_properties_set_int( b_props, "consumer_deinterlace", 1 );
mlt_properties_set( a_props, "rescale.interp", rescale );
mlt_frame_push_get_image( *frame, producer_get_image );
// Specify the audio
- ( *frame )->get_audio = producer_get_audio;
+ mlt_frame_push_audio( *frame, producer_get_audio );
}
// Calculate the next timecode
mlt_properties_set_data( properties, "audio.pipe", audio, 0, NULL, NULL );
// Hmm - register audio callback
- ( *frame )->get_audio = producer_get_audio;
+ mlt_frame_push_audio( *frame, producer_get_audio );
// Update timecode on the frame we're creating
mlt_frame_set_position( *frame, mlt_producer_position( producer ) );
mlt_position in = mlt_transition_get_in( this );
mlt_position out = mlt_transition_get_out( this );
int length = out - in + 1;
+ double cycle = mlt_properties_get_double( properties, "cycle" );
// Get the new style geometry string
char *property = mlt_properties_get( properties, "geometry" );
// Allow a geometry repeat cycle
- if ( mlt_properties_get_int( properties, "cycle" ) )
- length = mlt_properties_get_int( properties, "cycle" );
+ if ( cycle > 1 )
+ length = cycle;
+ else if ( cycle > 0 )
+ length *= cycle;
// Parse the geometry if we have one
mlt_geometry_parse( geometry, property, length, normalised_width, normalised_height );
alignment_calculate( geometry );
// Adjust to consumer scale
- int x = geometry->item.x * *width / geometry->nw;
- int y = geometry->item.y * *height / geometry->nh;
*width = geometry->sw * *width / geometry->nw;
*height = geometry->sh * *height / geometry->nh;
- //x = ( x | 1 ) ^ 1;
-
- // optimization points - no work to do
- if ( *width < 1 || *height < 1 )
- return 1;
-
- if ( ( x < 0 && -x >= *width ) || ( y < 0 && -y >= *height ) )
- return 1;
-
ret = mlt_frame_get_image( b_frame, image, &format, width, height, 1 );
return ret;
else
{
int length = mlt_transition_get_out( this ) - mlt_transition_get_in( this ) + 1;
- if ( mlt_properties_get_int( properties, "cycle" ) )
- length = mlt_properties_get_int( properties, "cycle" );
+ double cycle = mlt_properties_get_double( properties, "cycle" );
+ if ( cycle > 1 )
+ length = cycle;
+ else if ( cycle > 0 )
+ length *= cycle;
mlt_geometry_refresh( start, mlt_properties_get( properties, "geometry" ), length, normalised_width, normalised_height );
}
// Get the transition from the a frame
mlt_transition this = mlt_frame_pop_service( a_frame );
- // This compositer is yuv422 only
- *format = mlt_image_yuv422;
-
- // Get the image from the a frame
- mlt_frame_get_image( a_frame, image, format, width, height, 1 );
+ // Get in and out
+ int out = ( int )mlt_frame_pop_service( a_frame );
+ int in = ( int )mlt_frame_pop_service( a_frame );
// Get the properties from the transition
mlt_properties properties = MLT_TRANSITION_PROPERTIES( this );
+ // TODO: clean up always_active behaviour
+ if ( mlt_properties_get_int( properties, "always_active" ) )
+ {
+ mlt_events_block( properties, properties );
+ mlt_properties_set_int( properties, "in", in );
+ mlt_properties_set_int( properties, "out", out );
+ mlt_events_unblock( properties, properties );
+ }
+
+ // This compositer is yuv422 only
+ *format = mlt_image_yuv422;
+
if ( b_frame != NULL )
{
// Get the properties of the a frame
// Do the calculation
composite_calculate( this, &result, a_frame, position );
+ // Since we are the consumer of the b_frame, we must pass along these
+ // consumer properties from the a_frame
+ mlt_properties_set_double( b_props, "consumer_deinterlace", mlt_properties_get_double( a_props, "consumer_deinterlace" ) );
+ mlt_properties_set_double( b_props, "consumer_aspect_ratio", mlt_properties_get_double( a_props, "consumer_aspect_ratio" ) );
+ mlt_properties_set_int( b_props, "normalised_width", mlt_properties_get_double( a_props, "normalised_width" ) );
+ mlt_properties_set_int( b_props, "normalised_height", mlt_properties_get_double( a_props, "normalised_height" ) );
+
+ // TODO: Dangerous/temporary optimisation - if nothing to do, then do nothing
+ if ( mlt_properties_get_int( properties, "no_alpha" ) &&
+ result.item.x == 0 && result.item.y == 0 && result.item.w == *width && result.item.h == *height && result.item.mix == 100 )
+ {
+ mlt_frame_get_image( b_frame, image, format, width, height, 1 );
+ if ( !mlt_frame_is_test_card( a_frame ) )
+ mlt_frame_replace_image( a_frame, *image, *format, *width, *height );
+ return 0;
+ }
+
+ // Get the image from the a frame
+ mlt_frame_get_image( a_frame, image, format, width, height, 1 );
+
// Optimisation - no compositing required
if ( result.item.mix == 0 || ( result.item.w == 0 && result.item.h == 0 ) )
return 0;
mlt_properties_set_int( b_props, "dest_height", mlt_properties_get_int( a_props, "dest_height" ) );
}
- // Since we are the consumer of the b_frame, we must pass along these
- // consumer properties from the a_frame
- mlt_properties_set_double( b_props, "consumer_deinterlace", mlt_properties_get_double( a_props, "consumer_deinterlace" ) );
- mlt_properties_set_double( b_props, "consumer_aspect_ratio", mlt_properties_get_double( a_props, "consumer_aspect_ratio" ) );
- mlt_properties_set_int( b_props, "normalised_width", mlt_properties_get_double( a_props, "normalised_width" ) );
- mlt_properties_set_int( b_props, "normalised_height", mlt_properties_get_double( a_props, "normalised_height" ) );
-
// Special case for titling...
if ( mlt_properties_get_int( properties, "titles" ) )
{
}
}
}
+ else
+ {
+ mlt_frame_get_image( a_frame, image, format, width, height, 1 );
+ }
return 0;
}
// Get a unique name to store the frame position
char *name = mlt_properties_get( MLT_TRANSITION_PROPERTIES( this ), "_unique_id" );
- // Assign the current position to the name
- mlt_properties_set_position( MLT_FRAME_PROPERTIES( a_frame ), name, mlt_frame_get_position( a_frame ) );
+ // UGH - this is a TODO - find a more reliable means of obtaining in/out for the always_active case
+ if ( mlt_properties_get_int( MLT_TRANSITION_PROPERTIES( this ), "always_active" ) == 0 )
+ {
+ mlt_frame_push_service( a_frame, ( void * )mlt_properties_get_int( MLT_TRANSITION_PROPERTIES( this ), "in" ) );
+ mlt_frame_push_service( a_frame, ( void * )mlt_properties_get_int( MLT_TRANSITION_PROPERTIES( this ), "out" ) );
+
+ // Assign the current position to the name
+ mlt_properties_set_position( MLT_FRAME_PROPERTIES( a_frame ), name, mlt_frame_get_position( a_frame ) );
+
+ // Propogate the transition properties to the b frame
+ mlt_properties_set_double( MLT_FRAME_PROPERTIES( b_frame ), "relative_position", position_calculate( this, mlt_frame_get_position( a_frame ) ) );
+ }
+ else
+ {
+ mlt_properties props = mlt_properties_get_data( MLT_FRAME_PROPERTIES( b_frame ), "_producer", NULL );
+ mlt_frame_push_service( a_frame, ( void * )mlt_properties_get_int( props, "in" ) );
+ mlt_frame_push_service( a_frame, ( void * )mlt_properties_get_int( props, "out" ) );
+ mlt_properties_set_int( MLT_FRAME_PROPERTIES( b_frame ), "relative_position", mlt_properties_get_int( props, "_frame" ) );
- // Propogate the transition properties to the b frame
- mlt_properties_set_double( MLT_FRAME_PROPERTIES( b_frame ), "relative_position", position_calculate( this, mlt_frame_get_position( a_frame ) ) );
+ // Assign the current position to the name
+ mlt_properties_set_position( MLT_FRAME_PROPERTIES( a_frame ), name, mlt_properties_get_position( MLT_FRAME_PROPERTIES( b_frame ), "relative_position" ) );
+ }
mlt_frame_push_service( a_frame, this );
mlt_frame_push_frame( a_frame, b_frame );
// Default factory
mlt_properties_set( properties, "factory", "fezzik" );
+ // Inform apps and framework that this is a video only transition
+ mlt_properties_set_int( properties, "_transition_type", 1 );
+
#ifdef USE_MMX
//mlt_properties_set_int( properties, "_MMX", composite_have_mmx() );
#endif
// Set the main property
mlt_properties_set( MLT_TRANSITION_PROPERTIES( transition ), "resource", lumafile );
+ // Inform apps and framework that this is a video only transition
+ mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( transition ), "_transition_type", 1 );
+
return transition;
}
return NULL;
static int transition_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
- // Get the properties of the a frame
- mlt_properties a_props = MLT_FRAME_PROPERTIES( frame );
-
// Get the b frame from the stack
mlt_frame b_frame = mlt_frame_pop_audio( frame );
// Get the properties of the b frame
mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );
- // Restore the original get_audio
- frame->get_audio = mlt_properties_get_data( a_props, "mix.get_audio", NULL );
-
double mix_start = 0.5, mix_end = 0.5;
if ( mlt_properties_get( b_props, "audio.previous_mix" ) != NULL )
mix_start = mlt_properties_get_double( b_props, "audio.previous_mix" );
if ( mlt_properties_get( properties, "start" ) != NULL )
{
// Determine the time position of this frame in the transition duration
- mlt_position in = mlt_transition_get_in( this );
- mlt_position out = mlt_transition_get_out( this );
- mlt_position time = mlt_frame_get_position( b_frame );
+ mlt_properties props = mlt_properties_get_data( MLT_FRAME_PROPERTIES( b_frame ), "_producer", NULL );
+ int always_active = mlt_properties_get_int( MLT_TRANSITION_PROPERTIES( this ), "always_active" );
+ mlt_position in = !always_active ? mlt_transition_get_in( this ) : mlt_properties_get_int( props, "in" );
+ mlt_position out = !always_active ? mlt_transition_get_out( this ) : mlt_properties_get_int( props, "out" );
+ int length = mlt_properties_get_int( MLT_TRANSITION_PROPERTIES( this ), "length" );
+ mlt_position time = !always_active ? mlt_frame_get_position( b_frame ) : mlt_properties_get_int( props, "_frame" );
double mix = ( double )( time - in ) / ( double )( out - in + 1 );
-
- // If there is an end mix level adjust mix to the range
- if ( mlt_properties_get( properties, "end" ) != NULL )
- {
- double start = mlt_properties_get_double( properties, "start" );
- double end = mlt_properties_get_double( properties, "end" );
- mix = start + ( end - start ) * mix;
- }
- // A negative means total crossfade (uses position)
- else if ( mlt_properties_get_double( properties, "start" ) >= 0 )
+
+ // TODO: Check the logic here - shouldn't we be computing current and next mixing levels in all cases?
+ if ( length == 0 )
{
- // Otherwise, start/constructor is a constant mix level
- mix = mlt_properties_get_double( properties, "start" );
- }
+ // If there is an end mix level adjust mix to the range
+ if ( mlt_properties_get( properties, "end" ) != NULL )
+ {
+ double start = mlt_properties_get_double( properties, "start" );
+ double end = mlt_properties_get_double( properties, "end" );
+ mix = start + ( end - start ) * mix;
+ }
+ // A negative means total crossfade (uses position)
+ else if ( mlt_properties_get_double( properties, "start" ) >= 0 )
+ {
+ // Otherwise, start/constructor is a constant mix level
+ mix = mlt_properties_get_double( properties, "start" );
+ }
+
+ // Finally, set the mix property on the frame
+ mlt_properties_set_double( b_props, "audio.mix", mix );
- // Finally, set the mix property on the frame
- mlt_properties_set_double( b_props, "audio.mix", mix );
-
- // Initialise transition previous mix value to prevent an inadvertant jump from 0
- if ( mlt_properties_get( properties, "previous_mix" ) == NULL )
+ // Initialise transition previous mix value to prevent an inadvertant jump from 0
+ if ( mlt_properties_get( properties, "previous_mix" ) == NULL )
+ mlt_properties_set_double( properties, "previous_mix", mlt_properties_get_double( b_props, "audio.mix" ) );
+
+ // Tell b frame what the previous mix level was
+ mlt_properties_set_double( b_props, "audio.previous_mix", mlt_properties_get_double( properties, "previous_mix" ) );
+
+ // Save the current mix level for the next iteration
mlt_properties_set_double( properties, "previous_mix", mlt_properties_get_double( b_props, "audio.mix" ) );
-
- // Tell b frame what the previous mix level was
- mlt_properties_set_double( b_props, "audio.previous_mix", mlt_properties_get_double( properties, "previous_mix" ) );
-
- // Save the current mix level for the next iteration
- mlt_properties_set_double( properties, "previous_mix", mlt_properties_get_double( b_props, "audio.mix" ) );
- mlt_properties_set_double( b_props, "audio.reverse", mlt_properties_get_double( properties, "reverse" ) );
- }
-
- // Ensure that the tractor knows this isn't test audio...
- if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( a_frame ), "test_audio" ) )
- {
- mlt_properties_set_int( MLT_FRAME_PROPERTIES( a_frame ), "test_audio", 0 );
- mlt_properties_set_int( MLT_FRAME_PROPERTIES( a_frame ), "silent_audio", 1 );
+ mlt_properties_set_double( b_props, "audio.reverse", mlt_properties_get_double( properties, "reverse" ) );
+ }
+ else
+ {
+ double level = mlt_properties_get_double( properties, "start" );
+ double mix_start = level;
+ double mix_end = mix_start;
+ double mix_increment = 1.0 / length;
+ if ( time - in < length )
+ {
+ mix_start = mix_start * ( ( double )( time - in ) / length );
+ mix_end = mix_start + mix_increment;
+ }
+ else if ( time > out - length )
+ {
+ mix_end = mix_start * ( ( double )( out - time - in ) / length );
+ mix_start = mix_end - mix_increment;
+ }
+
+ mix_start = mix_start < 0 ? 0 : mix_start > level ? level : mix_start;
+ mix_end = mix_end < 0 ? 0 : mix_end > level ? level : mix_end;
+ mlt_properties_set_double( b_props, "audio.previous_mix", mix_start );
+ mlt_properties_set_double( b_props, "audio.mix", mix_end );
+ }
}
- // Backup the original get_audio (it's still needed)
- mlt_properties_set_data( MLT_FRAME_PROPERTIES( a_frame ), "mix.get_audio", a_frame->get_audio, 0, NULL, NULL );
-
// Override the get_audio method
- a_frame->get_audio = transition_get_audio;
-
mlt_frame_push_audio( a_frame, b_frame );
+ mlt_frame_push_audio( a_frame, transition_get_audio );
return a_frame;
}
this->process = transition_process;
if ( arg != NULL )
mlt_properties_set_double( MLT_TRANSITION_PROPERTIES( this ), "start", atof( arg ) );
- mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( this ), "_accepts_blanks", 1 );
+ // Inform apps and framework that this is an audio only transition
+ mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( this ), "_transition_type", 2 );
}
return this;
}
// Resource defines the shape of the region
mlt_properties_set( properties, "resource", arg == NULL ? "rectangle" : arg );
+
+ // Inform apps and framework that this is a video only transition
+ mlt_properties_set_int( properties, "_transition_type", 1 );
}
// Return the transition
else
{
// No audio available on the frame, so get test audio (silence)
- this->get_audio = NULL;
mlt_frame_get_audio( this, buffer, format, frequency, channels, samples );
}
dv_format_wide( dv_decoder ) ? ( this->is_pal ? 118.0/81.0 : 40.0/33.0 ) : ( this->is_pal ? 59.0/54.0 : 10.0/11.0 ) );
// Hmm - register audio callback
- ( *frame )->get_audio = producer_get_audio;
+ mlt_frame_push_audio( *frame, producer_get_audio );
// Push the quality string
mlt_frame_push_service( *frame, mlt_properties_get( MLT_PRODUCER_PROPERTIES( producer ), "quality" ) );
location=region
.description=Titles
-.properties.markup=filter[1].producer.markup
+.properties.markup=filter[1].producer.text
.properties.font=filter[1].producer.font
.properties.size=filter[1].producer.size
.period=2
.filter[0].resource=colour:0x6c010100
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.markup=
+.filter[1].producer.text=
.filter[1].producer.font=Sans
.filter[1].producer.size=24
.filter[1].composite.geometry=0,0:95%x100%
courtesy=region
.description=Courtesy
-.properties.markup=filter[1].producer.markup
+.properties.markup=filter[1].producer.text
.properties.font=filter[1].producer.font
.properties.size=filter[1].producer.size
-.type.markup=text
.period=2
.properties.length[0]=composite.out
.composite.geometry=0,115:230x30:0;12=,:x:100
.filter[0].resource=colour:0x6c010100
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.markup=ETV Exclusive
+.filter[1].producer.text=
.filter[1].producer.font=Sans
.filter[1].producer.size=24
.filter[1].composite.geometry=0,0:95%x100%
exclusive=region
.description=Exclusive
-.properties.markup=filter[1].producer.markup
+.properties.markup=filter[1].producer.text
.properties.font=filter[1].producer.font
.properties.size=filter[1].producer.size
-.type.markup=text
.period=2
.properties.length[0]=composite.out
.composite.geometry=-230,115:230x30;12=0
.filter[0].resource=colour:0x6c010100
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.markup=ETV Exclusive
+.filter[1].producer.text=ETV Exclusive
.filter[1].producer.font=Sans
.filter[1].producer.size=24
.filter[1].producer.weight=700
.filter[0].resource=colour:0x6c010100
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.markup=File Shot
+.filter[1].producer.text=File Shot
.filter[1].producer.font=Sans
.filter[1].producer.size=18
.filter[1].producer.weight=700
.filter[0].composite.geometry=100%,0%:100%x100%:0;12=0%,0%:x:100
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.markup=Special
+.filter[1].producer.text=Special
.filter[1].producer.font=Sans
.filter[1].producer.size=24
.filter[1].producer.weight=700
ticker=region
.description=Tickertape
-.properties.markup=filter[1].producer.markup
+.properties.markup=filter[1].producer.text
.properties.font=filter[1].producer.font
.properties.size=filter[1].producer.size
-.type.markup=text
.properties.length[0]=filter[1].composite.out
.composite.geometry=0,500:722x75
.filter[0]=watermark
.filter[0].composite.titles=1
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.markup=Ticker - provided for reference
+.filter[1].producer.text=Ticker - provided for reference
.filter[1].producer.font=Sans
.filter[1].producer.size=24
.filter[1].producer.weight=700
super=region
.description=Transcription
-.properties.0=filter[1].producer.markup
-.properties.1=filter[2].producer.markup
+.properties.0=filter[1].producer.text
+.properties.1=filter[2].producer.text
.properties.align=filter[1].composite.valign
.properties.weight=filter[1].producer.weight
.properties.f0=filter[1].producer.font
.filter[0].composite.geometry=0,0:100%:100%:70
.filter[1]=watermark
.filter[1].resource=pango:
-.filter[1].producer.encoding=iso8859-1
-.filter[1].producer.markup=
+.filter[1].producer.text=
.filter[1].producer.font=Sans
.filter[1].producer.size=32
.filter[1].producer.weight=700
.filter[1].composite.valign=top
.filter[2]=watermark
.filter[2].resource=pango:
-.filter[2].producer.encoding=iso8859-1
-.filter[2].producer.markup=
+.filter[2].producer.text=
.filter[2].producer.font=Sans
.filter[2].producer.size=32
.filter[2].producer.fgcolour=0x6c0101ff
// Get the filter properties
mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
- // Restore the original get_audio
- frame->get_audio = mlt_frame_pop_audio( frame );
-
int jack_frequency = mlt_properties_get_int( filter_properties, "_sample_rate" );
// Get the producer's audio
static mlt_frame filter_process( mlt_filter this, mlt_frame frame )
{
- if ( frame->get_audio != NULL )
+ if ( mlt_frame_is_test_audio( frame ) == 0 )
{
mlt_properties properties = MLT_FILTER_PROPERTIES( this );
- mlt_frame_push_audio( frame, frame->get_audio );
mlt_frame_push_audio( frame, this );
- frame->get_audio = jackrack_get_audio;
+ mlt_frame_push_audio( frame, jackrack_get_audio );
if ( mlt_properties_get_int( properties, "_sync" ) )
initialise_jack_ports( properties );
if ( mlt_properties_get( properties, "volume.limiter" ) != NULL )
limiter_level = mlt_properties_get_double( properties, "volume.limiter" );
- // Restore the original get_audio
- frame->get_audio = mlt_properties_get_data( properties, "volume.get_audio", NULL );
-
// Get the producer's audio
mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
// fprintf( stderr, "filter_volume: frequency %d\n", *frequency );
// Put a filter reference onto the frame
mlt_properties_set_data( properties, "filter_volume", this, 0, NULL, NULL );
- // Backup the original get_audio (it's still needed)
- mlt_properties_set_data( properties, "volume.get_audio", frame->get_audio, 0, NULL, NULL );
-
// Override the get_audio method
- frame->get_audio = filter_get_audio;
+ mlt_frame_push_audio( frame, filter_get_audio );
return frame;
}
mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( transition ), "sy", 1 );
mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( transition ), "distort", 0 );
mlt_properties_set( MLT_TRANSITION_PROPERTIES( transition ), "start", "0,0:100%x100%" );
+ // Inform apps and framework that this is a video only transition
+ mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( transition ), "_transition_type", 1 );
transition->process = transition_process;
}
return transition;
if ( output_rate == 0 )
output_rate = *frequency;
- // Restore the original get_audio
- frame->get_audio = mlt_frame_pop_audio( frame );
-
// Get the producer's audio
mlt_frame_get_audio( frame, buffer, format, frequency, &channels_avail, samples );
static mlt_frame filter_process( mlt_filter this, mlt_frame frame )
{
- if ( frame->get_audio != NULL )
+ if ( mlt_frame_is_test_audio( frame ) == 0 )
{
- mlt_frame_push_audio( frame, frame->get_audio );
mlt_frame_push_audio( frame, this );
- frame->get_audio = resample_get_audio;
+ mlt_frame_push_audio( frame, resample_get_audio );
}
return frame;
int i; // channel
int count = mlt_properties_get_int( filter_properties, "effect_count" );
- // Restore the original get_audio
- frame->get_audio = mlt_frame_pop_audio( frame );
-
// Get the producer's audio
mlt_frame_get_audio( frame, buffer, format, frequency, &channels_avail, samples );
static mlt_frame filter_process( mlt_filter this, mlt_frame frame )
{
- if ( frame->get_audio != NULL )
+ if ( mlt_frame_is_test_audio( frame ) == 0 )
{
// Add the filter to the frame
- mlt_frame_push_audio( frame, frame->get_audio );
mlt_frame_push_audio( frame, this );
- frame->get_audio = filter_get_audio;
+ mlt_frame_push_audio( frame, filter_get_audio );
// Parse the window property and allocate smoothing buffer if needed
mlt_properties properties = MLT_FILTER_PROPERTIES( this );
mlt_position position = mlt_properties_get_position( frame_properties, "vorbis_position" );
// Get the producer
- mlt_producer this = mlt_properties_get_data( frame_properties, "vorbis_producer", NULL );
+ mlt_producer this = mlt_frame_pop_audio( frame );
// Get the producer properties
mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
}
else
{
- frame->get_audio = NULL;
mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
audio_used = 0;
}
else
{
// Get silence and don't touch the context
- frame->get_audio = NULL;
*samples = mlt_sample_calculator( fps, *frequency, position );
mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
}
return 0;
}
-/** Set up audio handling.
-*/
-
-static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
-{
- // Get the properties
- mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
-
- // Set the audio method
- frame->get_audio = producer_get_audio;
-
- // Set the producer on the frame
- mlt_properties_set_data( properties, "vorbis_producer", this, 0, NULL, NULL );
-}
-
/** Our get frame implementation.
*/
mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "vorbis_position", mlt_producer_position( this ) );
// Set up the audio
- producer_set_up_audio( this, *frame );
+ mlt_frame_push_audio( *frame, this );
+ mlt_frame_push_audio( *frame, producer_get_audio );
// Calculate the next timecode
mlt_producer_prepare_next( this );