X-Git-Url: http://research.m1stereo.tv/gitweb?a=blobdiff_plain;f=src%2Fframework%2Fmlt_frame.h;h=bd2ea27e7b9eb4895b53e10a1c6b741904b33a2a;hb=d61a4ab32e7b834996d394b2e8032680e05109ff;hp=77ca012c1012ec93d680360baf2f144e07d57044;hpb=1b9e19bba9f837a56cc70072c21342028a3b96bb;p=melted

diff --git a/src/framework/mlt_frame.h b/src/framework/mlt_frame.h
index 77ca012..bd2ea27 100644
--- a/src/framework/mlt_frame.h
+++ b/src/framework/mlt_frame.h
@@ -43,6 +43,8 @@ struct mlt_frame_s
 
 #define MLT_FRAME_PROPERTIES( frame )   ( &( frame )->parent )
 #define MLT_FRAME_SERVICE_STACK( frame )   ( ( frame )->stack_service )
+#define MLT_FRAME_IMAGE_STACK( frame )   ( ( frame )->stack_image )
+#define MLT_FRAME_AUDIO_STACK( frame )   ( ( frame )->stack_audio )
 
 extern mlt_frame mlt_frame_init( );
 extern mlt_properties mlt_frame_properties( mlt_frame self );
@@ -74,24 +76,23 @@ extern void mlt_frame_close( mlt_frame self );
 /* convenience functions */
 extern int mlt_convert_rgb24a_to_yuv422( uint8_t *rgba, int width, int height, int stride, uint8_t *yuv, uint8_t *alpha );
 extern int mlt_convert_rgb24_to_yuv422( uint8_t *rgb, int width, int height, int stride, uint8_t *yuv );
+extern int mlt_convert_bgr24a_to_yuv422( uint8_t *rgba, int width, int height, int stride, uint8_t *yuv, uint8_t *alpha );
+extern int mlt_convert_argb_to_yuv422( uint8_t *rgba, int width, int height, int stride, uint8_t *yuv, uint8_t *alpha );
+extern int mlt_convert_bgr24_to_yuv422( uint8_t *rgb, int width, int height, int stride, uint8_t *yuv );
 extern int mlt_convert_yuv420p_to_yuv422( uint8_t *yuv420p, int width, int height, int stride, uint8_t *yuv );
 extern uint8_t *mlt_frame_resize_yuv422( mlt_frame self, int owidth, int oheight );
 extern uint8_t *mlt_frame_rescale_yuv422( mlt_frame self, int owidth, int oheight );
 extern void mlt_resize_yuv422( uint8_t *output, int owidth, int oheight, uint8_t *input, int iwidth, int iheight );
 extern int mlt_frame_mix_audio( mlt_frame self, mlt_frame that, float weight_start, float weight_end, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples );
+extern int mlt_frame_combine_audio( mlt_frame self, mlt_frame that, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples );
 extern int mlt_sample_calculator( float fps, int frequency, int64_t position );
+extern int64_t mlt_sample_calculator_to_now( float fps, int frequency, int64_t position );
 
 /* this macro scales rgb into the yuv gamut, y is scaled by 219/255 and uv by 224/255 */
 #define RGB2YUV(r, g, b, y, u, v)\
-  y = ((257*r + 504*g + 98*b) >> 10) + 16;\
-  u = ((-148*r - 291*g + 439*b) >> 10) + 128;\
-  v = ((439*r - 368*g - 71*b) >> 10) + 128;\
-  y = y < 16 ? 16 : y;\
-  u = u < 16 ? 16 : u;\
-  v = v < 16 ? 16 : v;\
-  y = y > 235 ? 235 : y;\
-  u = u > 240 ? 240 : u;\
-  v = v > 240 ? 240 : v
+  y = ((263*r + 516*g + 100*b) >> 10) + 16;\
+  u = ((-152*r - 298*g + 450*b) >> 10) + 128;\
+  v = ((450*r - 377*g - 73*b) >> 10) + 128;
 
 /* this macro assumes the user has already scaled their rgb down into the broadcast limits */
 #define RGB2YUV_UNSCALED(r, g, b, y, u, v)\
@@ -105,4 +106,12 @@ extern int mlt_sample_calculator( float fps, int frequency, int64_t position );
   u = u > 240 ? 240 : u;\
   v = v > 240 ? 240 : v
 
+#define YUV2RGB( y, u, v, r, g, b ) \
+  r = ((1192 * ( y - 16 ) + 1634 * ( v - 128 ) ) >> 10 ); \
+  g = ((1192 * ( y - 16 ) - 832 * ( v - 128 ) - 400 * ( u - 128 ) ) >> 10 ); \
+  b = ((1192 * ( y - 16 ) + 2066 * ( u - 128 ) ) >> 10 ); \
+  r = r < 0 ? 0 : r > 255 ? 255 : r; \
+  g = g < 0 ? 0 : g > 255 ? 255 : g; \
+  b = b < 0 ? 0 : b > 255 ? 255 : b;
+
 #endif
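Not part of the patch above: a minimal usage sketch of the RGB2YUV and YUV2RGB macros this header defines. Because both macros shift by 10, their coefficients are per-1024 fixed-point values (e.g. 263 is roughly 0.257 * 1024 for the Y term), which is what the revised RGB2YUV constants reflect. The include path and the standalone main() below are assumptions for illustration, not part of the project.

/* Illustrative sketch only -- not part of the patch.
 * Round-trips one RGB pixel through the broadcast-range conversion macros. */
#include <stdio.h>
#include <framework/mlt_frame.h>   /* assumed include path */

int main( void )
{
	int r = 200, g = 100, b = 50;
	int y, u, v;
	int r2, g2, b2;

	/* Scale full-range RGB into the broadcast YUV gamut (Y 16-235, U/V 16-240). */
	RGB2YUV( r, g, b, y, u, v );

	/* Expand back to full-range RGB; the macro clamps the results to 0-255. */
	YUV2RGB( y, u, v, r2, g2, b2 );

	printf( "rgb(%d,%d,%d) -> yuv(%d,%d,%d) -> rgb(%d,%d,%d)\n",
		r, g, b, y, u, v, r2, g2, b2 );
	return 0;
}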