for( i = 1; i < 3; i++ )
{
CHECKED_MALLOC( frame->buffer[i], luma_plane_size/4 );
- frame->plane[i] = frame->buffer[i] + (frame->i_stride[i] * i_padv + PADH)/2;
+ frame->plane[i] = (uint8_t*)frame->buffer[i] + (frame->i_stride[i] * i_padv + PADH)/2;
}
/* all 4 luma planes allocated together, since the cacheline split code
* requires them to be in-phase wrt cacheline alignment. */
CHECKED_MALLOC( frame->buffer[0], 4*luma_plane_size);
for( i = 0; i < 4; i++ )
- frame->filtered[i] = frame->buffer[0] + i*luma_plane_size + frame->i_stride[0] * i_padv + PADH;
+ frame->filtered[i] = (uint8_t*)frame->buffer[0] + i*luma_plane_size + frame->i_stride[0] * i_padv + PADH;
frame->plane[0] = frame->filtered[0];
if( h->frames.b_have_lowres )
void x264_deblock_v_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
+void x264_deblock_v_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta );
+void x264_deblock_h_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta );
#ifdef ARCH_X86
void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
+void x264_deblock_h_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
+void x264_deblock_v8_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
void x264_deblock_v_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
{
x264_deblock_v8_luma_mmxext( pix, stride, alpha, beta, tc0 );
x264_deblock_v8_luma_mmxext( pix+8, stride, alpha, beta, tc0+2 );
}
+/* Full-16-pixel-wide vertical intra deblock for MMX: the asm kernel
+ * (x264_deblock_v8_luma_intra_mmxext) only handles 8 pixels per call,
+ * so cover the 16-wide luma edge with two calls offset by 8 pixels. */
+void x264_deblock_v_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta )
+{
+ x264_deblock_v8_luma_intra_mmxext( pix, stride, alpha, beta );
+ x264_deblock_v8_luma_intra_mmxext( pix+8, stride, alpha, beta );
+}
#endif
#endif
#ifdef ARCH_X86
pf->deblock_v_luma = x264_deblock_v_luma_mmxext;
pf->deblock_h_luma = x264_deblock_h_luma_mmxext;
+ pf->deblock_v_luma_intra = x264_deblock_v_luma_intra_mmxext;
+ pf->deblock_h_luma_intra = x264_deblock_h_luma_intra_mmxext;
#endif
if( cpu&X264_CPU_SSE2 )
{
pf->deblock_v_luma = x264_deblock_v_luma_sse2;
pf->deblock_h_luma = x264_deblock_h_luma_sse2;
+ pf->deblock_v_luma_intra = x264_deblock_v_luma_intra_sse2;
+ pf->deblock_h_luma_intra = x264_deblock_h_luma_intra_sse2;
}
}
#endif
x264_pthread_mutex_unlock( &frame->mutex );
}
+/* Thread-safe setter for the encoder's estimated frame size (in bits).
+ * Serializes the ratecontrol update on the fenc frame mutex so that
+ * concurrent readers (x264_frame_size_estimated_get) see a consistent
+ * value across encoder threads. */
+void x264_frame_size_estimated_set( x264_t *h, int bits )
+{
+ x264_pthread_mutex_lock( &h->fenc->mutex );
+ x264_ratecontrol_set_estimated_size(h, bits);
+ x264_pthread_mutex_unlock( &h->fenc->mutex );
+}
+
+/* Thread-safe getter for the estimated frame size (in bits).
+ * Reads the ratecontrol value under the same fenc frame mutex used by
+ * x264_frame_size_estimated_set, so the returned snapshot is coherent. */
+int x264_frame_size_estimated_get( x264_t const *h)
+{
+ int size;
+ x264_pthread_mutex_lock( &h->fenc->mutex );
+ size = x264_ratecontrol_get_estimated_size(h);
+ x264_pthread_mutex_unlock( &h->fenc->mutex );
+ return size;
+}
+
#else
void x264_frame_cond_broadcast( x264_frame_t *frame, int i_lines_completed )
{}
void x264_frame_cond_wait( x264_frame_t *frame, int i_lines_completed )
{}
+
+/* Single-threaded build: no other thread can race on the estimated
+ * size, so forward straight to ratecontrol without any locking. */
+void x264_frame_size_estimated_set( x264_t *h, int bits )
+{
+ x264_ratecontrol_set_estimated_size(h, bits);
+}
+
+/* Single-threaded build: lock-free getter for the estimated frame size
+ * (in bits).  Must mirror the threaded variant minus the mutex.
+ * Fix: the patch originally called x264_ratecontrol_set_estimated_size(h)
+ * here — the wrong function, and with the wrong arity (the setter takes
+ * (h, bits), see the threaded path above) — so it could not compile and
+ * did not read the value.  Call the getter, as the threaded variant does. */
+int x264_frame_size_estimated_get( x264_t const *h)
+{
+ int size;
+ size = x264_ratecontrol_get_estimated_size(h);
+ return size;
+}
#endif