#include <assert.h>
#include <limits.h>
+#if HAVE_INTERLACED
+# define MB_INTERLACED h->mb.b_interlaced
+# define SLICE_MBAFF h->sh.b_mbaff
+# define PARAM_INTERLACED h->param.b_interlaced
+#else
+# define MB_INTERLACED 0
+# define SLICE_MBAFF 0
+# define PARAM_INTERLACED 0
+#endif
+
/* Unions for type-punning.
* Mn: load or store n bits, aligned, native-endian
* CPn: copy n bits, aligned, native-endian
{
bs[dir][edge][i] = 2;
}
- else if( (edge == 0 && h->mb.b_interlaced != neighbour_field[dir]) ||
+ else if( (edge == 0 && MB_INTERLACED != neighbour_field[dir]) ||
ref[0][q] != ref[0][p] ||
abs( mv[0][q][0] - mv[0][p][0] ) >= 4 ||
abs( mv[0][q][1] - mv[0][p][1] ) >= mvy_limit ||
if( h->mb.i_neighbour & MB_LEFT )
{
- if( h->mb.field[h->mb.i_mb_left_xy[0]] != h->mb.b_interlaced )
+ if( h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
{
static const uint8_t offset[2][2][8] = {
{ { 0, 0, 0, 0, 1, 1, 1, 1 },
memset( bS, 4, 8 );
else
{
- const uint8_t *off = offset[h->mb.b_interlaced][h->mb.i_mb_y&1];
+ const uint8_t *off = offset[MB_INTERLACED][h->mb.i_mb_y&1];
uint8_t (*nnz)[24] = h->mb.non_zero_count;
for( int i = 0; i < 8; i++ )
{
- int left = h->mb.i_mb_left_xy[h->mb.b_interlaced ? i>>2 : i&1];
+ int left = h->mb.i_mb_left_xy[MB_INTERLACED ? i>>2 : i&1];
int nnz_this = h->mb.cache.non_zero_count[x264_scan8[0]+8*(i>>1)];
int nnz_left = nnz[left][3 + 4*off[i]];
if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
}
}
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
for( int i = 0; i < 4; i++ ) bs[0][0][i] = bS[i];
for( int i = 0; i < 4; i++ ) bs[0][4][i] = bS[4+i];
if( h->mb.i_neighbour & MB_TOP )
{
- if( !(h->mb.i_mb_y&1) && !h->mb.b_interlaced && h->mb.field[h->mb.i_mb_top_xy] )
+ if( !(h->mb.i_mb_y&1) && !MB_INTERLACED && h->mb.field[h->mb.i_mb_top_xy] )
{
/* Need to filter both fields (even for frame macroblocks).
* Filter top two rows using the top macroblock of the above
void x264_frame_deblock_row( x264_t *h, int mb_y )
{
- int b_interlaced = h->sh.b_mbaff;
- int qp_thresh = 15 - X264_MIN( h->sh.i_alpha_c0_offset, h->sh.i_beta_offset ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
+ int b_interlaced = SLICE_MBAFF;
+ int qp_thresh = 15 - X264_MIN( h->sh.i_alpha_c0_offset, h->sh.i_beta_offset ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
int stridey = h->fdec->i_stride[0];
int strideuv = h->fdec->i_stride[1];
pixel *pixy = h->fdec->plane[0] + 16*mb_y*stridey + 16*mb_x;
pixel *pixuv = h->fdec->plane[1] + 8*mb_y*strideuv + 16*mb_x;
- if( mb_y & h->mb.b_interlaced )
+ if( mb_y & MB_INTERLACED )
{
pixy -= 15*stridey;
pixuv -= 7*strideuv;
}
- int stride2y = stridey << h->mb.b_interlaced;
- int stride2uv = strideuv << h->mb.b_interlaced;
+ int stride2y = stridey << MB_INTERLACED;
+ int stride2uv = strideuv << MB_INTERLACED;
int qp = h->mb.qp[mb_xy];
int qpc = h->chroma_qp_table[qp];
int first_edge_only = h->mb.type[mb_xy] == P_SKIP || qp <= qp_thresh;
if( h->mb.i_neighbour & MB_LEFT )
{
- if( b_interlaced && h->mb.field[h->mb.i_mb_left_xy[0]] != h->mb.b_interlaced )
+ if( b_interlaced && h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
{
int luma_qp[2];
int chroma_qp[2];
deblock_edge( h, pixuv + 1, 2*strideuv, bs[0][0], chroma_qp[0], 1, deblock_v_chroma_mbaff_c );
}
- int offy = h->mb.b_interlaced ? 4 : 0;
- int offuv = h->mb.b_interlaced ? 3 : 0;
+ int offy = MB_INTERLACED ? 4 : 0;
+ int offuv = MB_INTERLACED ? 3 : 0;
left_qp[1] = h->mb.qp[h->mb.i_mb_left_xy[1]];
luma_qp[1] = (current_qp + left_qp[1] + 1) >> 1;
chroma_qp[1] = (h->chroma_qp_table[current_qp] + h->chroma_qp_table[left_qp[1]] + 1) >> 1;
if( h->mb.i_neighbour & MB_TOP )
{
- if( b_interlaced && !(mb_y&1) && !h->mb.b_interlaced && h->mb.field[h->mb.i_mb_top_xy] )
+ if( b_interlaced && !(mb_y&1) && !MB_INTERLACED && h->mb.field[h->mb.i_mb_top_xy] )
{
int mbn_xy = mb_xy - 2 * h->mb.i_mb_stride;
int qpc_top = (h->chroma_qp_table[qp] + h->chroma_qp_table[qpt] + 1) >> 1;
int intra_top = IS_INTRA( h->mb.type[h->mb.i_mb_top_xy] );
- if( (!b_interlaced || (!h->mb.b_interlaced && !h->mb.field[h->mb.i_mb_top_xy]))
+ if( (!b_interlaced || (!MB_INTERLACED && !h->mb.field[h->mb.i_mb_top_xy]))
&& (intra_cur || intra_top) )
{
FILTER( _intra, 1, 0, qp_top, qpc_top );
memset( bs, 3, 2*8*4*sizeof(uint8_t) );
else
h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
- bs, 4 >> h->sh.b_mbaff, h->sh.i_type == SLICE_TYPE_B, h );
+ bs, 4 >> SLICE_MBAFF, h->sh.i_type == SLICE_TYPE_B, h );
int transform_8x8 = h->mb.b_transform_8x8;
pixel *fdec = h->mb.pic.p_fdec[0];
int i_mb_count = h->mb.i_mb_count;
int i_stride, i_width, i_lines;
- int i_padv = PADV << h->param.b_interlaced;
+ int i_padv = PADV << PARAM_INTERLACED;
int luma_plane_size, chroma_plane_size;
int align = h->param.cpu&X264_CPU_CACHELINE_64 ? 64 : h->param.cpu&X264_CPU_CACHELINE_32 ? 32 : 16;
int disalign = h->param.cpu&X264_CPU_ALTIVEC ? 1<<9 : 1<<10;
CHECKED_MALLOC( frame->buffer[1], chroma_plane_size * sizeof(pixel) );
frame->plane[1] = frame->buffer[1] + frame->i_stride[1] * i_padv/2 + PADH;
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
CHECKED_MALLOC( frame->buffer_fld[1], chroma_plane_size * sizeof(pixel) );
frame->plane_fld[1] = frame->buffer_fld[1] + frame->i_stride[1] * i_padv/2 + PADH;
{
/* FIXME: Don't allocate both buffers in non-adaptive MBAFF. */
CHECKED_MALLOC( frame->buffer[0], 4*luma_plane_size * sizeof(pixel) );
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
CHECKED_MALLOC( frame->buffer_fld[0], 4*luma_plane_size * sizeof(pixel) );
for( int i = 0; i < 4; i++ )
{
else
{
CHECKED_MALLOC( frame->buffer[0], luma_plane_size * sizeof(pixel) );
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
CHECKED_MALLOC( frame->buffer_fld[0], luma_plane_size * sizeof(pixel) );
frame->filtered[0] = frame->plane[0] = frame->buffer[0] + frame->i_stride[0] * i_padv + PADH;
frame->filtered_fld[0] = frame->plane_fld[0] = frame->buffer_fld[0] + frame->i_stride[0] * i_padv + PADH;
frame->i_stride[0] * (frame->i_lines[0] + 2*i_padv) * sizeof(uint16_t) << h->frames.b_have_sub8x8_esa );
frame->integral = (uint16_t*)frame->buffer[3] + frame->i_stride[0] * i_padv + PADH;
}
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
CHECKED_MALLOC( frame->field, i_mb_count * sizeof(uint8_t) );
}
else /* fenc frame */
void x264_frame_expand_border( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
int b_start = !mb_y;
- if( mb_y & h->sh.b_mbaff )
+ if( mb_y & SLICE_MBAFF )
return;
for( int i = 0; i < frame->i_plane; i++ )
{
int stride = frame->i_stride[i];
int width = 16*h->sps->i_mb_width;
- int height = (b_end ? 16*(h->mb.i_mb_height - mb_y) >> h->sh.b_mbaff : 16) >> !!i;
+ int height = (b_end ? 16*(h->mb.i_mb_height - mb_y) >> SLICE_MBAFF : 16) >> !!i;
int padh = PADH;
int padv = PADV >> !!i;
// buffer: 2 chroma, 3 luma (rounded to 4) because deblocking goes beyond the top of the mb
if( b_end && !b_start )
- height += 4 >> (!!i + h->sh.b_mbaff);
+ height += 4 >> (!!i + SLICE_MBAFF);
pixel *pix;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
// border samples for each field are extended separately
pix = frame->plane_fld[i] + X264_MAX(0, (16*mb_y-4)*stride >> !!i);
int b_start = !mb_y;
int stride = frame->i_stride[0];
int width = 16*h->mb.i_mb_width + 8;
- int height = b_end ? (16*(h->mb.i_mb_height - mb_y) >> h->sh.b_mbaff) + 16 : 16;
+ int height = b_end ? (16*(h->mb.i_mb_height - mb_y) >> SLICE_MBAFF) + 16 : 16;
int padh = PADH - 4;
int padv = PADV - 8;
for( int i = 1; i < 4; i++ )
{
// buffer: 8 luma, to match the hpel filter
pixel *pix;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
pix = frame->filtered_fld[i] + (16*mb_y - 16) * stride - 4;
plane_expand_border( pix, stride*2, width, height, padh, padv, b_start, b_end, 0 );
}
pix = frame->filtered[i] + (16*mb_y - 8) * stride - 4;
- plane_expand_border( pix, stride, width, height << h->sh.b_mbaff, padh, padv, b_start, b_end, 0 );
+ plane_expand_border( pix, stride, width, height << SLICE_MBAFF, padh, padv, b_start, b_end, 0 );
}
}
{
for( int y = i_height; y < i_height + i_pady; y++ )
memcpy( &frame->plane[i][y*frame->i_stride[i]],
- &frame->plane[i][(i_height-(~y&h->param.b_interlaced)-1)*frame->i_stride[i]],
+ &frame->plane[i][(i_height-(~y&PARAM_INTERLACED)-1)*frame->i_stride[i]],
(i_width + i_padx) * sizeof(pixel) );
}
}
mvx, mvy, 4*width, 4*height, &h->sh.weight[i_ref][0] );
// chroma is offset if MCing from a field of opposite parity
- if( h->mb.b_interlaced & i_ref )
+ if( MB_INTERLACED & i_ref )
mvy += (h->mb.i_mb_y & 1)*4 - 2;
h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x],
h->mb.pic.p_fref[1][i_ref], h->mb.pic.i_stride[0],
mvx, mvy, 4*width, 4*height, weight_none );
- if( h->mb.b_interlaced & i_ref )
+ if( MB_INTERLACED & i_ref )
mvy += (h->mb.i_mb_y & 1)*4 - 2;
h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x],
h->mc.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
src0, i_stride0, src1, i_stride1, weight );
- if( h->mb.b_interlaced & i_ref0 )
+ if( MB_INTERLACED & i_ref0 )
mvy0 += (h->mb.i_mb_y & 1)*4 - 2;
- if( h->mb.b_interlaced & i_ref1 )
+ if( MB_INTERLACED & i_ref1 )
mvy1 += (h->mb.i_mb_y & 1)*4 - 2;
h->mc.mc_chroma( tmp0, tmp0+8, 16, h->mb.pic.p_fref[0][i_ref0][4], h->mb.pic.i_stride[1],
h->mb.i_b8_stride = h->mb.i_mb_width * 2;
h->mb.i_b4_stride = h->mb.i_mb_width * 4;
- h->mb.b_interlaced = h->param.b_interlaced;
+ h->mb.b_interlaced = PARAM_INTERLACED;
CHECKED_MALLOC( h->mb.qp, i_mb_count * sizeof(int8_t) );
CHECKED_MALLOC( h->mb.cbp, i_mb_count * sizeof(int16_t) );
for( int i = 0; i < 2; i++ )
{
- int i_refs = X264_MIN(X264_REF_MAX, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << h->param.b_interlaced;
+ int i_refs = X264_MIN(X264_REF_MAX, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << PARAM_INTERLACED;
if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
i_refs = X264_MIN(X264_REF_MAX, i_refs + 1 + (BIT_DEPTH == 8)); //smart weights add two duplicate frames, one in >8-bit
if( h->param.analyse.i_weighted_pred )
{
- int i_padv = PADV << h->param.b_interlaced;
+ int i_padv = PADV << PARAM_INTERLACED;
int luma_plane_size = 0;
int numweightbuf;
{
if( !b_lookahead )
{
- for( int i = 0; i <= 4*h->param.b_interlaced; i++ )
+ for( int i = 0; i <= 4*PARAM_INTERLACED; i++ )
for( int j = 0; j < 2; j++ )
{
/* shouldn't really be initialized, just silences a valgrind false-positive in predict_8x8_filter_mmx */
CHECKED_MALLOCZERO( h->intra_border_backup[i][j], (h->sps->i_mb_width*16+32) * sizeof(pixel) );
h->intra_border_backup[i][j] += 16;
- if( !h->param.b_interlaced )
+ if( !PARAM_INTERLACED )
h->intra_border_backup[1][j] = h->intra_border_backup[i][j];
}
- for( int i = 0; i <= h->param.b_interlaced; i++ )
+ for( int i = 0; i <= PARAM_INTERLACED; i++ )
{
CHECKED_MALLOC( h->deblock_strength[i], sizeof(**h->deblock_strength) * h->mb.i_mb_width );
h->deblock_strength[1] = h->deblock_strength[i];
{
if( !b_lookahead )
{
- for( int i = 0; i <= h->param.b_interlaced; i++ )
+ for( int i = 0; i <= PARAM_INTERLACED; i++ )
x264_free( h->deblock_strength[i] );
- for( int i = 0; i <= 4*h->param.b_interlaced; i++ )
+ for( int i = 0; i <= 4*PARAM_INTERLACED; i++ )
for( int j = 0; j < 2; j++ )
x264_free( h->intra_border_backup[i][j] - 16 );
}
{
deblock_ref_table(-2) = -2;
deblock_ref_table(-1) = -1;
- for( int i = 0; i < h->i_ref[0] << h->sh.b_mbaff; i++ )
+ for( int i = 0; i < h->i_ref[0] << SLICE_MBAFF; i++ )
{
/* Mask off high bits to avoid frame num collisions with -1/-2.
* In current x264 frame num values don't cover a range of more
* than 32, so 6 bits is enough for uniqueness. */
- if( !h->mb.b_interlaced )
+ if( !MB_INTERLACED )
deblock_ref_table(i) = h->fref[0][i]->i_frame_num&63;
else
deblock_ref_table(i) = ((h->fref[0][i>>1]->i_frame_num&63)<<1) + (i&1);
memset( h->mb.cache.ref, -2, sizeof( h->mb.cache.ref ) );
if( h->i_ref[0] > 0 )
- for( int field = 0; field <= h->sh.b_mbaff; field++ )
+ for( int field = 0; field <= SLICE_MBAFF; field++ )
{
int curpoc = h->fdec->i_poc + h->fdec->i_delta_poc[field];
int refpoc = h->fref[0][0]->i_poc + h->fref[0][0]->i_delta_poc[field];
{
int w = (i ? 8 : 16);
int i_stride = h->fdec->i_stride[i];
- int i_stride2 = i_stride << h->mb.b_interlaced;
- int i_pix_offset = h->mb.b_interlaced
+ int i_stride2 = i_stride << MB_INTERLACED;
+ int i_pix_offset = MB_INTERLACED
? 16 * mb_x + w * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
: 16 * mb_x + w * mb_y * i_stride;
pixel *plane_fdec = &h->fdec->plane[i][i_pix_offset];
- int fdec_idx = b_mbaff ? (h->mb.b_interlaced ? (3 + (mb_y&1)) : (mb_y&1) ? 2 : 4) : 0;
+ int fdec_idx = b_mbaff ? (MB_INTERLACED ? (3 + (mb_y&1)) : (mb_y&1) ? 2 : 4) : 0;
pixel *intra_fdec = &h->intra_border_backup[fdec_idx][i][mb_x*16];
int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
/* ref_pix_offset[0] references the current field and [1] the opposite field. */
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
ref_pix_offset[1] += (1-2*(mb_y&1)) * i_stride;
h->mb.pic.i_stride[i] = i_stride2;
h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];
for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
{
// Interpolate between pixels in same field.
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
plane_src = h->fref[0][j>>1]->plane_fld[i];
filtered_src = h->fref[0][j>>1]->filtered_fld;
for( int k = 1; k < 4; k++ )
h->mb.pic.p_fref[0][j][k] = filtered_src[k] + ref_pix_offset[j&1];
if( h->sh.weight[j][0].weightfn )
- h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> h->mb.b_interlaced][ref_pix_offset[j&1]];
+ h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> MB_INTERLACED][ref_pix_offset[j&1]];
else
h->mb.pic.p_fref_w[j] = h->mb.pic.p_fref[0][j][0];
}
if( h->sh.i_type == SLICE_TYPE_B )
for( int j = 0; j < h->mb.pic.i_fref[1]; j++ )
{
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
plane_src = h->fref[1][j>>1]->plane_fld[i];
filtered_src = h->fref[1][j>>1]->filtered_fld;
static void inline x264_macroblock_cache_load_neighbours( x264_t *h, int mb_x, int mb_y )
{
- int top_y = mb_y - (1 << h->mb.b_interlaced);
+ int top_y = mb_y - (1 << MB_INTERLACED);
int top = top_y * h->mb.i_mb_stride + mb_x;
h->mb.i_mb_x = mb_x;
h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2;
h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
if( mb_y&1 )
{
- if( mb_x && h->mb.b_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
+ if( mb_x && MB_INTERLACED != h->mb.field[h->mb.i_mb_xy-1] )
{
left[0] = left[1] = h->mb.i_mb_xy - 1 - h->mb.i_mb_stride;
h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2 - 2*h->mb.i_b8_stride;
h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4 - 4*h->mb.i_b4_stride;
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
h->mb.left_index_table = &left_indices[2];
left[1] += h->mb.i_mb_stride;
h->mb.topleft_partition = 1;
}
}
- if( !h->mb.b_interlaced )
+ if( !MB_INTERLACED )
topright_y = -1;
}
else
{
- if( h->mb.b_interlaced && top >= 0 )
+ if( MB_INTERLACED && top >= 0 )
{
if( !h->mb.field[top] )
{
if( mb_x < h->mb.i_mb_width-1 )
topright_y += !h->mb.field[h->mb.i_mb_stride*topright_y + mb_x + 1];
}
- if( mb_x && h->mb.b_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
+ if( mb_x && MB_INTERLACED != h->mb.field[h->mb.i_mb_xy-1] )
{
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
h->mb.left_index_table = &left_indices[2];
left[1] += h->mb.i_mb_stride;
}
/* We can't predict from the previous threadslice since it hasn't been encoded yet. */
- if( (h->i_threadslice_start >> h->mb.b_interlaced) != (mb_y >> h->mb.b_interlaced) )
+ if( (h->i_threadslice_start >> MB_INTERLACED) != (mb_y >> MB_INTERLACED) )
{
if( top >= 0 )
{
}
}
+#define LTOP 0
+#if HAVE_INTERLACED
+# define LBOT 1
+#else
+# define LBOT 0
+#endif
+
void x264_macroblock_cache_load( x264_t *h, int mb_x, int mb_y )
{
x264_macroblock_cache_load_neighbours( h, mb_x, mb_y );
if( h->mb.i_neighbour & MB_LEFT )
{
- const int16_t top_luma = (cbp[left[0]] >> (left_index_table->mv[0]&(~1))) & 2;
- const int16_t bot_luma = (cbp[left[1]] >> (left_index_table->mv[2]&(~1))) & 2;
- h->mb.cache.i_cbp_left = (cbp[left[0]] & 0xfff0) | (bot_luma<<2) | top_luma;
+ if( SLICE_MBAFF )
+ {
+ const int16_t top_luma = (cbp[left[LTOP]] >> (left_index_table->mv[0]&(~1))) & 2;
+ const int16_t bot_luma = (cbp[left[LBOT]] >> (left_index_table->mv[2]&(~1))) & 2;
+ h->mb.cache.i_cbp_left = (cbp[left[LTOP]] & 0xfff0) | (bot_luma<<2) | top_luma;
+ }
+ else
+ h->mb.cache.i_cbp_left = cbp[left[0]];
/* load intra4x4 */
- h->mb.cache.intra4x4_pred_mode[x264_scan8[0 ] - 1] = i4x4[left[0]][left_index_table->intra[0]];
- h->mb.cache.intra4x4_pred_mode[x264_scan8[2 ] - 1] = i4x4[left[0]][left_index_table->intra[1]];
- h->mb.cache.intra4x4_pred_mode[x264_scan8[8 ] - 1] = i4x4[left[1]][left_index_table->intra[2]];
- h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = i4x4[left[1]][left_index_table->intra[3]];
+ h->mb.cache.intra4x4_pred_mode[x264_scan8[0 ] - 1] = i4x4[left[LTOP]][left_index_table->intra[0]];
+ h->mb.cache.intra4x4_pred_mode[x264_scan8[2 ] - 1] = i4x4[left[LTOP]][left_index_table->intra[1]];
+ h->mb.cache.intra4x4_pred_mode[x264_scan8[8 ] - 1] = i4x4[left[LBOT]][left_index_table->intra[2]];
+ h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = i4x4[left[LBOT]][left_index_table->intra[3]];
/* load non_zero_count */
- h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] = nnz[left[0]][left_index_table->nnz[0]];
- h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] = nnz[left[0]][left_index_table->nnz[1]];
- h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] = nnz[left[1]][left_index_table->nnz[2]];
- h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[left[1]][left_index_table->nnz[3]];
+ h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] = nnz[left[LTOP]][left_index_table->nnz[0]];
+ h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] = nnz[left[LTOP]][left_index_table->nnz[1]];
+ h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] = nnz[left[LBOT]][left_index_table->nnz[2]];
+ h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[left[LBOT]][left_index_table->nnz[3]];
- h->mb.cache.non_zero_count[x264_scan8[16+0] - 1] = nnz[left[0]][left_index_table->nnz_chroma[0]];
- h->mb.cache.non_zero_count[x264_scan8[16+2] - 1] = nnz[left[1]][left_index_table->nnz_chroma[1]];
+ h->mb.cache.non_zero_count[x264_scan8[16+0] - 1] = nnz[left[LTOP]][left_index_table->nnz_chroma[0]];
+ h->mb.cache.non_zero_count[x264_scan8[16+2] - 1] = nnz[left[LBOT]][left_index_table->nnz_chroma[1]];
- h->mb.cache.non_zero_count[x264_scan8[16+4+0] - 1] = nnz[left[0]][left_index_table->nnz_chroma[2]];
- h->mb.cache.non_zero_count[x264_scan8[16+4+2] - 1] = nnz[left[1]][left_index_table->nnz_chroma[3]];
+ h->mb.cache.non_zero_count[x264_scan8[16+4+0] - 1] = nnz[left[LTOP]][left_index_table->nnz_chroma[2]];
+ h->mb.cache.non_zero_count[x264_scan8[16+4+2] - 1] = nnz[left[LBOT]][left_index_table->nnz_chroma[3]];
}
else
{
+ ( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size[top] );
}
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
- h->mb.pic.i_fref[0] = h->i_ref[0] << h->mb.b_interlaced;
- h->mb.pic.i_fref[1] = h->i_ref[1] << h->mb.b_interlaced;
+ h->mb.pic.i_fref[0] = h->i_ref[0] << MB_INTERLACED;
+ h->mb.pic.i_fref[1] = h->i_ref[1] << MB_INTERLACED;
}
- if( !h->param.b_interlaced )
+ if( !PARAM_INTERLACED )
{
x264_copy_column8( h->mb.pic.p_fdec[0]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+ 4*FDEC_STRIDE );
x264_copy_column8( h->mb.pic.p_fdec[0]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+12*FDEC_STRIDE );
int i8 = x264_scan8[0] - 1 - 1*8;
if( h->mb.i_neighbour & MB_TOPLEFT )
{
- int ir = 2*(s8x8*h->mb.i_mb_topleft_y + mb_x-1)+1+s8x8;
- int iv = 4*(s4x4*h->mb.i_mb_topleft_y + mb_x-1)+3+3*s4x4;
- if( h->mb.topleft_partition )
+ int ir = SLICE_MBAFF ? 2*(s8x8*h->mb.i_mb_topleft_y + mb_x-1)+1+s8x8 : top_8x8 - 1;
+ int iv = SLICE_MBAFF ? 4*(s4x4*h->mb.i_mb_topleft_y + mb_x-1)+3+3*s4x4 : top_4x4 - 1;
+ if( SLICE_MBAFF && h->mb.topleft_partition )
{
/* Take motion vector from the middle of macroblock instead of
* the bottom right as usual. */
i8 = x264_scan8[0] + 4 - 1*8;
if( h->mb.i_neighbour & MB_TOPRIGHT )
{
- h->mb.cache.ref[l][i8] = ref[2*(s8x8*h->mb.i_mb_topright_y + (mb_x+1))+s8x8];
- CP32( h->mb.cache.mv[l][i8], mv[4*(s4x4*h->mb.i_mb_topright_y + (mb_x+1))+3*s4x4] );
+ int ir = SLICE_MBAFF ? 2*(s8x8*h->mb.i_mb_topright_y + (mb_x+1))+s8x8 : top_8x8 + 2;
+ int iv = SLICE_MBAFF ? 4*(s4x4*h->mb.i_mb_topright_y + (mb_x+1))+3*s4x4 : top_4x4 + 4;
+ h->mb.cache.ref[l][i8] = ref[ir];
+ CP32( h->mb.cache.mv[l][i8], mv[iv] );
}
else
h->mb.cache.ref[l][i8] = -2;
i8 = x264_scan8[0] - 1;
if( h->mb.i_neighbour & MB_LEFT )
{
- h->mb.cache.ref[l][i8+0*8] = ref[h->mb.left_b8[0] + 1 + s8x8*left_index_table->ref[0]];
- h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[0] + 1 + s8x8*left_index_table->ref[1]];
- h->mb.cache.ref[l][i8+2*8] = ref[h->mb.left_b8[1] + 1 + s8x8*left_index_table->ref[2]];
- h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[1] + 1 + s8x8*left_index_table->ref[3]];
-
- CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[0]] );
- CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[1]] );
- CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[2]] );
- CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[3]] );
+ if( SLICE_MBAFF )
+ {
+ h->mb.cache.ref[l][i8+0*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[0]];
+ h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[1]];
+ h->mb.cache.ref[l][i8+2*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[2]];
+ h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[3]];
+
+ CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[0]] );
+ CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[1]] );
+ CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[2]] );
+ CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[3]] );
+ }
+ else
+ {
+ const int ir = h->mb.i_b8_xy - 1;
+ const int iv = h->mb.i_b4_xy - 1;
+ h->mb.cache.ref[l][i8+0*8] =
+ h->mb.cache.ref[l][i8+1*8] = ref[ir + 0*s8x8];
+ h->mb.cache.ref[l][i8+2*8] =
+ h->mb.cache.ref[l][i8+3*8] = ref[ir + 1*s8x8];
+
+ CP32( h->mb.cache.mv[l][i8+0*8], mv[iv + 0*s4x4] );
+ CP32( h->mb.cache.mv[l][i8+1*8], mv[iv + 1*s4x4] );
+ CP32( h->mb.cache.mv[l][i8+2*8], mv[iv + 2*s4x4] );
+ CP32( h->mb.cache.mv[l][i8+3*8], mv[iv + 3*s4x4] );
+ }
}
else
{
* above diagram do not exist, but the entries d, e and f exist (in
* the macroblock to the left) then use those instead.
*/
- if( h->sh.b_mbaff && (h->mb.i_neighbour & MB_LEFT) )
+ if( SLICE_MBAFF && (h->mb.i_neighbour & MB_LEFT) )
{
- if( h->mb.b_interlaced && !h->mb.field[h->mb.i_mb_xy-1] )
+ if( MB_INTERLACED && !h->mb.field[h->mb.i_mb_xy-1] )
{
h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*0];
h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*1];
CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[1]+1)] );
CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[1] + 3 + s4x4*(left_index_table->mv[2]+1)] );
}
- else if( !h->mb.b_interlaced && h->mb.field[h->mb.i_mb_xy-1] )
+ else if( !MB_INTERLACED && h->mb.field[h->mb.i_mb_xy-1] )
{
// Looking at the bottom field so always take the bottom macroblock of the pair.
h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[0]];
else
M64( h->mb.cache.mvd[l][x264_scan8[0] - 8] ) = 0;
- if( h->mb.cache.ref[l][x264_scan8[0]-1] >= 0 )
+ if( h->mb.i_neighbour & MB_LEFT && (!SLICE_MBAFF || h->mb.cache.ref[l][x264_scan8[0]-1] >= 0) )
{
- CP16( h->mb.cache.mvd[l][x264_scan8[0 ] - 1], mvd[left[0]][left_index_table->intra[0]] );
- CP16( h->mb.cache.mvd[l][x264_scan8[2 ] - 1], mvd[left[0]][left_index_table->intra[1]] );
+ CP16( h->mb.cache.mvd[l][x264_scan8[0 ] - 1], mvd[left[LTOP]][left_index_table->intra[0]] );
+ CP16( h->mb.cache.mvd[l][x264_scan8[2 ] - 1], mvd[left[LTOP]][left_index_table->intra[1]] );
}
else
{
M16( h->mb.cache.mvd[l][x264_scan8[0]-1+0*8] ) = 0;
M16( h->mb.cache.mvd[l][x264_scan8[0]-1+1*8] ) = 0;
}
- if( h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >=0 )
+ if( h->mb.i_neighbour & MB_LEFT && (!SLICE_MBAFF || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >=0) )
{
- CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[1]][left_index_table->intra[2]] );
- CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[1]][left_index_table->intra[3]] );
+ CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[LBOT]][left_index_table->intra[2]] );
+ CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[LBOT]][left_index_table->intra[3]] );
}
else
{
/* If motion vectors are cached from frame macroblocks but this
* macroblock is a field macroblock then the motion vector must be
* halved. Similarly, motion vectors from field macroblocks are doubled. */
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
#define MAP_MVS\
if( FIELD_DIFFERENT(h->mb.i_mb_topleft_xy) )\
MAP_F2F(topright_mv, topright_ref, 2)\
}
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && !h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
}
}
- if( h->sh.b_mbaff && mb_x == 0 && !(mb_y&1) && mb_y > 0 )
+ if( SLICE_MBAFF && mb_x == 0 && !(mb_y&1) && mb_y > 0 )
h->mb.field_decoding_flag = h->mb.field[h->mb.i_mb_xy - h->mb.i_mb_stride];
/* Check whether skip here would cause decoder to predict interlace mode incorrectly.
* FIXME: It might be better to change the interlace type rather than forcing a skip to be non-skip. */
h->mb.b_allow_skip = 1;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
- if( h->mb.b_interlaced != h->mb.field_decoding_flag &&
+ if( MB_INTERLACED != h->mb.field_decoding_flag &&
h->mb.i_mb_prev_xy >= 0 && IS_SKIP(h->mb.type[h->mb.i_mb_prev_xy]) )
h->mb.b_allow_skip = 0;
if( (mb_y&1) && IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride]) )
{
if( h->mb.i_neighbour & MB_LEFT )
{
- if( h->mb.field[h->mb.i_mb_xy - 1] != h->mb.b_interlaced )
+ if( h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED )
h->mb.b_allow_skip = 0;
}
else if( h->mb.i_neighbour & MB_TOP )
{
- if( h->mb.field[h->mb.i_mb_top_xy] != h->mb.b_interlaced )
+ if( h->mb.field[h->mb.i_mb_top_xy] != MB_INTERLACED )
h->mb.b_allow_skip = 0;
}
else // Frame mb pair is predicted
{
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
h->mb.b_allow_skip = 0;
}
}
if( h->param.b_cabac )
{
int left_xy, top_xy;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
/* Neighbours here are calculated based on field_decoding_flag */
int mb_xy = mb_x + (mb_y&~1)*h->mb.i_mb_stride;
/* load skip */
if( h->sh.i_type == SLICE_TYPE_B )
{
- h->mb.bipred_weight = h->mb.bipred_weight_buf[h->mb.b_interlaced][h->mb.b_interlaced&(mb_y&1)];
- h->mb.dist_scale_factor = h->mb.dist_scale_factor_buf[h->mb.b_interlaced][h->mb.b_interlaced&(mb_y&1)];
+ h->mb.bipred_weight = h->mb.bipred_weight_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
+ h->mb.dist_scale_factor = h->mb.dist_scale_factor_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
if( h->param.b_cabac )
{
uint8_t skipbp;
x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
- skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[0]] : 0;
+ skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[LTOP]] : 0;
h->mb.cache.skip[x264_scan8[0] - 1] = (skipbp >> (1+(left_index_table->mv[0]&~1))) & 1;
- skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[1]] : 0;
+ skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[LBOT]] : 0;
h->mb.cache.skip[x264_scan8[8] - 1] = (skipbp >> (1+(left_index_table->mv[2]&~1))) & 1;
skipbp = (h->mb.i_neighbour & MB_TOP) ? h->mb.skipbp[top] : 0;
h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
h->mb.i_neighbour = 0;
h->mb.i_mb_xy = mb_y * h->mb.i_mb_stride + mb_x;
- h->mb.b_interlaced = h->param.b_interlaced && h->mb.field[h->mb.i_mb_xy];
- h->mb.i_mb_top_y = mb_y - (1 << h->mb.b_interlaced);
+ h->mb.b_interlaced = PARAM_INTERLACED && h->mb.field[h->mb.i_mb_xy];
+ h->mb.i_mb_top_y = mb_y - (1 << MB_INTERLACED);
h->mb.i_mb_top_xy = mb_x + h->mb.i_mb_stride*h->mb.i_mb_top_y;
h->mb.i_mb_left_xy[1] =
h->mb.i_mb_left_xy[0] = h->mb.i_mb_xy - 1;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
if( mb_y&1 )
{
- if( mb_x && h->mb.field[h->mb.i_mb_xy - 1] != h->mb.b_interlaced )
+ if( mb_x && h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED )
h->mb.i_mb_left_xy[0] -= h->mb.i_mb_stride;
}
else
{
- if( h->mb.i_mb_top_xy >= 0 && h->mb.b_interlaced && !h->mb.field[h->mb.i_mb_top_xy] )
+ if( h->mb.i_mb_top_xy >= 0 && MB_INTERLACED && !h->mb.field[h->mb.i_mb_top_xy] )
{
h->mb.i_mb_top_xy += h->mb.i_mb_stride;
h->mb.i_mb_top_y++;
}
- if( mb_x && h->mb.field[h->mb.i_mb_xy - 1] != h->mb.b_interlaced )
+ if( mb_x && h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED )
h->mb.i_mb_left_xy[1] += h->mb.i_mb_stride;
}
}
if( mb_x > 0 && (deblock_on_slice_edges ||
h->mb.slice_table[h->mb.i_mb_left_xy[0]] == h->mb.slice_table[h->mb.i_mb_xy]) )
h->mb.i_neighbour |= MB_LEFT;
- if( mb_y > h->mb.b_interlaced && (deblock_on_slice_edges
+ if( mb_y > MB_INTERLACED && (deblock_on_slice_edges
|| h->mb.slice_table[h->mb.i_mb_top_xy] == h->mb.slice_table[h->mb.i_mb_xy]) )
h->mb.i_neighbour |= MB_TOP;
}
{
memset( bs[0], 3, 4*4*sizeof(uint8_t) );
memset( bs[1], 3, 4*4*sizeof(uint8_t) );
- if( !h->sh.b_mbaff ) return;
+ if( !SLICE_MBAFF ) return;
}
/* If we have multiple slices and we're deblocking on slice edges, we
* have to reload neighbour data. */
- if( h->sh.b_mbaff || (h->sh.i_first_mb && h->sh.i_disable_deblocking_filter_idc != 2) )
+ if( SLICE_MBAFF || (h->sh.i_first_mb && h->sh.i_disable_deblocking_filter_idc != 2) )
{
int old_neighbour = h->mb.i_neighbour;
int mb_x = h->mb.i_mb_x;
int s4x4 = h->mb.i_b4_stride;
uint8_t (*nnz)[24] = h->mb.non_zero_count;
- x264_left_table_t *left_index_table = h->mb.left_index_table;
+ x264_left_table_t *left_index_table = SLICE_MBAFF ? h->mb.left_index_table : &left_indices[3];
if( h->mb.i_neighbour & MB_TOP )
CP32( &h->mb.cache.non_zero_count[x264_scan8[0] - 8], &nnz[h->mb.i_mb_top_xy][12] );
}
}
- int mvy_limit = 4 >> h->mb.b_interlaced;
+ int mvy_limit = 4 >> MB_INTERLACED;
h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
bs, mvy_limit, h->sh.i_type == SLICE_TYPE_B, h );
}
{
int w = i ? 8 : 16;
int i_stride = h->fdec->i_stride[i];
- int i_stride2 = i_stride << (b_mbaff && h->mb.b_interlaced);
- int i_pix_offset = (b_mbaff && h->mb.b_interlaced)
+ int i_stride2 = i_stride << (b_mbaff && MB_INTERLACED);
+ int i_pix_offset = (b_mbaff && MB_INTERLACED)
? 16 * mb_x + w * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
: 16 * mb_x + w * mb_y * i_stride;
if( i )
* For progressive mbs this is the bottom two rows, and for interlaced the
* bottom row of each field. We also store samples needed for the next
* mbpair in intra_border_backup[2]. */
- int backup_dst = !b_mbaff ? 0 : (mb_y&1) ? 1 : h->mb.b_interlaced ? 0 : 2;
+ int backup_dst = !b_mbaff ? 0 : (mb_y&1) ? 1 : MB_INTERLACED ? 0 : 2;
memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16 ], h->mb.pic.p_fdec[0]+FDEC_STRIDE*15, 16*sizeof(pixel) );
memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+FDEC_STRIDE*7, 8*sizeof(pixel) );
memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+FDEC_STRIDE*7, 8*sizeof(pixel) );
{
if( mb_y&1 )
{
- int backup_src = (h->mb.b_interlaced ? 7 : 14) * FDEC_STRIDE;
- backup_dst = h->mb.b_interlaced ? 2 : 0;
+ int backup_src = (MB_INTERLACED ? 7 : 14) * FDEC_STRIDE;
+ backup_dst = MB_INTERLACED ? 2 : 0;
memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16 ], h->mb.pic.p_fdec[0]+backup_src, 16*sizeof(pixel) );
- backup_src = (h->mb.b_interlaced ? 3 : 6) * FDEC_STRIDE;
+ backup_src = (MB_INTERLACED ? 3 : 6) * FDEC_STRIDE;
memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+backup_src, 8*sizeof(pixel) );
memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+backup_src, 8*sizeof(pixel) );
}
int8_t *i4x4 = h->mb.intra4x4_pred_mode[i_mb_xy];
uint8_t *nnz = h->mb.non_zero_count[i_mb_xy];
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
x264_macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 1 );
x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 1 );
void x264_macroblock_bipred_init( x264_t *h )
{
- for( int mbfield = 0; mbfield <= h->sh.b_mbaff; mbfield++ )
- for( int field = 0; field <= h->sh.b_mbaff; field++ )
+ for( int mbfield = 0; mbfield <= SLICE_MBAFF; mbfield++ )
+ for( int field = 0; field <= SLICE_MBAFF; field++ )
for( int i_ref0 = 0; i_ref0 < (h->i_ref[0]<<mbfield); i_ref0++ )
{
x264_frame_t *l0 = h->fref[0][i_ref0>>mbfield];
void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
- const int b_interlaced = h->param.b_interlaced;
+ const int b_interlaced = PARAM_INTERLACED;
int stride = frame->i_stride[0];
const int width = frame->i_width[0];
int start = mb_y*16 - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
- int height = (b_end ? frame->i_lines[0] + 16*h->param.b_interlaced : (mb_y+b_interlaced)*16) + 8;
+ int height = (b_end ? frame->i_lines[0] + 16*PARAM_INTERLACED : (mb_y+b_interlaced)*16) + 8;
int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd
if( mb_y & b_interlaced )
i_refc = h->mb.cache.ref[i_list][i8 - 8 - 1];
mv_c = h->mb.cache.mv[i_list][i8 - 8 - 1];
- if( h->sh.b_mbaff
+ if( SLICE_MBAFF
&& h->mb.cache.ref[i_list][x264_scan8[0]-1] != -2
- && h->mb.b_interlaced != h->mb.field[h->mb.i_mb_left_xy[0]] )
+ && MB_INTERLACED != h->mb.field[h->mb.i_mb_left_xy[0]] )
{
if( idx == 2 )
{
int mb_xy = h->mb.i_mb_xy;
int type_col[2] = { h->fref[1][0]->mb_type[mb_xy], h->fref[1][0]->mb_type[mb_xy] };
int partition_col[2] = { h->fref[1][0]->mb_partition[mb_xy], h->fref[1][0]->mb_partition[mb_xy] };
- int preshift = h->mb.b_interlaced;
- int postshift = h->mb.b_interlaced;
+ int preshift = MB_INTERLACED;
+ int postshift = MB_INTERLACED;
int offset = 1;
int yshift = 1;
h->mb.i_partition = partition_col[0];
- if( h->param.b_interlaced && h->fref[1][0]->field[mb_xy] != h->mb.b_interlaced )
+ if( PARAM_INTERLACED && h->fref[1][0]->field[mb_xy] != MB_INTERLACED )
{
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
mb_y = h->mb.i_mb_y&~1;
mb_xy = mb_x + h->mb.i_mb_stride * mb_y;
}
else
{
- int cur_poc = h->fdec->i_poc + h->fdec->i_delta_poc[h->mb.b_interlaced&h->mb.i_mb_y&1];
+ int cur_poc = h->fdec->i_poc + h->fdec->i_delta_poc[MB_INTERLACED&h->mb.i_mb_y&1];
int col_parity = abs(h->fref[1][0]->i_poc + h->fref[1][0]->i_delta_poc[0] - cur_poc)
>= abs(h->fref[1][0]->i_poc + h->fref[1][0]->i_delta_poc[1] - cur_poc);
mb_y = (h->mb.i_mb_y&~1) + col_parity;
{
int x8 = i8&1;
int y8 = i8>>1;
- int ypart = (h->sh.b_mbaff && h->fref[1][0]->field[mb_xy] != h->mb.b_interlaced) ?
- h->mb.b_interlaced ? y8*6 : 2*(h->mb.i_mb_y&1) + y8 :
+ int ypart = (SLICE_MBAFF && h->fref[1][0]->field[mb_xy] != MB_INTERLACED) ?
+ MB_INTERLACED ? y8*6 : 2*(h->mb.i_mb_y&1) + y8 :
3*y8;
if( IS_INTRA( type_col[y8] ) )
int i_part_8x8 = i_mb_8x8 + x8 + (ypart>>1) * h->mb.i_b8_stride;
int i_ref1_ref = h->fref[1][0]->ref[0][i_part_8x8];
- int i_ref = (map_col_to_list0(i_ref1_ref>>preshift) << postshift) + (offset&i_ref1_ref&h->mb.b_interlaced);
+ int i_ref = (map_col_to_list0(i_ref1_ref>>preshift) << postshift) + (offset&i_ref1_ref&MB_INTERLACED);
if( i_ref >= 0 )
{
int type_col[2] = { h->fref[1][0]->mb_type[mb_xy], h->fref[1][0]->mb_type[mb_xy] };
int partition_col[2] = { h->fref[1][0]->mb_partition[mb_xy], h->fref[1][0]->mb_partition[mb_xy] };
h->mb.i_partition = partition_col[0];
- if( h->sh.b_mbaff && h->fref[1][0]->field[mb_xy] != h->mb.b_interlaced )
+ if( SLICE_MBAFF && h->fref[1][0]->field[mb_xy] != MB_INTERLACED )
{
- if( h->mb.b_interlaced )
+ if( MB_INTERLACED )
{
mb_y = h->mb.i_mb_y&~1;
mb_xy = mb_x + h->mb.i_mb_stride * mb_y;
}
else
{
- int cur_poc = h->fdec->i_poc + h->fdec->i_delta_poc[h->mb.b_interlaced&h->mb.i_mb_y&1];
+ int cur_poc = h->fdec->i_poc + h->fdec->i_delta_poc[MB_INTERLACED&h->mb.i_mb_y&1];
int col_parity = abs(h->fref[1][0]->i_poc + h->fref[1][0]->i_delta_poc[0] - cur_poc)
>= abs(h->fref[1][0]->i_poc + h->fref[1][0]->i_delta_poc[1] - cur_poc);
mb_y = (h->mb.i_mb_y&~1) + col_parity;
{
const int x8 = i8&1;
const int y8 = i8>>1;
- int ypart = (h->sh.b_mbaff && h->fref[1][0]->field[mb_xy] != h->mb.b_interlaced) ?
- h->mb.b_interlaced ? y8*6 : 2*(h->mb.i_mb_y&1) + y8 :
+ int ypart = (SLICE_MBAFF && h->fref[1][0]->field[mb_xy] != MB_INTERLACED) ?
+ MB_INTERLACED ? y8*6 : 2*(h->mb.i_mb_y&1) + y8 :
3*y8;
int o8 = x8 + (ypart>>1) * h->mb.i_b8_stride;
int o4 = 3*x8 + ypart * h->mb.i_b4_stride;
x264_frame_t *l0 = h->fref[0][0];
int field = h->mb.i_mb_y&1;
int curpoc = h->fdec->i_poc + h->fdec->i_delta_poc[field];
- int refpoc = h->fref[i_list][i_ref>>h->sh.b_mbaff]->i_poc;
+ int refpoc = h->fref[i_list][i_ref>>SLICE_MBAFF]->i_poc;
refpoc += l0->i_delta_poc[field^(i_ref&1)];
#define SET_TMVP( dx, dy ) \
{ \
int mb_index = h->mb.i_mb_xy + dx + dy*h->mb.i_mb_stride; \
- int scale = (curpoc - refpoc) * l0->inv_ref_poc[h->mb.b_interlaced&field]; \
+ int scale = (curpoc - refpoc) * l0->inv_ref_poc[MB_INTERLACED&field]; \
mvc[i][0] = (l0->mv16x16[mb_index][0]*scale + 128) >> 8; \
mvc[i][1] = (l0->mv16x16[mb_index][1]*scale + 128) >> 8; \
i++; \
--enable-win32thread use win32threads (windows only)
--disable-swscale disables swscale support
--disable-asm disables platform-specific assembly optimizations
+ --disable-interlaced disables interlaced encoding support
--enable-debug adds -g, doesn't strip
--enable-gprof adds -pg, doesn't strip
--enable-visualize enables visualization (X11 only)
thread="auto"
swscale="auto"
asm="auto"
+interlaced="yes"
debug="no"
gprof="no"
pic="no"
EXE=""
# list of all preprocessor HAVE values we can define
-CONFIG_HAVE="MALLOC_H ALTIVEC ALTIVEC_H MMX ARMV6 ARMV6T2 NEON BEOSTHREAD POSIXTHREAD WIN32THREAD THREAD LOG2F VISUALIZE SWSCALE LAVF FFMS GPAC GF_MALLOC AVS GPL VECTOREXT"
+CONFIG_HAVE="MALLOC_H ALTIVEC ALTIVEC_H MMX ARMV6 ARMV6T2 NEON BEOSTHREAD POSIXTHREAD WIN32THREAD THREAD LOG2F VISUALIZE SWSCALE LAVF FFMS GPAC GF_MALLOC AVS GPL VECTOREXT INTERLACED"
# parse options
--disable-asm)
asm="no"
;;
+ --disable-interlaced)
+ interlaced="no"
+ ;;
--disable-avs)
avs="no"
;;
[ $gpl = yes ] && define HAVE_GPL && x264_gpl=1 || x264_gpl=0
+[ $interlaced = yes ] && define HAVE_INTERLACED && x264_interlaced=1 || x264_interlaced=0
+
#define undefined vars as 0
for var in $CONFIG_HAVE; do
grep -q "HAVE_$var 1" config.h || define HAVE_$var 0
# generate exported config file
cat > x264_config.h << EOF
-#define X264_BIT_DEPTH $bit_depth
-#define X264_GPL $x264_gpl
+#define X264_BIT_DEPTH $bit_depth
+#define X264_GPL $x264_gpl
+#define X264_INTERLACED $x264_interlaced
EOF
# generate config files
Platform: $ARCH
System: $SYS
asm: $asm
+interlaced: $interlaced
avs: $avs
lavf: $lavf
ffms: $ffms
{
x264_frame_t *frame = h->fref[0][j];
int width = frame->i_width[0] + 2*PADH;
- int i_padv = PADV << h->param.b_interlaced;
+ int i_padv = PADV << PARAM_INTERLACED;
int offset, height;
pixel *src = frame->filtered[0] - frame->i_stride[0]*i_padv - PADH;
height = X264_MIN( 16 + end + i_padv, h->fref[0][j]->i_lines[0] + i_padv*2 ) - h->fenc->i_lines_weighted;
}
h->mb.mv_min_fpel[0] = (h->mb.mv_min_spel[0]>>2) + i_fpel_border;
h->mb.mv_max_fpel[0] = (h->mb.mv_max_spel[0]>>2) - i_fpel_border;
- if( h->mb.i_mb_x == 0 && !(h->mb.i_mb_y & h->param.b_interlaced) )
+ if( h->mb.i_mb_x == 0 && !(h->mb.i_mb_y & PARAM_INTERLACED) )
{
- int mb_y = h->mb.i_mb_y >> h->sh.b_mbaff;
+ int mb_y = h->mb.i_mb_y >> SLICE_MBAFF;
int thread_mvy_range = i_fmv_range;
if( h->i_thread_frames > 1 )
{
- int pix_y = (h->mb.i_mb_y | h->param.b_interlaced) * 16;
+ int pix_y = (h->mb.i_mb_y | PARAM_INTERLACED) * 16;
int thresh = pix_y + h->param.analyse.i_mv_range_thread;
for( int i = (h->sh.i_type == SLICE_TYPE_B); i >= 0; i-- )
for( int j = 0; j < h->i_ref[i]; j++ )
if( h->param.b_deterministic )
thread_mvy_range = h->param.analyse.i_mv_range_thread;
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
thread_mvy_range >>= 1;
x264_analyse_weight_frame( h, pix_y + thread_mvy_range );
}
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
/* 0 == top progressive, 1 == bot progressive, 2 == interlaced */
for( int i = 0; i < 3; i++ )
h->mb.mv_max_fpel[1] = (h->mb.mv_max_spel[1]>>2) - i_fpel_border;
}
}
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
- int i = h->mb.b_interlaced ? 2 : h->mb.i_mb_y&1;
+ int i = MB_INTERLACED ? 2 : h->mb.i_mb_y&1;
h->mb.mv_min[1] = h->mb.mv_miny_row[i];
h->mb.mv_max[1] = h->mb.mv_maxy_row[i];
h->mb.mv_min_spel[1] = h->mb.mv_miny_spel_row[i];
const int or = 8*(i8x8&1) + 2*(i8x8&2)*i_stride;
const int oe = 4*(i8x8&1) + 2*(i8x8&2)*FENC_STRIDE;
const int i_ref = a->l0.me8x8[i8x8].i_ref;
- const int mvy_offset = h->mb.b_interlaced & i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mvy_offset = MB_INTERLACED & i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
x264_weight_t *weight = h->sh.weight[i_ref];
// FIXME weight can be done on 4x4 blocks even if mc is smaller
#define COST_BI_CHROMA( m0, m1, width, height ) \
{ \
- l0_mvy_offset = h->mb.b_interlaced & m0.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \
- l1_mvy_offset = h->mb.b_interlaced & m1.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \
+ l0_mvy_offset = MB_INTERLACED & m0.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \
+ l1_mvy_offset = MB_INTERLACED & m1.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \
h->mc.mc_chroma( pix[0], pix[1], 8, m0.p_fref[4], m0.i_stride[1], m0.mv[0], m0.mv[1] + l0_mvy_offset, width, height ); \
h->mc.mc_chroma( pix[2], pix[3], 8, m1.p_fref[4], m1.i_stride[1], m1.mv[0], m1.mv[1] + l1_mvy_offset, width, height ); \
h->mc.avg[i_pixel+3]( bi[0], 8, pix[0], 8, pix[2], 8, h->mb.bipred_weight[m0.i_ref][m1.i_ref] ); \
ALIGNED_ARRAY_16( pixel, pixuv, [2],[8*FENC_STRIDE] );
ALIGNED_ARRAY_16( pixel, bi, [8*FENC_STRIDE] );
- if( h->mb.b_interlaced & a->l0.bi16x16.i_ref )
+ if( MB_INTERLACED & a->l0.bi16x16.i_ref )
{
- int l0_mvy_offset = h->mb.b_interlaced & a->l0.bi16x16.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ int l0_mvy_offset = MB_INTERLACED & a->l0.bi16x16.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
h->mc.mc_chroma( pixuv[0], pixuv[0]+8, FENC_STRIDE, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4],
h->mb.pic.i_stride[1], 0, 0 + l0_mvy_offset, 8, 8 );
}
else
h->mc.load_deinterleave_8x8x2_fenc( pixuv[0], h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4], h->mb.pic.i_stride[1] );
- if( h->mb.b_interlaced & a->l1.bi16x16.i_ref )
+ if( MB_INTERLACED & a->l1.bi16x16.i_ref )
{
- int l1_mvy_offset = h->mb.b_interlaced & a->l1.bi16x16.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ int l1_mvy_offset = MB_INTERLACED & a->l1.bi16x16.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
h->mc.mc_chroma( pixuv[1], pixuv[1]+8, FENC_STRIDE, h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][4],
h->mb.pic.i_stride[1], 0, 0 + l1_mvy_offset, 8, 8 );
}
int ref = h->mb.cache.ref[l][x264_scan8[0]];
if( ref < 0 )
continue;
- completed = h->fref[l][ ref >> h->mb.b_interlaced ]->orig->i_lines_completed;
- if( (h->mb.cache.mv[l][x264_scan8[15]][1] >> (2 - h->mb.b_interlaced)) + h->mb.i_mb_y*16 > completed )
+ completed = h->fref[l][ ref >> MB_INTERLACED ]->orig->i_lines_completed;
+ if( (h->mb.cache.mv[l][x264_scan8[15]][1] >> (2 - MB_INTERLACED)) + h->mb.i_mb_y*16 > completed )
{
x264_log( h, X264_LOG_WARNING, "internal error (MV out of thread range)\n");
x264_log( h, X264_LOG_DEBUG, "mb type: %d \n", h->mb.i_type);
&& h->mb.slice_table[h->mb.i_mb_top_mbpair_xy] == h->sh.i_first_mb
&& h->mb.field[h->mb.i_mb_top_mbpair_xy]);
- x264_cabac_encode_decision_noup( cb, 70 + ctx, h->mb.b_interlaced );
- h->mb.field_decoding_flag = h->mb.b_interlaced;
+ x264_cabac_encode_decision_noup( cb, 70 + ctx, MB_INTERLACED );
+ h->mb.field_decoding_flag = MB_INTERLACED;
}
#endif
const int i_mb_type = h->mb.i_type;
#if !RDO_SKIP_BS
- if( h->sh.b_mbaff &&
+ if( SLICE_MBAFF &&
(!(h->mb.i_mb_y & 1) || IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride])) )
{
x264_cabac_field_decoding_flag( h, cb );
#if !RDO_SKIP_BS
static void block_residual_write_cabac( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
{
- const uint8_t *sig_offset = significant_coeff_flag_offset_8x8[h->mb.b_interlaced];
- int ctx_sig = significant_coeff_flag_offset[h->mb.b_interlaced][ctx_block_cat];
- int ctx_last = last_coeff_flag_offset[h->mb.b_interlaced][ctx_block_cat];
+ const uint8_t *sig_offset = significant_coeff_flag_offset_8x8[MB_INTERLACED];
+ int ctx_sig = significant_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
+ int ctx_last = last_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
int ctx_level = coeff_abs_level_m1_offset[ctx_block_cat];
int coeff_idx = -1, node_ctx = 0, last;
int coeffs[64];
* for this (~0.001db) and the speed boost (~30%) is worth it. */
static void ALWAYS_INLINE block_residual_write_cabac_internal( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l, int b_8x8 )
{
- const uint8_t *sig_offset = significant_coeff_flag_offset_8x8[h->mb.b_interlaced];
- int ctx_sig = significant_coeff_flag_offset[h->mb.b_interlaced][ctx_block_cat];
- int ctx_last = last_coeff_flag_offset[h->mb.b_interlaced][ctx_block_cat];
+ const uint8_t *sig_offset = significant_coeff_flag_offset_8x8[MB_INTERLACED];
+ int ctx_sig = significant_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
+ int ctx_last = last_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
int ctx_level = coeff_abs_level_m1_offset[ctx_block_cat];
int last = h->quantf.coeff_last[ctx_block_cat]( l );
int coeff_abs = abs(l[last]);
int i_mb_pos_tex;
#endif
- if( h->sh.b_mbaff
+ if( SLICE_MBAFF
&& (!(h->mb.i_mb_y & 1) || IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride])) )
{
- bs_write1( s, h->mb.b_interlaced );
+ bs_write1( s, MB_INTERLACED );
}
#if !RDO_SKIP_BS
sh->i_frame_num = i_frame;
- sh->b_mbaff = h->param.b_interlaced;
+ sh->b_mbaff = PARAM_INTERLACED;
sh->b_field_pic = 0; /* no field support for now */
sh->b_bottom_field = 0; /* not yet used */
return -1;
}
+#if HAVE_INTERLACED
+ h->param.b_interlaced = !!PARAM_INTERLACED;
+#else
+ if( h->param.b_interlaced )
+ {
+ x264_log( h, X264_LOG_ERROR, "not compiled with interlaced support\n" );
+ return -1;
+ }
+#endif
+
if( (h->param.crop_rect.i_left + h->param.crop_rect.i_right ) >= h->param.i_width ||
(h->param.crop_rect.i_top + h->param.crop_rect.i_bottom) >= h->param.i_height )
{
h->param.i_slice_max_size = X264_MAX( h->param.i_slice_max_size, 0 );
h->param.i_slice_max_mbs = X264_MAX( h->param.i_slice_max_mbs, 0 );
- h->param.b_interlaced = !!h->param.b_interlaced;
- int max_slices = (h->param.i_height+((16<<h->param.b_interlaced)-1))/(16<<h->param.b_interlaced);
+ int max_slices = (h->param.i_height+((16<<PARAM_INTERLACED)-1))/(16<<PARAM_INTERLACED);
if( h->param.b_sliced_threads )
h->param.i_slice_count = x264_clip3( h->param.i_threads, 0, max_slices );
else
}
}
if( h->param.analyse.i_mv_range <= 0 )
- h->param.analyse.i_mv_range = l->mv_range >> h->param.b_interlaced;
+ h->param.analyse.i_mv_range = l->mv_range >> PARAM_INTERLACED;
else
- h->param.analyse.i_mv_range = x264_clip3(h->param.analyse.i_mv_range, 32, 512 >> h->param.b_interlaced);
+ h->param.analyse.i_mv_range = x264_clip3(h->param.analyse.i_mv_range, 32, 512 >> PARAM_INTERLACED);
}
h->param.analyse.i_weighted_pred = x264_clip3( h->param.analyse.i_weighted_pred, X264_WEIGHTP_NONE, X264_WEIGHTP_SMART );
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
if( h->param.analyse.i_me_method >= X264_ME_ESA )
{
h->param.i_sps_id &= 31;
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
h->param.b_pic_struct = 1;
h->param.i_nal_hrd = x264_clip3( h->param.i_nal_hrd, X264_NAL_HRD_NONE, X264_NAL_HRD_CBR );
/* Adaptive MBAFF and subme 0 are not supported as we require halving motion
* vectors during prediction, resulting in hpel mvs.
* The chosen solution is to make MBAFF non-adaptive in this case. */
- h->mb.b_adaptive_mbaff = h->param.b_interlaced && h->param.analyse.i_subpel_refine;
+ h->mb.b_adaptive_mbaff = PARAM_INTERLACED && h->param.analyse.i_subpel_refine;
/* Init frames. */
if( h->param.i_bframe_adaptive == X264_B_ADAPT_TRELLIS && !h->param.rc.b_stat_read )
x264_pixel_init( h->param.cpu, &h->pixf );
x264_dct_init( h->param.cpu, &h->dctf );
x264_zigzag_init( h->param.cpu, &h->zigzagf_progressive, &h->zigzagf_interlaced );
- memcpy( &h->zigzagf, h->param.b_interlaced ? &h->zigzagf_interlaced : &h->zigzagf_progressive, sizeof(h->zigzagf) );
+ memcpy( &h->zigzagf, PARAM_INTERLACED ? &h->zigzagf_interlaced : &h->zigzagf_progressive, sizeof(h->zigzagf) );
x264_mc_init( h->param.cpu, &h->mc );
x264_quant_init( h, h->param.cpu, &h->quantf );
- x264_deblock_init( h->param.cpu, &h->loopf, h->param.b_interlaced );
+ x264_deblock_init( h->param.cpu, &h->loopf, PARAM_INTERLACED );
x264_bitstream_init( h->param.cpu, &h->bsf );
x264_dct_init_weights();
// and duplicates of that frame.
h->fenc->i_lines_weighted = 0;
- for( int i_ref = 0; i_ref < (h->i_ref[0] << h->sh.b_mbaff); i_ref++ )
+ for( int i_ref = 0; i_ref < (h->i_ref[0] << SLICE_MBAFF); i_ref++ )
for( int i = 0; i < 3; i++ )
h->sh.weight[i_ref][i].weightfn = NULL;
if( h->sh.i_type != SLICE_TYPE_P || h->param.analyse.i_weighted_pred <= 0 )
return;
- int i_padv = PADV << h->param.b_interlaced;
+ int i_padv = PADV << PARAM_INTERLACED;
int denom = -1;
int weightplane[2] = { 0, 0 };
int buffer_next = 0;
int b_deblock = h->sh.i_disable_deblocking_filter_idc != 1;
int b_end = mb_y == h->i_threadslice_end;
int b_measure_quality = 1;
- int min_y = mb_y - (1 << h->sh.b_mbaff);
+ int min_y = mb_y - (1 << SLICE_MBAFF);
int b_start = min_y == h->i_threadslice_start;
/* Even in interlaced mode, deblocking never modifies more than 4 pixels
* above each MB, as bS=4 doesn't happen for the top of interlaced mbpairs. */
b_deblock = 0; /* We already deblocked on the inloop pass. */
b_measure_quality = 0; /* We already measured quality on the inloop pass. */
}
- if( mb_y & h->sh.b_mbaff )
+ if( mb_y & SLICE_MBAFF )
return;
if( min_y < h->i_threadslice_start )
return;
if( b_deblock )
- for( int y = min_y; y < mb_y; y += (1 << h->sh.b_mbaff) )
+ for( int y = min_y; y < mb_y; y += (1 << SLICE_MBAFF) )
x264_frame_deblock_row( h, y );
/* FIXME: Prediction requires different borders for interlaced/progressive mc,
* but the actual image data is equivalent. For now, maintain this
* consistency by copying deblocked pixels between planes. */
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
for( int p = 0; p < 2; p++ )
for( int i = minpix_y>>p; i < maxpix_y>>p; i++ )
memcpy( h->fdec->plane_fld[p] + i*h->fdec->i_stride[p],
}
}
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
for( int i = 0; i < 2; i++ )
{
XCHG( pixel *, h->intra_border_backup[0][i], h->intra_border_backup[3][i] );
}
if( h->i_thread_frames > 1 && h->fdec->b_kept_as_ref )
- x264_frame_cond_broadcast( h->fdec, mb_y*16 + (b_end ? 10000 : -(X264_THREAD_HEIGHT << h->sh.b_mbaff)) );
+ x264_frame_cond_broadcast( h->fdec, mb_y*16 + (b_end ? 10000 : -(X264_THREAD_HEIGHT << SLICE_MBAFF)) );
if( b_measure_quality )
{
if( h->sps->i_poc_type == 0 )
{
h->sh.i_poc = h->fdec->i_poc;
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
h->sh.i_delta_poc_bottom = h->param.b_tff ? 1 : -1;
h->sh.i_poc += h->sh.i_delta_poc_bottom == -1;
if( x264_bitstream_check_buffer( h ) )
return -1;
- if( back_up_bitstream && (!h->sh.b_mbaff || (i_mb_y&1) == 0) )
+ if( back_up_bitstream && (!SLICE_MBAFF || (i_mb_y&1) == 0) )
{
mv_bits_bak = h->stat.frame.i_mv_bits;
tex_bits_bak = h->stat.frame.i_tex_bits;
if( i_mb_x == 0 && !h->mb.b_reencode_mb )
x264_fdec_filter_row( h, i_mb_y, 1 );
- if( h->param.b_interlaced )
+ if( PARAM_INTERLACED )
{
if( h->mb.b_adaptive_mbaff )
{
int stride = h->fenc->i_stride[0];
pixel *fenc = h->fenc->plane[0] + 16 * (i_mb_x + i_mb_y * stride);
h->mb.b_interlaced = x264_field_vsad( h, fenc, stride );
- memcpy( &h->zigzagf, h->mb.b_interlaced ? &h->zigzagf_interlaced : &h->zigzagf_progressive, sizeof(h->zigzagf) );
+ memcpy( &h->zigzagf, MB_INTERLACED ? &h->zigzagf_interlaced : &h->zigzagf_progressive, sizeof(h->zigzagf) );
}
}
- h->mb.field[mb_xy] = h->mb.b_interlaced;
+ h->mb.field[mb_xy] = MB_INTERLACED;
}
/* load cache */
if( h->param.b_cabac )
{
- if( mb_xy > h->sh.i_first_mb && !(h->sh.b_mbaff && (i_mb_y&1)) )
+ if( mb_xy > h->sh.i_first_mb && !(SLICE_MBAFF && (i_mb_y&1)) )
x264_cabac_encode_terminal( &h->cabac );
if( IS_SKIP( h->mb.i_type ) )
i_skip = i_skip_bak;
}
h->mb.b_reencode_mb = 1;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
// set to bottom of previous mbpair
if( i_mb_x )
if( b_deblock )
x264_macroblock_deblock_strength( h );
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
/* update ratecontrol per-mbpair in MBAFF */
if( i_mb_y&1 )
if( mb_xy == h->sh.i_last_mb )
break;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
i_mb_x += i_mb_y & 1;
i_mb_y ^= i_mb_x < h->mb.i_mb_width;
/* init stats */
memset( &h->stat.frame, 0, sizeof(h->stat.frame) );
h->mb.b_reencode_mb = 0;
- while( h->sh.i_first_mb + h->sh.b_mbaff*h->mb.i_mb_stride <= last_thread_mb )
+ while( h->sh.i_first_mb + SLICE_MBAFF*h->mb.i_mb_stride <= last_thread_mb )
{
h->sh.i_last_mb = last_thread_mb;
if( h->param.i_slice_max_mbs )
{
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
// convert first to mbaff form, add slice-max-mbs, then convert back to normal form
int last_mbaff = 2*(h->sh.i_first_mb % h->mb.i_mb_width)
}
else if( h->param.i_slice_count && !h->param.b_sliced_threads )
{
- int height = h->mb.i_mb_height >> h->param.b_interlaced;
- int width = h->mb.i_mb_width << h->param.b_interlaced;
+ int height = h->mb.i_mb_height >> PARAM_INTERLACED;
+ int width = h->mb.i_mb_width << PARAM_INTERLACED;
i_slice_num++;
h->sh.i_last_mb = (height * i_slice_num + h->param.i_slice_count/2) / h->param.i_slice_count * width - 1;
}
return (void *)-1;
h->sh.i_first_mb = h->sh.i_last_mb + 1;
// if i_first_mb is not the last mb in a row then go to the next mb in MBAFF order
- if( h->sh.b_mbaff && h->sh.i_first_mb % h->mb.i_mb_width )
+ if( SLICE_MBAFF && h->sh.i_first_mb % h->mb.i_mb_width )
h->sh.i_first_mb -= h->mb.i_mb_stride;
}
t->param = h->param;
memcpy( &t->i_frame, &h->i_frame, offsetof(x264_t, rc) - offsetof(x264_t, i_frame) );
}
- int height = h->mb.i_mb_height >> h->param.b_interlaced;
- t->i_threadslice_start = ((height * i + h->param.i_slice_count/2) / h->param.i_threads) << h->param.b_interlaced;
- t->i_threadslice_end = ((height * (i+1) + h->param.i_slice_count/2) / h->param.i_threads) << h->param.b_interlaced;
+ int height = h->mb.i_mb_height >> PARAM_INTERLACED;
+ t->i_threadslice_start = ((height * i + h->param.i_slice_count/2) / h->param.i_threads) << PARAM_INTERLACED;
+ t->i_threadslice_end = ((height * (i+1) + h->param.i_slice_count/2) / h->param.i_threads) << PARAM_INTERLACED;
t->sh.i_first_mb = t->i_threadslice_start * h->mb.i_mb_width;
t->sh.i_last_mb = t->i_threadslice_end * h->mb.i_mb_width - 1;
}
for( int i = 1; i < h->param.i_threads; i++ )
{
x264_fdec_filter_row( h->thread[i], h->thread[i]->i_threadslice_start + 1, 0 );
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
x264_fdec_filter_row( h->thread[i], h->thread[i]->i_threadslice_start + 2, 0 );
}
if( fenc->i_pic_struct == PIC_STRUCT_AUTO )
{
+#if HAVE_INTERLACED
int b_interlaced = fenc->param ? fenc->param->b_interlaced : h->param.b_interlaced;
+#else
+ int b_interlaced = 0;
+#endif
if( b_interlaced )
{
int b_tff = fenc->param ? fenc->param->b_tff : h->param.b_tff;
void x264_predict_lossless_4x4( x264_t *h, pixel *p_dst, int idx, int i_mode )
{
- int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
+ int stride = h->fenc->i_stride[0] << MB_INTERLACED;
pixel *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;
if( i_mode == I_PRED_4x4_V )
void x264_predict_lossless_8x8( x264_t *h, pixel *p_dst, int idx, int i_mode, pixel edge[33] )
{
- int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
+ int stride = h->fenc->i_stride[0] << MB_INTERLACED;
pixel *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;
if( i_mode == I_PRED_8x8_V )
void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
- int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
+ int stride = h->fenc->i_stride[0] << MB_INTERLACED;
if( i_mode == I_PRED_16x16_V )
h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
else if( i_mode == I_PRED_16x16_H )
const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
const int i_pixel = m->i_pixel;
const int b_chroma_me = h->mb.b_chroma_me && i_pixel <= PIXEL_8x8;
- const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mvy_offset = MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
ALIGNED_ARRAY_16( pixel, pix,[64*18] ); // really 17x17x2, but round up for alignment
pixel *pixv = &h->mb.pic.p_fdec[2][4*x + 4*y*FDEC_STRIDE];
int ref0 = h->mb.cache.ref[0][s8];
int ref1 = h->mb.cache.ref[1][s8];
- const int mv0y_offset = h->mb.b_interlaced & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
- const int mv1y_offset = h->mb.b_interlaced & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mv0y_offset = MB_INTERLACED & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mv1y_offset = MB_INTERLACED & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
int stride[2][9];
int bm0x = m0->mv[0];
int bm0y = m0->mv[1];
const int bw = x264_pixel_size[m->i_pixel].w;
const int bh = x264_pixel_size[m->i_pixel].h;
const int i_pixel = m->i_pixel;
- const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mvy_offset = MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
uint64_t bcost = COST_MAX64;
int bmx = m->mv[0];
}
else
{
- var = ac_energy_plane( h, mb_x, mb_y, frame, 0, h->param.b_interlaced, 1 );
- var += ac_energy_plane( h, mb_x, mb_y, frame, 1, h->param.b_interlaced, 1 );
+ var = ac_energy_plane( h, mb_x, mb_y, frame, 0, PARAM_INTERLACED, 1 );
+ var += ac_energy_plane( h, mb_x, mb_y, frame, 1, PARAM_INTERLACED, 1 );
}
x264_emms();
return var;
static double row_bits_so_far( x264_t *h, int y )
{
double bits = 0;
- for( int i = h->i_threadslice_start+h->sh.b_mbaff; i <= y; i+=(h->sh.b_mbaff+1) )
+ for( int i = h->i_threadslice_start+SLICE_MBAFF; i <= y; i+=(SLICE_MBAFF+1) )
bits += h->fdec->i_row_bits[i];
return bits;
}
static double predict_row_size_sum( x264_t *h, int y, double qp )
{
double bits = row_bits_so_far(h, y);
- for( int i = y+1+h->sh.b_mbaff; i < h->i_threadslice_end; i+=(1+h->sh.b_mbaff) )
+ for( int i = y+1+SLICE_MBAFF; i < h->i_threadslice_end; i+=(1+SLICE_MBAFF) )
bits += predict_row_size( h, i, qp );
return bits;
}
x264_emms();
h->fdec->i_row_bits[y] += bits;
- if( h->sh.b_mbaff )
+ if( SLICE_MBAFF )
{
rc->qpa_rc += rc->qpm*2.0f;
rc->qpa_aq += h->mb.i_qp + h->mb.i_last_qp;
/* B-frames shouldn't use lower QP than their reference frames. */
if( h->sh.i_type == SLICE_TYPE_B )
{
- qp_min = X264_MAX( qp_min, X264_MAX( h->fref[0][0]->f_row_qp[y+1+h->sh.b_mbaff], h->fref[1][0]->f_row_qp[y+1+h->sh.b_mbaff] ) );
+ qp_min = X264_MAX( qp_min, X264_MAX( h->fref[0][0]->f_row_qp[y+1+SLICE_MBAFF], h->fref[1][0]->f_row_qp[y+1+SLICE_MBAFF] ) );
rc->qpm = X264_MAX( rc->qpm, qp_min );
}
for( int i = 0; i < (use_old_stats ? rc->rce->refs : h->i_ref[0]); i++ )
{
int refcount = use_old_stats ? rc->rce->refcount[i]
- : h->param.b_interlaced ? h->stat.frame.i_mb_count_ref[0][i*2]
+ : PARAM_INTERLACED ? h->stat.frame.i_mb_count_ref[0][i*2]
+ h->stat.frame.i_mb_count_ref[0][i*2+1]
: h->stat.frame.i_mb_count_ref[0][i];
if( fprintf( rc->p_stat_file_out, "%d ", refcount ) < 0 )
/* Really should be 15 bytes, but rounding up a byte saves some
* instructions and is faster, and copying extra data doesn't hurt. */
- COPY_CABAC_PART( significant_coeff_flag_offset[h->mb.b_interlaced][cat], 16 );
- COPY_CABAC_PART( last_coeff_flag_offset[h->mb.b_interlaced][cat], 16 );
+ COPY_CABAC_PART( significant_coeff_flag_offset[MB_INTERLACED][cat], 16 );
+ COPY_CABAC_PART( last_coeff_flag_offset[MB_INTERLACED][cat], 16 );
COPY_CABAC_PART( coeff_abs_level_m1_offset[cat], 10 );
cb->f8_bits_encoded = 0;
}
trellis_node_t *nodes_cur = nodes[0];
trellis_node_t *nodes_prev = nodes[1];
trellis_node_t *bnode;
- const int b_interlaced = h->mb.b_interlaced;
+ const int b_interlaced = MB_INTERLACED;
uint8_t *cabac_state_sig = &h->cabac.state[ significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
uint8_t *cabac_state_last = &h->cabac.state[ last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
const int f = 1 << 15; // no deadzone
if( h->param.b_cabac )
return quant_trellis_cabac( h, dct,
h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- NULL, ctx_block_cat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[h->mb.b_interlaced],
+ NULL, ctx_block_cat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[MB_INTERLACED],
ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, 1, ctx_block_cat==DCT_CHROMA_DC ? 4 : 16, 0 );
return quant_trellis_cavlc( h, dct,
h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- NULL, ctx_block_cat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[h->mb.b_interlaced],
+ NULL, ctx_block_cat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[MB_INTERLACED],
ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, 1, ctx_block_cat==DCT_CHROMA_DC ? 4 : 16, 0, 0 );
}
if( h->param.b_cabac )
return quant_trellis_cabac( h, dct,
h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- x264_dct4_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan4[h->mb.b_interlaced],
+ x264_dct4_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan4[MB_INTERLACED],
ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, 0, 16, idx );
return quant_trellis_cavlc( h, dct,
h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- x264_dct4_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan4[h->mb.b_interlaced],
+ x264_dct4_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan4[MB_INTERLACED],
ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, 0, 16, idx, 0 );
}
{
return quant_trellis_cabac( h, dct,
h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
- x264_dct8_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan8[h->mb.b_interlaced],
+ x264_dct8_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan8[MB_INTERLACED],
DCT_LUMA_8x8, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 64, idx );
}
{
int nz = quant_trellis_cavlc( h, dct,
h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
- x264_dct8_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan8[h->mb.b_interlaced],
+ x264_dct8_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan8[MB_INTERLACED],
DCT_LUMA_4x4, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 16, idx*4+i, 1 );
/* Set up nonzero count for future calls */
h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
if( !b_user_interlaced && info.interlaced )
{
+#if HAVE_INTERLACED
x264_cli_log( "x264", X264_LOG_WARNING, "input appears to be interlaced, enabling %cff interlaced mode.\n"
" If you want otherwise, use --no-interlaced or --%cff\n",
info.tff ? 't' : 'b', info.tff ? 'b' : 't' );
param->b_interlaced = 1;
param->b_tff = !!info.tff;
+#else
+ x264_cli_log( "x264", X264_LOG_WARNING, "input appears to be interlaced, but not compiled with interlaced support\n" );
+#endif
}
/* Automatically reduce reference frame count to match the user's target level