X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fh264.c;h=4fd8e874110d0c1b7295808d874c08f991c481fe;hb=47bbf8a71d22a5e2d5d0152b213b4ed6ec6778c6;hp=145c6b010f97e49d9411d98bb660c81c178aa3b7;hpb=3425501d3b09650c6b295ba225e02e97c002372c;p=ffmpeg diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 145c6b010f9..4fd8e874110 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -544,7 +544,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){ const int left_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]); const int curr_mb_frame_flag = !IS_INTERLACED(mb_type); const int bottom = (s->mb_y & 1); - tprintf("fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag, left_mb_frame_flag, topleft_mb_frame_flag, top_mb_frame_flag, topright_mb_frame_flag); + tprintf(s->avctx, "fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag, left_mb_frame_flag, topleft_mb_frame_flag, top_mb_frame_flag, topright_mb_frame_flag); if (bottom ? !curr_mb_frame_flag // bottom macroblock : (!curr_mb_frame_flag && !top_mb_frame_flag) // top macroblock @@ -812,34 +812,21 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){ *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101; } - //FIXME unify cleanup or sth - if(USES_LIST(left_type[0], list)){ - const int b_xy= h->mb2b_xy[left_xy[0]] + 3; - const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1; - *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]]; - *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]]; - h->ref_cache[list][scan8[0] - 1 + 0*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)]; - h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1]>>1)]; - }else{ - *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]= - *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0; - h->ref_cache[list][scan8[0] - 1 + 0*8]= - h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE; - } - - if(USES_LIST(left_type[1], list)){ - const int b_xy= h->mb2b_xy[left_xy[1]] + 3; - const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1; - *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]]; - *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]]; - h->ref_cache[list][scan8[0] - 1 + 2*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)]; - h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[3]>>1)]; - }else{ - *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]= - *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0; - h->ref_cache[list][scan8[0] - 1 + 2*8]= - h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; - assert((!left_type[0]) == (!left_type[1])); + for(i=0; i<2; i++){ + int cache_idx = scan8[0] - 1 + i*2*8; + if(USES_LIST(left_type[i], list)){ + const int b_xy= h->mb2b_xy[left_xy[i]] + 3; + const int b8_xy= h->mb2b8_xy[left_xy[i]] + 1; + *(uint32_t*)h->mv_cache[list][cache_idx ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]]; + *(uint32_t*)h->mv_cache[list][cache_idx+8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]]; + h->ref_cache[list][cache_idx ]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0+i*2]>>1)]; + h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1+i*2]>>1)]; + }else{ + *(uint32_t*)h->mv_cache [list][cache_idx ]= + *(uint32_t*)h->mv_cache [list][cache_idx+8]= 0; + h->ref_cache[list][cache_idx ]= + h->ref_cache[list][cache_idx+8]= left_type[i] ? LIST_NOT_USED : PART_NOT_AVAILABLE; + } } if((for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)) && !FRAME_MBAFF) @@ -1073,7 +1060,7 @@ static inline int pred_intra_mode(H264Context *h, int n){ const int top = h->intra4x4_pred_mode_cache[index8 - 8]; const int min= FFMIN(left, top); - tprintf("mode:%d %d min:%d\n", left ,top, min); + tprintf(h->s.avctx, "mode:%d %d min:%d\n", left ,top, min); if(min<0) return DC_PRED; else return min; @@ -1120,18 +1107,18 @@ static inline int pred_non_zero_count(H264Context *h, int n){ if(i<64) i= (i+1)>>1; - tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31); + tprintf(h->s.avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31); return i&31; } static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){ const int topright_ref= h->ref_cache[list][ i - 8 + part_width ]; + MpegEncContext *s = &h->s; /* there is no consistent mapping of mvs to neighboring locations that will * make mbaff happy, so we can't move all this logic to fill_caches */ if(FRAME_MBAFF){ - MpegEncContext *s = &h->s; const uint32_t *mb_types = s->current_picture_ptr->mb_type; const int16_t *mv; *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0; @@ -1175,7 +1162,7 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in *C= h->mv_cache[list][ i - 8 + part_width ]; return topright_ref; }else{ - tprintf("topright MV not available\n"); + tprintf(s->avctx, "topright MV not available\n"); *C= h->mv_cache[list][ i - 8 - 1 ]; return h->ref_cache[list][ i - 8 - 1 ]; @@ -1210,7 +1197,7 @@ static inline void pred_motion(H264Context * const h, int n, int part_width, int diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width); match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref); - tprintf("pred_motion match_count=%d\n", match_count); + tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count); if(match_count > 1){ //most common *mx= mid_pred(A[0], B[0], C[0]); *my= mid_pred(A[1], B[1], C[1]); @@ -1235,7 +1222,7 @@ static inline void pred_motion(H264Context * const h, int n, int part_width, int } } - tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list); + tprintf(h->s.avctx, "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, 
*mx, *my, h->s.mb_x, h->s.mb_y, n, list); } /** @@ -1249,7 +1236,7 @@ static inline void pred_16x8_motion(H264Context * const h, int n, int list, int const int top_ref= h->ref_cache[list][ scan8[0] - 8 ]; const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ]; - tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list); + tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list); if(top_ref == ref){ *mx= B[0]; @@ -1260,7 +1247,7 @@ static inline void pred_16x8_motion(H264Context * const h, int n, int list, int const int left_ref= h->ref_cache[list][ scan8[8] - 1 ]; const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ]; - tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); + tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); if(left_ref == ref){ *mx= A[0]; @@ -1284,7 +1271,7 @@ static inline void pred_8x16_motion(H264Context * const h, int n, int list, int const int left_ref= h->ref_cache[list][ scan8[0] - 1 ]; const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ]; - tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); + tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list); if(left_ref == ref){ *mx= A[0]; @@ -1297,7 +1284,7 @@ static inline void pred_8x16_motion(H264Context * const h, int n, int list, int diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2); - tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list); + tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list); if(diagonal_ref == ref){ *mx= C[0]; @@ -1314,7 +1301,7 @@ static inline void pred_pskip_motion(H264Context * const h, int * const mx, int const int top_ref = h->ref_cache[0][ scan8[0] - 8 ]; const int left_ref= h->ref_cache[0][ scan8[0] - 1 ]; - tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y); + tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y); if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0) @@ -1335,13 +1322,13 @@ static inline void direct_dist_scale_factor(H264Context * const h){ int i; for(i=0; i<h->ref_count[0]; i++){ int poc0 = h->ref_list[0][i].poc; - int td = clip(poc1 - poc0, -128, 127); + int td = av_clip(poc1 - poc0, -128, 127); if(td == 0 /* FIXME || pic0 is a long-term ref */){ h->dist_scale_factor[i] = 256; }else{ - int tb = clip(poc - poc0, -128, 127); + int tb = av_clip(poc - poc0, -128, 127); int tx = (16384 + (FFABS(td) >> 1)) / td; - h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023); + h->dist_scale_factor[i] = av_clip((tb*tx + 32) >> 6, -1024, 1023); } } if(FRAME_MBAFF){ @@ -1421,7 +1408,7 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){ if(MB_FIELD) *mb_type |= MB_TYPE_INTERLACED; - tprintf("mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type, sub_mb_type, is_b8x8, mb_type_col); + tprintf(s->avctx, "mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type, sub_mb_type, is_b8x8, mb_type_col); 
if(h->direct_spatial_mv_pred){ int ref[2]; @@ -1468,6 +1455,8 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){ } if(IS_16X16(*mb_type)){ + int a=0, b=0; + fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1); fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1); if(!IS_INTRA(mb_type_col) @@ -1475,17 +1464,15 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){ || (l1ref0[0] < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1 && (h->x264_build>33 || !h->x264_build)))){ if(ref[0] > 0) - fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4); - else - fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); + a= pack16to32(mv[0][0],mv[0][1]); if(ref[1] > 0) - fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4); - else - fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); + b= pack16to32(mv[1][0],mv[1][1]); }else{ - fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4); - fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4); + a= pack16to32(mv[0][0],mv[0][1]); + b= pack16to32(mv[1][0],mv[1][1]); } + fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4); + fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4); }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; @@ -1629,11 +1616,11 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){ /* one-to-one mv scaling */ if(IS_16X16(*mb_type)){ + int ref, mv0, mv1; + fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1); if(IS_INTRA(mb_type_col)){ - fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1); - fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); - fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); + ref=mv0=mv1=0; }else{ const int ref0 = l1ref0[0] >= 0 ? 
map_col_to_list0[0][l1ref0[0]] : map_col_to_list0[1][l1ref1[0]]; @@ -1642,10 +1629,13 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){ int mv_l0[2]; mv_l0[0] = (scale * mv_col[0] + 128) >> 8; mv_l0[1] = (scale * mv_col[1] + 128) >> 8; - fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1); - fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4); - fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4); + ref= ref0; + mv0= pack16to32(mv_l0[0],mv_l0[1]); + mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]); } + fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1); + fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4); + fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4); }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; @@ -1813,11 +1803,11 @@ static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *c * identifies the exact end of the bitstream * @return the length of the trailing, or 0 if damaged */ -static int decode_rbsp_trailing(uint8_t *src){ +static int decode_rbsp_trailing(H264Context *h, uint8_t *src){ int v= *src; int r; - tprintf("rbsp trailing %X\n", v); + tprintf(h->s.avctx, "rbsp trailing %X\n", v); for(r=1; r<9; r++){ if(v&1) return r; @@ -1958,12 +1948,12 @@ static void chroma_dc_dct_c(DCTELEM *block){ */ static inline int get_chroma_qp(int chroma_qp_index_offset, int qscale){ - return chroma_qp[clip(qscale + chroma_qp_index_offset, 0, 51)]; + return chroma_qp[av_clip(qscale + chroma_qp_index_offset, 0, 51)]; } //FIXME need to check that this doesnt overflow signed 32 bit for low qp, i am not sure, it's very close -//FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away) -static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){ +//FIXME check that gcc inlines this (and optimizes intra & separate_dc stuff away) +static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int separate_dc){ int i; const int * const quant_table= quant_coeff[qscale]; const int bias= intra ? 
(1<mb_x > 0); int deblock_top = (s->mb_y > 1); - tprintf("xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y, src_cb, src_cr, linesize, uvlinesize); + tprintf(s->avctx, "xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y, src_cb, src_cr, linesize, uvlinesize); src_y -= 2 * linesize + 1; src_cb -= 2 * uvlinesize + 1; @@ -3507,7 +3497,7 @@ b= t; } } -static void hl_decode_mb(H264Context *h){ +static void av_always_inline hl_decode_mb_internal(H264Context *h, int simple){ MpegEncContext * const s = &h->s; const int mb_x= s->mb_x; const int mb_y= s->mb_y; @@ -3518,13 +3508,10 @@ static void hl_decode_mb(H264Context *h){ int i; int *block_offset = &h->block_offset[0]; const unsigned int bottom = mb_y & 1; - const int transform_bypass = (s->qscale == 0 && h->sps.transform_bypass); + const int transform_bypass = (s->qscale == 0 && h->sps.transform_bypass), is_h264 = (simple || s->codec_id == CODEC_ID_H264); void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride); void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride); - if(!s->decode) - return; - dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16; dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; @@ -3532,7 +3519,7 @@ static void hl_decode_mb(H264Context *h){ s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4); s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2); - if (MB_FIELD) { + if (!simple && MB_FIELD) { linesize = h->mb_linesize = s->linesize * 2; uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; block_offset = &h->block_offset[24]; @@ -3576,7 +3563,7 @@ static void hl_decode_mb(H264Context *h){ idct_add = s->dsp.h264_idct_add; } - if(FRAME_MBAFF && h->deblocking_filter && IS_INTRA(mb_type) + if(!simple && FRAME_MBAFF && h->deblocking_filter && IS_INTRA(mb_type) && (!bottom || !IS_INTRA(s->current_picture.mb_type[mb_xy-s->mb_stride]))){ int mbt_y = mb_y&~1; uint8_t *top_y = s->current_picture.data[0] + (mbt_y * 16* s->linesize ) + mb_x * 16; @@ -3585,7 +3572,7 @@ static void hl_decode_mb(H264Context *h){ xchg_pair_border(h, top_y, top_cb, top_cr, s->linesize, s->uvlinesize, 1); } - if (IS_INTRA_PCM(mb_type)) { + if (!simple && IS_INTRA_PCM(mb_type)) { unsigned int x, y; // The pixels are stored in h->mb array in the same order as levels, @@ -3613,23 +3600,23 @@ static void hl_decode_mb(H264Context *h){ } } else { if(IS_INTRA(mb_type)){ - if(h->deblocking_filter && !FRAME_MBAFF) + if(h->deblocking_filter && (simple || !FRAME_MBAFF)) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1); - if(!(s->flags&CODEC_FLAG_GRAY)){ + if(simple || !(s->flags&CODEC_FLAG_GRAY)){ h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize); h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize); } if(IS_INTRA4x4(mb_type)){ - if(!s->encoding){ + if(simple || !s->encoding){ if(IS_8x8DCT(mb_type)){ for(i=0; i<16; i+=4){ uint8_t * const ptr= dest_y + block_offset[i]; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; const int nnz = h->non_zero_count_cache[ scan8[i] ]; h->pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000, - (h->topright_samples_available<<(i+1))&0x8000, linesize); + (h->topright_samples_available<<i)&0x4000, linesize); if(nnz){ if(nnz == 1 && h->mb[i*16]) idct_dc_add(ptr, h->mb + i*16, linesize); @@ -3658,7 +3645,7 @@ static void hl_decode_mb(H264Context *h){ h->pred4x4[ dir ](ptr, topright, linesize); nnz = h->non_zero_count_cache[ scan8[i] ]; if(nnz){ - 
if(s->codec_id == CODEC_ID_H264){ + if(is_h264){ if(nnz == 1 && h->mb[i*16]) idct_dc_add(ptr, h->mb + i*16, linesize); else @@ -3670,15 +3657,15 @@ static void hl_decode_mb(H264Context *h){ } }else{ h->pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize); - if(s->codec_id == CODEC_ID_H264){ + if(is_h264){ if(!transform_bypass) h264_luma_dc_dequant_idct_c(h->mb, s->qscale, h->dequant4_coeff[IS_INTRA(mb_type) ? 0:3][s->qscale][0]); }else svq3_luma_dc_dequant_idct_c(h->mb, s->qscale); } - if(h->deblocking_filter && !FRAME_MBAFF) + if(h->deblocking_filter && (simple || !FRAME_MBAFF)) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0); - }else if(s->codec_id == CODEC_ID_H264){ + }else if(is_h264){ hl_motion(h, dest_y, dest_cb, dest_cr, s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, @@ -3687,7 +3674,7 @@ static void hl_decode_mb(H264Context *h){ if(!IS_INTRA4x4(mb_type)){ - if(s->codec_id == CODEC_ID_H264){ + if(is_h264){ if(IS_INTRA16x16(mb_type)){ for(i=0; i<16; i++){ if(h->non_zero_count_cache[ scan8[i] ]) @@ -3717,7 +3704,7 @@ static void hl_decode_mb(H264Context *h){ } } - if(!(s->flags&CODEC_FLAG_GRAY)){ + if(simple || !(s->flags&CODEC_FLAG_GRAY)){ uint8_t *dest[2] = {dest_cb, dest_cr}; if(transform_bypass){ idct_add = idct_dc_add = s->dsp.add_pixels4; @@ -3727,7 +3714,7 @@ static void hl_decode_mb(H264Context *h){ chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp, h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp][0]); chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp, h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp][0]); } - if(s->codec_id == CODEC_ID_H264){ + if(is_h264){ for(i=16; i<16+8; i++){ if(h->non_zero_count_cache[ scan8[i] ]) idct_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize); @@ -3745,7 +3732,7 @@ static void hl_decode_mb(H264Context *h){ } } if(h->deblocking_filter) { - if (FRAME_MBAFF) { + if (!simple && FRAME_MBAFF) { //FIXME try deblocking one mb at a time? 
// the reduction in load/storing mvs and such might outweigh the extra backup/xchg_border const int mb_y = s->mb_y - 1; @@ -3765,18 +3752,18 @@ static void hl_decode_mb(H264Context *h){ // deblock a pair // top s->mb_y--; - tprintf("call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y); + tprintf(h->s.avctx, "call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y); fill_caches(h, mb_type_top, 1); //FIXME don't fill stuff which isn't used by filter_mb h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy]); filter_mb(h, mb_x, mb_y, pair_dest_y, pair_dest_cb, pair_dest_cr, linesize, uvlinesize); // bottom s->mb_y++; - tprintf("call mbaff filter_mb\n"); + tprintf(h->s.avctx, "call mbaff filter_mb\n"); fill_caches(h, mb_type_bottom, 1); //FIXME don't fill stuff which isn't used by filter_mb h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy+s->mb_stride]); filter_mb(h, mb_x, mb_y+1, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } else { - tprintf("call filter_mb\n"); + tprintf(h->s.avctx, "call filter_mb\n"); backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize); fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); @@ -3784,6 +3771,36 @@ static void hl_decode_mb(H264Context *h){ } } +/** + * Process a macroblock; this case avoids checks for expensive uncommon cases. + */ +static void hl_decode_mb_simple(H264Context *h){ + hl_decode_mb_internal(h, 1); +} + +/** + * Process a macroblock; this handles edge cases, such as interlacing. + */ +static void av_noinline hl_decode_mb_complex(H264Context *h){ + hl_decode_mb_internal(h, 0); +} + +static void hl_decode_mb(H264Context *h){ + MpegEncContext * const s = &h->s; + const int mb_x= s->mb_x; + const int mb_y= s->mb_y; + const int mb_xy= mb_x + mb_y*s->mb_stride; + const int mb_type= s->current_picture.mb_type[mb_xy]; + int is_complex = FRAME_MBAFF || MB_FIELD || IS_INTRA_PCM(mb_type) || s->codec_id != CODEC_ID_H264 || (s->flags&CODEC_FLAG_GRAY) || s->encoding; + + if(!s->decode) + return; + + if (is_complex) + hl_decode_mb_complex(h); + else hl_decode_mb_simple(h); +} + /** * fills the default_ref_list. 
*/ @@ -3814,7 +3831,7 @@ static int fill_default_ref_list(H264Context *h){ limit= best_poc; sorted_short_ref[out_i]= *h->short_ref[best_i]; - tprintf("sorted poc: %d->%d poc:%d fn:%d\n", best_i, out_i, sorted_short_ref[out_i].poc, sorted_short_ref[out_i].frame_num); + tprintf(h->s.avctx, "sorted poc: %d->%d poc:%d fn:%d\n", best_i, out_i, sorted_short_ref[out_i].poc, sorted_short_ref[out_i].frame_num); if (-1 == smallest_poc_greater_than_current) { if (h->short_ref[best_i]->poc >= s->current_picture_ptr->poc) { smallest_poc_greater_than_current = out_i; @@ -3826,7 +3843,7 @@ static int fill_default_ref_list(H264Context *h){ if(s->picture_structure == PICT_FRAME){ if(h->slice_type==B_TYPE){ int list; - tprintf("current poc: %d, smallest_poc_greater_than_current: %d\n", s->current_picture_ptr->poc, smallest_poc_greater_than_current); + tprintf(h->s.avctx, "current poc: %d, smallest_poc_greater_than_current: %d\n", s->current_picture_ptr->poc, smallest_poc_greater_than_current); // find the largest poc for(list=0; list<2; list++){ @@ -3889,11 +3906,11 @@ static int fill_default_ref_list(H264Context *h){ } #ifdef TRACE for (i=0; i<h->ref_count[0]; i++) { - tprintf("List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]); + tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]); } if(h->slice_type==B_TYPE){ for (i=0; i<h->ref_count[1]; i++) { - tprintf("List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[0][i].data[0]); + tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[0][i].data[0]); } } #endif @@ -4105,11 +4122,11 @@ static void implicit_weight_table(H264Context *h){ int poc0 = h->ref_list[0][ref0].poc; for(ref1=0; ref1 < h->ref_count[1]; ref1++){ int poc1 = h->ref_list[1][ref1].poc; - int td = clip(poc1 - poc0, -128, 127); + int td = av_clip(poc1 - poc0, -128, 127); if(td){ - int tb = clip(cur_poc - poc0, -128, 127); + int tb = av_clip(cur_poc - poc0, -128, 127); int tx = (16384 + (FFABS(td) >> 1)) / td; - int dist_scale_factor = clip((tb*tx + 32) >> 6, -1024, 1023) >> 2; + int dist_scale_factor = av_clip((tb*tx + 32) >> 6, -1024, 1023) >> 2; if(dist_scale_factor < -64 || dist_scale_factor > 128) h->implicit_weight[ref0][ref1] = 32; else @@ -4499,6 +4516,11 @@ static int decode_slice_header(H264Context *h){ first_mb_in_slice= get_ue_golomb(&s->gb); + if((s->flags2 & CODEC_FLAG2_CHUNKS) && first_mb_in_slice == 0){ + h->slice_num = 0; + s->current_picture_ptr= NULL; + } + slice_type= get_ue_golomb(&s->gb); if(slice_type > 9){ av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y); @@ -4898,7 +4920,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, in } trailing_ones= coeff_token&3; - tprintf("trailing:%d, total:%d\n", trailing_ones, total_coeff); + tprintf(h->s.avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff); assert(total_coeff<=16); for(i=0; idsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handlong? - tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); + tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); cbp = 0; /* avoid warning. 
FIXME: find a solution without slowing down the code */ if(h->slice_type != I_TYPE && h->slice_type != SI_TYPE){ @@ -5146,21 +5168,21 @@ decode_intra_mb: for(y=0; y<16; y++){ const int index= 4*(y&3) + 32*((y>>2)&1) + 128*(y>>3); for(x=0; x<16; x++){ - tprintf("LUMA ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8)); + tprintf(s->avctx, "LUMA ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8)); h->mb[index + (x&3) + 16*((x>>2)&1) + 64*(x>>3)]= get_bits(&s->gb, 8); } } for(y=0; y<8; y++){ const int index= 256 + 4*(y&3) + 32*(y>>2); for(x=0; x<8; x++){ - tprintf("CHROMA U ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8)); + tprintf(s->avctx, "CHROMA U ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8)); h->mb[index + (x&3) + 16*(x>>2)]= get_bits(&s->gb, 8); } } for(y=0; y<8; y++){ const int index= 256 + 64 + 4*(y&3) + 32*(y>>2); for(x=0; x<8; x++){ - tprintf("CHROMA V ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8)); + tprintf(s->avctx, "CHROMA V ICPM LEVEL (%3d)\n", show_bits(&s->gb, 8)); h->mb[index + (x&3) + 16*(x>>2)]= get_bits(&s->gb, 8); } } @@ -5297,7 +5319,7 @@ decode_intra_mb: pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); if(IS_SUB_8X8(sub_mb_type)){ mv_cache[ 1 ][0]= @@ -5328,88 +5350,91 @@ decode_intra_mb: int list, mx, my, i; //FIXME we should set ref_idx_l? to 0 if we use that later ... if(IS_16X16(mb_type)){ - for(list=0; list<2; list++){ - if(h->ref_count[list]>0){ + for(list=0; list<h->list_count; list++){ + unsigned int val; if(IS_DIR(mb_type, 0, list)){ - unsigned int val= get_te0_golomb(&s->gb, h->ref_count[list]); + val= get_te0_golomb(&s->gb, h->ref_count[list]); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); return -1; } - fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1); }else - fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, (LIST_NOT_USED&0xFF), 1); - } + val= LIST_NOT_USED&0xFF; + fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1); } - for(list=0; list<2; list++){ + for(list=0; list<h->list_count; list++){ + unsigned int val; if(IS_DIR(mb_type, 0, list)){ pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); - fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4); + val= pack16to32(mx,my); }else - fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, 0, 4); + val=0; + fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, val, 4); } } else if(IS_16X8(mb_type)){ - for(list=0; list<2; list++){ - if(h->ref_count[list]>0){ + for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ + unsigned int val; if(IS_DIR(mb_type, i, list)){ - unsigned int val= get_te0_golomb(&s->gb, h->ref_count[list]); + val= get_te0_golomb(&s->gb, h->ref_count[list]); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); return -1; } - fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1); }else - fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1); + val= LIST_NOT_USED&0xFF; + fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1); } - } } - for(list=0; list<2; list++){ + for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ + unsigned int val; if(IS_DIR(mb_type, i, list)){ 
pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); - fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4); + val= pack16to32(mx,my); }else - fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4); + val=0; + fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4); } } }else{ assert(IS_8X16(mb_type)); - for(list=0; list<2; list++){ - if(h->ref_count[list]>0){ + for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ + unsigned int val; if(IS_DIR(mb_type, i, list)){ //FIXME optimize - unsigned int val= get_te0_golomb(&s->gb, h->ref_count[list]); + val= get_te0_golomb(&s->gb, h->ref_count[list]); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); return -1; } - fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1); }else - fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1); + val= LIST_NOT_USED&0xFF; + fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1); } - } } - for(list=0; list<2; list++){ + for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ + unsigned int val; if(IS_DIR(mb_type, i, list)){ pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); - fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4); + val= pack16to32(mx,my); }else - fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4); + val=0; + fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4); } } } @@ -5761,7 +5786,7 @@ static int decode_cabac_mb_cbp_luma( H264Context *h) { if( h->slice_table[h->top_mb_xy] == h->slice_num ) { cbp_b = h->top_cbp; - tprintf("cbp_b = top_cbp = %x\n", cbp_b); + tprintf(h->s.avctx, "cbp_b = top_cbp = %x\n", cbp_b); } for( i8x8 = 0; i8x8 < 4; i8x8++ ) { @@ -5776,7 +5801,7 @@ static int decode_cabac_mb_cbp_luma( H264Context *h) { cbp_a = cbp; else if( h->slice_table[h->left_mb_xy[0]] == h->slice_num ) { cbp_a = h->left_cbp; - tprintf("cbp_a = left_cbp = %x\n", cbp_a); + tprintf(h->s.avctx, "cbp_a = left_cbp = %x\n", cbp_a); } if( y > 0 ) @@ -6200,7 +6225,7 @@ static int decode_mb_cabac(H264Context *h) { s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handlong?) 
- tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); + tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE ) { int skip; /* a skipped mb needs the aff flag from the following mb */ @@ -6292,21 +6317,21 @@ decode_intra_mb: for(y=0; y<16; y++){ const int index= 4*(y&3) + 32*((y>>2)&1) + 128*(y>>3); for(x=0; x<16; x++){ - tprintf("LUMA ICPM LEVEL (%3d)\n", *ptr); + tprintf(s->avctx, "LUMA ICPM LEVEL (%3d)\n", *ptr); h->mb[index + (x&3) + 16*((x>>2)&1) + 64*(x>>3)]= *ptr++; } } for(y=0; y<8; y++){ const int index= 256 + 4*(y&3) + 32*(y>>2); for(x=0; x<8; x++){ - tprintf("CHROMA U ICPM LEVEL (%3d)\n", *ptr); + tprintf(s->avctx, "CHROMA U ICPM LEVEL (%3d)\n", *ptr); h->mb[index + (x&3) + 16*(x>>2)]= *ptr++; } } for(y=0; y<8; y++){ const int index= 256 + 64 + 4*(y&3) + 32*(y>>2); for(x=0; x<8; x++){ - tprintf("CHROMA V ICPM LEVEL (%3d)\n", *ptr); + tprintf(s->avctx, "CHROMA V ICPM LEVEL (%3d)\n", *ptr); h->mb[index + (x&3) + 16*(x>>2)]= *ptr++; } } @@ -6428,7 +6453,7 @@ decode_intra_mb: mx = mpx + decode_cabac_mb_mvd( h, list, index, 0 ); my = mpy + decode_cabac_mb_mvd( h, list, index, 1 ); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); if(IS_SUB_8X8(sub_mb_type)){ mv_cache[ 1 ][0]= @@ -6475,22 +6500,20 @@ decode_intra_mb: } else { int list, mx, my, i, mpx, mpy; if(IS_16X16(mb_type)){ - for(list=0; list<2; list++){ + for(list=0; listlist_count; list++){ if(IS_DIR(mb_type, 0, list)){ - if(h->ref_count[list] > 0 ){ const int ref = h->ref_count[list] > 1 ? decode_cabac_mb_ref( h, list, 0 ) : 0; fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, ref, 1); - } }else - fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1); + fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1); //FIXME factorize and the other fill_rect below too } - for(list=0; list<2; list++){ + for(list=0; listlist_count; list++){ if(IS_DIR(mb_type, 0, list)){ pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mpx, &mpy); mx = mpx + decode_cabac_mb_mvd( h, list, 0, 0 ); my = mpy + decode_cabac_mb_mvd( h, list, 0, 1 ); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); fill_rectangle(h->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx-mpx,my-mpy), 4); fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4); @@ -6514,7 +6537,7 @@ decode_intra_mb: pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mpx, &mpy); mx = mpx + decode_cabac_mb_mvd( h, list, 8*i, 0 ); my = mpy + decode_cabac_mb_mvd( h, list, 8*i, 1 ); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); fill_rectangle(h->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx-mpx,my-mpy), 4); fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4); @@ -6542,7 +6565,7 @@ decode_intra_mb: mx = mpx + decode_cabac_mb_mvd( h, list, 4*i, 0 ); my = mpy + decode_cabac_mb_mvd( h, list, 4*i, 1 ); - tprintf("final mv:%d %d\n", mx, my); + tprintf(s->avctx, "final mv:%d %d\n", mx, my); fill_rectangle(h->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx-mpx,my-mpy), 4); fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4); }else{ @@ -6735,7 +6758,7 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int16_t b pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } - 
tprintf("filter_mb_edgev i:%d d:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, p2, p1, p0, q0, q1, q2, pix[-2], pix[-1], pix[0], pix[1]); + tprintf(h->s.avctx, "filter_mb_edgev i:%d d:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, p2, p1, p0, q0, q1, q2, pix[-2], pix[-1], pix[0], pix[1]); } pix += stride; } @@ -6796,18 +6819,18 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int int i_delta; if( FFABS( p2 - p0 ) < beta ) { - pix[-2] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); + pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } if( FFABS( q2 - q0 ) < beta ) { - pix[1] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); + pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } - i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ - tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); + i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); + pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */ + pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ + tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); } }else{ const int p0 = pix[-1]; @@ -6850,7 +6873,7 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } - tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); + tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); } } } @@ -6884,11 +6907,11 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { - const int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); + const int i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ - tprintf("filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); + pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */ + pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ + tprintf(h->s.avctx, "filter_mb_mbaff_edgecv 
i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); } }else{ const int p0 = pix[-1]; @@ -6902,7 +6925,7 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */ - tprintf("filter_mb_mbaff_edgecv i:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, pix[-3], p1, p0, q0, q1, pix[2], pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); + tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, pix[-3], p1, p0, q0, q1, pix[2], pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); } } } @@ -6961,7 +6984,7 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int16_t b pix[-1*pix_next] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0*pix_next] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } - tprintf("filter_mb_edgeh i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, bS[i], p2, p1, p0, q0, q1, q2, pix[-2*pix_next], pix[-pix_next], pix[0], pix[pix_next]); + tprintf(h->s.avctx, "filter_mb_edgeh i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, bS[i], p2, p1, p0, q0, q1, q2, pix[-2*pix_next], pix[-pix_next], pix[0], pix[pix_next]); } pix++; } @@ -7164,8 +7187,8 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 get_chroma_qp( h->pps.chroma_qp_index_offset, mbn1_qp ) + 1 ) >> 1; /* Filter edge */ - tprintf("filter mb:%d/%d MBAFF, QPy:%d/%d, QPc:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], chroma_qp[0], chroma_qp[1], linesize, uvlinesize); - { int i; for (i = 0; i < 8; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); } + tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPc:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], chroma_qp[0], chroma_qp[1], linesize, uvlinesize); + { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } filter_mb_mbaff_edgev ( h, &img_y [0], linesize, bS, qp ); filter_mb_mbaff_edgecv( h, &img_cb[0], uvlinesize, bS, chroma_qp ); filter_mb_mbaff_edgecv( h, &img_cr[0], uvlinesize, bS, chroma_qp ); @@ -7227,8 +7250,8 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; - tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); - { int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); } + tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); + { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } filter_mb_edgeh( h, &img_y[j*linesize], tmp_linesize, bS, qp ); chroma_qp = ( h->chroma_qp + get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1; @@ -7324,9 +7347,9 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; - //tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp, s->current_picture.qscale_table[mbn_xy]); - tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); - { int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); } + //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp, s->current_picture.qscale_table[mbn_xy]); + tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); + { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if( dir == 0 ) { filter_mb_edgev( h, &img_y[4*edge], linesize, bS, qp ); if( (edge&1) == 0 ) { @@ -7369,9 +7392,9 @@ static int decode_slice(H264Context *h){ for( i= 0; i < 460; i++ ) { int pre; if( h->slice_type == I_TYPE ) - pre = clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 ); + pre = av_clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 ); else - pre = clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 ); + pre = av_clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 ); if( pre <= 63 ) h->cabac_state[i] = 2 * ( 63 - pre ) + 0; @@ -7413,7 +7436,7 @@ static int decode_slice(H264Context *h){ } if( eos || s->mb_y >= s->mb_height ) { - tprintf("slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits); + tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return 0; } @@ -7448,7 +7471,7 @@ static int decode_slice(H264Context *h){ ++s->mb_y; } if(s->mb_y >= s->mb_height){ - tprintf("slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits); + tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits); if(get_bits_count(&s->gb) == s->gb.size_in_bits ) { ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); @@ -7463,7 +7486,7 @@ static int decode_slice(H264Context *h){ } if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){ - tprintf("slice end %d 
%d\n", get_bits_count(&s->gb), s->gb.size_in_bits); + tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits); if(get_bits_count(&s->gb) == s->gb.size_in_bits ){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); @@ -7972,36 +7995,45 @@ static int find_frame_end(H264Context *h, const uint8_t *buf, int buf_size){ //printf("first %02X%02X%02X%02X\n", buf[0], buf[1],buf[2],buf[3]); // mb_addr= pc->mb_addr - 1; state= pc->state; - for(i=0; i<=buf_size; i++){ - if((state&0xFFFFFF1F) == 0x101 || (state&0xFFFFFF1F) == 0x102 || (state&0xFFFFFF1F) == 0x105){ - tprintf("find_frame_end new startcode = %08x, frame_start_found = %d, pos = %d\n", state, pc->frame_start_found, i); - if(pc->frame_start_found){ - // If there isn't one more byte in the buffer - // the test on first_mb_in_slice cannot be done yet - // do it at next call. - if (i >= buf_size) break; - if (buf[i] & 0x80) { - // first_mb_in_slice is 0, probably the first nal of a new - // slice - tprintf("find_frame_end frame_end_found, state = %08x, pos = %d\n", state, i); - pc->state=-1; + if(state>13) + state= 7; + + for(i=0; i7, 1->4, 0->5 + else if(buf[i]) state = 7; + else state>>=1; //2->1, 1->0, 0->0 + }else if(state<=5){ + int v= buf[i] & 0x1F; + if(v==7 || v==8 || v==9){ + if(pc->frame_start_found){ + i++; +found: + pc->state=7; pc->frame_start_found= 0; - return i-4; + return i-(state&5); } + }else if(v==1 || v==2 || v==5){ + if(pc->frame_start_found){ + state+=8; + continue; + }else + pc->frame_start_found = 1; } - pc->frame_start_found = 1; - } - if((state&0xFFFFFF1F) == 0x107 || (state&0xFFFFFF1F) == 0x108 || (state&0xFFFFFF1F) == 0x109){ - if(pc->frame_start_found){ - pc->state=-1; - pc->frame_start_found= 0; - return i-4; - } + state= 7; + }else{ + if(buf[i] & 0x80) + goto found; + state= 7; } - if (istate= state; return END_NOT_FOUND; } @@ -8016,12 +8048,20 @@ static int h264_parse(AVCodecParserContext *s, ParseContext *pc = &h->s.parse_context; int next; - next= find_frame_end(h, buf, buf_size); + if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){ + next= buf_size; + }else{ + next= find_frame_end(h, buf, buf_size); + + if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) { + *poutbuf = NULL; + *poutbuf_size = 0; + return buf_size; + } - if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) { - *poutbuf = NULL; - *poutbuf_size = 0; - return buf_size; + if(next<0){ + find_frame_end(h, &pc->buffer[pc->last_index + next], -next); //update state + } } *poutbuf = (uint8_t *)buf; @@ -8064,8 +8104,11 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){ av_log(NULL, AV_LOG_ERROR,"%02X ", buf[i]); } #endif - h->slice_num = 0; - s->current_picture_ptr= NULL; + if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){ + h->slice_num = 0; + s->current_picture_ptr= NULL; + } + for(;;){ int consumed; int dst_length; @@ -8106,7 +8149,7 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){ } while(ptr[dst_length - 1] == 0 && dst_length > 1) dst_length--; - bit_length= 8*dst_length - decode_rbsp_trailing(ptr + dst_length - 1); + bit_length= 8*dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1); if(s->avctx->debug&FF_DEBUG_STARTCODE){ av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", h->nal_unit_type, buf_index, buf_size, dst_length); @@ -8201,24 +8244,6 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){ } } - if(!s->current_picture_ptr) return buf_index; //no frame - - 
s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; - s->current_picture_ptr->pict_type= s->pict_type; - - h->prev_frame_num_offset= h->frame_num_offset; - h->prev_frame_num= h->frame_num; - if(s->current_picture_ptr->reference){ - h->prev_poc_msb= h->poc_msb; - h->prev_poc_lsb= h->poc_lsb; - } - if(s->current_picture_ptr->reference) - execute_ref_pic_marking(h, h->mmco, h->mmco_index); - - ff_er_frame_end(s); - - MPV_frame_end(s); - return buf_index; } @@ -8253,6 +8278,26 @@ static int decode_frame(AVCodecContext *avctx, /* no supplementary picture */ if (buf_size == 0) { + Picture *out; + int i, out_idx; + +//FIXME factorize this with the output code below + out = h->delayed_pic[0]; + out_idx = 0; + for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++) + if(h->delayed_pic[i]->poc < out->poc){ + out = h->delayed_pic[i]; + out_idx = i; + } + + for(i=out_idx; h->delayed_pic[i]; i++) + h->delayed_pic[i] = h->delayed_pic[i+1]; + + if(out){ + *data_size = sizeof(AVFrame); + *pict= *(AVFrame*)out; + } + return 0; } @@ -8314,23 +8359,41 @@ static int decode_frame(AVCodecContext *avctx, if(buf_index < 0) return -1; - //FIXME do something with unavailable reference frames - -// if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_index, buf_size); - if(!s->current_picture_ptr){ - av_log(h->s.avctx, AV_LOG_DEBUG, "error, NO frame\n"); + if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){ + av_log(avctx, AV_LOG_ERROR, "no frame!\n"); return -1; } - { + if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height && s->mb_height)){ Picture *out = s->current_picture_ptr; + Picture *cur = s->current_picture_ptr; + Picture *prev = h->delayed_output_pic; + int i, pics, cross_idr, out_of_order, out_idx; + + s->mb_y= 0; + + s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; + s->current_picture_ptr->pict_type= s->pict_type; + + h->prev_frame_num_offset= h->frame_num_offset; + h->prev_frame_num= h->frame_num; + if(s->current_picture_ptr->reference){ + h->prev_poc_msb= h->poc_msb; + h->prev_poc_lsb= h->poc_lsb; + } + if(s->current_picture_ptr->reference) + execute_ref_pic_marking(h, h->mmco, h->mmco_index); + + ff_er_frame_end(s); + + MPV_frame_end(s); + + //FIXME do something with unavailable reference frames + #if 0 //decode order *data_size = sizeof(AVFrame); #else /* Sort B-frames into display order */ - Picture *cur = s->current_picture_ptr; - Picture *prev = h->delayed_output_pic; - int i, pics, cross_idr, out_of_order, out_idx; if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){