X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavcodec%2Fh263.c;h=6d5ffc0b237a8cae8fba307bb94b3e6ebe5df396;hb=f0ce9913d0974ac0c7371a62c3324dd2863b5d1b;hp=77a1bb828b16b2671a8423ac3612a2db5a8efac7;hpb=124e28847b95a70724399c8473dd778b5c4c8ffc;p=ffmpeg

diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 77a1bb828b1..6d5ffc0b237 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -27,21 +27,18 @@
  * h263/mpeg4 codec.
  */
 
-//#define DEBUG
 
 #include <limits.h>
 
-#include "dsputil.h"
 #include "avcodec.h"
 #include "mpegvideo.h"
 #include "h263.h"
 #include "h263data.h"
 #include "mathops.h"
+#include "mpegutils.h"
 #include "unary.h"
 #include "flv.h"
 #include "mpeg4video.h"
-//#undef NDEBUG
-//#include <assert.h>
 
 uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
 
@@ -52,7 +49,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
     const int wrap = s->b8_stride;
     const int xy = s->block_index[0];
 
-    s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
+    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
 
     if(s->mv_type != MV_TYPE_8X8){
         int motion_x, motion_y;
@@ -71,34 +68,34 @@ void ff_h263_update_motion_val(MpegEncContext * s){
                 s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                 s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
             }
-            s->current_picture.f.ref_index[0][4*mb_xy ] =
-            s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
-            s->current_picture.f.ref_index[0][4*mb_xy + 2] =
-            s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
+            s->current_picture.ref_index[0][4*mb_xy ] =
+            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+            s->current_picture.ref_index[0][4*mb_xy + 2] =
+            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
         }
 
         /* no update if 8X8 because it has been done during parsing */
-        s->current_picture.f.motion_val[0][xy][0] = motion_x;
-        s->current_picture.f.motion_val[0][xy][1] = motion_y;
-        s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
-        s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
-        s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
-        s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
-        s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
-        s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
+        s->current_picture.motion_val[0][xy][0] = motion_x;
+        s->current_picture.motion_val[0][xy][1] = motion_y;
+        s->current_picture.motion_val[0][xy + 1][0] = motion_x;
+        s->current_picture.motion_val[0][xy + 1][1] = motion_y;
+        s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
+        s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
+        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
+        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
     }
 
     if(s->encoding){ //FIXME encoding MUST be cleaned up
         if (s->mv_type == MV_TYPE_8X8)
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
+            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
         else if(s->mb_intra)
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
+            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
         else
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
     }
 }
 
-int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
+int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
 {
     int x, y, wrap, a, c, pred_dc;
     int16_t *dc_val;
@@ -154,20 +151,20 @@ void ff_h263_loop_filter(MpegEncContext * s){
        Diag  Top
        Left  Center
     */
-    if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
+    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
         qp_c= s->qscale;
-        s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
-        s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
+        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize, linesize, qp_c);
+        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
     }else
         qp_c= 0;
 
     if(s->mb_y){
         int qp_dt, qp_tt, qp_tc;
 
-        if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
+        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
             qp_tt=0;
         else
-            qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
+            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
 
         if(qp_c)
             qp_tc= qp_c;
@@ -176,57 +173,57 @@ void ff_h263_loop_filter(MpegEncContext * s){
 
         if(qp_tc){
             const int chroma_qp= s->chroma_qscale_table[qp_tc];
-            s->dsp.h263_v_loop_filter(dest_y , linesize, qp_tc);
-            s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc);
+            s->h263dsp.h263_v_loop_filter(dest_y, linesize, qp_tc);
+            s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);
 
-            s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp);
-            s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp);
+            s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
+            s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
         }
 
         if(qp_tt)
-            s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
+            s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);
 
         if(s->mb_x){
-            if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
+            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                 qp_dt= qp_tt;
            else
-                qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
+                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
 
            if(qp_dt){
                 const int chroma_qp= s->chroma_qscale_table[qp_dt];
-                s->dsp.h263_h_loop_filter(dest_y -8*linesize , linesize, qp_dt);
-                s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
-                s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
+                s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize, linesize, qp_dt);
+                s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
+                s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }
 
    if(qp_c){
-        s->dsp.h263_h_loop_filter(dest_y +8, linesize, qp_c);
+        s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
-            s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
+            s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }
 
    if(s->mb_x){
        int qp_lc;
-        if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
+        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
-            qp_lc = s->current_picture.f.qscale_table[xy - 1];
+            qp_lc = s->current_picture.qscale_table[xy - 1];
 
        if(qp_lc){
-            s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
+            s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
-                s->dsp.h263_h_loop_filter(dest_y +8* linesize, linesize, qp_lc);
-                s->dsp.h263_h_loop_filter(dest_cb , uvlinesize, chroma_qp);
-                s->dsp.h263_h_loop_filter(dest_cr , uvlinesize, chroma_qp);
+                s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc);
+                s->h263dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp);
+                s->h263dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp);
            }
        }
    }
 }
 
-void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
+void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
 {
     int x, y, wrap, a, c, pred_dc, scale, i;
     int16_t *dc_val, *ac_val, *ac_val1;
@@ -313,15 +310,15 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
         ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
 }
 
-int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
-                          int *px, int *py)
+int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
+                             int *px, int *py)
 {
     int wrap;
     int16_t *A, *B, *C, (*mot_val)[2];
     static const int off[4]= {2, 1, 1, -1};
 
     wrap = s->b8_stride;
-    mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
+    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
 
     A = mot_val[ - 1];
     /* special case for first (slice) line */