-
- if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
- (s->avctx->debug_mv)) {
- const int shift = 1 + s->quarter_sample;
- int mb_y;
- uint8_t *ptr;
- int i;
- int h_chroma_shift, v_chroma_shift, block_height;
- const int width = s->avctx->width;
- const int height = s->avctx->height;
- const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
- const int mv_stride = (s->mb_width << mv_sample_log2) +
- (s->codec_id == CODEC_ID_H264 ? 0 : 1);
- s->low_delay = 0; // needed to see the vectors without trashing the buffers
-
- avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
- &h_chroma_shift, &v_chroma_shift);
- for (i = 0; i < 3; i++) {
- memcpy(s->visualization_buffer[i], pict->data[i],
- (i == 0) ? pict->linesize[i] * height:
- pict->linesize[i] * height >> v_chroma_shift);
- pict->data[i] = s->visualization_buffer[i];
- }
- pict->type = FF_BUFFER_TYPE_COPY;
- ptr = pict->data[0];
- block_height = 16 >> v_chroma_shift;
-
- for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
- int mb_x;
- for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
- const int mb_index = mb_x + mb_y * s->mb_stride;
- if ((s->avctx->debug_mv) && pict->motion_val) {
- int type;
- for (type = 0; type < 3; type++) {
- int direction = 0;
- switch (type) {
- case 0:
- if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
- (pict->pict_type!= AV_PICTURE_TYPE_P))
- continue;
- direction = 0;
- break;
- case 1:
- if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
- (pict->pict_type!= AV_PICTURE_TYPE_B))
- continue;
- direction = 0;
- break;
- case 2:
- if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
- (pict->pict_type!= AV_PICTURE_TYPE_B))
- continue;
- direction = 1;
- break;
- }
- if (!USES_LIST(pict->mb_type[mb_index], direction))
- continue;
-
- if (IS_8X8(pict->mb_type[mb_index])) {
- int i;
- for (i = 0; i < 4; i++) {
- int sx = mb_x * 16 + 4 + 8 * (i & 1);
- int sy = mb_y * 16 + 4 + 8 * (i >> 1);
- int xy = (mb_x * 2 + (i & 1) +
- (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
- int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
- int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
- draw_arrow(ptr, sx, sy, mx, my, width,
- height, s->linesize, 100);
- }
- } else if (IS_16X8(pict->mb_type[mb_index])) {
- int i;
- for (i = 0; i < 2; i++) {
- int sx = mb_x * 16 + 8;
- int sy = mb_y * 16 + 4 + 8 * i;
- int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
- int mx = (pict->motion_val[direction][xy][0] >> shift);
- int my = (pict->motion_val[direction][xy][1] >> shift);
-
- if (IS_INTERLACED(pict->mb_type[mb_index]))
- my *= 2;
-
- draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
- height, s->linesize, 100);
- }
- } else if (IS_8X16(pict->mb_type[mb_index])) {
- int i;
- for (i = 0; i < 2; i++) {
- int sx = mb_x * 16 + 4 + 8 * i;
- int sy = mb_y * 16 + 8;
- int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
- int mx = pict->motion_val[direction][xy][0] >> shift;
- int my = pict->motion_val[direction][xy][1] >> shift;
-
- if (IS_INTERLACED(pict->mb_type[mb_index]))
- my *= 2;
-
- draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
- height, s->linesize, 100);
- }
- } else {
- int sx = mb_x * 16 + 8;
- int sy = mb_y * 16 + 8;
- int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
- int mx = pict->motion_val[direction][xy][0] >> shift + sx;
- int my = pict->motion_val[direction][xy][1] >> shift + sy;
- draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
- }
- }
- }
- if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
- uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
- 0x0101010101010101ULL;
- int y;
- for (y = 0; y < block_height; y++) {
- *(uint64_t *)(pict->data[1] + 8 * mb_x +
- (block_height * mb_y + y) *
- pict->linesize[1]) = c;
- *(uint64_t *)(pict->data[2] + 8 * mb_x +
- (block_height * mb_y + y) *
- pict->linesize[2]) = c;
- }
- }
- if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
- pict->motion_val) {
- int mb_type = pict->mb_type[mb_index];
- uint64_t u,v;
- int y;
-#define COLOR(theta, r) \
- u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
- v = (int)(128 + r * sin(theta * 3.141592 / 180));
-
-
- u = v = 128;
- if (IS_PCM(mb_type)) {
- COLOR(120, 48)
- } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
- IS_INTRA16x16(mb_type)) {
- COLOR(30, 48)
- } else if (IS_INTRA4x4(mb_type)) {
- COLOR(90, 48)
- } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
- // COLOR(120, 48)
- } else if (IS_DIRECT(mb_type)) {
- COLOR(150, 48)
- } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
- COLOR(170, 48)
- } else if (IS_GMC(mb_type)) {
- COLOR(190, 48)
- } else if (IS_SKIP(mb_type)) {
- // COLOR(180, 48)
- } else if (!USES_LIST(mb_type, 1)) {
- COLOR(240, 48)
- } else if (!USES_LIST(mb_type, 0)) {
- COLOR(0, 48)
- } else {
- assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
- COLOR(300,48)
- }
-
- u *= 0x0101010101010101ULL;
- v *= 0x0101010101010101ULL;
- for (y = 0; y < block_height; y++) {
- *(uint64_t *)(pict->data[1] + 8 * mb_x +
- (block_height * mb_y + y) * pict->linesize[1]) = u;
- *(uint64_t *)(pict->data[2] + 8 * mb_x +
- (block_height * mb_y + y) * pict->linesize[2]) = v;
- }
-
- // segmentation
- if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
- *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
- (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
- *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
- (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
- }
- if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
- for (y = 0; y < 16; y++)
- pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
- pict->linesize[0]] ^= 0x80;
- }
- if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
- int dm = 1 << (mv_sample_log2 - 2);
- for (i = 0; i < 4; i++) {
- int sx = mb_x * 16 + 8 * (i & 1);
- int sy = mb_y * 16 + 8 * (i >> 1);
- int xy = (mb_x * 2 + (i & 1) +
- (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
- // FIXME bidir
- int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
- if (mv[0] != mv[dm] ||
- mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
- for (y = 0; y < 8; y++)
- pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
- if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
- *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
- pict->linesize[0]) ^= 0x8080808080808080ULL;
- }
- }
-
- if (IS_INTERLACED(mb_type) &&
- s->codec_id == CODEC_ID_H264) {
- // hmm
- }
- }
- s->mbskip_table[mb_index] = 0;
- }
- }
- }
-}
-
-static inline int hpel_motion_lowres(MpegEncContext *s,
- uint8_t *dest, uint8_t *src,
- int field_based, int field_select,
- int src_x, int src_y,
- int width, int height, int stride,
- int h_edge_pos, int v_edge_pos,
- int w, int h, h264_chroma_mc_func *pix_op,
- int motion_x, int motion_y)
-{
- const int lowres = s->avctx->lowres;
- const int op_index = FFMIN(lowres, 2);
- const int s_mask = (2 << lowres) - 1;
- int emu = 0;
- int sx, sy;
-
- if (s->quarter_sample) {
- motion_x /= 2;
- motion_y /= 2;
- }
-
- sx = motion_x & s_mask;
- sy = motion_y & s_mask;
- src_x += motion_x >> lowres + 1;
- src_y += motion_y >> lowres + 1;
-
- src += src_y * stride + src_x;
-
- if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
- (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
- s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
- (h + 1) << field_based, src_x,
- src_y << field_based,
- h_edge_pos,
- v_edge_pos);
- src = s->edge_emu_buffer;
- emu = 1;
- }
-
- sx = (sx << 2) >> lowres;
- sy = (sy << 2) >> lowres;
- if (field_select)
- src += s->linesize;
- pix_op[op_index](dest, src, stride, h, sx, sy);
- return emu;
-}
-
/**
 * Apply one MPEG motion vector to the three components (lowres path).
 *
 * @param field_based  1 when predicting from a single field (doubles the
 *                     line strides and halves the vertical source range)
 * @param bottom_field 1 when writing the bottom field of the destination
 * @param field_select which source field to read from
 * @param ref_picture  array[3] of pointers to the reference planes
 * @param pix_op       chroma-style mc functions (put or avg), indexed by size
 * @param motion_x, motion_y motion vector; halfpel units at full resolution
 *                     (hence the >> (lowres + 1) scaling below)
 * @param h            height of the predicted luma area in lowres pixels
 * @param mb_y         macroblock row used to derive the source position
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8>>lowres;
    /* mask extracting the sub-pel fraction bits of a vector at this lowres */
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    /* field prediction reads every second line, hence the doubled strides */
    linesize = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        /* precedence: this is 1 << (lowres - 1), i.e. a half-line offset
         * between the selected source field and the written field */
        motion_y += (bottom_field - field_select) * (1 << lowres - 1);
    }

    /* sub-pel fraction in sx/sy; integer position via >> (lowres + 1) */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        /* chroma vector is the luma vector halved; OR in the dropped luma
         * LSB so the fraction still rounds toward a halfpel position */
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        /* generic 4:2:0 case: chroma vector is the luma vector halved,
         * again with >> (lowres + 1) for the integer position */
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
        uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
    }

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* out-of-picture reads go through the edge emulation buffer;
     * luma occupies the first 17 (+1 for fields) lines, chroma follows */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            /* cb at uvbuf, cr 16 bytes further into the same buffer */
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale the sub-pel fractions into the mc functions' domain */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    /* luma blocks are twice the chroma width, hence the smaller index;
     * NOTE(review): indexes pix_op[-1] if lowres == 0 -- presumably this
     * path is only reached with lowres > 0; confirm against callers */
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
                         uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
                         uvsx, uvsy);
    }
    // FIXME h261 lowres loop filter
}
-
-static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
- uint8_t *dest_cb, uint8_t *dest_cr,
- uint8_t **ref_picture,
- h264_chroma_mc_func * pix_op,
- int mx, int my)
-{
- const int lowres = s->avctx->lowres;
- const int op_index = FFMIN(lowres, 2);
- const int block_s = 8 >> lowres;
- const int s_mask = (2 << lowres) - 1;
- const int h_edge_pos = s->h_edge_pos >> lowres + 1;
- const int v_edge_pos = s->v_edge_pos >> lowres + 1;
- int emu = 0, src_x, src_y, offset, sx, sy;
- uint8_t *ptr;
-
- if (s->quarter_sample) {
- mx /= 2;
- my /= 2;
- }
-
- /* In case of 8X8, we construct a single chroma motion vector
- with a special rounding */
- mx = ff_h263_round_chroma(mx);
- my = ff_h263_round_chroma(my);
-
- sx = mx & s_mask;
- sy = my & s_mask;
- src_x = s->mb_x * block_s + (mx >> lowres + 1);
- src_y = s->mb_y * block_s + (my >> lowres + 1);
-
- offset = src_y * s->uvlinesize + src_x;
- ptr = ref_picture[1] + offset;
- if (s->flags & CODEC_FLAG_EMU_EDGE) {
- if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
- (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
- s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
- 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
- ptr = s->edge_emu_buffer;
- emu = 1;
- }
- }
- sx = (sx << 2) >> lowres;
- sy = (sy << 2) >> lowres;
- pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
-
- ptr = ref_picture[2] + offset;
- if (emu) {
- s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
- src_x, src_y, h_edge_pos, v_edge_pos);
- ptr = s->edge_emu_buffer;
- }
- pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
-}
-
-/**
- * motion compensation of a single macroblock
- * @param s context
- * @param dest_y luma destination pointer
- * @param dest_cb chroma cb/u destination pointer
- * @param dest_cr chroma cr/v destination pointer
- * @param dir direction (0->forward, 1->backward)
- * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
- * @param pix_op halfpel motion compensation function (average or put normally)
- * the motion vectors are taken from s->mv and the MV type from s->mv_type
- */
-static inline void MPV_motion_lowres(MpegEncContext *s,
- uint8_t *dest_y, uint8_t *dest_cb,
- uint8_t *dest_cr,
- int dir, uint8_t **ref_picture,
- h264_chroma_mc_func *pix_op)
-{
- int mx, my;
- int mb_x, mb_y, i;
- const int lowres = s->avctx->lowres;
- const int block_s = 8 >>lowres;
-
- mb_x = s->mb_x;
- mb_y = s->mb_y;
-
- switch (s->mv_type) {
- case MV_TYPE_16X16:
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 0, 0, 0,
- ref_picture, pix_op,
- s->mv[dir][0][0], s->mv[dir][0][1],
- 2 * block_s, mb_y);
- break;
- case MV_TYPE_8X8:
- mx = 0;
- my = 0;
- for (i = 0; i < 4; i++) {
- hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
- s->linesize) * block_s,
- ref_picture[0], 0, 0,
- (2 * mb_x + (i & 1)) * block_s,
- (2 * mb_y + (i >> 1)) * block_s,
- s->width, s->height, s->linesize,
- s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
- block_s, block_s, pix_op,
- s->mv[dir][i][0], s->mv[dir][i][1]);
-
- mx += s->mv[dir][i][0];
- my += s->mv[dir][i][1];
- }
-
- if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
- chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
- pix_op, mx, my);
- break;
- case MV_TYPE_FIELD:
- if (s->picture_structure == PICT_FRAME) {
- /* top field */
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 1, 0, s->field_select[dir][0],
- ref_picture, pix_op,
- s->mv[dir][0][0], s->mv[dir][0][1],
- block_s, mb_y);
- /* bottom field */
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 1, 1, s->field_select[dir][1],
- ref_picture, pix_op,
- s->mv[dir][1][0], s->mv[dir][1][1],
- block_s, mb_y);
- } else {
- if (s->picture_structure != s->field_select[dir][0] + 1 &&
- s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
- ref_picture = s->current_picture_ptr->f.data;
-
- }
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 0, 0, s->field_select[dir][0],
- ref_picture, pix_op,
- s->mv[dir][0][0],
- s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
- }
- break;
- case MV_TYPE_16X8:
- for (i = 0; i < 2; i++) {
- uint8_t **ref2picture;
-
- if (s->picture_structure == s->field_select[dir][i] + 1 ||
- s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
- ref2picture = ref_picture;
- } else {
- ref2picture = s->current_picture_ptr->f.data;
- }
-
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 0, 0, s->field_select[dir][i],
- ref2picture, pix_op,
- s->mv[dir][i][0], s->mv[dir][i][1] +
- 2 * block_s * i, block_s, mb_y >> 1);
-
- dest_y += 2 * block_s * s->linesize;
- dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
- dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
- }
- break;
- case MV_TYPE_DMV:
- if (s->picture_structure == PICT_FRAME) {
- for (i = 0; i < 2; i++) {
- int j;
- for (j = 0; j < 2; j++) {
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 1, j, j ^ i,
- ref_picture, pix_op,
- s->mv[dir][2 * i + j][0],
- s->mv[dir][2 * i + j][1],
- block_s, mb_y);
- }
- pix_op = s->dsp.avg_h264_chroma_pixels_tab;
- }
- } else {
- for (i = 0; i < 2; i++) {
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 0, 0, s->picture_structure != i + 1,
- ref_picture, pix_op,
- s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
- 2 * block_s, mb_y >> 1);
-
- // after put we make avg of the same block
- pix_op = s->dsp.avg_h264_chroma_pixels_tab;
-
- // opposite parity is always in the same
- // frame if this is second field
- if (!s->first_field) {
- ref_picture = s->current_picture_ptr->f.data;
- }
- }
- }
- break;
- default:
- assert(0);
- }