/**
* Initialize all tables.
*/
-static void rv34_init_tables()
+static av_cold void rv34_init_tables()
{
int i, j, k;
}
}
+/** Difference between two PTS values, wrapped into the 13-bit PTS space (0..8191). */
+#define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
+
/**
 * Calculate motion vector component that should be added for direct blocks.
 */
-static int calc_add_mv(MpegEncContext *s, int dir, int component)
+static int calc_add_mv(RV34DecContext *r, int dir, int val)
{
- int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
- int sum;
+ // Temporal distance between the two reference frames, in 13-bit PTS space.
+ int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
+ // Distance from the current frame to the reference used for this direction:
+ // backward (dir != 0) measures to the next frame, forward to the last one.
+ int dist = dir ? GET_PTS_DIFF(r->next_pts, r->cur_pts) : GET_PTS_DIFF(r->cur_pts, r->last_pts);
- sum = (s->next_picture_ptr->motion_val[0][mv_pos][component] +
-        s->next_picture_ptr->motion_val[0][mv_pos + 1][component] +
-        s->next_picture_ptr->motion_val[0][mv_pos + s->b8_stride][component] +
-        s->next_picture_ptr->motion_val[0][mv_pos + s->b8_stride + 1][component]) >> 2;
- return dir ? -(sum >> 1) : ((sum + 1) >> 1);
+ // Coincident reference frames: no temporal scaling possible, contribute nothing
+ // (also guards the divisions below against refdist == 0).
+ if(!refdist) return 0;
+ // Scale the co-located MV component by dist/refdist. Note the asymmetric
+ // rounding: forward rounds up (for non-negative val), backward truncates
+ // before negation — NOTE(review): presumably mandated by the RV30/40 spec.
+ if(!dir)
+     return (val * dist + refdist - 1) / refdist;
+ else
+     return -(val * dist / refdist);
}
/**
mx += r->dmv[dir][0];
my += r->dmv[dir][1];
- if(block_type == RV34_MB_B_DIRECT){
- mx += calc_add_mv(s, dir, 0);
- my += calc_add_mv(s, dir, 1);
- }
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
r->s.dsp.avg_h264_chroma_pixels_tab);
}
+/**
+ * Perform bidirectional motion compensation on a skipped B-frame macroblock,
+ * one 8x8 subblock at a time (put forward prediction, then average in the
+ * backward one), using RV30 third-pel or H.264 quarter-pel interpolation.
+ */
+static void rv34_mc_2mv_skip(RV34DecContext *r)
+{
+    int i, j;
+
+    for(j = 0; j < 2; j++)
+        for(i = 0; i < 2; i++){
+            rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
+                    r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
+                            : r->s.dsp.put_h264_qpel_pixels_tab,
+                    r->s.dsp.put_h264_chroma_pixels_tab);
+            rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
+                    r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab
+                            : r->s.dsp.avg_h264_qpel_pixels_tab,
+                    r->s.dsp.avg_h264_chroma_pixels_tab);
+        }
+}
+
/** number of motion vectors in each macroblock type */
static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
{
MpegEncContext *s = &r->s;
GetBitContext *gb = &s->gb;
- int i;
+ int i, j, k, l;
+ int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
+ int next_bt;
memset(r->dmv, 0, sizeof(r->dmv));
for(i = 0; i < num_mvs[block_type]; i++){
fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
return 0;
case RV34_MB_SKIP:
- if(s->pict_type == P_TYPE){
+ if(s->pict_type == FF_P_TYPE){
fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
}
case RV34_MB_B_DIRECT:
- rv34_pred_mv_b (r, RV34_MB_B_DIRECT, 0);
- rv34_pred_mv_b (r, RV34_MB_B_DIRECT, 1);
- rv34_mc_2mv (r, RV34_MB_B_DIRECT);
+        // Surprisingly, direct blocks reuse the motion scheme of the co-located
+        // macroblock in the next reference frame.
+ next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
+ for(j = 0; j < 2; j++)
+ for(i = 0; i < 2; i++)
+ for(k = 0; k < 2; k++)
+ for(l = 0; l < 2; l++)
+ s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
+ if(IS_16X16(next_bt)) //we can use whole macroblock MC
+ rv34_mc_2mv(r, block_type);
+ else
+ rv34_mc_2mv_skip(r);
+ fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
break;
case RV34_MB_P_16x16:
case RV34_MB_P_MIX16x16:
s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
- if(s->pict_type == P_TYPE)
+ if(s->pict_type == FF_P_TYPE)
r->mb_type[mb_pos] = RV34_MB_P_16x16;
- if(s->pict_type == B_TYPE)
+ if(s->pict_type == FF_B_TYPE)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
r->cbp_luma [s->mb_x + s->mb_y * s->mb_stride] = cbp;
r->cbp_chroma[s->mb_x + s->mb_y * s->mb_stride] = cbp >> 16;
+ s->current_picture.qscale_table[s->mb_x + s->mb_y * s->mb_stride] = s->qscale;
if(cbp == -1)
return -1;
return si1->type != si2->type ||
si1->start >= si2->start ||
si1->width != si2->width ||
- si1->height != si2->height;
+ si1->height != si2->height||
+ si1->pts != si2->pts;
}
static int rv34_decode_slice(RV34DecContext *r, int end, uint8_t* buf, int buf_size)
r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
}
- s->pict_type = r->si.type ? r->si.type : I_TYPE;
+ s->pict_type = r->si.type ? r->si.type : FF_I_TYPE;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
s->current_picture_ptr = &s->current_picture;
+ r->cur_pts = r->si.pts;
+ if(s->pict_type != FF_B_TYPE){
+ r->last_pts = r->next_pts;
+ r->next_pts = r->cur_pts;
+ }
s->mb_x = s->mb_y = 0;
}
}
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
- return (s->mb_y == s->mb_height);
+ return s->mb_y == s->mb_height;
}
/** @} */ // recons group end
/**
* Initialize decoder.
*/
-int ff_rv34_decode_init(AVCodecContext *avctx)
+av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
{
RV34DecContext *r = avctx->priv_data;
MpegEncContext *s = &r->s;
r->loop_filter(r);
ff_er_frame_end(s);
MPV_frame_end(s);
- if (s->pict_type == B_TYPE || s->low_delay) {
+ if (s->pict_type == FF_B_TYPE || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;
return buf_size;
}
-int ff_rv34_decode_end(AVCodecContext *avctx)
+av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
{
RV34DecContext *r = avctx->priv_data;