/*
 * RV40 decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder
 */

#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "golomb.h"

#include "rv34.h"
#include "rv40vlc2.h"
#include "rv34data.h"
#include "rv40data.h"

static VLC aic_top_vlc;
static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM];
static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS];
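
/* Offsets of the individual AIC mode 2 VLCs inside the shared static table
 * filled in rv40_init_tables(): each VLC gets the slice between consecutive
 * entries, and the final entry (11814) matches the size of aic_mode2_table. */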
static const int16_t mode2_offs[] = {
       0,  614, 1222, 1794, 2410, 3014, 3586, 4202, 4792, 5382, 5966, 6542,
    7138, 7716, 8292, 8864, 9444, 10030, 10642, 11212, 11814
};

/**
 * Initialize all tables.
 */
static av_cold void rv40_init_tables(void)
{
    int i;
    static VLC_TYPE aic_table[1 << AIC_TOP_BITS][2];
    static VLC_TYPE aic_mode1_table[AIC_MODE1_NUM << AIC_MODE1_BITS][2];
    static VLC_TYPE aic_mode2_table[11814][2];
    static VLC_TYPE ptype_table[NUM_PTYPE_VLCS << PTYPE_VLC_BITS][2];
    static VLC_TYPE btype_table[NUM_BTYPE_VLCS << BTYPE_VLC_BITS][2];

    aic_top_vlc.table           = aic_table;
    aic_top_vlc.table_allocated = 1 << AIC_TOP_BITS;
    init_vlc(&aic_top_vlc, AIC_TOP_BITS, AIC_TOP_SIZE,
             rv40_aic_top_vlc_bits,  1, 1,
             rv40_aic_top_vlc_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
    for(i = 0; i < AIC_MODE1_NUM; i++){
        // Every tenth VLC table is empty
        if((i % 10) == 9) continue;
        aic_mode1_vlc[i].table           = &aic_mode1_table[i << AIC_MODE1_BITS];
        aic_mode1_vlc[i].table_allocated = 1 << AIC_MODE1_BITS;
        init_vlc(&aic_mode1_vlc[i], AIC_MODE1_BITS, AIC_MODE1_SIZE,
                 aic_mode1_vlc_bits[i],  1, 1,
                 aic_mode1_vlc_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    }
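    /* aic_mode1_vlc[] is indexed as top + 10 * left in rv40_decode_intra_types();
     * since there are only nine 4x4 intra prediction modes, an index with
     * i % 10 == 9 can never be selected, which is presumably why those tables
     * are left uninitialized above. */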
    for(i = 0; i < AIC_MODE2_NUM; i++){
        aic_mode2_vlc[i].table           = &aic_mode2_table[mode2_offs[i]];
        aic_mode2_vlc[i].table_allocated = mode2_offs[i + 1] - mode2_offs[i];
        init_vlc(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE,
                 aic_mode2_vlc_bits[i],  1, 1,
                 aic_mode2_vlc_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
    }
    for(i = 0; i < NUM_PTYPE_VLCS; i++){
        ptype_vlc[i].table           = &ptype_table[i << PTYPE_VLC_BITS];
        ptype_vlc[i].table_allocated = 1 << PTYPE_VLC_BITS;
        ff_init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
                           ptype_vlc_bits[i],  1, 1,
                           ptype_vlc_codes[i], 1, 1,
                           ptype_vlc_syms,     1, 1, INIT_VLC_USE_NEW_STATIC);
    }
    for(i = 0; i < NUM_BTYPE_VLCS; i++){
        btype_vlc[i].table           = &btype_table[i << BTYPE_VLC_BITS];
        btype_vlc[i].table_allocated = 1 << BTYPE_VLC_BITS;
        ff_init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
                           btype_vlc_bits[i],  1, 1,
                           btype_vlc_codes[i], 1, 1,
                           btype_vlc_syms,     1, 1, INIT_VLC_USE_NEW_STATIC);
    }
}
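
/* All of the VLCs above are built with INIT_VLC_USE_NEW_STATIC into storage
 * preallocated in this function, so rv40_init_tables() only needs to run
 * once; rv40_decode_init() guards the call by checking aic_top_vlc.bits. */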

/**
 * Get stored dimension from bitstream.
 *
 * If the width/height is the standard one then it's coded as a 3-bit index.
 * Otherwise it is coded as escaped 8-bit portions.
 */
static int get_dimension(GetBitContext *gb, const int *dim)
{
    int t   = get_bits(gb, 3);
    int val = dim[t];
    if(val < 0)
        val = dim[get_bits1(gb) - val];
    if(!val){
        do{
            if (get_bits_left(gb) < 8)
                return AVERROR_INVALIDDATA;
            t    = get_bits(gb, 8);
            val += t << 2;
        }while(t == 0xFF);
    }
    return val;
}
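
/* Worked example for the escape path reconstructed above (an assumption, not
 * taken verbatim from the original source): a 3-bit index selecting a zero
 * entry is followed by 8-bit chunks, each adding 4 * chunk to the value, and
 * reading stops after the first chunk that is not 0xFF.  The byte sequence
 * 0xFF, 0x2D would therefore give 4 * 255 + 4 * 45 = 1200. */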

/**
 * Get encoded picture size - usually this is called from rv40_parse_slice_header.
 */
static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
{
    *w = get_dimension(gb, rv40_standard_widths);
    *h = get_dimension(gb, rv40_standard_heights);
}

static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
{
    int mb_bits;
    int w = r->s.width, h = r->s.height;
    int mb_size;
    int ret;

    memset(si, 0, sizeof(SliceInfo));
    if(get_bits(gb, 1))
        return AVERROR_INVALIDDATA;
    si->type  = get_bits(gb, 2);
    if(si->type == 1) si->type = 0;
    si->quant = get_bits(gb, 5);
    if(get_bits(gb, 2))
        return AVERROR_INVALIDDATA;
    si->vlc_set = get_bits(gb, 2);
    skip_bits1(gb);
    si->pts = get_bits(gb, 13);
    if(!si->type || !get_bits1(gb))
        rv40_parse_picture_size(gb, &w, &h);
    if ((ret = av_image_check_size(w, h, 0, r->s.avctx)) < 0)
        return ret;
    si->width  = w;
    si->height = h;
    mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
    mb_bits = ff_rv34_get_start_offset(gb, mb_size);
    si->start = get_bits(gb, mb_bits);

    return 0;
}
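
/* Slice header layout as parsed above (the fields not present in the original
 * excerpt are reconstructions): a 1-bit marker that must be zero, a 2-bit
 * picture type (1 is folded into 0), a 5-bit quantiser, 2 bits that must be
 * zero, a 2-bit VLC set index, one skipped bit, a 13-bit PTS, an optional
 * coded picture size, and finally the slice start offset whose width depends
 * on the macroblock count. */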

/**
 * Decode 4x4 intra types array.
 */
static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
    MpegEncContext *s = &r->s;
    int i, j, k, v;
    int A, B, C;
    int pattern;
    int8_t *ptr;

    for(i = 0; i < 4; i++, dst += r->intra_types_stride){
        if(!i && s->first_slice_line){
            pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1);
            dst[0] = (pattern >> 2) & 2;
            dst[1] = (pattern >> 1) & 2;
            dst[2] =  pattern       & 2;
            dst[3] = (pattern << 1) & 2;
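            /* The VLC yields a 4-bit pattern; each bit selects between intra
             * modes 0 and 2 for one of the four blocks in the top row. */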
            continue;
        }
        ptr = dst;
        for(j = 0; j < 4; j++){
            /* Coefficients are read using VLC chosen by the prediction pattern
             * The first one (used for retrieving a pair of coefficients) is
             * constructed from the top, top right and left coefficients
             * The second one (used for retrieving only one coefficient) is
             * top + 10 * left coefficient
             */
            A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row
            B = ptr[-r->intra_types_stride];
            C = ptr[-1];
            pattern = A + B * (1 << 4) + C * (1 << 8);
            for(k = 0; k < MODE2_PATTERNS_NUM; k++)
                if(pattern == rv40_aic_table_index[k])
                    break;
            if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients
                v = get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2);
                *ptr++ = v/9;
                *ptr++ = v%9;
                j++;
            }else{
                if(B != -1 && C != -1)
                    v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1);
                else{ // tricky decoding
                    v = 0;
                    switch(C){
                    case -1: // code 0 -> 1, 1 -> 0
                        if(B < 2)
                            v = get_bits1(gb) ^ 1;
                        break;
                    case  0:
                    case  2: // code 0 -> 2, 1 -> 0
                        v = (get_bits1(gb) ^ 1) << 1;
                        break;
                    }
                }
                *ptr++ = v;
            }
        }
    }
    return 0;
}
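
/* Note on the "tricky decoding" branch above: when the top (B) or left (C)
 * neighbour lies outside the decoded area it is marked as -1, and the mode is
 * then coded with at most one raw bit instead of a VLC, presumably because
 * the missing neighbour restricts the prediction to a small set of modes. */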

/**
 * Decode macroblock information.
 */
static int rv40_decode_mb_info(RV34DecContext *r)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q, i;
    int prev_type = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if(!r->s.mb_skip_run) {
        r->s.mb_skip_run = get_interleaved_ue_golomb(gb) + 1;
        if(r->s.mb_skip_run > (unsigned)s->mb_num)
            return -1;
    }

    if(--r->s.mb_skip_run)
        return RV34_MB_SKIP;

    if(r->avail_cache[6-4]){
        int blocks[RV34_MB_TYPES] = {0};
        int count = 0;
        if(r->avail_cache[6-1])
            blocks[r->mb_type[mb_pos - 1]]++;
        blocks[r->mb_type[mb_pos - s->mb_stride]]++;
        if(r->avail_cache[6-2])
            blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
        if(r->avail_cache[6-5])
            blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
        for(i = 0; i < RV34_MB_TYPES; i++){
            if(blocks[i] > count){
                count = blocks[i];
                prev_type = i;
                if(count>1)
                    break;
            }
        }
    } else if (r->avail_cache[6-1])
        prev_type = r->mb_type[mb_pos - 1];

    if(s->pict_type == AV_PICTURE_TYPE_P){
        prev_type = block_num_to_ptype_vlc_num[prev_type];
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
    }else{
        prev_type = block_num_to_btype_vlc_num[prev_type];
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
    }
    return 0;
}
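
/* Macroblock info decoding, as seen above: a skip run is maintained via an
 * interleaved Exp-Golomb count; otherwise the most common type among the
 * already decoded neighbours selects the context (the PTYPE/BTYPE VLC), and
 * a symbol >= PBTYPE_ESCAPE signals a quantiser change, which this decoder
 * only reports as an error. */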

enum RV40BlockPos{
    POS_CUR,
    POS_TOP,
    POS_LEFT,
    POS_BOTTOM,
};

#define MASK_CUR          0x0001
#define MASK_RIGHT        0x0008
#define MASK_BOTTOM       0x0010
#define MASK_TOP          0x1000
#define MASK_Y_TOP_ROW    0x000F
#define MASK_Y_LAST_ROW   0xF000
#define MASK_Y_LEFT_COL   0x1111
#define MASK_Y_RIGHT_COL  0x8888
#define MASK_C_TOP_ROW    0x0003
#define MASK_C_LAST_ROW   0x000C
#define MASK_C_LEFT_COL   0x0005
#define MASK_C_RIGHT_COL  0x000A
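
/* The luma masks index the 16 4x4 subblocks of a macroblock as a 4x4 grid,
 * bit (x + 4*y): 0x000F is the top row, 0xF000 the bottom row, 0x1111 the
 * left column and 0x8888 the right column.  The chroma masks do the same for
 * the 2x2 grid of 4x4 blocks in each 8x8 chroma plane (bit x + 2*y). */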

static const int neighbour_offs_x[4] = { 0,  0, -1, 0 };
static const int neighbour_offs_y[4] = { 0, -1,  0, 1 };

static void rv40_adaptive_loop_filter(RV34DSPContext *rdsp,
                                      uint8_t *src, int stride, int dmode,
                                      int lim_q1, int lim_p1,
                                      int alpha, int beta, int beta2,
                                      int chroma, int edge, int dir)
{
    int filter_p1, filter_q1;
    int strong;
    int lims;

    strong = rdsp->rv40_loop_filter_strength[dir](src, stride, beta, beta2,
                                                  edge, &filter_p1, &filter_q1);

    lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;

    if (strong) {
        rdsp->rv40_strong_loop_filter[dir](src, stride, alpha,
                                           lims, dmode, chroma);
    } else if (filter_p1 & filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, 1, 1, alpha, beta,
                                         lims, lim_q1, lim_p1);
    } else if (filter_p1 | filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, filter_p1, filter_q1,
                                         alpha, beta, lims >> 1, lim_q1 >> 1,
                                         lim_p1 >> 1);
    }
}
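
/* The helper above implements the per-edge filter decision: the strength
 * callback classifies the edge, strong filtering is applied where it reports
 * a smooth (typically intra) edge, and otherwise the weak filter is run with
 * limits derived from the clip values of the two adjacent blocks; if only one
 * side is flagged, the limits are halved. */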

/**
 * RV40 loop filtering function
 */
static void rv40_loop_filter(RV34DecContext *r, int row)
{
    MpegEncContext *s = &r->s;
    int mb_pos, mb_x;
    int i, j, k;
    uint8_t *Y, *C;
    int alpha, beta, betaY, betaC;
    int q;
    int mbtype[4];   ///< current macroblock and its neighbours types
    /**
     * flags indicating that macroblock can be filtered with strong filter
     * it is set only for intra coded MB and MB with DCs coded separately
     */
    int mb_strong[4];
    int clip[4];     ///< MB filter clipping value calculated from filtering strength
    /**
     * coded block patterns for luma part of current macroblock and its neighbours
     * Format:
     * LSB corresponds to the top left block,
     * each nibble represents one row of subblocks.
     */
    int cbp[4];
    /**
     * coded block patterns for chroma part of current macroblock and its neighbours
     * Format is the same as for luma with two subblocks in a row.
     */
    int uvcbp[4][2];
    /**
     * This mask represents the pattern of luma subblocks that should be filtered
     * in addition to the coded ones because they lie at the edge of
     * 8x8 block with different enough motion vectors
     */
    unsigned mvmasks[4];

    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int mbtype = s->current_picture_ptr->mb_type[mb_pos];
        if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
            r->cbp_luma  [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
        if(IS_INTRA(mbtype))
            r->cbp_chroma[mb_pos] = 0xFF;
    }
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int y_h_deblock, y_v_deblock;
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        unsigned y_to_deblock;
        int c_to_deblock[2];

        q = s->current_picture_ptr->qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
        beta  = rv40_beta_tab [q];
        betaY = betaC = beta * 3;
        if(s->width * s->height <= 176*144)
            betaY += beta;

        avail[0] = 1;
        avail[1] = row;
        avail[2] = mb_x;
        avail[3] = row < s->mb_height - 1;
        for(i = 0; i < 4; i++){
            if(avail[i]){
                int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
                mvmasks[i] = r->deblock_coefs[pos];
                mbtype [i] = s->current_picture_ptr->mb_type[pos];
                cbp    [i] = r->cbp_luma[pos];
                uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
                uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
            }else{
                mvmasks[i] = 0;
                mbtype [i] = mbtype[0];
                cbp    [i] = 0;
                uvcbp[i][0] = uvcbp[i][1] = 0;
            }
            mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
            clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
        }
        y_to_deblock =  mvmasks[POS_CUR]
                     | (mvmasks[POS_BOTTOM] << 16);
        /* This pattern contains bits signalling that horizontal edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_h_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                     <<  4) & ~MASK_Y_TOP_ROW)
                    | ((cbp[POS_TOP]  & MASK_Y_LAST_ROW) >> 12);
        /* This pattern contains bits signalling that vertical edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_v_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                      << 1) & ~MASK_Y_LEFT_COL)
                    | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
        if(!mb_x)
            y_v_deblock &= ~MASK_Y_LEFT_COL;
        if(!row)
            y_h_deblock &= ~MASK_Y_TOP_ROW;
        if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
            y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
        /* Calculating chroma patterns is similar and easier since there is
         * no motion vector pattern for them.
         */
        for(i = 0; i < 2; i++){
            c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
            c_v_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
                           | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
            c_h_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2)
                           |  (uvcbp[POS_CUR][i] << 2);
            if(!mb_x)
                c_v_deblock[i] &= ~MASK_C_LEFT_COL;
            if(!row)
                c_h_deblock[i] &= ~MASK_C_TOP_ROW;
            if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
                c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
        }

        for(j = 0; j < 16; j += 4){
            Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
            for(i = 0; i < 4; i++, Y += 4){
                int ij = i + j;
                int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                int dither = j ? ij : i*4;

                // if bottom block is coded then we can filter its top edge
                // (or bottom edge of this block, which is the same)
                if(y_h_deblock & (MASK_BOTTOM << ij)){
                    rv40_adaptive_loop_filter(&r->rdsp, Y+4*s->linesize,
                                              s->linesize, dither,
                                              y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
                                              clip_cur, alpha, beta, betaY,
                                              0, 0, 0);
                }
                // filter left block edge in ordinary mode (with low filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                    if(!i)
                        clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    else
                        clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 0, 1);
                }
                // filter top edge of the current macroblock when filtering strength is high
                if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
                                              alpha, beta, betaY, 0, 1, 0);
                }
                // filter left block edge in edge mode (with high filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                    clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 1, 1);
                }
            }
        }
        for(k = 0; k < 2; k++){
            for(j = 0; j < 2; j++){
                C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                for(i = 0; i < 2; i++, C += 4){
                    int ij = i + j*2;
                    int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                    if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
                        int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C+4*s->uvlinesize, s->uvlinesize, i*8,
                                                  clip_bot,
                                                  clip_cur,
                                                  alpha, beta, betaC, 1, 0, 0);
                    }
                    if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                        if(!i)
                            clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        else
                            clip_left = c_to_deblock[k]    & (MASK_CUR << (ij-1))  ? clip[POS_CUR]  : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 0, 1);
                    }
                    if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                        int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, i*8,
                                                  clip_cur,
                                                  clip_top,
                                                  alpha, beta, betaC, 1, 1, 0);
                    }
                    if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                        clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 1, 1);
                    }
                }
            }
        }
    }
}
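
/* The function above works in two passes over the macroblock row: first,
 * intra and separate-DC macroblocks are marked as fully coded so that all of
 * their edges become filterable; then every macroblock derives horizontal and
 * vertical deblocking masks from its own and its neighbours' coded block
 * patterns and motion-vector masks, and the luma and chroma edges selected by
 * those masks are filtered. */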

/**
 * Initialize decoder.
 */
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;
    int ret;

    r->rv30 = 0;
    if ((ret = ff_rv34_decode_init(avctx)) < 0)
        return ret;
    if(!aic_top_vlc.bits)
        rv40_init_tables();
    r->parse_slice_header = rv40_parse_slice_header;
    r->decode_intra_types = rv40_decode_intra_types;
    r->decode_mb_info     = rv40_decode_mb_info;
    r->loop_filter        = rv40_loop_filter;
    r->luma_dc_quant_i = rv40_luma_dc_quant[0];
    r->luma_dc_quant_p = rv40_luma_dc_quant[1];
    return 0;
}

AVCodec ff_rv40_decoder = {
    .name                  = "rv40",
    .long_name             = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_RV40,
    .priv_data_size        = sizeof(RV34DecContext),
    .init                  = rv40_decode_init,
    .close                 = ff_rv34_decode_end,
    .decode                = ff_rv34_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
                             AV_CODEC_CAP_FRAME_THREADS,
    .flush                 = ff_mpeg_flush,
    .pix_fmts              = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NONE
    },
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_update_thread_context),
};