2 * Intel Indeo 3 (IV31, IV32, etc.) video decoder for ffmpeg
3 * written, produced, and directed by Alan Smithee
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #include "mpegvideo.h"
32 #include "indeo3data.h"
/* Per-frame plane-buffer bookkeeping (the struct's opening line is not
 * visible in this chunk — presumably a YUV buffer typedef; confirm). */
39 unsigned char *the_buf;      /* single backing allocation shared by all planes */
40 unsigned int the_buf_size;   /* size of the_buf in bytes (0 = not allocated) */
41 unsigned short y_w, y_h;     /* padded luma plane width/height */
42 unsigned short uv_w, uv_h;   /* padded chroma plane width/height */
/* Decoder private context (several members are not visible in this chunk). */
45 typedef struct Indeo3DecodeContext {
46 AVCodecContext *avctx;       /* owning codec context, used for logging/flags */
54 unsigned char *ModPred;      /* 8 x 128-byte predictor LUTs, built in build_modpred() */
55 unsigned short *corrector_type; /* 24 x 256 corrector classification table */
56 } Indeo3DecodeContext;
/* Per-table thresholds: in build_modpred(), byte codes below
 * corrector_type_0[i] are classified as type 1 (apply corrector). */
58 static int corrector_type_0[24] = {
59 195, 159, 133, 115, 101, 93, 87, 77,
60 195, 159, 133, 115, 101, 93, 87, 77,
61 128, 79, 79, 79, 79, 79, 79, 79
/* Escape classes for byte codes 248..255 (indexed by j - 248). */
64 static int corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
/* Build the decoder's two lookup tables:
 *  - s->ModPred: 8 tables of 128 bytes; table d maps a 7-bit pixel value
 *    to a quantized/rounded predictor for delta set d.  The ternary
 *    special cases pin entries that the generic formula would push out
 *    of the 0..255 byte range.
 *  - s->corrector_type: 24 tables of 256 entries classifying each byte
 *    code: below the per-table threshold -> 1, codes >= 248 -> escape
 *    class from corrector_type_2 (with one exception for table 16), else 0.
 * NOTE(review): both av_malloc() results are used without a NULL check —
 * an allocation failure here would crash; confirm callers or add checks. */
66 static void build_modpred(Indeo3DecodeContext *s)
70 s->ModPred = (unsigned char *) av_malloc (8 * 128);
72 for (i=0; i < 128; ++i) {
/* Each row applies a different step/rounding formula (mod 2..9). */
73 s->ModPred[i+0*128] = (i > 126) ? 254 : 2*((i + 1) - ((i + 1) % 2));
74 s->ModPred[i+1*128] = (i == 7) ? 20 : ((i == 119 || i == 120)
75 ? 236 : 2*((i + 2) - ((i + 1) % 3)));
76 s->ModPred[i+2*128] = (i > 125) ? 248 : 2*((i + 2) - ((i + 2) % 4));
77 s->ModPred[i+3*128] = 2*((i + 1) - ((i - 3) % 5));
78 s->ModPred[i+4*128] = (i == 8) ? 20 : 2*((i + 1) - ((i - 3) % 6));
79 s->ModPred[i+5*128] = 2*((i + 4) - ((i + 3) % 7));
80 s->ModPred[i+6*128] = (i > 123) ? 240 : 2*((i + 4) - ((i + 4) % 8));
81 s->ModPred[i+7*128] = 2*((i + 5) - ((i + 4) % 9));
84 s->corrector_type = (unsigned short *) av_malloc (24 * 256 * sizeof(unsigned short));
/* Classify every (table, byte-code) pair once, up front. */
86 for (i=0; i < 24; ++i) {
87 for (j=0; j < 256; ++j) {
88 s->corrector_type[i*256+j] = (j < corrector_type_0[i])
89 ? 1 : ((j < 248 || (i == 16 && j == 248))
90 ? 0 : corrector_type_2[j - 248]);
/* Forward declaration: the recursive-strip plane decoder defined below. */
95 static void iv_Decode_Chunk(Indeo3DecodeContext *s, unsigned char *cur,
96 unsigned char *ref, int width, int height, unsigned char *buf1,
97 long fflags2, unsigned char *hdr,
98 unsigned char *buf2, int min_width_160);
/* NOTE(review): classic double-evaluation macro — unsafe with
 * side-effecting arguments; only used with plain values in this file. */
101 #define min(a,b) ((a) < (b) ? (a) : (b))
104 /* ---------------------------------------------------------------------- */
/* Allocate (or grow) the single backing buffer holding the Y/U/V planes
 * of both the current and the reference frame, with one guard row of
 * mid-gray (0x80) pixels before each plane.  Plane dimensions are the
 * codec dimensions rounded up to a multiple of 4; chroma is 1/4 size in
 * each direction (YUV410). */
105 static void iv_alloc_frames(Indeo3DecodeContext *s)
107 int luma_width, luma_height, luma_pixels, chroma_width, chroma_height,
109 unsigned int bufsize;
/* Round dimensions up to the next multiple of 4. */
111 luma_width = (s->width + 3) & (~3);
112 luma_height = (s->height + 3) & (~3);
114 s->iv_frame[0].y_w = s->iv_frame[0].y_h =
115 s->iv_frame[0].the_buf_size = 0;
116 s->iv_frame[1].y_w = s->iv_frame[1].y_h =
117 s->iv_frame[1].the_buf_size = 0;
118 s->iv_frame[1].the_buf = NULL;
120 chroma_width = ((luma_width >> 2) + 3) & (~3);
121 chroma_height = ((luma_height>> 2) + 3) & (~3);
122 luma_pixels = luma_width * luma_height;
123 chroma_pixels = chroma_width * chroma_height;
/* Two luma planes + guard rows, plus four chroma planes (+ guards). */
125 bufsize = luma_pixels * 2 + luma_width * 3 +
126 (chroma_pixels + chroma_width) * 4;
/* First call allocates, later calls (re)grow the shared buffer. */
128 if((s->iv_frame[0].the_buf =
129 (s->iv_frame[0].the_buf_size == 0 ? av_malloc(bufsize) :
130 av_realloc(s->iv_frame[0].the_buf, bufsize))) == NULL)
132 s->iv_frame[0].y_w = s->iv_frame[1].y_w = luma_width;
133 s->iv_frame[0].y_h = s->iv_frame[1].y_h = luma_height;
134 s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
135 s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;
136 s->iv_frame[0].the_buf_size = bufsize;
/* Carve the six plane pointers out of the one allocation; each plane
 * starts one guard row past the previous plane's end. */
138 s->iv_frame[0].Ybuf = s->iv_frame[0].the_buf + luma_width;
139 i = luma_pixels + luma_width * 2;
140 s->iv_frame[1].Ybuf = s->iv_frame[0].the_buf + i;
141 i += (luma_pixels + luma_width);
142 s->iv_frame[0].Ubuf = s->iv_frame[0].the_buf + i;
143 i += (chroma_pixels + chroma_width);
144 s->iv_frame[1].Ubuf = s->iv_frame[0].the_buf + i;
145 i += (chroma_pixels + chroma_width);
146 s->iv_frame[0].Vbuf = s->iv_frame[0].the_buf + i;
147 i += (chroma_pixels + chroma_width);
148 s->iv_frame[1].Vbuf = s->iv_frame[0].the_buf + i;
/* Fill the guard rows preceding each plane with mid-gray (0x80) so
 * prediction from "above the first row" reads a neutral value. */
150 for(i = 1; i <= luma_width; i++)
151 s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
152 s->iv_frame[0].Ubuf[-i] = 0x80;
154 for(i = 1; i <= chroma_width; i++) {
155 s->iv_frame[1].Ubuf[-i] = 0x80;
156 s->iv_frame[0].Vbuf[-i] = 0x80;
157 s->iv_frame[1].Vbuf[-i] = 0x80;
158 s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
162 /* ---------------------------------------------------------------------- */
/* Release the frame buffers and the corrector table, and reset all
 * per-frame bookkeeping to the unallocated state. */
163 static void iv_free_func(Indeo3DecodeContext *s)
167 for(i = 0 ; i < 2 ; i++) {
/* NOTE(review): av_free(NULL) is a no-op, so this guard is redundant. */
168 if(s->iv_frame[i].the_buf != NULL)
169 av_free(s->iv_frame[i].the_buf);
170 s->iv_frame[i].Ybuf = s->iv_frame[i].Ubuf =
171 s->iv_frame[i].Vbuf = NULL;
172 s->iv_frame[i].the_buf = NULL;
173 s->iv_frame[i].the_buf_size = 0;
174 s->iv_frame[i].y_w = s->iv_frame[i].y_h = 0;
175 s->iv_frame[i].uv_w = s->iv_frame[i].uv_h = 0;
179 av_free(s->corrector_type);
182 /* ---------------------------------------------------------------------- */
/* Parse one Indeo 3 frame: read the little-endian frame header (flags,
 * dimensions, per-plane data offsets), pick the current/reference frame
 * pair, then decode the Y plane and — unless CODEC_FLAG_GRAY — the V and
 * U planes via iv_Decode_Chunk().
 * NOTE(review): the header fields are read by casting unaligned buffer
 * pointers to uint16_t*/uint32_t* — an alignment and strict-aliasing
 * hazard on some targets; bytestream-style reads would be safer. */
183 static unsigned long iv_decode_frame(Indeo3DecodeContext *s,
184 unsigned char *buf, int buf_size)
186 unsigned int hdr_width, hdr_height,
187 chroma_width, chroma_height;
188 unsigned long fflags1, fflags2, fflags3, offs1, offs2, offs3, offs;
189 unsigned char *hdr_pos, *buf_pos;
194 fflags1 = le2me_16(*(uint16_t *)buf_pos);
196 fflags3 = le2me_32(*(uint32_t *)buf_pos);
198 fflags2 = *buf_pos++;
200 hdr_height = le2me_16(*(uint16_t *)buf_pos);
202 hdr_width = le2me_16(*(uint16_t *)buf_pos);
/* Reject absurd dimensions before touching any buffers. */
204 if(avcodec_check_dimensions(NULL, hdr_width, hdr_height))
208 chroma_height = ((hdr_height >> 2) + 3) & 0x7ffc;
209 chroma_width = ((hdr_width >> 2) + 3) & 0x7ffc;
210 offs1 = le2me_32(*(uint32_t *)buf_pos);
212 offs2 = le2me_32(*(uint32_t *)buf_pos);
214 offs3 = le2me_32(*(uint32_t *)buf_pos);
/* 0x80 apparently marks a null/skip frame — nothing to decode. */
217 if(fflags3 == 0x80) return 4;
/* Flag 0x200 selects which of the two frame buffers is "current";
 * the other becomes the prediction reference (ping-pong). */
219 if(fflags1 & 0x200) {
220 s->cur_frame = s->iv_frame + 1;
221 s->ref_frame = s->iv_frame;
223 s->cur_frame = s->iv_frame;
224 s->ref_frame = s->iv_frame + 1;
227 buf_pos = buf + 16 + offs1;
228 offs = le2me_32(*(uint32_t *)buf_pos);
231 iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, hdr_width,
232 hdr_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
233 min(hdr_width, 160));
235 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
238 buf_pos = buf + 16 + offs2;
239 offs = le2me_32(*(uint32_t *)buf_pos);
/* Note: V is decoded before U (offs2 -> Vbuf, offs3 -> Ubuf). */
242 iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
243 chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
244 min(chroma_width, 40));
246 buf_pos = buf + 16 + offs3;
247 offs = le2me_32(*(uint32_t *)buf_pos);
250 iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
251 chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
252 min(chroma_width, 40));
/* Member of the strip-descriptor struct (ustr_t) used by the chunk
 * decoder: 0 = horizontal split, nonzero = vertical split. */
265 long split_direction;
269 /* ---------------------------------------------------------------------- */
/* Bitstream helper macros for the chunk decoder below; they consume
 * bytes from buf1 and update the RLE state variables in place. */
271 #define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
272 if((lv1 & 0x80) != 0) { \
283 #define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
296 #define LP2_CHECK(buf1,rle_v3,lp2) \
297 if(lp2 == 0 && rle_v3 != 0) \
305 #define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
/* Decode one plane (Y, U or V) of an Indeo 3 frame.
 *
 * The plane is processed as a stack of "strips" (strip_tbl): a 2-bit
 * command stream recursively splits strips horizontally/vertically down
 * to 4x4 cells, then per-cell block commands select one of several
 * prediction/correction modes (the big switch cases 0/3/10/11 below).
 * Pixels are written four at a time through uint32_t stores; width_tbl
 * caches row strides in 32-bit units.
 *
 * cur/ref        — current and reference plane base pointers
 * width/height   — plane dimensions in pixels
 * buf1/buf2/hdr  — bitstream data, vector table and header positions
 * fflags2        — frame flags folded into the corrector-table index
 * min_width_160  — strip width granularity (160 for luma, 40 for chroma)
 *
 * NOTE(review): this function is extremely stateful and order-dependent;
 * the comments below only annotate structure, not every operation. */
313 static void iv_Decode_Chunk(Indeo3DecodeContext *s,
314 unsigned char *cur, unsigned char *ref, int width, int height,
315 unsigned char *buf1, long fflags2, unsigned char *hdr,
316 unsigned char *buf2, int min_width_160)
318 unsigned char bit_buf;
319 unsigned long bit_pos, lv, lv1, lv2;
320 long *width_tbl, width_tbl_arr[10];
321 signed char *ref_vectors;
322 unsigned char *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
323 uint32_t *cur_lp, *ref_lp;
324 const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
325 unsigned short *correction_type_sp[2];
326 ustr_t strip_tbl[20], *strip;
327 int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
328 rle_v1, rle_v2, rle_v3;
/* width_tbl[j] = j rows' worth of 32-bit words; index -1 is valid. */
334 width_tbl = width_tbl_arr + 1;
335 i = (width < 0 ? width + 3 : width)/4;
336 for(j = -1; j < 8; j++)
337 width_tbl[j] = i * j;
/* Find the start of the last full-width region of min_width_160 pixels. */
341 for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);
/* Initialize the root strip to cover the whole plane. */
343 strip->ypos = strip->xpos = 0;
344 for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
345 strip->height = height;
346 strip->split_direction = 0;
347 strip->split_flag = 0;
352 rle_v1 = rle_v2 = rle_v3 = 0;
/* Main loop: process strips until the stack underflows. */
354 while(strip >= strip_tbl) {
361 cmd = (bit_buf >> bit_pos) & 0x03;
/* cmd 0: split horizontally (push a half-height child strip). */
365 memcpy(strip, strip-1, sizeof(ustr_t));
366 strip->split_flag = 1;
367 strip->split_direction = 0;
368 strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
370 } else if(cmd == 1) {
/* cmd 1: split vertically (push a half-width child strip). */
372 memcpy(strip, strip-1, sizeof(ustr_t));
373 strip->split_flag = 1;
374 strip->split_direction = 1;
375 strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
377 } else if(cmd == 2) {
378 if(strip->usl7 == 0) {
383 } else if(cmd == 3) {
384 if(strip->usl7 == 0) {
/* Motion-compensated strip: fetch the reference motion vector. */
386 ref_vectors = (signed char*)buf2 + (*buf1 * 2);
/* Leaf strip: compute source/destination positions and block counts. */
392 cur_frm_pos = cur + width * strip->ypos + strip->xpos;
394 if((blks_width = strip->width) < 0)
397 blks_height = strip->height;
399 if(ref_vectors != NULL) {
400 ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
401 ref_vectors[1] + strip->xpos;
403 ref_frm_pos = cur_frm_pos - width_tbl[4];
412 cmd = (bit_buf >> bit_pos) & 0x03;
/* Copy-through: duplicate the reference block unchanged. */
414 if(cmd == 0 || ref_vectors != NULL) {
415 for(lp1 = 0; lp1 < blks_width; lp1++) {
416 for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
417 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
/* Intra-prediction prefilter through the ModPred LUT. */
429 if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
430 cp2 = s->ModPred + ((lv - 8) << 7);
432 for(i = 0; i < blks_width << 2; i++) {
/* Select the corrector tables for this block from hdr nibbles. */
438 if(k == 1 || k == 4) {
439 lv = (hdr[j] & 0xf) + fflags2;
440 correction_type_sp[0] = s->corrector_type + (lv << 8);
441 correction_lp[0] = correction + (lv << 8);
442 lv = (hdr[j] >> 4) + fflags2;
443 correction_lp[1] = correction + (lv << 8);
444 correction_type_sp[1] = s->corrector_type + (lv << 8);
446 correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
447 correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
448 correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
449 correction_lp[0] = correction_lp[1] = correction + (lv << 8);
454 case 0: /********** CASE 0 **********/
455 for( ; blks_height > 0; blks_height -= 4) {
456 for(lp1 = 0; lp1 < blks_width; lp1++) {
457 for(lp2 = 0; lp2 < 4; ) {
459 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
460 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];
462 switch(correction_type_sp[0][k]) {
/* Apply corrector: halve, add delta, double — all packed bytewise. */
464 *cur_lp = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
468 res = ((le2me_16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
469 ((unsigned short *)cur_lp)[0] = le2me_16(res);
470 res = ((le2me_16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
471 ((unsigned short *)cur_lp)[1] = le2me_16(res);
477 for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
478 cur_lp[j] = ref_lp[j];
484 for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
485 cur_lp[j] = ref_lp[j];
491 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
493 if(rle_v1 == 1 || ref_vectors != NULL) {
494 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
495 cur_lp[j] = ref_lp[j];
498 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
505 LP2_CHECK(buf1,rle_v3,lp2)
507 for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
508 cur_lp[j] = ref_lp[j];
520 if(ref_vectors != NULL) {
521 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
522 cur_lp[j] = ref_lp[j];
/* Flat fill: replicate a single 7-bit level across the cell. */
529 lv = (lv1 & 0x7F) << 1;
532 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
535 LV1_CHECK(buf1,rle_v3,lv1,lp2)
/* Advance to the next 4-pixel column / next row of cells. */
546 cur_frm_pos += ((width - blks_width) * 4);
547 ref_frm_pos += ((width - blks_width) * 4);
552 case 3: /********** CASE 3 **********/
553 if(ref_vectors != NULL)
557 for( ; blks_height > 0; blks_height -= 8) {
558 for(lp1 = 0; lp1 < blks_width; lp1++) {
559 for(lp2 = 0; lp2 < 4; ) {
/* 4x8 mode: every other row is decoded, the skipped row is the
 * average of its vertical neighbors (masked to stay bytewise). */
562 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
563 ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
565 switch(correction_type_sp[lp2 & 0x01][k]) {
567 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
568 if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
569 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
571 cur_lp[0] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
576 res = ((le2me_16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
577 ((unsigned short *)cur_lp)[width_tbl[2]] = le2me_16(res);
578 res = ((le2me_16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
579 ((unsigned short *)cur_lp)[width_tbl[2]+1] = le2me_16(res);
581 if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
582 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
584 cur_lp[0] = cur_lp[width_tbl[1]];
591 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
599 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
621 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
624 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
625 cur_lp[j] = ref_lp[j];
628 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
631 rle_v2 = (*buf1) - 1;
635 LP2_CHECK(buf1,rle_v3,lp2)
637 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
643 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
645 lv = (lv1 & 0x7F) << 1;
649 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
652 LV1_CHECK(buf1,rle_v3,lv1,lp2)
663 cur_frm_pos += (((width * 2) - blks_width) * 4);
668 case 10: /********** CASE 10 **********/
669 if(ref_vectors == NULL) {
/* 8x8 intra mode: two 32-bit words per row, pixel-doubled deltas. */
672 for( ; blks_height > 0; blks_height -= 8) {
673 for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
674 for(lp2 = 0; lp2 < 4; ) {
676 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
677 ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
/* Duplicate each byte into a pair (horizontal pixel doubling). */
680 if(lp2 == 0 && flag1 != 0) {
681 #ifdef WORDS_BIGENDIAN
682 lv1 = lv1 & 0xFF00FF00;
683 lv1 = (lv1 >> 8) | lv1;
684 lv2 = lv2 & 0xFF00FF00;
685 lv2 = (lv2 >> 8) | lv2;
687 lv1 = lv1 & 0x00FF00FF;
688 lv1 = (lv1 << 8) | lv1;
689 lv2 = lv2 & 0x00FF00FF;
690 lv2 = (lv2 << 8) | lv2;
694 switch(correction_type_sp[lp2 & 0x01][k]) {
696 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
697 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
698 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
699 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
700 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
702 cur_lp[0] = cur_lp[width_tbl[1]];
703 cur_lp[1] = cur_lp[width_tbl[1]+1];
709 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
710 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
711 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
712 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
713 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
715 cur_lp[0] = cur_lp[width_tbl[1]];
716 cur_lp[1] = cur_lp[width_tbl[1]+1];
725 for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
729 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
730 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
732 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
743 if(lp2 == 0 && flag1 != 0) {
744 for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
748 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
749 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
751 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
762 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
765 for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
769 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
770 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
772 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
778 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
782 rle_v2 = (*buf1) - 1;
785 LP2_CHECK(buf1,rle_v3,lp2)
787 if(lp2 == 0 && flag1 != 0) {
788 for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
792 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
793 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
795 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
820 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
822 lv = (lv1 & 0x7F) << 1;
825 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
827 LV1_CHECK(buf1,rle_v3,lv1,lp2)
838 cur_frm_pos += (((width * 2) - blks_width) * 4);
/* 8x8 inter mode: same corrector applied to both rows of a row pair. */
842 for( ; blks_height > 0; blks_height -= 8) {
843 for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
844 for(lp2 = 0; lp2 < 4; ) {
846 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
847 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
849 switch(correction_type_sp[lp2 & 0x01][k]) {
851 lv1 = correctionloworder_lp[lp2 & 0x01][k];
852 lv2 = correctionhighorder_lp[lp2 & 0x01][k];
853 cur_lp[0] = le2me_32(((le2me_32(ref_lp[0]) >> 1) + lv1) << 1);
854 cur_lp[1] = le2me_32(((le2me_32(ref_lp[1]) >> 1) + lv2) << 1);
855 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
856 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
861 lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
862 lv2 = correctionloworder_lp[lp2 & 0x01][k];
863 cur_lp[0] = le2me_32(((le2me_32(ref_lp[0]) >> 1) + lv1) << 1);
864 cur_lp[1] = le2me_32(((le2me_32(ref_lp[1]) >> 1) + lv2) << 1);
865 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
866 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
872 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
873 cur_lp[j] = ref_lp[j];
874 cur_lp[j+1] = ref_lp[j+1];
882 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
883 cur_lp[j] = ref_lp[j];
884 cur_lp[j+1] = ref_lp[j+1];
892 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
893 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
894 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
895 ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
897 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
901 rle_v2 = (*buf1) - 1;
905 LP2_CHECK(buf1,rle_v3,lp2)
908 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
909 cur_lp[j] = ref_lp[j];
910 cur_lp[j+1] = ref_lp[j+1];
916 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
918 lv = (lv1 & 0x7F) << 1;
921 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
922 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
923 LV1_CHECK(buf1,rle_v3,lv1,lp2)
935 cur_frm_pos += (((width * 2) - blks_width) * 4);
936 ref_frm_pos += (((width * 2) - blks_width) * 4);
941 case 11: /********** CASE 11 **********/
942 if(ref_vectors == NULL)
/* 4x8 inter mode: corrector applied to two vertically-adjacent rows. */
945 for( ; blks_height > 0; blks_height -= 8) {
946 for(lp1 = 0; lp1 < blks_width; lp1++) {
947 for(lp2 = 0; lp2 < 4; ) {
949 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
950 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
952 switch(correction_type_sp[lp2 & 0x01][k]) {
954 cur_lp[0] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
955 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
960 lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
961 lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
962 res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
963 ((unsigned short *)cur_lp)[0] = le2me_16(res);
964 res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
965 ((unsigned short *)cur_lp)[1] = le2me_16(res);
966 res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
967 ((unsigned short *)cur_lp)[width_tbl[2]] = le2me_16(res);
968 res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
969 ((unsigned short *)cur_lp)[width_tbl[2]+1] = le2me_16(res);
975 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
976 cur_lp[j] = ref_lp[j];
983 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
984 cur_lp[j] = ref_lp[j];
991 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
993 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
994 cur_lp[j] = ref_lp[j];
996 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
1000 rle_v2 = (*buf1) - 1;
1004 LP2_CHECK(buf1,rle_v3,lp2)
1007 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
1008 cur_lp[j] = ref_lp[j];
1013 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
1015 lv = (lv1 & 0x7F) << 1;
1018 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
1020 LV1_CHECK(buf1,rle_v3,lv1,lp2)
1032 cur_frm_pos += (((width * 2) - blks_width) * 4);
1033 ref_frm_pos += (((width * 2) - blks_width) * 4);
1042 if(strip < strip_tbl)
/* Pop completed strips; a popped split parent becomes its other half
 * (advance x or y by the child's size and shrink accordingly). */
1045 for( ; strip >= strip_tbl; strip--) {
1046 if(strip->split_flag != 0) {
1047 strip->split_flag = 0;
1048 strip->usl7 = (strip-1)->usl7;
1050 if(strip->split_direction) {
1051 strip->xpos += strip->width;
1052 strip->width = (strip-1)->width - strip->width;
1053 if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
1054 strip->width = width - strip->xpos;
1056 strip->ypos += strip->height;
1057 strip->height = (strip-1)->height - strip->height;
/* Codec init: record dimensions, declare YUV410P output and no
 * B-frames.  (Table/buffer setup calls are not visible in this chunk.) */
1065 static int indeo3_decode_init(AVCodecContext *avctx)
1067 Indeo3DecodeContext *s = avctx->priv_data;
1070 s->width = avctx->width;
1071 s->height = avctx->height;
1072 avctx->pix_fmt = PIX_FMT_YUV410P;
1073 avctx->has_b_frames = 0;
/* Decode one packet into the internal double-buffered planes, then copy
 * the current frame's Y/U/V planes row-by-row into a freshly acquired
 * AVFrame (internal stride y_w/uv_w -> AVFrame linesize). */
1081 static int indeo3_decode_frame(AVCodecContext *avctx,
1082 void *data, int *data_size,
1083 unsigned char *buf, int buf_size)
1085 Indeo3DecodeContext *s=avctx->priv_data;
1086 unsigned char *src, *dest;
1089 iv_decode_frame(s, buf, buf_size);
/* Release the previous output frame before requesting a new one. */
1091 if(s->frame.data[0])
1092 avctx->release_buffer(avctx, &s->frame);
1094 s->frame.reference = 0;
1095 if(avctx->get_buffer(avctx, &s->frame) < 0) {
1096 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* Luma: copy s->height rows of y_w bytes (y_w is both width and stride
 * of the internal buffer). */
1100 src = s->cur_frame->Ybuf;
1101 dest = s->frame.data[0];
1102 for (y = 0; y < s->height; y++) {
1103 memcpy(dest, src, s->cur_frame->y_w);
1104 src += s->cur_frame->y_w;
1105 dest += s->frame.linesize[0];
/* Chroma planes are quarter-resolution (YUV410); skipped in gray mode. */
1108 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
1110 src = s->cur_frame->Ubuf;
1111 dest = s->frame.data[1];
1112 for (y = 0; y < s->height / 4; y++) {
1113 memcpy(dest, src, s->cur_frame->uv_w);
1114 src += s->cur_frame->uv_w;
1115 dest += s->frame.linesize[1];
1118 src = s->cur_frame->Vbuf;
1119 dest = s->frame.data[2];
1120 for (y = 0; y < s->height / 4; y++) {
1121 memcpy(dest, src, s->cur_frame->uv_w);
1122 src += s->cur_frame->uv_w;
1123 dest += s->frame.linesize[2];
1127 *data_size=sizeof(AVFrame);
1128 *(AVFrame*)data= s->frame;
/* Codec teardown: free the decoder's buffers and tables
 * (cleanup calls are not visible in this chunk — presumably
 * iv_free_func(); confirm). */
1133 static int indeo3_decode_end(AVCodecContext *avctx)
1135 Indeo3DecodeContext *s = avctx->priv_data;
1142 AVCodec indeo3_decoder = {
1146 sizeof(Indeo3DecodeContext),
1150 indeo3_decode_frame,