2 * Intel Indeo 3 (IV31, IV32, etc.) video decoder for ffmpeg
3 * written, produced, and directed by Alan Smithee
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 #include "mpegvideo.h"
30 #include "indeo3data.h"
37 unsigned char *the_buf;
38 unsigned int the_buf_size;
39 unsigned short y_w, y_h;
40 unsigned short uv_w, uv_h;
43 typedef struct Indeo3DecodeContext {
44 AVCodecContext *avctx;
52 unsigned char *ModPred;
53 unsigned short *corrector_type;
54 } Indeo3DecodeContext;
56 static int corrector_type_0[24] = {
57 195, 159, 133, 115, 101, 93, 87, 77,
58 195, 159, 133, 115, 101, 93, 87, 77,
59 128, 79, 79, 79, 79, 79, 79, 79
62 static int corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
64 static void build_modpred(Indeo3DecodeContext *s)
68 s->ModPred = (unsigned char *) av_malloc (8 * 128);
70 for (i=0; i < 128; ++i) {
71 s->ModPred[i+0*128] = (i > 126) ? 254 : 2*((i + 1) - ((i + 1) % 2));
72 s->ModPred[i+1*128] = (i == 7) ? 20 : ((i == 119 || i == 120)
73 ? 236 : 2*((i + 2) - ((i + 1) % 3)));
74 s->ModPred[i+2*128] = (i > 125) ? 248 : 2*((i + 2) - ((i + 2) % 4));
75 s->ModPred[i+3*128] = 2*((i + 1) - ((i - 3) % 5));
76 s->ModPred[i+4*128] = (i == 8) ? 20 : 2*((i + 1) - ((i - 3) % 6));
77 s->ModPred[i+5*128] = 2*((i + 4) - ((i + 3) % 7));
78 s->ModPred[i+6*128] = (i > 123) ? 240 : 2*((i + 4) - ((i + 4) % 8));
79 s->ModPred[i+7*128] = 2*((i + 5) - ((i + 4) % 9));
82 s->corrector_type = (unsigned short *) av_malloc (24 * 256 * sizeof(unsigned short));
84 for (i=0; i < 24; ++i) {
85 for (j=0; j < 256; ++j) {
86 s->corrector_type[i*256+j] = (j < corrector_type_0[i])
87 ? 1 : ((j < 248 || (i == 16 && j == 248))
88 ? 0 : corrector_type_2[j - 248]);
93 static void iv_Decode_Chunk(Indeo3DecodeContext *s, unsigned char *cur,
94 unsigned char *ref, int width, int height, unsigned char *buf1,
95 long fflags2, unsigned char *hdr,
96 unsigned char *buf2, int min_width_160);
98 #define min(a,b) ((a) < (b) ? (a) : (b))
100 /* ---------------------------------------------------------------------- */
101 static void iv_alloc_frames(Indeo3DecodeContext *s)
103 int luma_width, luma_height, luma_pixels, chroma_width, chroma_height,
105 unsigned int bufsize;
107 luma_width = (s->width + 3) & (~3);
108 luma_height = (s->height + 3) & (~3);
110 s->iv_frame[0].y_w = s->iv_frame[0].y_h =
111 s->iv_frame[0].the_buf_size = 0;
112 s->iv_frame[1].y_w = s->iv_frame[1].y_h =
113 s->iv_frame[1].the_buf_size = 0;
114 s->iv_frame[1].the_buf = NULL;
116 chroma_width = ((luma_width >> 2) + 3) & (~3);
117 chroma_height = ((luma_height>> 2) + 3) & (~3);
118 luma_pixels = luma_width * luma_height;
119 chroma_pixels = chroma_width * chroma_height;
121 bufsize = luma_pixels * 2 + luma_width * 3 +
122 (chroma_pixels + chroma_width) * 4;
124 if((s->iv_frame[0].the_buf =
125 (s->iv_frame[0].the_buf_size == 0 ? av_malloc(bufsize) :
126 av_realloc(s->iv_frame[0].the_buf, bufsize))) == NULL)
128 s->iv_frame[0].y_w = s->iv_frame[1].y_w = luma_width;
129 s->iv_frame[0].y_h = s->iv_frame[1].y_h = luma_height;
130 s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
131 s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;
132 s->iv_frame[0].the_buf_size = bufsize;
134 s->iv_frame[0].Ybuf = s->iv_frame[0].the_buf + luma_width;
135 i = luma_pixels + luma_width * 2;
136 s->iv_frame[1].Ybuf = s->iv_frame[0].the_buf + i;
137 i += (luma_pixels + luma_width);
138 s->iv_frame[0].Ubuf = s->iv_frame[0].the_buf + i;
139 i += (chroma_pixels + chroma_width);
140 s->iv_frame[1].Ubuf = s->iv_frame[0].the_buf + i;
141 i += (chroma_pixels + chroma_width);
142 s->iv_frame[0].Vbuf = s->iv_frame[0].the_buf + i;
143 i += (chroma_pixels + chroma_width);
144 s->iv_frame[1].Vbuf = s->iv_frame[0].the_buf + i;
146 for(i = 1; i <= luma_width; i++)
147 s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
148 s->iv_frame[0].Ubuf[-i] = 0x80;
150 for(i = 1; i <= chroma_width; i++) {
151 s->iv_frame[1].Ubuf[-i] = 0x80;
152 s->iv_frame[0].Vbuf[-i] = 0x80;
153 s->iv_frame[1].Vbuf[-i] = 0x80;
154 s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
158 /* ---------------------------------------------------------------------- */
159 static void iv_free_func(Indeo3DecodeContext *s)
163 for(i = 0 ; i < 2 ; i++) {
164 if(s->iv_frame[i].the_buf != NULL)
165 av_free(s->iv_frame[i].the_buf);
166 s->iv_frame[i].Ybuf = s->iv_frame[i].Ubuf =
167 s->iv_frame[i].Vbuf = NULL;
168 s->iv_frame[i].the_buf = NULL;
169 s->iv_frame[i].the_buf_size = 0;
170 s->iv_frame[i].y_w = s->iv_frame[i].y_h = 0;
171 s->iv_frame[i].uv_w = s->iv_frame[i].uv_h = 0;
175 av_free(s->corrector_type);
178 /* ---------------------------------------------------------------------- */
179 static unsigned long iv_decode_frame(Indeo3DecodeContext *s,
180 unsigned char *buf, int buf_size)
182 unsigned int hdr_width, hdr_height,
183 chroma_width, chroma_height;
184 unsigned long fflags1, fflags2, fflags3, offs1, offs2, offs3, offs;
185 unsigned char *hdr_pos, *buf_pos;
190 fflags1 = le2me_16(*(uint16_t *)buf_pos);
192 fflags3 = le2me_32(*(uint32_t *)buf_pos);
194 fflags2 = *buf_pos++;
196 hdr_height = le2me_16(*(uint16_t *)buf_pos);
198 hdr_width = le2me_16(*(uint16_t *)buf_pos);
200 if(avcodec_check_dimensions(NULL, hdr_width, hdr_height))
204 chroma_height = ((hdr_height >> 2) + 3) & 0x7ffc;
205 chroma_width = ((hdr_width >> 2) + 3) & 0x7ffc;
206 offs1 = le2me_32(*(uint32_t *)buf_pos);
208 offs2 = le2me_32(*(uint32_t *)buf_pos);
210 offs3 = le2me_32(*(uint32_t *)buf_pos);
213 if(fflags3 == 0x80) return 4;
215 if(fflags1 & 0x200) {
216 s->cur_frame = s->iv_frame + 1;
217 s->ref_frame = s->iv_frame;
219 s->cur_frame = s->iv_frame;
220 s->ref_frame = s->iv_frame + 1;
223 buf_pos = buf + 16 + offs1;
224 offs = le2me_32(*(uint32_t *)buf_pos);
227 iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, hdr_width,
228 hdr_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
229 min(hdr_width, 160));
231 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
234 buf_pos = buf + 16 + offs2;
235 offs = le2me_32(*(uint32_t *)buf_pos);
238 iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
239 chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
240 min(chroma_width, 40));
242 buf_pos = buf + 16 + offs3;
243 offs = le2me_32(*(uint32_t *)buf_pos);
246 iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
247 chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
248 min(chroma_width, 40));
261 long split_direction;
265 /* ---------------------------------------------------------------------- */
267 #define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
268 if((lv1 & 0x80) != 0) { \
279 #define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
292 #define LP2_CHECK(buf1,rle_v3,lp2) \
293 if(lp2 == 0 && rle_v3 != 0) \
301 #define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
309 static void iv_Decode_Chunk(Indeo3DecodeContext *s,
310 unsigned char *cur, unsigned char *ref, int width, int height,
311 unsigned char *buf1, long fflags2, unsigned char *hdr,
312 unsigned char *buf2, int min_width_160)
314 unsigned char bit_buf;
315 unsigned long bit_pos, lv, lv1, lv2;
316 long *width_tbl, width_tbl_arr[10];
318 unsigned char *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
319 uint32_t *cur_lp, *ref_lp;
320 const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
321 unsigned short *correction_type_sp[2];
322 ustr_t strip_tbl[20], *strip;
323 int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
324 rle_v1, rle_v2, rle_v3;
329 width_tbl = width_tbl_arr + 1;
330 i = (width < 0 ? width + 3 : width)/4;
331 for(j = -1; j < 8; j++)
332 width_tbl[j] = i * j;
336 for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);
338 strip->ypos = strip->xpos = 0;
339 for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
340 strip->height = height;
341 strip->split_direction = 0;
342 strip->split_flag = 0;
347 rle_v1 = rle_v2 = rle_v3 = 0;
349 while(strip >= strip_tbl) {
356 cmd = (bit_buf >> bit_pos) & 0x03;
360 memcpy(strip, strip-1, sizeof(ustr_t));
361 strip->split_flag = 1;
362 strip->split_direction = 0;
363 strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
365 } else if(cmd == 1) {
367 memcpy(strip, strip-1, sizeof(ustr_t));
368 strip->split_flag = 1;
369 strip->split_direction = 1;
370 strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
372 } else if(cmd == 2) {
373 if(strip->usl7 == 0) {
378 } else if(cmd == 3) {
379 if(strip->usl7 == 0) {
381 ref_vectors = buf2 + (*buf1 * 2);
387 cur_frm_pos = cur + width * strip->ypos + strip->xpos;
389 if((blks_width = strip->width) < 0)
392 blks_height = strip->height;
394 if(ref_vectors != NULL) {
395 ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
396 ref_vectors[1] + strip->xpos;
398 ref_frm_pos = cur_frm_pos - width_tbl[4];
407 cmd = (bit_buf >> bit_pos) & 0x03;
409 if(cmd == 0 || ref_vectors != NULL) {
410 for(lp1 = 0; lp1 < blks_width; lp1++) {
411 for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
412 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
424 if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
425 cp2 = s->ModPred + ((lv - 8) << 7);
427 for(i = 0; i < blks_width << 2; i++) {
433 if(k == 1 || k == 4) {
434 lv = (hdr[j] & 0xf) + fflags2;
435 correction_type_sp[0] = s->corrector_type + (lv << 8);
436 correction_lp[0] = correction + (lv << 8);
437 lv = (hdr[j] >> 4) + fflags2;
438 correction_lp[1] = correction + (lv << 8);
439 correction_type_sp[1] = s->corrector_type + (lv << 8);
441 correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
442 correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
443 correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
444 correction_lp[0] = correction_lp[1] = correction + (lv << 8);
449 case 0: /********** CASE 0 **********/
450 for( ; blks_height > 0; blks_height -= 4) {
451 for(lp1 = 0; lp1 < blks_width; lp1++) {
452 for(lp2 = 0; lp2 < 4; ) {
454 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
455 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];
457 switch(correction_type_sp[0][k]) {
459 *cur_lp = ((*ref_lp >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
463 ((unsigned short *)cur_lp)[0] = ((((unsigned short *)(ref_lp))[0] >> 1)
464 + correction_lp[lp2 & 0x01][*buf1++]) << 1;
465 ((unsigned short *)cur_lp)[1] = ((((unsigned short *)(ref_lp))[1] >> 1)
466 + correction_lp[lp2 & 0x01][k]) << 1;
471 for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
472 cur_lp[j] = ref_lp[j];
478 for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
479 cur_lp[j] = ref_lp[j];
485 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
487 if(rle_v1 == 1 || ref_vectors != NULL) {
488 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
489 cur_lp[j] = ref_lp[j];
492 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
499 LP2_CHECK(buf1,rle_v3,lp2)
501 for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
502 cur_lp[j] = ref_lp[j];
514 if(ref_vectors != NULL) {
515 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
516 cur_lp[j] = ref_lp[j];
523 lv = (lv1 & 0x7F) << 1;
526 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
529 LV1_CHECK(buf1,rle_v3,lv1,lp2)
540 cur_frm_pos += ((width - blks_width) * 4);
541 ref_frm_pos += ((width - blks_width) * 4);
546 case 3: /********** CASE 3 **********/
547 if(ref_vectors != NULL)
551 for( ; blks_height > 0; blks_height -= 8) {
552 for(lp1 = 0; lp1 < blks_width; lp1++) {
553 for(lp2 = 0; lp2 < 4; ) {
556 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
557 ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
559 switch(correction_type_sp[lp2 & 0x01][k]) {
561 cur_lp[width_tbl[1]] = ((*ref_lp >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
562 if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
563 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
565 cur_lp[0] = ((*ref_lp >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
570 ((unsigned short *)cur_lp)[width_tbl[2]] =
571 ((((unsigned short *)ref_lp)[0] >> 1) + correction_lp[lp2 & 0x01][*buf1++]) << 1;
572 ((unsigned short *)cur_lp)[width_tbl[2]+1] =
573 ((((unsigned short *)ref_lp)[1] >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
574 if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
575 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
577 cur_lp[0] = cur_lp[width_tbl[1]];
583 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
591 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
613 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
616 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
617 cur_lp[j] = ref_lp[j];
620 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
623 rle_v2 = (*buf1) - 1;
627 LP2_CHECK(buf1,rle_v3,lp2)
629 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
635 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
637 lv = (lv1 & 0x7F) << 1;
641 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
644 LV1_CHECK(buf1,rle_v3,lv1,lp2)
655 cur_frm_pos += (((width * 2) - blks_width) * 4);
660 case 10: /********** CASE 10 **********/
661 if(ref_vectors == NULL) {
664 for( ; blks_height > 0; blks_height -= 8) {
665 for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
666 for(lp2 = 0; lp2 < 4; ) {
668 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
669 ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
672 if(lp2 == 0 && flag1 != 0) {
673 lv1 = lv1 & 0x00FF00FF;
674 lv1 = (lv1 << 8) | lv1;
675 lv2 = lv2 & 0x00FF00FF;
676 lv2 = (lv2 << 8) | lv2;
679 switch(correction_type_sp[lp2 & 0x01][k]) {
681 cur_lp[width_tbl[1]] = ((lv1 >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1;
682 cur_lp[width_tbl[1]+1] = ((lv2 >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1;
683 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
684 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
685 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
687 cur_lp[0] = cur_lp[width_tbl[1]];
688 cur_lp[1] = cur_lp[width_tbl[1]+1];
694 cur_lp[width_tbl[1]] = ((lv1 >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1++]) << 1;
695 cur_lp[width_tbl[1]+1] = ((lv2 >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1;
696 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
697 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
698 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
700 cur_lp[0] = cur_lp[width_tbl[1]];
701 cur_lp[1] = cur_lp[width_tbl[1]+1];
709 for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
713 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
714 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
716 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
727 if(lp2 == 0 && flag1 != 0) {
728 for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
732 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
733 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
735 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
746 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
749 for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
753 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
754 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
756 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
762 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
766 rle_v2 = (*buf1) - 1;
769 LP2_CHECK(buf1,rle_v3,lp2)
771 if(lp2 == 0 && flag1 != 0) {
772 for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
776 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
777 cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
779 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
804 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
806 lv = (lv1 & 0x7F) << 1;
809 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
811 LV1_CHECK(buf1,rle_v3,lv1,lp2)
822 cur_frm_pos += (((width * 2) - blks_width) * 4);
826 for( ; blks_height > 0; blks_height -= 8) {
827 for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
828 for(lp2 = 0; lp2 < 4; ) {
830 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
831 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
833 switch(correction_type_sp[lp2 & 0x01][k]) {
835 lv1 = correctionloworder_lp[lp2 & 0x01][k];
836 lv2 = correctionhighorder_lp[lp2 & 0x01][k];
837 cur_lp[0] = ((ref_lp[0] >> 1) + lv1) << 1;
838 cur_lp[1] = ((ref_lp[1] >> 1) + lv2) << 1;
839 cur_lp[width_tbl[1]] = ((ref_lp[width_tbl[1]] >> 1) + lv1) << 1;
840 cur_lp[width_tbl[1]+1] = ((ref_lp[width_tbl[1]+1] >> 1) + lv2) << 1;
845 lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
846 lv2 = correctionloworder_lp[lp2 & 0x01][k];
847 cur_lp[0] = ((ref_lp[0] >> 1) + lv1) << 1;
848 cur_lp[1] = ((ref_lp[1] >> 1) + lv2) << 1;
849 cur_lp[width_tbl[1]] = ((ref_lp[width_tbl[1]] >> 1) + lv1) << 1;
850 cur_lp[width_tbl[1]+1] = ((ref_lp[width_tbl[1]+1] >> 1) + lv2) << 1;
856 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
857 cur_lp[j] = ref_lp[j];
858 cur_lp[j+1] = ref_lp[j+1];
866 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
867 cur_lp[j] = ref_lp[j];
868 cur_lp[j+1] = ref_lp[j+1];
876 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
877 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
878 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
879 ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
881 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
885 rle_v2 = (*buf1) - 1;
889 LP2_CHECK(buf1,rle_v3,lp2)
892 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
893 cur_lp[j] = ref_lp[j];
894 cur_lp[j+1] = ref_lp[j+1];
900 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
902 lv = (lv1 & 0x7F) << 1;
905 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
906 ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
907 LV1_CHECK(buf1,rle_v3,lv1,lp2)
919 cur_frm_pos += (((width * 2) - blks_width) * 4);
920 ref_frm_pos += (((width * 2) - blks_width) * 4);
925 case 11: /********** CASE 11 **********/
926 if(ref_vectors == NULL)
929 for( ; blks_height > 0; blks_height -= 8) {
930 for(lp1 = 0; lp1 < blks_width; lp1++) {
931 for(lp2 = 0; lp2 < 4; ) {
933 cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
934 ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
936 switch(correction_type_sp[lp2 & 0x01][k]) {
938 cur_lp[0] = ((*ref_lp >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
939 cur_lp[width_tbl[1]] = ((ref_lp[width_tbl[1]] >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
944 lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
945 lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
946 ((unsigned short *)cur_lp)[0] = ((((unsigned short *)ref_lp)[0] >> 1) + lv1) << 1;
947 ((unsigned short *)cur_lp)[1] = ((((unsigned short *)ref_lp)[1] >> 1) + lv2) << 1;
948 ((unsigned short *)cur_lp)[width_tbl[2]] = ((((unsigned short *)ref_lp)[width_tbl[2]] >> 1) + lv1) << 1;
949 ((unsigned short *)cur_lp)[width_tbl[2]+1] = ((((unsigned short *)ref_lp)[width_tbl[2]+1] >> 1) + lv2) << 1;
955 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
956 cur_lp[j] = ref_lp[j];
963 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
964 cur_lp[j] = ref_lp[j];
971 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
973 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
974 cur_lp[j] = ref_lp[j];
976 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
980 rle_v2 = (*buf1) - 1;
984 LP2_CHECK(buf1,rle_v3,lp2)
987 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
988 cur_lp[j] = ref_lp[j];
993 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
995 lv = (lv1 & 0x7F) << 1;
998 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
1000 LV1_CHECK(buf1,rle_v3,lv1,lp2)
1012 cur_frm_pos += (((width * 2) - blks_width) * 4);
1013 ref_frm_pos += (((width * 2) - blks_width) * 4);
1022 if(strip < strip_tbl)
1025 for( ; strip >= strip_tbl; strip--) {
1026 if(strip->split_flag != 0) {
1027 strip->split_flag = 0;
1028 strip->usl7 = (strip-1)->usl7;
1030 if(strip->split_direction) {
1031 strip->xpos += strip->width;
1032 strip->width = (strip-1)->width - strip->width;
1033 if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
1034 strip->width = width - strip->xpos;
1036 strip->ypos += strip->height;
1037 strip->height = (strip-1)->height - strip->height;
1045 static int indeo3_decode_init(AVCodecContext *avctx)
1047 Indeo3DecodeContext *s = avctx->priv_data;
1050 s->width = avctx->width;
1051 s->height = avctx->height;
1052 avctx->pix_fmt = PIX_FMT_YUV410P;
1053 avctx->has_b_frames = 0;
1061 static int indeo3_decode_frame(AVCodecContext *avctx,
1062 void *data, int *data_size,
1063 unsigned char *buf, int buf_size)
1065 Indeo3DecodeContext *s=avctx->priv_data;
1066 unsigned char *src, *dest;
1069 /* no supplementary picture */
1070 if (buf_size == 0) {
1074 iv_decode_frame(s, buf, buf_size);
1076 if(s->frame.data[0])
1077 avctx->release_buffer(avctx, &s->frame);
1079 s->frame.reference = 0;
1080 if(avctx->get_buffer(avctx, &s->frame) < 0) {
1081 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1085 src = s->cur_frame->Ybuf;
1086 dest = s->frame.data[0];
1087 for (y = 0; y < s->height; y++) {
1088 memcpy(dest, src, s->cur_frame->y_w);
1089 src += s->cur_frame->y_w;
1090 dest += s->frame.linesize[0];
1093 if (!(s->avctx->flags & CODEC_FLAG_GRAY))
1095 src = s->cur_frame->Ubuf;
1096 dest = s->frame.data[1];
1097 for (y = 0; y < s->height / 4; y++) {
1098 memcpy(dest, src, s->cur_frame->uv_w);
1099 src += s->cur_frame->uv_w;
1100 dest += s->frame.linesize[1];
1103 src = s->cur_frame->Vbuf;
1104 dest = s->frame.data[2];
1105 for (y = 0; y < s->height / 4; y++) {
1106 memcpy(dest, src, s->cur_frame->uv_w);
1107 src += s->cur_frame->uv_w;
1108 dest += s->frame.linesize[2];
1112 *data_size=sizeof(AVFrame);
1113 *(AVFrame*)data= s->frame;
1118 static int indeo3_decode_end(AVCodecContext *avctx)
1120 Indeo3DecodeContext *s = avctx->priv_data;
1127 AVCodec indeo3_decoder = {
1131 sizeof(Indeo3DecodeContext),
1135 indeo3_decode_frame,