2 * This file is part of FFmpeg.
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 * Intel Indeo 3 (IV31, IV32, etc.) video decoder for FFmpeg
22 * written, produced, and directed by Alan Smithee
24 * For some documentation see:
25 * http://wiki.multimedia.cx/index.php?title=Indeo_3
28 #include "libavutil/imgutils.h"
31 #include "bytestream.h"
33 #include "indeo3data.h"
/* Plane dimensions of one internal frame buffer.
 * NOTE(review): the enclosing struct declaration (and its Ybuf/Ubuf/Vbuf
 * pixel-pointer members used throughout this file) is not visible in this
 * chunk — confirm against the full source. */
unsigned short y_w, y_h;   /* luma plane width/height, rounded up to x4 in iv_alloc_frames() */
unsigned short uv_w, uv_h; /* chroma plane width/height, rounded up to x4 in iv_alloc_frames() */
/* Decoder private context.
 * NOTE(review): several members referenced below (frame, buf, iv_frame[],
 * cur_frame, ref_frame, width, height, ModPred) are declared on lines
 * elided from this chunk. */
typedef struct Indeo3DecodeContext {
    AVCodecContext *avctx;    /* owning codec context, used for logging */
    uint8_t *corrector_type;  /* 24*256 corrector-type LUT built in build_modpred() */
} Indeo3DecodeContext;
/* Per-table thresholds: for table i, byte values j < corrector_type_0[i]
 * select corrector type 1 in build_modpred()'s corrector_type LUT. */
static const uint8_t corrector_type_0[24] = {
    195, 159, 133, 115, 101,  93,  87,  77,
    195, 159, 133, 115, 101,  93,  87,  77,
    128,  79,  79,  79,  79,  79,  79,  79
/* Corrector types for escape byte values 248..255 (indexed by j - 248
 * in build_modpred()). */
static const uint8_t corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
/*
 * Build the two lookup tables used by the plane decoder:
 *  - s->ModPred: 8 sub-tables of 128 bytes each; sub-table m maps a
 *    half-pixel value i to a clamped, even prediction value (the formulas
 *    below differ per sub-table, with hard-coded fixups at the ends).
 *  - s->corrector_type: 24 sub-tables of 256 selector bytes, derived from
 *    corrector_type_0 (threshold for type 1), the 248..255 escape range
 *    (corrector_type_2), and the i == 16 special case.
 * Returns AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): if the second av_malloc() fails, s->ModPred is not freed
 * here — presumably iv_free_func() releases it on the caller's error
 * path; confirm against the init code.
 */
static av_cold int build_modpred(Indeo3DecodeContext *s)
    if (!(s->ModPred = av_malloc(8 * 128)))
        return AVERROR(ENOMEM);
    for (i=0; i < 128; ++i) {
        s->ModPred[i+0*128] = i >  126  ?  254 : 2*(i + 1 - ((i + 1) % 2));
        s->ModPred[i+1*128] = i ==   7  ?   20 :
                              i == 120  ?  236 : 2*(i + 2 - ((i + 1) % 3));
        s->ModPred[i+2*128] = i >  125  ?  248 : 2*(i + 2 - ((i + 2) % 4));
        s->ModPred[i+3*128] =                    2*(i + 1 - ((i - 3) % 5));
        s->ModPred[i+4*128] = i ==   8  ?   20 : 2*(i + 1 - ((i - 3) % 6));
        s->ModPred[i+5*128] =                    2*(i + 4 - ((i + 3) % 7));
        s->ModPred[i+6*128] = i >  123  ?  240 : 2*(i + 4 - ((i + 4) % 8));
        s->ModPred[i+7*128] =                    2*(i + 5 - ((i + 4) % 9));
    if (!(s->corrector_type = av_malloc(24 * 256)))
        return AVERROR(ENOMEM);
    for (i=0; i < 24; ++i) {
        for (j=0; j < 256; ++j) {
            /* type 1 below the per-table threshold; type 0 for ordinary
             * values (and for 248 in table 16); escape types otherwise */
            s->corrector_type[i*256+j] = j < corrector_type_0[i]          ? 1 :
                                         j < 248 || (i == 16 && j == 248) ? 0 :
                                         corrector_type_2[j - 248];
/*
 * Allocate both internal YUV410 frames (double buffer) in one av_malloc'd
 * slab (s->buf) and carve out the Y/U/V plane pointers.  Plane dimensions
 * are rounded up to multiples of 4; each plane is preceded by one guard
 * row, initialised to 0x80 below, so the decoder may safely read one row
 * above a plane.  Returns AVERROR(ENOMEM) on allocation failure.
 */
static av_cold int iv_alloc_frames(Indeo3DecodeContext *s)
    int luma_width    = (s->width  + 3) & ~3,
        luma_height   = (s->height + 3) & ~3,
        chroma_width  = ((luma_width  >> 2) + 3) & ~3,
        chroma_height = ((luma_height >> 2) + 3) & ~3,
        luma_pixels   = luma_width   * luma_height,
        chroma_pixels = chroma_width * chroma_height,
    /* slab size: two luma planes plus three luma guard rows, and four
     * chroma planes each with one guard row */
    unsigned int bufsize = luma_pixels * 2 + luma_width * 3 +
                           (chroma_pixels + chroma_width) * 4;
    if(!(s->buf = av_malloc(bufsize)))
        return AVERROR(ENOMEM);
    s->iv_frame[0].y_w  = s->iv_frame[1].y_w  = luma_width;
    s->iv_frame[0].y_h  = s->iv_frame[1].y_h  = luma_height;
    s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
    s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;
    /* lay the planes out back to back, each offset by one guard row */
    s->iv_frame[0].Ybuf = s->buf + luma_width;
    i = luma_pixels + luma_width * 2;
    s->iv_frame[1].Ybuf = s->buf + i;
    i += (luma_pixels + luma_width);
    s->iv_frame[0].Ubuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[1].Ubuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[0].Vbuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[1].Vbuf = s->buf + i;
    /* fill the guard rows with the neutral value 0x80; the luma-sized
     * loop also covers frame 0's U guard row, which directly follows
     * frame 1's Y plane in the slab */
    for(i = 1; i <= luma_width; i++)
        s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
            s->iv_frame[0].Ubuf[-i] = 0x80;
    for(i = 1; i <= chroma_width; i++) {
        s->iv_frame[1].Ubuf[-i] = 0x80;
        s->iv_frame[0].Vbuf[-i] = 0x80;
        s->iv_frame[1].Vbuf[-i] = 0x80;
        /* also pad one row past the end of the last plane */
        s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
/*
 * Free the lookup tables allocated by build_modpred().
 * NOTE(review): the release of s->buf (the frame slab) is on a line
 * elided from this chunk — confirm it is freed here as well.
 */
static av_cold void iv_free_func(Indeo3DecodeContext *s)
    av_freep(&s->ModPred);
    av_freep(&s->corrector_type);
/* Helper macros for iv_Decode_Chunk() below.  Each one inspects the
 * current opcode/RLE state read from the byte stream (buf1) and updates
 * the rle_v1/rle_v2/rle_v3 and lp2 loop variables.  Their bodies are
 * line-continued and partially elided in this chunk. */
#define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
if((lv1 & 0x80) != 0) { \
/* Advances the three-valued RLE state machine from the next stream byte. */
#define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
/* Consumes a pending rle_v3 flag when the inner cell loop starts fresh. */
#define LP2_CHECK(buf1,rle_v3,lp2) \
if(lp2 == 0 && rle_v3 != 0) \
/* Counts down an active rle_v2 run, terminating the cell when it expires. */
#define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
/*
 * Decode one plane chunk (Y, U or V) into cur, optionally predicting from
 * ref.  buf1 is the cell opcode/corrector stream, hdr the codebook
 * selector bytes, buf2 the motion-vector table, cb_offset the codebook
 * base index, and min_width_160 the strip width quantum (160 for luma,
 * 40 for chroma — see the callers in iv_decode_frame()).
 *
 * The plane is processed as a stack of recursively split strips
 * (strip_tbl/strip): cmd 0/1 split the current strip horizontally or
 * vertically, cmd 2/3 select intra vs. motion-compensated prediction,
 * and per-cell opcodes k drive the large switch below (cases 0/3/10/11
 * handle the different cell geometries).
 *
 * NOTE(review): a large number of lines (braces, bit-reader refills,
 * several case bodies) are elided from this chunk; the comments below
 * describe only the visible statements.
 */
static void iv_Decode_Chunk(Indeo3DecodeContext *s,
    uint8_t *cur, uint8_t *ref, int width, int height,
    const uint8_t *buf1, int cb_offset, const uint8_t *hdr,
    const uint8_t *buf2, int min_width_160)
    unsigned int bit_pos, lv, lv1, lv2;
    int *width_tbl, width_tbl_arr[10];
    const signed char *ref_vectors;
    uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
    uint8_t *cur_end = cur + width*height + width;
    uint32_t *cur_lp, *ref_lp;
    const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
    uint8_t *correction_type_sp[2];
    struct ustr strip_tbl[20], *strip;
    int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
        rle_v1, rle_v2, rle_v3;
    /* width_tbl[j] (j = -1..7) is the offset of j rows, measured in
     * 32-bit words since the plane is addressed through uint32_t* below */
    width_tbl = width_tbl_arr + 1;
    i = (width < 0 ? width + 3 : width)/4;
    for(j = -1; j < 8; j++)
        width_tbl[j] = i * j;
    /* largest multiple of min_width_160 strictly below the plane width */
    for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);
    /* initial strip covers the whole plane; width rounded up to a power
     * of two times min_width_160 */
    strip->ypos = strip->xpos = 0;
    for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
    strip->height = height;
    strip->split_direction = 0;
    strip->split_flag = 0;
    rle_v1 = rle_v2 = rle_v3 = 0;
    /* main loop: process strips until the stack is empty */
    while(strip >= strip_tbl) {
    cmd = (bit_buf >> bit_pos) & 0x03;
    /* cmd 0: split the strip horizontally (push a new strip) */
    if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
        av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
    memcpy(strip, strip-1, sizeof(*strip));
    strip->split_flag = 1;
    strip->split_direction = 0;
    strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
    } else if(cmd == 1) {
    /* cmd 1: split the strip vertically */
    if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
        av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
    memcpy(strip, strip-1, sizeof(*strip));
    strip->split_flag = 1;
    strip->split_direction = 1;
    strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
    } else if(cmd == 2) {
    /* cmd 2: intra strip (body partially elided) */
    if(strip->usl7 == 0) {
    } else if(cmd == 3) {
    /* cmd 3: motion-compensated strip; *buf1 indexes a (y, x) vector
     * pair in buf2 */
    if(strip->usl7 == 0) {
        ref_vectors = (const signed char*)buf2 + (*buf1 * 2);
    cur_frm_pos = cur + width * strip->ypos + strip->xpos;
    if((blks_width = strip->width) < 0)
    blks_height = strip->height;
    /* reference position: motion-compensated in ref, or one 4-row band
     * above the current position for intra prediction */
    if(ref_vectors != NULL) {
        ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
            ref_vectors[1] + strip->xpos;
        ref_frm_pos = cur_frm_pos - width_tbl[4];
    cmd = (bit_buf >> bit_pos) & 0x03;
    if(cmd == 0 || ref_vectors != NULL) {
        /* copy-only cell: duplicate the reference block column by column */
        for(lp1 = 0; lp1 < blks_width; lp1++) {
            for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
                ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
    /* select the ModPred sub-table for delta modes 8..15 */
    if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
        cp2 = s->ModPred + ((lv - 8) << 7);
        for(i = 0; i < blks_width << 2; i++) {
    /* set up the two codebook slots from the hdr selector byte */
    if(k == 1 || k == 4) {
        lv = (hdr[j] & 0xf) + cb_offset;
        correction_type_sp[0] = s->corrector_type + (lv << 8);
        correction_lp[0] = correction + (lv << 8);
        lv = (hdr[j] >> 4) + cb_offset;
        correction_lp[1] = correction + (lv << 8);
        correction_type_sp[1] = s->corrector_type + (lv << 8);
        /* single-codebook modes: both slots share the same tables */
        correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
        correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
        correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
        correction_lp[0] = correction_lp[1] = correction + (lv << 8);
    /* per-cell decode, dispatched on the cell opcode k */
    case 0: /********** CASE 0 **********/
        /* 4-row cells, one 32-bit column word at a time */
        for( ; blks_height > 0; blks_height -= 4) {
        for(lp1 = 0; lp1 < blks_width; lp1++) {
        for(lp2 = 0; lp2 < 4; ) {
        cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
        ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];
        /* bounds guard: never write past the plane end */
        if ((uint8_t *)cur_lp >= cur_end-3)
        switch(correction_type_sp[0][k]) {
            /* type 0: apply a 4-byte corrector to the reference word;
             * the >>1 ... <<1 keeps each byte's low bit clear */
            *cur_lp = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
            /* type 1: two 16-bit halves corrected independently */
            res = ((av_le2ne16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
            ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
            res = ((av_le2ne16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
            ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
            for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
            for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
            RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
            if(rle_v1 == 1 || ref_vectors != NULL) {
                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                    cur_lp[j] = ref_lp[j];
            RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
            LP2_CHECK(buf1,rle_v3,lp2)
            for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
        if(ref_vectors != NULL) {
            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
        /* flat-fill escape: replicate a single (even) value */
        lv = (lv1 & 0x7F) << 1;
        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
        LV1_CHECK(buf1,rle_v3,lv1,lp2)
        /* advance to the next 4-row band of the strip */
        cur_frm_pos += ((width - blks_width) * 4);
        ref_frm_pos += ((width - blks_width) * 4);
    case 3: /********** CASE 3 **********/
        /* 8-row cells, vertically interpolated: only every other row is
         * decoded, the in-between row is the average of its neighbours */
        if(ref_vectors != NULL)
        for( ; blks_height > 0; blks_height -= 8) {
        for(lp1 = 0; lp1 < blks_width; lp1++) {
        for(lp2 = 0; lp2 < 4; ) {
        cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
        ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
        switch(correction_type_sp[lp2 & 0x01][k]) {
            cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
            if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
                /* per-byte average of the rows above and below */
                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
            res = ((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
            ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
            res = ((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
            ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
            if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                cur_lp[0] = cur_lp[width_tbl[1]];
            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
            for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
            RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
            for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
            RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
            rle_v2 = (*buf1) - 1;
            LP2_CHECK(buf1,rle_v3,lp2)
            for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
        av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
        lv = (lv1 & 0x7F) << 1;
        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
        LV1_CHECK(buf1,rle_v3,lv1,lp2)
        cur_frm_pos += (((width * 2) - blks_width) * 4);
    case 10: /********** CASE 10 **********/
        /* 8x8 cells; the intra path (no reference) also doubles pixels
         * horizontally via the lv1/lv2 byte-spreading below */
        if(ref_vectors == NULL) {
        for( ; blks_height > 0; blks_height -= 8) {
        for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
        for(lp2 = 0; lp2 < 4; ) {
        cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
        ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
        /* spread either the high or the low byte of each 16-bit pair
         * across both bytes, building the horizontally doubled source */
        if(lp2 == 0 && flag1 != 0) {
            lv1 = lv1 & 0xFF00FF00;
            lv1 = (lv1 >> 8) | lv1;
            lv2 = lv2 & 0xFF00FF00;
            lv2 = (lv2 >> 8) | lv2;
            lv1 = lv1 & 0x00FF00FF;
            lv1 = (lv1 << 8) | lv1;
            lv2 = lv2 & 0x00FF00FF;
            lv2 = (lv2 << 8) | lv2;
        switch(correction_type_sp[lp2 & 0x01][k]) {
            cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
            cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
            if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                cur_lp[0] = cur_lp[width_tbl[1]];
                cur_lp[1] = cur_lp[width_tbl[1]+1];
            cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
            cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
            if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                cur_lp[0] = cur_lp[width_tbl[1]];
                cur_lp[1] = cur_lp[width_tbl[1]+1];
            for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
            cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
            cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
            if(lp2 == 0 && flag1 != 0) {
                for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
            RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
            for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
            cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
            cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
            for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
            RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
            rle_v2 = (*buf1) - 1;
            LP2_CHECK(buf1,rle_v3,lp2)
            if(lp2 == 0 && flag1 != 0) {
                for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
        av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
        lv = (lv1 & 0x7F) << 1;
        for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
        LV1_CHECK(buf1,rle_v3,lv1,lp2)
        cur_frm_pos += (((width * 2) - blks_width) * 4);
        /* inter path of case 10: two 32-bit columns per iteration,
         * corrected from the motion-compensated reference */
        for( ; blks_height > 0; blks_height -= 8) {
        for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
        for(lp2 = 0; lp2 < 4; ) {
        cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
        ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
        switch(correction_type_sp[lp2 & 0x01][k]) {
            lv1 = correctionloworder_lp[lp2 & 0x01][k];
            lv2 = correctionhighorder_lp[lp2 & 0x01][k];
            cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
            cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
            cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
            cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
            lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
            lv2 = correctionloworder_lp[lp2 & 0x01][k];
            cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
            cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
            cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
            cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
                cur_lp[j] = ref_lp[j];
                cur_lp[j+1] = ref_lp[j+1];
            for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
                cur_lp[j] = ref_lp[j];
                cur_lp[j+1] = ref_lp[j+1];
            RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
            for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
                ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
                ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
            RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
            rle_v2 = (*buf1) - 1;
            LP2_CHECK(buf1,rle_v3,lp2)
            for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
                cur_lp[j] = ref_lp[j];
                cur_lp[j+1] = ref_lp[j+1];
        av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
        lv = (lv1 & 0x7F) << 1;
        for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
            ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
        LV1_CHECK(buf1,rle_v3,lv1,lp2)
        cur_frm_pos += (((width * 2) - blks_width) * 4);
        ref_frm_pos += (((width * 2) - blks_width) * 4);
    case 11: /********** CASE 11 **********/
        /* 8-row inter cells, one 32-bit column, two rows per step */
        if(ref_vectors == NULL)
        for( ; blks_height > 0; blks_height -= 8) {
        for(lp1 = 0; lp1 < blks_width; lp1++) {
        for(lp2 = 0; lp2 < 4; ) {
        cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
        ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
        switch(correction_type_sp[lp2 & 0x01][k]) {
            cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
            cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
            lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
            lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
            res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
            ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
            res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
            ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
            res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
            ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
            res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
            ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
            for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
            RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
            for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
            RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
            rle_v2 = (*buf1) - 1;
            LP2_CHECK(buf1,rle_v3,lp2)
            for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
                cur_lp[j] = ref_lp[j];
        av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
        lv = (lv1 & 0x7F) << 1;
        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
        LV1_CHECK(buf1,rle_v3,lv1,lp2)
        cur_frm_pos += (((width * 2) - blks_width) * 4);
        ref_frm_pos += (((width * 2) - blks_width) * 4);
    /* pop finished strips; for a strip that was split, switch to its
     * second half (right or bottom) and continue decoding */
    for( ; strip >= strip_tbl; strip--) {
        if(strip->split_flag != 0) {
            strip->split_flag = 0;
            strip->usl7 = (strip-1)->usl7;
            if(strip->split_direction) {
                strip->xpos += strip->width;
                strip->width = (strip-1)->width - strip->width;
                /* clamp the last strip column to the plane width */
                if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
                    strip->width = width - strip->xpos;
                strip->ypos += strip->height;
                strip->height = (strip-1)->height - strip->height;
/*
 * Decoder init: record dimensions, fix the output format to YUV410P,
 * build the prediction/corrector lookup tables and allocate the two
 * internal frame buffers.  Returns 0 on success or a negative AVERROR.
 */
static av_cold int indeo3_decode_init(AVCodecContext *avctx)
    Indeo3DecodeContext *s = avctx->priv_data;
    s->width = avctx->width;
    s->height = avctx->height;
    avctx->pix_fmt = PIX_FMT_YUV410P;
    avcodec_get_frame_defaults(&s->frame);
    /* only allocate frames if the table build succeeded */
    if (!(ret = build_modpred(s)))
        ret = iv_alloc_frames(s);
/*
 * Parse one Indeo 3 bitstream frame header and decode the three planes
 * into the internal double buffer.  Layout: 16-byte OS header + 2-byte
 * version, then flags/data_size/cb_offset, dimensions, and per-plane
 * data offsets (relative to byte 16).  Each plane starts with a 32-bit
 * motion-vector count followed by the vector table and cell data.
 * Returns >= 0 on success (4 signals a null/skipped frame) or a
 * negative value on error.
 */
static int iv_decode_frame(AVCodecContext *avctx,
    const uint8_t *buf, int buf_size)
    Indeo3DecodeContext *s = avctx->priv_data;
    unsigned int image_width, image_height,
                 chroma_width, chroma_height;
    unsigned int flags, cb_offset, data_size,
                 y_offset, v_offset, u_offset, mc_vector_count;
    const uint8_t *hdr_pos, *buf_pos;
    buf_pos += 18; /* skip OS header (16 bytes) and version number */
    flags = bytestream_get_le16(&buf_pos);
    data_size = bytestream_get_le32(&buf_pos);
    cb_offset = *buf_pos++;
    buf_pos += 3; /* skip reserved byte and checksum */
    image_height = bytestream_get_le16(&buf_pos);
    image_width = bytestream_get_le16(&buf_pos);
    if(av_image_check_size(image_width, image_height, 0, avctx))
    /* mid-stream dimension change: reallocate the internal buffers */
    if (image_width != avctx->width || image_height != avctx->height) {
        avcodec_set_dimensions(avctx, image_width, image_height);
        s->width  = avctx->width;
        s->height = avctx->height;
        ret = iv_alloc_frames(s);
        /* on failure leave a clean state so the next frame re-allocates */
        s->width = s->height = 0;
    /* chroma planes are quarter-size, rounded up to a multiple of 4 */
    chroma_height = ((image_height >> 2) + 3) & 0x7ffc;
    chroma_width  = ((image_width  >> 2) + 3) & 0x7ffc;
    y_offset = bytestream_get_le32(&buf_pos);
    v_offset = bytestream_get_le32(&buf_pos);
    u_offset = bytestream_get_le32(&buf_pos);
    buf_pos += 4; /* reserved */
    if(data_size == 0x80) return 4; /* null frame: keep previous picture */
    if(FFMAX3(y_offset, v_offset, u_offset) >= buf_size-16) {
        av_log(s->avctx, AV_LOG_ERROR, "y/u/v offset outside buffer\n");
    /* pick cur/ref from the double buffer.
     * NOTE(review): the condition selecting between the two assignments
     * (a flags bit in the full source) is elided in this chunk. */
    s->cur_frame = s->iv_frame + 1;
    s->ref_frame = s->iv_frame;
    s->cur_frame = s->iv_frame;
    s->ref_frame = s->iv_frame + 1;
    /* luma plane: vector count, then 2 bytes per vector, then cell data */
    buf_pos = buf + 16 + y_offset;
    mc_vector_count = bytestream_get_le32(&buf_pos);
    if(2LL*mc_vector_count >= buf_size-16-y_offset) {
        av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
    iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, image_width,
        image_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
        FFMIN(image_width, 160));
    /* chroma planes are skipped entirely in grayscale mode */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    buf_pos = buf + 16 + v_offset;
    mc_vector_count = bytestream_get_le32(&buf_pos);
    if(2LL*mc_vector_count >= buf_size-16-v_offset) {
        av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
    iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
        chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
        FFMIN(chroma_width, 40));
    buf_pos = buf + 16 + u_offset;
    mc_vector_count = bytestream_get_le32(&buf_pos);
    if(2LL*mc_vector_count >= buf_size-16-u_offset) {
        av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
    iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
        chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
        FFMIN(chroma_width, 40));
/*
 * Public decode entry point: decode the packet into the internal double
 * buffer via iv_decode_frame(), then copy the current internal frame's
 * planes row by row into a freshly acquired AVFrame (internal rows are
 * y_w/uv_w bytes wide, which may exceed the visible width due to the
 * x4 alignment applied in iv_alloc_frames()).
 */
static int indeo3_decode_frame(AVCodecContext *avctx,
    void *data, int *data_size,
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Indeo3DecodeContext *s=avctx->priv_data;
    uint8_t *src, *dest;
    if (iv_decode_frame(avctx, buf, buf_size) < 0)
    /* recycle the previous output frame before requesting a new one */
    if(s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);
    s->frame.reference = 0;
    if(avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
    /* luma plane */
    src = s->cur_frame->Ybuf;
    dest = s->frame.data[0];
    for (y = 0; y < s->height; y++) {
        memcpy(dest, src, s->cur_frame->y_w);
        src += s->cur_frame->y_w;
        dest += s->frame.linesize[0];
    /* chroma planes, quarter height/width (skipped in grayscale mode).
     * NOTE(review): s->height / 4 truncates; for heights that are not a
     * multiple of 4 the last chroma row of the 4-aligned internal plane
     * is not copied — confirm intended against YUV410P plane sizing. */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    src = s->cur_frame->Ubuf;
    dest = s->frame.data[1];
    for (y = 0; y < s->height / 4; y++) {
        memcpy(dest, src, s->cur_frame->uv_w);
        src += s->cur_frame->uv_w;
        dest += s->frame.linesize[1];
    src = s->cur_frame->Vbuf;
    dest = s->frame.data[2];
    for (y = 0; y < s->height / 4; y++) {
        memcpy(dest, src, s->cur_frame->uv_w);
        src += s->cur_frame->uv_w;
        dest += s->frame.linesize[2];
    /* hand the frame to the caller by value (pre-refcounting AVFrame API) */
    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->frame;
/*
 * Decoder teardown: release the last output frame.
 * NOTE(review): the call freeing the lookup tables / frame slab
 * (iv_free_func) is on a line elided from this chunk — confirm it is
 * invoked here in the full source.
 */
static av_cold int indeo3_decode_end(AVCodecContext *avctx)
    Indeo3DecodeContext *s = avctx->priv_data;
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);
/* Codec registration entry.
 * NOTE(review): the .name field ("indeo3" in the full source) is on a
 * line elided from this chunk. */
AVCodec ff_indeo3_decoder = {
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_INDEO3,
    .priv_data_size = sizeof(Indeo3DecodeContext),
    .init           = indeo3_decode_init,
    .close          = indeo3_decode_end,
    .decode         = indeo3_decode_frame,
    .capabilities   = CODEC_CAP_DR1, /* supports direct rendering */
    .long_name      = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),