}
#define DCT_TEMPLATE(blk, step, bias, shift, dshift, OP) \
- const int t0 = OP( 2841 * blk[1 * step] + 565 * blk[7 * step]); \
- const int t1 = OP( 565 * blk[1 * step] - 2841 * blk[7 * step]); \
- const int t2 = OP( 1609 * blk[5 * step] + 2408 * blk[3 * step]); \
- const int t3 = OP( 2408 * blk[5 * step] - 1609 * blk[3 * step]); \
- const int t4 = OP( 1108 * blk[2 * step] - 2676 * blk[6 * step]); \
- const int t5 = OP( 2676 * blk[2 * step] + 1108 * blk[6 * step]); \
+ const int t0 = OP(2841 * blk[1 * step] + 565 * blk[7 * step]); \
+ const int t1 = OP( 565 * blk[1 * step] - 2841 * blk[7 * step]); \
+ const int t2 = OP(1609 * blk[5 * step] + 2408 * blk[3 * step]); \
+ const int t3 = OP(2408 * blk[5 * step] - 1609 * blk[3 * step]); \
+ const int t4 = OP(1108 * blk[2 * step] - 2676 * blk[6 * step]); \
+ const int t5 = OP(2676 * blk[2 * step] + 1108 * blk[6 * step]); \
- const int t6 = ((blk[0 * step] + blk[4 * step]) << dshift) + bias; \
- const int t7 = ((blk[0 * step] - blk[4 * step]) << dshift) + bias; \
+ const int t6 = ((blk[0 * step] + blk[4 * step]) * (1 << dshift)) + bias; \
+ const int t7 = ((blk[0 * step] - blk[4 * step]) * (1 << dshift)) + bias; \
const int t8 = t0 + t2; \
const int t9 = t0 - t2; \
- const int tA = 181 * (t9 + (t1 - t3)) + 0x80 >> 8; \
- const int tB = 181 * (t9 - (t1 - t3)) + 0x80 >> 8; \
+ const int tA = (int)(181U * (t9 + (t1 - t3)) + 0x80) >> 8; \
+ const int tB = (int)(181U * (t9 - (t1 - t3)) + 0x80) >> 8; \
const int tC = t1 + t3; \
\
blk[0 * step] = (t6 + t5 + t8) >> shift; \
CLVContext *c = avctx->priv_data;
GetByteContext gb;
uint32_t frame_type;
- int i, j;
- int ret;
+ int i, j, ret;
+ int mb_ret = 0;
bytestream2_init(&gb, buf, buf_size);
- if (avctx->codec_tag == MKTAG('C','L','V','1')) {
+ if (avctx->codec_tag == MKTAG('C', 'L', 'V', '1')) {
int skip = bytestream2_get_byte(&gb);
bytestream2_skip(&gb, (skip + 1) * 8);
}
frame_type = bytestream2_get_byte(&gb);
- if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
- return ret;
-
- c->pic->key_frame = frame_type & 0x20 ? 1 : 0;
- c->pic->pict_type = frame_type & 0x20 ? AV_PICTURE_TYPE_I
- : AV_PICTURE_TYPE_P;
if (frame_type & 0x2) {
- c->pic->pict_type = frame_type & 0x20 ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+ if (buf_size < c->mb_width * c->mb_height) {
+ av_log(avctx, AV_LOG_ERROR, "Packet too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
+ return ret;
+
+ c->pic->key_frame = frame_type & 0x20 ? 1 : 0;
+            c->pic->pict_type = frame_type & 0x20 ? AV_PICTURE_TYPE_I
+                                                  : AV_PICTURE_TYPE_P;
+
bytestream2_get_be32(&gb); // frame size;
c->ac_quant = bytestream2_get_byte(&gb);
c->luma_dc_quant = 32;
for (j = 0; j < c->mb_height; j++) {
for (i = 0; i < c->mb_width; i++) {
- ret |= decode_mb(c, i, j);
+ ret = decode_mb(c, i, j);
+ if (ret < 0)
+ mb_ret = ret;
}
}
+
+ if ((ret = av_frame_ref(data, c->pic)) < 0)
+ return ret;
+
+ *got_frame = 1;
} else {
+ if (!c->iframes_warning)
+ avpriv_report_missing_feature(avctx, "Non-I-frames in Clearvideo");
+ c->iframes_warning = 1;
+ return AVERROR_PATCHWELCOME;
}
- if ((ret = av_frame_ref(data, c->pic)) < 0)
- return ret;
-
- *got_frame = 1;
-
- return ret < 0 ? ret : buf_size;
+ return mb_ret < 0 ? mb_ret : buf_size;
}
static av_cold int clv_decode_init(AVCodecContext *avctx)