/*
 * Lagarith lossless decoder
 * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lagarith lossless decoder
 * @author Nathan Caldwell
 */
33 #include "lagarithrac.h"
34 #include "lossless_videodsp.h"
/** Frame-type byte found at offset 0 of every Lagarith packet. */
enum LagarithFrameType {
    FRAME_RAW           = 1,  /**< uncompressed */
    FRAME_U_RGB24       = 2,  /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,  /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,  /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,  /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,  /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,  /**< obsolete arithmetic coded RGB (no longer
                                   encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,  /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,  /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10, /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11, /**< reduced resolution YV12 frame */
};
51 typedef struct LagarithContext {
52 AVCodecContext *avctx;
53 LLVidDSPContext llviddsp;
54 int zeros; /**< number of consecutive zero bytes encountered */
55 int zeros_rem; /**< number of zero bytes remaining to output */
57 int rgb_planes_allocated;
/**
 * Compute the 52-bit mantissa of 1/(double)denom.
 * This crazy format uses floats in an entropy coder and we have to match x86
 * rounding exactly, thus ordinary floats aren't portable enough.
 * @param denom denominator
 * @return 52-bit mantissa
 * @see softfloat_mul
 */
static uint64_t softfloat_reciprocal(uint32_t denom)
{
    /* Exponent needed to normalize the quotient to 52 significant bits. */
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;

    /* NOTE(review): reconstructed normalization/rounding steps — the
     * original interior lines were elided from this excerpt; verify
     * against upstream before relying on exact bit behavior. */
    ret <<= shift;
    err <<= shift;
    err +=  denom / 2; /* round to nearest */

    return ret + err / denom;
}
/**
 * (uint32_t)(x*f), where f has the given mantissa, and exponent 0
 * Used in combination with softfloat_reciprocal computes x/(double)denom.
 * @param x 32-bit integer factor
 * @param mantissa mantissa of f with exponent 0
 * @return 32-bit integer value (x*f)
 * @see softfloat_reciprocal
 */
static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
{
    /* 64x32 multiply split into low and high 32-bit halves of the mantissa. */
    uint64_t l = x * (mantissa & 0xffffffff);
    uint64_t h = x * (mantissa >> 32);

    /* NOTE(review): the carry-propagation/rounding lines below are
     * reconstructed — the original interior lines were elided from this
     * excerpt; verify against upstream. The odd rounding term emulates
     * the x87 behavior of the reference encoder. */
    h += l >> 32;
    l &= 0xffffffff;
    l += 1LL << av_log2(h >> 21);
    h += l >> 32;

    return h >> 20;
}
/**
 * Zigzag-map a signed byte to an unsigned run length:
 * 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
 * Equivalent to the branchless (x * 2) ^ (x >> 7) form.
 */
static uint8_t lag_calc_zero_run(int8_t x)
{
    if (x < 0)
        return (uint8_t)(-2 * x - 1);
    return (uint8_t)(2 * x);
}
/**
 * Decode one variable-length probability value from the bitstream.
 * NOTE(review): interior lines of this function are elided from this
 * excerpt; the comments below annotate only the visible statements.
 */
104 static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
/* Fibonacci numbers: step sizes added while building up the bit count. */
106 static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
/* At most 7 prefix steps, one per entry of series[]. */
113 for (i = 0; i < 7; i++) {
/* Reject bit counts that cannot describe a valid 32-bit value. */
122 if (bits < 0 || bits > 31) {
125 } else if (bits == 0) {
/* Read the payload using the just-decoded bit length. */
130 val = get_bits_long(gb, bits);
/**
 * Read the per-symbol probability model for the range coder and convert it
 * into a cumulative table whose total is an exact power of two.
 * NOTE(review): interior lines are elided from this excerpt; comments
 * annotate only the visible statements.
 * @return 0 on success, a negative AVERROR code on invalid data
 */
138 static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
140 int i, j, scale_factor;
141 unsigned prob, cumulative_target;
142 unsigned cumul_prob = 0;
143 unsigned scaled_cumul_prob = 0;
/* Sentinel entry so lookups past the last symbol terminate. */
147 rac->prob[257] = UINT_MAX;
148 /* Read probabilities from bitstream */
149 for (i = 1; i < 257; i++) {
150 if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
151 av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
/* Guard the running sum in 64 bits before committing it to 32 bits. */
154 if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
155 av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
158 cumul_prob += rac->prob[i];
/* A zero probability is followed by a run length of further zeros. */
160 if (lag_decode_prob(gb, &prob)) {
161 av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
166 for (j = 0; j < prob; j++)
174 av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
/* Single non-zero symbol: the remaining payload must be empty. */
178 if (nnz == 1 && (show_bits_long(gb, 32) & 0xFFFFFF)) {
179 return AVERROR_INVALIDDATA;
182 /* Scale probabilities so cumulative probability is an even power of 2. */
183 scale_factor = av_log2(cumul_prob);
/* Non-power-of-two total: rescale every entry through the softfloat
 * helpers so rounding matches the x86 reference implementation. */
185 if (cumul_prob & (cumul_prob - 1)) {
186 uint64_t mul = softfloat_reciprocal(cumul_prob);
187 for (i = 1; i <= 128; i++) {
188 rac->prob[i] = softfloat_mul(rac->prob[i], mul);
189 scaled_cumul_prob += rac->prob[i];
/* The first half must have produced a non-zero running total. */
191 if (scaled_cumul_prob <= 0) {
192 av_log(rac->avctx, AV_LOG_ERROR, "Scaled probabilities invalid\n");
193 return AVERROR_INVALIDDATA;
195 for (; i < 257; i++) {
196 rac->prob[i] = softfloat_mul(rac->prob[i], mul);
197 scaled_cumul_prob += rac->prob[i];
/* scale_factor is used as a shift count below, so it must stay < 32. */
201 if (scale_factor >= 32U)
202 return AVERROR_INVALIDDATA;
203 cumulative_target = 1U << scale_factor;
205 if (scaled_cumul_prob > cumulative_target) {
206 av_log(rac->avctx, AV_LOG_ERROR,
207 "Scaled probabilities are larger than target!\n");
/* Distribute the rounding deficit over symbols 1..128, cycling. */
211 scaled_cumul_prob = cumulative_target - scaled_cumul_prob;
213 for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
218 /* Comment from reference source:
219 * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
220 * // since the compression change is negligible and fixing it
221 * // breaks backwards compatibility
222 * b =- (signed int)b;
232 rac->scale = scale_factor;
234 /* Fill probability array with cumulative probability for each symbol. */
235 for (i = 1; i < 257; i++)
236 rac->prob[i] += rac->prob[i - 1];
/**
 * Median prediction with Lagarith's gradient masking quirk.
 * NOTE(review): interior lines (and the tail of the parameter list) are
 * elided from this excerpt; comments annotate only the visible statements.
 */
241 static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
242 uint8_t *diff, int w, int *left,
245 /* This is almost identical to add_hfyu_median_pred in huffyuvdsp.h.
246 * However the &0xFF on the gradient predictor yields incorrect output
255 for (i = 0; i < w; i++) {
/* Median of left, top and gradient predictors, plus the coded residual. */
256 l = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
/**
 * Reverse the spatial prediction for one line of a planar (non-YUY2) plane.
 * NOTE(review): interior lines are elided from this excerpt; comments
 * annotate only the visible statements.
 */
265 static void lag_pred_line(LagarithContext *l, uint8_t *buf,
266 int width, int stride, int line)
271 /* Left prediction only for first line */
272 L = l->llviddsp.add_left_pred(buf, buf, width, 0);
274 /* Left pixel is actually prev_row[width] */
275 L = buf[width - stride - 1];
278 /* Second line, left predict first pixel, the rest of the line is median predicted
279 * NOTE: In the case of RGB this pixel is top predicted */
280 TL = l->avctx->pix_fmt == AV_PIX_FMT_YUV420P ? buf[-stride] : L;
282 /* Top left is 2 rows back, last pixel */
283 TL = buf[width - (2 * stride) - 1];
/* Remaining lines use the Lagarith-specific median predictor. */
286 add_lag_median_prediction(buf, buf - stride, buf,
/**
 * Reverse the prediction for one line of a YUY2-layout plane; the first
 * lines and the first HEAD pixels get special handling.
 * NOTE(review): interior lines (and the tail of the parameter list) are
 * elided from this excerpt; comments annotate only the visible statements.
 */
291 static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
292 int width, int stride, int line,
/* First line: plain left prediction only. */
301 l->llviddsp.add_left_pred(buf, buf, width, 0);
/* Luma lines start median prediction 4 pixels in, chroma 2 pixels in. */
307 const int HEAD = is_luma ? 4 : 2;
310 L = buf[width - stride - 1];
311 TL = buf[HEAD - stride - 1];
312 for (i = 0; i < HEAD; i++) {
/* Scalar median prediction for the rest of the second line. */
316 for (; i < width; i++) {
317 L = mid_pred(L & 0xFF, buf[i - stride], (L + buf[i - stride] - TL) & 0xFF) + buf[i];
318 TL = buf[i - stride];
/* Later lines: standard median prediction via the DSP helper. */
322 TL = buf[width - (2 * stride) - 1];
323 L = buf[width - stride - 1];
324 l->llviddsp.add_median_pred(buf, buf - stride, buf, width, &L, &TL);
/**
 * Range-decode one line of pixels, expanding zero runs on the fly.
 * NOTE(review): interior lines are elided from this excerpt; comments
 * annotate only the visible statements.
 */
328 static int lag_decode_line(LagarithContext *l, lag_rac *rac,
329 uint8_t *dst, int width, int stride,
338 /* Output any zeros remaining from the previous run */
341 int count = FFMIN(l->zeros_rem, width - i);
342 memset(dst + i, 0, count);
344 l->zeros_rem -= count;
/* Decode the next literal byte from the range coder. */
348 dst[i] = lag_get_rac(rac);
/* esc_count consecutive zeros escape into an explicit run length. */
357 if (l->zeros == esc_count) {
358 int index = lag_get_rac(rac);
363 l->zeros_rem = lag_calc_zero_run(index);
/**
 * Decode a line coded purely with zero runs (no range coding).
 * NOTE(review): interior lines are elided from this excerpt; comments
 * annotate only the visible statements.
 * @return number of source bytes consumed, or a negative AVERROR code
 */
370 static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
371 const uint8_t *src, const uint8_t *src_end,
372 int width, int esc_count)
376 uint8_t zero_run = 0;
377 const uint8_t *src_start = src;
/* All-ones/all-zeros masks select how many bytes join the escape test. */
378 uint8_t mask1 = -(esc_count < 2);
379 uint8_t mask2 = -(esc_count < 3);
380 uint8_t *end = dst + (width - 2);
/* Rarely exercised path: request samples to improve coverage. */
382 avpriv_request_sample(l->avctx, "zero_run_line");
384 memset(dst, 0, width);
/* Flush zeros left over from the previous line first. */
388 count = FFMIN(l->zeros_rem, width - i);
389 if (end - dst < count) {
390 av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
391 return AVERROR_INVALIDDATA;
394 memset(dst, 0, count);
395 l->zeros_rem -= count;
/* Scan forward until an escape pattern (or the line end) is found. */
401 while (!zero_run && dst + i < end) {
/* Need at least three readable source bytes for the escape test. */
403 if (i+2 >= src_end - src)
404 return AVERROR_INVALIDDATA;
406 !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
/* Translate the escape byte into a zero-run length. */
413 l->zeros_rem = lag_calc_zero_run(src[i]);
423 return src - src_start;
/**
 * Decode one whole plane — range-coded, zero-run-coded, raw, or solid
 * fill — then undo the spatial prediction.
 * NOTE(review): interior lines are elided from this excerpt; comments
 * annotate only the visible statements.
 * @return 0 on success, a negative AVERROR code on failure
 */
428 static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
429 int width, int height, int stride,
430 const uint8_t *src, int src_size)
439 const uint8_t *src_end = src + src_size;
442 rac.avctx = l->avctx;
446 return AVERROR_INVALIDDATA;
/* Default payload length: one byte per pixel. */
450 length = width * height;
452 return AVERROR_INVALIDDATA;
/* An explicit 32-bit little-endian length may shorten the payload. */
453 if (esc_count && AV_RL32(src + 1) < length) {
454 length = AV_RL32(src + 1);
458 if ((ret = init_get_bits8(&gb, src + offset, src_size - offset)) < 0)
461 if (lag_read_prob_header(&rac, &gb) < 0)
/* Range-coded plane: decode line by line, watching for overread. */
464 ff_lag_rac_init(&rac, &gb, length - stride);
465 for (i = 0; i < height; i++) {
466 if (rac.overread > MAX_OVERREAD)
467 return AVERROR_INVALIDDATA;
468 read += lag_decode_line(l, &rac, dst + (i * stride), width,
473 av_log(l->avctx, AV_LOG_WARNING,
474 "Output more bytes than length (%d of %"PRIu32")\n", read,
/* Escape codes below 8 select pure zero-run coding. */
476 } else if (esc_count < 8) {
481 /* Zero run coding only, no range coding. */
482 for (i = 0; i < height; i++) {
483 int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
484 src_end, width, esc_count);
/* Uncompressed plane must carry a full width*height payload. */
490 if (src_size < width * height)
491 return AVERROR_INVALIDDATA; // buffer not big enough
492 /* Plane is stored uncompressed */
493 for (i = 0; i < height; i++) {
494 memcpy(dst + (i * stride), src, width);
498 } else if (esc_count == 0xff) {
499 /* Plane is a solid run of given value */
500 for (i = 0; i < height; i++)
501 memset(dst + i * stride, src[1], width);
502 /* Do not apply prediction.
503 Note: memset to 0 above, setting first value to src[1]
504 and applying prediction gives the same result. */
507 av_log(l->avctx, AV_LOG_ERROR,
508 "Invalid zero run escape code! (%#x)\n", esc_count);
/* Finally undo the spatial prediction; YUV422P uses the YUY2 variant. */
512 if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
513 for (i = 0; i < height; i++) {
514 lag_pred_line(l, dst, width, stride, i);
518 for (i = 0; i < height; i++) {
519 lag_pred_line_yuy2(l, dst, width, stride, i,
520 width == l->avctx->width);
/*
 * Top-level per-packet decode entry point.
 * NOTE(review): interior lines of this function are elided from this
 * excerpt; the comments added below annotate only the visible statements.
 */
530 * @param avctx codec context
531 * @param data output AVFrame
532 * @param data_size size of output data or 0 if no picture is returned
533 * @param avpkt input packet
534 * @return number of consumed bytes on success or negative if decode fails
536 static int lag_decode_frame(AVCodecContext *avctx,
537 void *data, int *got_frame, AVPacket *avpkt)
539 const uint8_t *buf = avpkt->data;
540 unsigned int buf_size = avpkt->size;
541 LagarithContext *l = avctx->priv_data;
542 ThreadFrame frame = { .f = data };
543 AVFrame *const p = data;
/* Plane offsets; offset_ry defaults to the 9-byte YUV frame header. */
545 uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
547 uint8_t *srcs[4], *dst;
548 int i, j, planes = 3;
/* The frame header carries two plane offsets at bytes 1..8. */
555 offset_gu = AV_RL32(buf + 1);
556 offset_bv = AV_RL32(buf + 5);
/* Solid-fill frames: pick a pixel format, then flood the picture. */
559 case FRAME_SOLID_RGBA:
560 avctx->pix_fmt = AV_PIX_FMT_RGB32;
561 case FRAME_SOLID_GRAY:
562 if (frametype == FRAME_SOLID_GRAY)
563 if (avctx->bits_per_coded_sample == 24) {
564 avctx->pix_fmt = AV_PIX_FMT_RGB24;
566 avctx->pix_fmt = AV_PIX_FMT_0RGB32;
570 if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
574 if (frametype == FRAME_SOLID_RGBA) {
575 for (j = 0; j < avctx->height; j++) {
576 for (i = 0; i < avctx->width; i++)
577 AV_WN32(dst + i * 4, offset_gu);
578 dst += p->linesize[0];
581 for (j = 0; j < avctx->height; j++) {
/* Grayscale solid frame: replicate the single byte across each row. */
582 memset(dst, buf[1], avctx->width * planes);
583 dst += p->linesize[0];
587 case FRAME_SOLID_COLOR:
588 if (avctx->bits_per_coded_sample == 24) {
589 avctx->pix_fmt = AV_PIX_FMT_RGB24;
591 avctx->pix_fmt = AV_PIX_FMT_RGB32;
/* Force the alpha byte opaque for 32-bit output. */
592 offset_gu |= 0xFFU << 24;
595 if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
599 for (j = 0; j < avctx->height; j++) {
600 for (i = 0; i < avctx->width; i++)
601 if (avctx->bits_per_coded_sample == 24) {
602 AV_WB24(dst + i * 3, offset_gu);
604 AV_WN32(dst + i * 4, offset_gu);
606 dst += p->linesize[0];
/* Arithmetic-coded RGB(A): planes decoded separately, then packed. */
609 case FRAME_ARITH_RGBA:
610 avctx->pix_fmt = AV_PIX_FMT_RGB32;
613 offs[3] = AV_RL32(buf + 9);
614 case FRAME_ARITH_RGB24:
616 if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
617 avctx->pix_fmt = AV_PIX_FMT_RGB24;
619 if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
/* Scratch planes: 16-byte-aligned stride plus one byte of slack. */
626 l->rgb_stride = FFALIGN(avctx->width, 16);
627 av_fast_malloc(&l->rgb_planes, &l->rgb_planes_allocated,
628 l->rgb_stride * avctx->height * planes + 1);
629 if (!l->rgb_planes) {
630 av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
631 return AVERROR(ENOMEM);
/* Decode bottom-up: start each plane at its last row, negative stride. */
633 for (i = 0; i < planes; i++)
634 srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
635 for (i = 0; i < planes; i++)
636 if (buf_size <= offs[i]) {
637 av_log(avctx, AV_LOG_ERROR,
638 "Invalid frame offsets\n");
639 return AVERROR_INVALIDDATA;
642 for (i = 0; i < planes; i++)
643 lag_decode_arith_plane(l, srcs[i],
644 avctx->width, avctx->height,
645 -l->rgb_stride, buf + offs[i],
/* Interleave the decoded planes into packed output pixels. */
648 for (i = 0; i < planes; i++)
649 srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
650 for (j = 0; j < avctx->height; j++) {
651 for (i = 0; i < avctx->width; i++) {
658 if (frametype == FRAME_ARITH_RGBA) {
660 AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
667 dst += p->linesize[0];
668 for (i = 0; i < planes; i++)
669 srcs[i] += l->rgb_stride;
672 case FRAME_ARITH_YUY2:
673 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
675 if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
678 if (offset_ry >= buf_size ||
679 offset_gu >= buf_size ||
680 offset_bv >= buf_size) {
681 av_log(avctx, AV_LOG_ERROR,
682 "Invalid frame offsets\n");
683 return AVERROR_INVALIDDATA;
/* 4:2:2 layout: full-height planes, chroma at half width. */
686 lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
687 p->linesize[0], buf + offset_ry,
688 buf_size - offset_ry);
689 lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
690 avctx->height, p->linesize[1],
691 buf + offset_gu, buf_size - offset_gu);
692 lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
693 avctx->height, p->linesize[2],
694 buf + offset_bv, buf_size - offset_bv);
696 case FRAME_ARITH_YV12:
697 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
699 if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
701 if (buf_size <= offset_ry || buf_size <= offset_gu || buf_size <= offset_bv) {
702 return AVERROR_INVALIDDATA;
705 if (offset_ry >= buf_size ||
706 offset_gu >= buf_size ||
707 offset_bv >= buf_size) {
708 av_log(avctx, AV_LOG_ERROR,
709 "Invalid frame offsets\n");
710 return AVERROR_INVALIDDATA;
/* YV12 stores V before U, hence data[2] is fed before data[1] here. */
713 lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
714 p->linesize[0], buf + offset_ry,
715 buf_size - offset_ry);
716 lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
717 (avctx->height + 1) / 2, p->linesize[2],
718 buf + offset_gu, buf_size - offset_gu);
719 lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
720 (avctx->height + 1) / 2, p->linesize[1],
721 buf + offset_bv, buf_size - offset_bv);
/* Unknown frame type: report and bail out. */
724 av_log(avctx, AV_LOG_ERROR,
725 "Unsupported Lagarith frame type: %#"PRIx8"\n", frametype);
726 return AVERROR_PATCHWELCOME;
734 static av_cold int lag_decode_init(AVCodecContext *avctx)
736 LagarithContext *l = avctx->priv_data;
739 ff_llviddsp_init(&l->llviddsp);
745 static av_cold int lag_decode_init_thread_copy(AVCodecContext *avctx)
747 LagarithContext *l = avctx->priv_data;
754 static av_cold int lag_decode_end(AVCodecContext *avctx)
756 LagarithContext *l = avctx->priv_data;
758 av_freep(&l->rgb_planes);
763 AVCodec ff_lagarith_decoder = {
765 .long_name = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
766 .type = AVMEDIA_TYPE_VIDEO,
767 .id = AV_CODEC_ID_LAGARITH,
768 .priv_data_size = sizeof(LagarithContext),
769 .init = lag_decode_init,
770 .init_thread_copy = ONLY_IF_THREADS_ENABLED(lag_decode_init_thread_copy),
771 .close = lag_decode_end,
772 .decode = lag_decode_frame,
773 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,