2 * LucasArts Smush video decoder
3 * Copyright (c) 2006 Cyril Zorin
4 * Copyright (c) 2011 Konstantin Shishkov
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "bytestream.h"
27 #include "libavutil/bswap.h"
28 #include "sanm_data.h"
    AVCodecContext *avctx;              /* parent codec context (logging, buffer API) */
    int version, subversion;            /* version: nonzero = no extradata (RGB565 path), 0 = paletted v1 */
    int16_t delta_pal[768];             /* XPAL delta-palette values, 3 per palette entry */
    int aligned_width, aligned_height;  /* frame dimensions rounded up to a multiple of 8 */
    AVFrame frame, *output;             /* owned frame storage and the pointer returned to the caller */
    uint16_t *frm0, *frm1, *frm2;       /* current frame and two reference frame buffers */
    uint8_t *stored_frame;              /* frame copy kept for STOR/FTCH chunks (v1 only) */
    uint32_t frm0_size, frm1_size, frm2_size; /* allocated sizes of the frame buffers */
    uint32_t stored_frame_size;         /* allocated size of stored_frame */
    unsigned int rle_buf_size;          /* allocated size of the RLE scratch buffer */
    long npixels, buf_size;             /* pixel count and per-buffer byte size */
    uint16_t codebook[256];             /* per-frame 16bpp color codebook */
    uint16_t small_codebook[4];         /* 4-entry codebook used by opcodes 0xf9..0xfc */
    int8_t p4x4glyphs[NGLYPHS][16];     /* precomputed 4x4 glyph bitmaps */
    int8_t p8x8glyphs[NGLYPHS][64];     /* precomputed 8x8 glyph bitmaps */
    int seq_num, codec, rotate_code, rle_output_size; /* current frame header fields */
    uint32_t width, height;             /* current coded frame dimensions */
 * Return enum GlyphEdge of box where point (x, y) lies.
 *
 * @param x x point coordinate
 * @param y y point coordinate
 * @param edge_size box width/height.
 */
static enum GlyphEdge which_edge(int x, int y, int edge_size)
    const int edge_max = edge_size - 1;
    /* NOTE(review): the y == 0 / x == 0 branches and their return values are
     * not visible in this chunk; only the edge_max comparisons are shown. */
    } else if (y == edge_max) {
    } else if (x == edge_max) {
/* Map a pair of box edges to a glyph fill direction; the direction chooses
 * which side of the interpolated edge0->edge1 line gets filled with ones. */
static enum GlyphDir which_direction(enum GlyphEdge edge0, enum GlyphEdge edge1)
    if ((edge0 == LEFT_EDGE && edge1 == RIGHT_EDGE) ||
        (edge1 == LEFT_EDGE && edge0 == RIGHT_EDGE) ||
        (edge0 == BOTTOM_EDGE && edge1 != TOP_EDGE) ||
        (edge1 == BOTTOM_EDGE && edge0 != TOP_EDGE)) {
    } else if ((edge0 == TOP_EDGE && edge1 != BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 != BOTTOM_EDGE)) {
    } else if ((edge0 == LEFT_EDGE && edge1 != RIGHT_EDGE) ||
               (edge1 == LEFT_EDGE && edge0 != RIGHT_EDGE)) {
    } else if ((edge0 == TOP_EDGE && edge1 == BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 == BOTTOM_EDGE) ||
               (edge0 == RIGHT_EDGE && edge1 != LEFT_EDGE) ||
               (edge1 == RIGHT_EDGE && edge0 != LEFT_EDGE)) {
/**
 * Interpolate two points.
 *
 * Writes the rounded linear interpolation of (x0,y0)..(x1,y1) at position
 * pos/npoints into points[0] (x) and points[1] (y).
 */
static void interp_point(int8_t *points, int x0, int y0, int x1, int y1,
                         int pos, int npoints)
    /* NOTE(review): divides by npoints — assumes npoints > 0; make_glyphs can
     * compute npoints == 0 when both endpoints coincide — TODO confirm callers
     * never reach this with npoints == 0 (division by zero otherwise). */
    points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
    points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
/**
 * Construct glyphs by iterating through vectors coordinates.
 *
 * For every ordered pair of coordinate vectors, draws the interpolated line
 * between them into a glyph and floods one side of it (per the edge pair's
 * direction) with ones.
 *
 * @param pglyphs pointer to table where glyphs are stored
 * @param xvec pointer to x component of vectors coordinates
 * @param yvec pointer to y component of vectors coordinates
 * @param side_length glyph width/height.
 */
static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec,
                        const int side_length)
    const int glyph_size = side_length * side_length;
    int8_t *pglyph = pglyphs;

    for (i = 0; i < GLYPH_COORD_VECT_SIZE; i++) {
        enum GlyphEdge edge0 = which_edge(x0, y0, side_length);

        for (j = 0; j < GLYPH_COORD_VECT_SIZE; j++, pglyph += glyph_size) {
            enum GlyphEdge edge1 = which_edge(x1, y1, side_length);
            enum GlyphDir dir = which_direction(edge0, edge1);
            int npoints = FFMAX(FFABS(x1 - x0), FFABS(y1 - y0));

            /* draw the line point by point, filling towards the chosen edge */
            for (ipoint = 0; ipoint <= npoints; ipoint++) {
                interp_point(point, x0, y0, x1, y1, ipoint, npoints);
                /* fill upwards from the interpolated point */
                for (irow = point[1]; irow >= 0; irow--)
                    pglyph[point[0] + irow * side_length] = 1;
                /* fill downwards */
                for (irow = point[1]; irow < side_length; irow++)
                    pglyph[point[0] + irow * side_length] = 1;
                /* fill leftwards */
                for (icol = point[0]; icol >= 0; icol--)
                    pglyph[icol + point[1] * side_length] = 1;
                /* fill rightwards */
                for (icol = point[0]; icol < side_length; icol++)
                    pglyph[icol + point[1] * side_length] = 1;
/* Record frame dimensions and derive the 8-aligned buffer geometry used by
 * the block-based subcodecs. */
static void init_sizes(SANMVideoContext *ctx, int width, int height)
    ctx->height = height;
    ctx->npixels = width * height;

    ctx->aligned_width = FFALIGN(width, 8);
    ctx->aligned_height = FFALIGN(height, 8);

    /* buffers are sized for 16bpp even in the paletted path */
    ctx->buf_size = ctx->aligned_width * ctx->aligned_height * sizeof(ctx->frm0[0]);
/* Free all frame and scratch buffers; av_freep() NULLs the pointers so this
 * is safe to call on a partially-initialized context. */
static void destroy_buffers(SANMVideoContext *ctx)
    av_freep(&ctx->frm0);
    av_freep(&ctx->frm1);
    av_freep(&ctx->frm2);
    av_freep(&ctx->stored_frame);
    av_freep(&ctx->rle_buf);
/* (Re)allocate the three frame buffers, plus the stored frame for the v1
 * (paletted) path; on any failure everything is released.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static av_cold int init_buffers(SANMVideoContext *ctx)
    av_fast_padded_malloc(&ctx->frm0, &ctx->frm0_size, ctx->buf_size);
    av_fast_padded_malloc(&ctx->frm1, &ctx->frm1_size, ctx->buf_size);
    av_fast_padded_malloc(&ctx->frm2, &ctx->frm2_size, ctx->buf_size);
    /* stored_frame is only needed when version == 0 (STOR/FTCH chunks) */
    av_fast_padded_malloc(&ctx->stored_frame, &ctx->stored_frame_size, ctx->buf_size);

    if (!ctx->frm0 || !ctx->frm1 || !ctx->frm2 || (!ctx->stored_frame && !ctx->version)) {
        destroy_buffers(ctx);
        return AVERROR(ENOMEM);
/* Cycle the frame buffers after decoding: code 2 additionally swaps the two
 * reference frames before the current frame becomes a reference. */
static void rotate_bufs(SANMVideoContext *ctx, int rotate_code)
    av_dlog(ctx->avctx, "rotate %d\n", rotate_code);
    if (rotate_code == 2)
        FFSWAP(uint16_t*, ctx->frm1, ctx->frm2);
    FFSWAP(uint16_t*, ctx->frm2, ctx->frm0);
/* Decoder init: detect container version from extradata presence, pick the
 * pixel format, allocate buffers, build glyph tables and (for v1) read the
 * subversion and initial palette from extradata. */
static av_cold int decode_init(AVCodecContext *avctx)
    SANMVideoContext *ctx = avctx->priv_data;

    /* v2 streams carry no extradata; v1 carries subversion + palette */
    ctx->version = !avctx->extradata_size;

    avctx->pix_fmt = ctx->version ? PIX_FMT_RGB565 : PIX_FMT_PAL8;

    init_sizes(ctx, avctx->width, avctx->height);
    if (init_buffers(ctx)) {
        av_log(avctx, AV_LOG_ERROR, "error allocating buffers\n");
        return AVERROR(ENOMEM);
    ctx->output = &ctx->frame;
    ctx->output->data[0] = 0;

    /* precompute the glyph patterns used by the block subcodecs */
    make_glyphs(ctx->p4x4glyphs[0], glyph4_x, glyph4_y, 4);
    make_glyphs(ctx->p8x8glyphs[0], glyph8_x, glyph8_y, 8);

    /* v1: 2 bytes subversion + 256 * 4 bytes palette = 1026 bytes minimum */
    if (avctx->extradata_size < 1026) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
        return AVERROR_INVALIDDATA;

    ctx->subversion = AV_RL16(avctx->extradata);
    for (i = 0; i < 256; i++)
        ctx->pal[i] = AV_RL32(avctx->extradata + 2 + i * 4);
/* Decoder teardown: free buffers and release any frame still held. */
static av_cold int decode_end(AVCodecContext *avctx)
    SANMVideoContext *ctx = avctx->priv_data;

    destroy_buffers(ctx);

    if (ctx->frame.data[0]) {
        avctx->release_buffer(avctx, &ctx->frame);
        ctx->frame.data[0] = 0;
/* Decode an RLE-packed buffer of out_size bytes into dst.
 * Each opcode byte encodes a run length of (opcode >> 1) + 1; the low bit
 * selects a fill run (one color byte repeated) vs. a literal copy.
 * Returns 0 on success, AVERROR_INVALIDDATA on truncated input. */
static int rle_decode(SANMVideoContext *ctx, uint8_t *dst, const int out_size)
    int opcode, color, run_len, left = out_size;

    opcode = bytestream2_get_byte(&ctx->gb);
    run_len = (opcode >> 1) + 1;
    if (run_len > left || bytestream2_get_bytes_left(&ctx->gb) <= 0)
        return AVERROR_INVALIDDATA;

    /* fill run: single color byte repeated run_len times */
    color = bytestream2_get_byte(&ctx->gb);
    memset(dst, color, run_len);

    /* literal run: copy run_len raw bytes */
    if (bytestream2_get_bytes_left(&ctx->gb) < run_len)
        return AVERROR_INVALIDDATA;
    bytestream2_get_bufferu(&ctx->gb, dst, run_len);
/* SMUSH v1 subcodec 1: per-line RLE with a 16-bit line length prefix.
 * Decodes into frm0 at (left, top); returns 0 or AVERROR_INVALIDDATA. */
static int old_codec1(SANMVideoContext *ctx, int top,
                      int left, int width, int height)
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * ctx->pitch;
    int i, j, len, flag, code, val, pos, end;

    for (i = 0; i < height; i++) {
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;

        /* each line is prefixed with its packed byte length */
        len = bytestream2_get_le16u(&ctx->gb);
        end = bytestream2_tell(&ctx->gb) + len;

        while (bytestream2_tell(&ctx->gb) < end) {
            if (bytestream2_get_bytes_left(&ctx->gb) < 2)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&ctx->gb);
            code = (code >> 1) + 1;
            if (pos + code > width)
                return AVERROR_INVALIDDATA;
            /* fill run */
            val = bytestream2_get_byteu(&ctx->gb);
            memset(dst + pos, val, code);
            /* literal run */
            if (bytestream2_get_bytes_left(&ctx->gb) < code)
                return AVERROR_INVALIDDATA;
            for (j = 0; j < code; j++) {
                val = bytestream2_get_byteu(&ctx->gb);

    /* codec1 never rotates buffers */
    ctx->rotate_code = 0;
/* Copy a motion-compensated 4x4 block for codec37, clipping each source
 * pixel against the [0, height * stride) buffer range. */
static inline void codec37_mv(uint8_t *dst, const uint8_t *src,
                              int height, int stride, int x, int y)
    pos = x + y * stride;
    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
            /* skip pixels whose motion-compensated source is out of bounds */
            if ((pos + i) < 0 || (pos + i) >= height * stride)
/* SMUSH v1 subcodec 37: 4x4 block coder with raw, RLE and two
 * motion-compensated modes (compr 0..4); references frm2, writes frm0. */
static int old_codec37(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
    int stride = ctx->pitch;
    int compr, mvoff, seq, flags;
    uint32_t decoded_size;

    /* 16-byte codec37 header */
    compr = bytestream2_get_byte(&ctx->gb);
    mvoff = bytestream2_get_byte(&ctx->gb);
    seq = bytestream2_get_le16(&ctx->gb);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 4);
    flags = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 3);

    ctx->rotate_code = 0;

    if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))

    dst = ((uint8_t*)ctx->frm0) + left + top * stride;
    prev = ((uint8_t*)ctx->frm2) + left + top * stride;

    /* mvoff selects one of the precomputed motion vector tables */
        av_log(ctx->avctx, AV_LOG_ERROR, "invalid motion base value %d\n", mvoff);
        return AVERROR_INVALIDDATA;
    av_dlog(ctx->avctx, "compression %d\n", compr);
    /* compr 0: raw lines */
        for (i = 0; i < height; i++) {
            bytestream2_get_buffer(&ctx->gb, dst, width);
        memset(ctx->frm1, 0, ctx->height * stride);
        memset(ctx->frm2, 0, ctx->height * stride);
    /* compr 1 (RLE): decode whole area, clear the reference frames */
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        memset(ctx->frm1, 0, ctx->frm1_size);
        memset(ctx->frm2, 0, ctx->frm2_size);
    /* compr 2/4: per-4x4-block opcodes */
        for (j = 0; j < height; j += 4) {
            for (i = 0; i < width; i += 4) {
                /* inside a skip run: copy the block from the previous frame */
                for (k = 0; k < 4; k++)
                    memcpy(dst + i + k * stride, prev + i + k * stride, 4);

                if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                    return AVERROR_INVALIDDATA;
                code = bytestream2_get_byteu(&ctx->gb);
                /* code 0xff: 16 raw bytes */
                if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                    return AVERROR_INVALIDDATA;
                for (k = 0; k < 4; k++)
                    bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                /* code 0xfe: one color per row */
                if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                    return AVERROR_INVALIDDATA;
                for (k = 0; k < 4; k++)
                    memset(dst + i + k * stride, bytestream2_get_byteu(&ctx->gb), 4);
                /* code 0xfd: single color for the whole block */
                if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                    return AVERROR_INVALIDDATA;
                t = bytestream2_get_byteu(&ctx->gb);
                for (k = 0; k < 4; k++)
                    memset(dst + i + k * stride, t, 4);
                /* compr 4, code 0: start a skip run copied from prev */
                if (compr == 4 && !code) {
                    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                        return AVERROR_INVALIDDATA;
                    skip_run = bytestream2_get_byteu(&ctx->gb);
                    for (k = 0; k < 4; k++)
                        memcpy(dst + i + k * stride, prev + i + k * stride, 4);
                /* otherwise: motion-compensated copy; the table holds 255
                 * (x, y) pairs per motion base */
                mx = c37_mv[(mvoff * 255 + code) * 2 ];
                my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                codec37_mv(dst + i, prev + i + mx + my * stride,
                           ctx->height, stride, i + mx, j + my);
    /* compr 3: like compr 2/4 but without bounds pre-checks on skip codes */
        for (j = 0; j < height; j += 4) {
            for (i = 0; i < width; i += 4) {
                for (k = 0; k < 4; k++)
                    memcpy(dst + i + k * stride, prev + i + k * stride, 4);

                code = bytestream2_get_byte(&ctx->gb);
                if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                    return AVERROR_INVALIDDATA;
                for (k = 0; k < 4; k++)
                    bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                } else if (compr == 4 && !code) {
                    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                        return AVERROR_INVALIDDATA;
                    skip_run = bytestream2_get_byteu(&ctx->gb) + 1;

                mx = c37_mv[(mvoff * 255 + code) * 2];
                my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                codec37_mv(dst + i, prev + i + mx + my * stride,
                           ctx->height, stride, i + mx, j + my);

        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 37 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
/* Codec47 recursive block decoder: one opcode byte selects motion copy,
 * recursive quad-split, solid fill, glyph fill, copy-from-prev1, or a color
 * looked up via an interleaved table at offset tbl.
 * Returns 0 on success, AVERROR_INVALIDDATA on truncated input. */
static int process_block(SANMVideoContext *ctx, uint8_t *dst, uint8_t *prev1,
                         uint8_t *prev2, int stride, int tbl, int size)
    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    code = bytestream2_get_byteu(&ctx->gb);
    /* size == 2 leaf: four literal pixels */
    if (bytestream2_get_bytes_left(&ctx->gb) < 4)
        return AVERROR_INVALIDDATA;
    dst[0] = bytestream2_get_byteu(&ctx->gb);
    dst[1] = bytestream2_get_byteu(&ctx->gb);
    dst[0+stride] = bytestream2_get_byteu(&ctx->gb);
    dst[1+stride] = bytestream2_get_byteu(&ctx->gb);
    /* recursive split into four quadrants of half size */
    if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
        return AVERROR_INVALIDDATA;
    if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
        return AVERROR_INVALIDDATA;
    dst += size * stride;
    prev1 += size * stride;
    prev2 += size * stride;
    if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
        return AVERROR_INVALIDDATA;
    if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
        return AVERROR_INVALIDDATA;
    /* solid fill with a literal color byte */
    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    t = bytestream2_get_byteu(&ctx->gb);
    for (k = 0; k < size; k++)
        memset(dst + k * stride, t, size);
    /* glyph fill: glyph index + two colors; glyph bit selects the color */
    if (bytestream2_get_bytes_left(&ctx->gb) < 3)
        return AVERROR_INVALIDDATA;

    code = bytestream2_get_byteu(&ctx->gb);
    pglyph = (size == 8) ? ctx->p8x8glyphs[code] : ctx->p4x4glyphs[code];
    bytestream2_get_bufferu(&ctx->gb, colors, 2);

    for (k = 0; k < size; k++)
        for (t = 0; t < size; t++)
            dst[t + k * stride] = colors[!*pglyph++];
    /* copy unchanged block from prev1 */
    for (k = 0; k < size; k++)
        memcpy(dst + k * stride, prev1 + k * stride, size);
    /* fill with a color fetched from the frame-header table at tbl;
     * stream position is saved and restored around the seek */
    k = bytestream2_tell(&ctx->gb);
    bytestream2_seek(&ctx->gb, tbl + (code & 7), SEEK_SET);
    t = bytestream2_get_byte(&ctx->gb);
    bytestream2_seek(&ctx->gb, k, SEEK_SET);
    for (k = 0; k < size; k++)
        memset(dst + k * stride, t, size);
    /* motion-compensated copy from prev2 using the shared vector table */
    int mx = motion_vectors[code][0];
    int my = motion_vectors[code][1];
    for (k = 0; k < size; k++)
        memcpy(dst + k * stride, prev2 + mx + (my + k) * stride, size);
/* SMUSH v1 subcodec 47: raw, half-resolution, block-coded (via
 * process_block), frame-copy and RLE modes; sets rotate_code only when the
 * sequence number follows the previous frame. */
static int old_codec47(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
    int i, j, seq, compr, new_rot, tbl_pos, skip;
    int stride = ctx->pitch;
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * stride;
    uint8_t *prev1 = (uint8_t*)ctx->frm1;
    uint8_t *prev2 = (uint8_t*)ctx->frm2;
    uint32_t decoded_size;

    /* codec47 header; tbl_pos remembers where the color tables start */
    tbl_pos = bytestream2_tell(&ctx->gb);
    seq = bytestream2_get_le16(&ctx->gb);
    compr = bytestream2_get_byte(&ctx->gb);
    new_rot = bytestream2_get_byte(&ctx->gb);
    skip = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 9);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 8);

    /* optional interpolation tables (0x8080 bytes) are skipped, not used */
    bytestream2_skip(&ctx->gb, 0x8080);
    /* seq 0: clear both reference frames */
    memset(prev1, 0, ctx->height * stride);
    memset(prev2, 0, ctx->height * stride);

    av_dlog(ctx->avctx, "compression %d\n", compr);
    /* compr 0: raw frame */
    if (bytestream2_get_bytes_left(&ctx->gb) < width * height)
        return AVERROR_INVALIDDATA;
    /* NOTE(review): this copy loop reads `width` bytes per iteration of the
     * inner per-pixel loop — lines appear to be missing from this chunk;
     * verify against the full source before relying on it. */
    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++)
            bytestream2_get_bufferu(&ctx->gb, dst, width);
    /* compr 1: half-resolution, each byte expands to a 2x2 block */
    if (bytestream2_get_bytes_left(&ctx->gb) < ((width + 1) >> 1) * ((height + 1) >> 1))
        return AVERROR_INVALIDDATA;
    for (j = 0; j < height; j += 2) {
        for (i = 0; i < width; i += 2) {
            dst[i] = dst[i + 1] =
            dst[stride + i] = dst[stride + i + 1] = bytestream2_get_byteu(&ctx->gb);
    /* compr 2: 8x8 block coding, only valid on consecutive sequence numbers */
    if (seq == ctx->prev_seq + 1) {
        for (j = 0; j < height; j += 8) {
            for (i = 0; i < width; i += 8) {
                if (process_block(ctx, dst + i, prev1 + i, prev2 + i, stride,
                    return AVERROR_INVALIDDATA;
    /* compr 3/4: whole-frame copy from a reference frame */
    memcpy(ctx->frm0, ctx->frm2, ctx->pitch * ctx->height);
    memcpy(ctx->frm0, ctx->frm1, ctx->pitch * ctx->height);
    /* compr 5: RLE-packed frame */
    if (rle_decode(ctx, dst, decoded_size))
        return AVERROR_INVALIDDATA;

    av_log(ctx->avctx, AV_LOG_ERROR,
           "subcodec 47 compression %d not implemented\n", compr);
    return AVERROR_PATCHWELCOME;
    /* buffers only rotate when this frame continues the sequence */
    if (seq == ctx->prev_seq + 1)
        ctx->rotate_code = new_rot;
    ctx->rotate_code = 0;
/* Parse a FOBJ chunk header (subcodec id, placement, size), grow the frame
 * buffers if the object extends past the current frame, then dispatch to the
 * matching old_codec* decoder. */
static int process_frame_obj(SANMVideoContext *ctx)
    uint16_t codec, top, left, w, h;

    codec = bytestream2_get_le16u(&ctx->gb);
    left = bytestream2_get_le16u(&ctx->gb);
    top = bytestream2_get_le16u(&ctx->gb);
    w = bytestream2_get_le16u(&ctx->gb);
    h = bytestream2_get_le16u(&ctx->gb);

    /* objects may be larger than the advertised frame: resize on the fly */
    if (ctx->width < left + w || ctx->height < top + h) {
        ctx->avctx->width = FFMAX(left + w, ctx->width);
        ctx->avctx->height = FFMAX(top + h, ctx->height);
        init_sizes(ctx, left + w, top + h);
        if (init_buffers(ctx)) {
            av_log(ctx->avctx, AV_LOG_ERROR, "error resizing buffers\n");
            return AVERROR(ENOMEM);
    bytestream2_skip(&ctx->gb, 4);

    av_dlog(ctx->avctx, "subcodec %d\n", codec);
        return old_codec1(ctx, top, left, w, h);
        return old_codec37(ctx, top, left, w, h);
        return old_codec47(ctx, top, left, w, h);
    av_log_ask_for_sample(ctx->avctx, "unknown subcodec %d\n", codec);
    return AVERROR_PATCHWELCOME;
/* v2 subcodec 0: raw little-endian 16bpp frame. */
static int decode_0(SANMVideoContext *ctx)
    uint16_t *frm = ctx->frm0;

    if (bytestream2_get_bytes_left(&ctx->gb) < ctx->width * ctx->height * 2) {
        av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for raw frame\n");
        return AVERROR_INVALIDDATA;
    for (y = 0; y < ctx->height; y++) {
        for (x = 0; x < ctx->width; x++)
            frm[x] = bytestream2_get_le16u(&ctx->gb);
/* Placeholder for unimplemented v2 subcodecs: log and report. */
static int decode_nop(SANMVideoContext *ctx)
    av_log_ask_for_sample(ctx->avctx, "unknown/unsupported compression type\n");
    return AVERROR_PATCHWELCOME;
/* Copy a square block of 16bpp pixels between two same-pitch buffers. */
static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
    for (y = 0; y != block_size; y++, pdest += pitch, psrc += pitch)
        memcpy(pdest, psrc, block_size * sizeof(pdest[0]));
/* Fill a square block of 16bpp pixels with a single color. */
static void fill_block(uint16_t *pdest, uint16_t color, int block_size, int pitch)
    for (y = 0; y != block_size; y++, pdest += pitch)
        for (x = 0; x != block_size; x++)
/* Paint a two-color glyph (fg/bg selected per pixel by the glyph bitmap)
 * into a 16bpp block. Returns 0, or AVERROR_INVALIDDATA for a bad index. */
static int draw_glyph(SANMVideoContext *ctx, uint16_t *dst, int index, uint16_t fg_color,
                      uint16_t bg_color, int block_size, int pitch)
    uint16_t colors[2] = { fg_color, bg_color };

    /* NOTE(review): valid glyph indices are 0..NGLYPHS-1, so this check
     * should be `index >= NGLYPHS`; as written, index == NGLYPHS reads one
     * entry past the glyph tables. */
    if (index > NGLYPHS) {
        av_log(ctx->avctx, AV_LOG_ERROR, "ignoring nonexistent glyph #%u\n", index);
        return AVERROR_INVALIDDATA;

    pglyph = block_size == 8 ? ctx->p8x8glyphs[index] : ctx->p4x4glyphs[index];

    for (y = 0; y < block_size; y++, dst += pitch)
        for (x = 0; x < block_size; x++)
            *dst++ = colors[*pglyph++];
/* v2 block opcode 0xf7: codebook-indexed pixels — four packed indices for a
 * 2x2 block, or a glyph with codebook-looked-up fg/bg colors otherwise. */
static int opcode_0xf7(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
            return AVERROR_INVALIDDATA;

        /* four codebook indices packed LSB-first into one 32-bit word */
        indices = bytestream2_get_le32u(&ctx->gb);
        dst[0] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[1] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch + 1] = ctx->codebook[indices & 0xFF];
        uint16_t fgcolor, bgcolor;

        if (bytestream2_get_bytes_left(&ctx->gb) < 3)
            return AVERROR_INVALIDDATA;

        glyph = bytestream2_get_byteu(&ctx->gb);
        bgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
        fgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
/* v2 block opcode 0xf8: literal 16bpp pixels — four values for a 2x2 block,
 * or a glyph with literal fg/bg colors otherwise. */
static int opcode_0xf8(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        if (bytestream2_get_bytes_left(&ctx->gb) < 8)
            return AVERROR_INVALIDDATA;

        dst[0] = bytestream2_get_le16u(&ctx->gb);
        dst[1] = bytestream2_get_le16u(&ctx->gb);
        dst[pitch] = bytestream2_get_le16u(&ctx->gb);
        dst[pitch + 1] = bytestream2_get_le16u(&ctx->gb);
        uint16_t fgcolor, bgcolor;

        if (bytestream2_get_bytes_left(&ctx->gb) < 5)
            return AVERROR_INVALIDDATA;

        glyph = bytestream2_get_byteu(&ctx->gb);
        bgcolor = bytestream2_get_le16u(&ctx->gb);
        fgcolor = bytestream2_get_le16u(&ctx->gb);

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
/* Validate that a motion-compensated block read stays inside the frame
 * buffer (buf_size >> 1 = number of 16-bit pixels); logs and rejects
 * out-of-range vectors. */
static int good_mvec(SANMVideoContext *ctx, int cx, int cy, int mx, int my,
    int start_pos = cx + mx + (cy + my) * ctx->pitch;
    int end_pos = start_pos + (block_size - 1) * (ctx->pitch + 1);

    int good = start_pos >= 0 && end_pos < (ctx->buf_size >> 1);

        av_log(ctx->avctx, AV_LOG_ERROR, "ignoring invalid motion vector (%i, %i)->(%u, %u), block size = %u\n",
               cx + mx, cy + my, cx, cy, block_size);
/* v2 subcodec 2 recursive block decoder: one opcode byte per block selects
 * motion copy (table or explicit vector), copy-from-frm1, glyph/codebook
 * fills (0xf7..0xfe) or a quad-split recursion (0xff). */
static int codec2subblock(SANMVideoContext *ctx, int cx, int cy, int blk_size)
    int16_t mx, my, index;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    opcode = bytestream2_get_byteu(&ctx->gb);

    av_dlog(ctx->avctx, "opcode 0x%0X cx %d cy %d blk %d\n", opcode, cx, cy, blk_size);
    /* opcodes < 0xf5: motion vector from the shared table */
    mx = motion_vectors[opcode][0];
    my = motion_vectors[opcode][1];

    if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
        copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                   ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                   blk_size, ctx->pitch);
    /* 0xf5: explicit 16-bit vector encoded as index = my * width + mx */
    if (bytestream2_get_bytes_left(&ctx->gb) < 2)
        return AVERROR_INVALIDDATA;
    index = bytestream2_get_le16u(&ctx->gb);

    mx = index % ctx->width;
    my = index / ctx->width;

    if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
        copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                   ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                   blk_size, ctx->pitch);
    /* 0xf6: copy the co-located block from frm1 */
    copy_block(ctx->frm0 + cx + ctx->pitch * cy,
               ctx->frm1 + cx + ctx->pitch * cy,
               blk_size, ctx->pitch);
    /* 0xf7/0xf8: glyph or literal-pixel fills */
    opcode_0xf7(ctx, cx, cy, blk_size, ctx->pitch);
    opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
    /* 0xf9..0xfc: fill from the 4-entry small codebook */
    fill_block(ctx->frm0 + cx + cy * ctx->pitch,
               ctx->small_codebook[opcode - 0xf9], blk_size, ctx->pitch);
    /* 0xfd: fill from the 256-entry codebook */
    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;
    fill_block(ctx->frm0 + cx + cy * ctx->pitch,
               ctx->codebook[bytestream2_get_byteu(&ctx->gb)], blk_size, ctx->pitch);
    /* 0xfe: fill with a literal 16-bit color */
    if (bytestream2_get_bytes_left(&ctx->gb) < 2)
        return AVERROR_INVALIDDATA;
    fill_block(ctx->frm0 + cx + cy * ctx->pitch,
               bytestream2_get_le16u(&ctx->gb), blk_size, ctx->pitch);
    /* 2x2 leaf of the recursion decodes literal pixels */
    opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
    /* 0xff: split into four sub-blocks of half size */
    if (codec2subblock(ctx, cx , cy , blk_size))
        return AVERROR_INVALIDDATA;
    if (codec2subblock(ctx, cx + blk_size, cy , blk_size))
        return AVERROR_INVALIDDATA;
    if (codec2subblock(ctx, cx , cy + blk_size, blk_size))
        return AVERROR_INVALIDDATA;
    if (codec2subblock(ctx, cx + blk_size, cy + blk_size, blk_size))
        return AVERROR_INVALIDDATA;
/* v2 subcodec 2: decode the frame as a grid of 8x8 blocks over the aligned
 * dimensions. */
static int decode_2(SANMVideoContext *ctx)
    for (cy = 0; cy != ctx->aligned_height; cy += 8) {
        for (cx = 0; cx != ctx->aligned_width; cx += 8) {
            /* assignment-in-condition is intentional (FFmpeg idiom) */
            if (ret = codec2subblock(ctx, cx, cy, 8))
/* v2 subcodec 3: copy the older reference frame (frm2) verbatim. */
static int decode_3(SANMVideoContext *ctx)
    memcpy(ctx->frm0, ctx->frm2, ctx->frm2_size);
/* v2 subcodec 4: copy the newer reference frame (frm1) verbatim. */
static int decode_4(SANMVideoContext *ctx)
    memcpy(ctx->frm0, ctx->frm1, ctx->frm1_size);
/* v2 subcodec 5: RLE-packed frame, byte-swapped to native endianness
 * afterwards where needed. */
static int decode_5(SANMVideoContext *ctx)
    uint8_t *dst = (uint8_t*)ctx->frm0;

    if (rle_decode(ctx, dst, ctx->buf_size))
        return AVERROR_INVALIDDATA;

    npixels = ctx->npixels;
    /* NOTE(review): `*frm++ = av_bswap16(*frm);` modifies and reads frm
     * unsequenced — undefined behavior in C; should be
     * `frm[0] = av_bswap16(frm[0]); frm++;` (or equivalent). */
    *frm++ = av_bswap16(*frm);
/* v2 subcodec 6: one codebook index per pixel. */
static int decode_6(SANMVideoContext *ctx)
    int npixels = ctx->npixels;
    uint16_t *frm = ctx->frm0;

    if (bytestream2_get_bytes_left(&ctx->gb) < npixels) {
        av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for frame\n");
        return AVERROR_INVALIDDATA;
    /* translate each byte through the per-frame codebook */
    *frm++ = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
/* v2 subcodec 8: RLE-packed codebook indices — decode into a scratch buffer,
 * then translate through the codebook into 16bpp pixels. */
static int decode_8(SANMVideoContext *ctx)
    uint16_t *pdest = ctx->frm0;
    long npixels = ctx->npixels;

    av_fast_malloc(&ctx->rle_buf, &ctx->rle_buf_size, npixels);
    if (!ctx->rle_buf) {
        av_log(ctx->avctx, AV_LOG_ERROR, "RLE buffer allocation failed\n");
        return AVERROR(ENOMEM);
    rsrc = ctx->rle_buf;

    if (rle_decode(ctx, rsrc, npixels))
        return AVERROR_INVALIDDATA;

    *pdest++ = ctx->codebook[*rsrc++];
typedef int (*frm_decoder)(SANMVideoContext *ctx);

/* v2 frame decoder dispatch table, indexed by the header's codec field;
 * unimplemented entries point at decode_nop. */
static const frm_decoder v1_decoders[] = {
    decode_0, decode_nop, decode_2, decode_3, decode_4, decode_5,
    decode_6, decode_nop, decode_8
/* Parse the fixed-size v2 frame header into hdr and load the per-frame
 * codebooks into ctx. Returns 0, or an AVERROR on short/invalid input. */
static int read_frame_header(SANMVideoContext *ctx, SANMFrameHeader *hdr)
    /* header + codebooks occupy 560 bytes minimum */
    if ((ret = bytestream2_get_bytes_left(&ctx->gb)) < 560) {
        av_log(ctx->avctx, AV_LOG_ERROR, "too short input frame (%d bytes)\n",
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&ctx->gb, 8); // skip pad

    hdr->width = bytestream2_get_le32u(&ctx->gb);
    hdr->height = bytestream2_get_le32u(&ctx->gb);

    if (hdr->width != ctx->width || hdr->height != ctx->height) {
        av_log(ctx->avctx, AV_LOG_ERROR, "variable size frames are not implemented\n");
        return AVERROR_PATCHWELCOME;

    hdr->seq_num = bytestream2_get_le16u(&ctx->gb);
    hdr->codec = bytestream2_get_byteu(&ctx->gb);
    hdr->rotate_code = bytestream2_get_byteu(&ctx->gb);

    bytestream2_skip(&ctx->gb, 4); // skip pad

    /* 4-entry small codebook + background color */
    for (i = 0; i < 4; i++)
        ctx->small_codebook[i] = bytestream2_get_le16u(&ctx->gb);
    hdr->bg_color = bytestream2_get_le16u(&ctx->gb);

    bytestream2_skip(&ctx->gb, 2); // skip pad

    hdr->rle_output_size = bytestream2_get_le32u(&ctx->gb);
    /* 256-entry main color codebook */
    for (i = 0; i < 256; i++)
        ctx->codebook[i] = bytestream2_get_le16u(&ctx->gb);

    bytestream2_skip(&ctx->gb, 8); // skip pad

    av_dlog(ctx->avctx, "subcodec %d\n", hdr->codec);
1101 static void fill_frame(uint16_t *pbuf, int buf_size, uint16_t color)
/* Copy frm0 into a freshly acquired output AVFrame line by line; hdr != NULL
 * selects the 16bpp path (2 bytes/pixel), NULL the paletted 8bpp path. */
static int copy_output(SANMVideoContext *ctx, SANMFrameHeader *hdr)
    const uint8_t *src = (uint8_t*) ctx->frm0;
    int ret, dstpitch, height = ctx->height;
    int srcpitch = ctx->pitch * (hdr ? sizeof(ctx->frm0[0]) : 1);

    if ((ret = ctx->avctx->get_buffer(ctx->avctx, ctx->output)) < 0) {
        av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");

    dst = ctx->output->data[0];
    dstpitch = ctx->output->linesize[0];

    memcpy(dst, src, srcpitch);
/* Main decode entry point. v1 (paletted) packets are a sequence of tagged
 * chunks (NPAL/FOBJ/XPAL/STOR/FTCH); v2 packets are one header + one coded
 * frame. Produces one AVFrame and rotates the internal buffers. */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *pkt)
    SANMVideoContext *ctx = avctx->priv_data;

    bytestream2_init(&ctx->gb, pkt->data, pkt->size);
    if (ctx->output->data[0])
        avctx->release_buffer(avctx, ctx->output);

    if (!ctx->version) {
        /* v1: iterate over 8-byte-headed chunks (BE tag + BE size) */
        while (bytestream2_get_bytes_left(&ctx->gb) >= 8) {
            sig = bytestream2_get_be32u(&ctx->gb);
            size = bytestream2_get_be32u(&ctx->gb);
            pos = bytestream2_tell(&ctx->gb);

            if (bytestream2_get_bytes_left(&ctx->gb) < size) {
                av_log(avctx, AV_LOG_ERROR, "incorrect chunk size %d\n", size);
            /* NPAL: full palette replacement (256 RGB triplets) */
            case MKBETAG('N', 'P', 'A', 'L'):
                if (size != 256 * 3) {
                    av_log(avctx, AV_LOG_ERROR, "incorrect palette block size %d\n",
                    return AVERROR_INVALIDDATA;
                for (i = 0; i < 256; i++)
                    ctx->pal[i] = bytestream2_get_be24u(&ctx->gb);
            /* FOBJ: one coded frame object */
            case MKBETAG('F', 'O', 'B', 'J'):
                return AVERROR_INVALIDDATA;
                if (ret = process_frame_obj(ctx))
            /* XPAL: apply or load delta-palette data */
            case MKBETAG('X', 'P', 'A', 'L'):
                if (size == 6 || size == 4) {
                    /* apply stored deltas: t * 129 >> 7 approximates t * 256/255
                     * scaling before adding the signed delta */
                    for (i = 0; i < 256; i++) {
                        for (j = 0; j < 3; j++) {
                            int t = (ctx->pal[i] >> (16 - j * 8)) & 0xFF;
                            tmp[j] = av_clip_uint8((t * 129 + ctx->delta_pal[i * 3 + j]) >> 7);
                        ctx->pal[i] = AV_RB24(tmp);
                    if (size < 768 * 2 + 4) {
                        av_log(avctx, AV_LOG_ERROR, "incorrect palette change block size %d\n",
                        return AVERROR_INVALIDDATA;
                    /* load 768 16-bit deltas, then optionally a new palette */
                    bytestream2_skipu(&ctx->gb, 4);
                    for (i = 0; i < 768; i++)
                        ctx->delta_pal[i] = bytestream2_get_le16u(&ctx->gb);
                    if (size >= 768 * 5 + 4) {
                        for (i = 0; i < 256; i++)
                            ctx->pal[i] = bytestream2_get_be24u(&ctx->gb);
                        memset(ctx->pal, 0, sizeof(ctx->pal));
            /* STOR/FTCH: save / restore a frame snapshot */
            case MKBETAG('S', 'T', 'O', 'R'):
            case MKBETAG('F', 'T', 'C', 'H'):
                memcpy(ctx->frm0, ctx->stored_frame, ctx->buf_size);
                bytestream2_skip(&ctx->gb, size);
                av_log(avctx, AV_LOG_DEBUG, "unknown/unsupported chunk %x\n", sig);

            bytestream2_seek(&ctx->gb, pos + size, SEEK_SET);
            /* odd-sized chunks are padded to even length */
            bytestream2_skip(&ctx->gb, 1);

        memcpy(ctx->stored_frame, ctx->frm0, ctx->buf_size);
        if ((ret = copy_output(ctx, NULL)))
        memcpy(ctx->output->data[1], ctx->pal, 1024);
        /* v2: fixed header followed by one coded frame */
        SANMFrameHeader header;

        if ((ret = read_frame_header(ctx, &header)))

        ctx->rotate_code = header.rotate_code;
        /* seq_num 0 marks a keyframe: reset both references to bg_color */
        if ((ctx->output->key_frame = !header.seq_num)) {
            ctx->output->pict_type = AV_PICTURE_TYPE_I;
            fill_frame(ctx->frm1, ctx->npixels, header.bg_color);
            fill_frame(ctx->frm2, ctx->npixels, header.bg_color);
            ctx->output->pict_type = AV_PICTURE_TYPE_P;

        if (header.codec < FF_ARRAY_ELEMS(v1_decoders)) {
            if ((ret = v1_decoders[header.codec](ctx))) {
                av_log(avctx, AV_LOG_ERROR,
                       "subcodec %d: error decoding frame\n", header.codec);
            av_log_ask_for_sample(avctx, "subcodec %d is not implemented\n",
            return AVERROR_PATCHWELCOME;

        if ((ret = copy_output(ctx, &header)))

    if (ctx->rotate_code)
        rotate_bufs(ctx, ctx->rotate_code);

    *(AVFrame*)data = *ctx->output;
/* Codec registration entry for the SANM/SMUSH decoder. */
AVCodec ff_sanm_decoder = {
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_SANM,
    .priv_data_size = sizeof(SANMVideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts SMUSH video"),