X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Frl2.c;h=ba539e7d3abb1310c95e2cb74f286a9ca1a5406b;hb=bb198c4997d5036f3bf91de51e44f807115677d0;hp=4d1937a4b4efff5ae065c5d2652dab1a6f0853c1;hpb=800ab1bafa0698bdac32061c1bc72d5e04821e96;p=ffmpeg

diff --git a/libavcodec/rl2.c b/libavcodec/rl2.c
index 4d1937a4b4e..ba539e7d3ab 100644
--- a/libavcodec/rl2.c
+++ b/libavcodec/rl2.c
@@ -30,20 +30,22 @@
 #include <stdint.h>
 #include <stdio.h>
+#include "libavutil/internal.h"
 #include "libavutil/intreadwrite.h"
+#include "libavutil/mem.h"
 #include "avcodec.h"
+#include "internal.h"
 
 #define EXTRADATA1_SIZE (6 + 256 * 3) ///< video base, clr count, palette
 
 typedef struct Rl2Context {
     AVCodecContext *avctx;
-    AVFrame frame;
 
-    unsigned short video_base;  ///< initial drawing offset
-    unsigned int clr_count;     ///< number of used colors (currently unused)
-    unsigned char* back_frame;  ///< background frame
-    unsigned int palette[AVPALETTE_COUNT];
+    uint16_t video_base;  ///< initial drawing offset
+    uint32_t clr_count;   ///< number of used colors (currently unused)
+    uint8_t *back_frame;  ///< background frame
+    uint32_t palette[AVPALETTE_COUNT];
 } Rl2Context;
 
 /**
@@ -55,67 +57,68 @@ typedef struct Rl2Context {
  * @param stride stride of the output buffer
  * @param video_base offset of the rle data inside the frame
  */
-static void rl2_rle_decode(Rl2Context *s,const unsigned char* in,int size,
-                               unsigned char* out,int stride,int video_base){
+static void rl2_rle_decode(Rl2Context *s, const uint8_t *in, int size,
+                           uint8_t *out, int stride, int video_base)
+{
     int base_x = video_base % s->avctx->width;
     int base_y = video_base / s->avctx->width;
     int stride_adj = stride - s->avctx->width;
     int i;
-    const unsigned char* back_frame = s->back_frame;
-    const unsigned char* in_end = in + size;
-    const unsigned char* out_end = out + stride * s->avctx->height;
-    unsigned char* line_end;
+    const uint8_t *back_frame = s->back_frame;
+    const uint8_t *in_end     = in + size;
+    const uint8_t *out_end    = out + stride * s->avctx->height;
+    uint8_t *line_end;
 
     /** copy start of the background frame */
-    for(i=0;i<=base_y;i++){
-        if(s->back_frame)
-            memcpy(out,back_frame,s->avctx->width);
-        out += stride;
+    for (i = 0; i <= base_y; i++) {
+        if (s->back_frame)
+            memcpy(out, back_frame, s->avctx->width);
+        out        += stride;
         back_frame += s->avctx->width;
     }
     back_frame += base_x - s->avctx->width;
-    line_end = out - stride_adj;
-    out += base_x - stride;
+    line_end    = out - stride_adj;
+    out        += base_x - stride;
 
     /** decode the variable part of the frame */
-    while(in < in_end){
-        unsigned char val = *in++;
-        int len = 1;
-        if(val >= 0x80){
-            if(in >= in_end)
+    while (in < in_end) {
+        uint8_t val = *in++;
+        int len     = 1;
+        if (val >= 0x80) {
+            if (in >= in_end)
                 break;
             len = *in++;
-            if(!len)
+            if (!len)
                 break;
         }
 
-        if(len >= out_end - out)
+        if (len >= out_end - out)
             break;
 
-        if(s->back_frame)
+        if (s->back_frame)
             val |= 0x80;
         else
             val &= ~0x80;
 
-        while(len--){
-            *out++ = (val == 0x80)? *back_frame:val;
+        while (len--) {
+            *out++ = (val == 0x80) ? *back_frame : val;
             back_frame++;
-            if(out == line_end){
-                out += stride_adj;
+            if (out == line_end) {
+                out      += stride_adj;
                 line_end += stride;
-                if(len >= out_end - out)
+                if (len >= out_end - out)
                     break;
             }
         }
     }
 
     /** copy the rest from the background frame */
-    if(s->back_frame){
-        while(out < out_end){
+    if (s->back_frame) {
+        while (out < out_end) {
             memcpy(out, back_frame, line_end - out);
             back_frame += line_end - out;
-            out = line_end + stride_adj;
-            line_end += stride;
+            out       = line_end + stride_adj;
+            line_end += stride;
         }
     }
 }
@@ -131,37 +134,38 @@ static av_cold int rl2_decode_init(AVCodecContext *avctx)
     Rl2Context *s = avctx->priv_data;
     int back_size;
     int i;
-    s->avctx = avctx;
-    avctx->pix_fmt = PIX_FMT_PAL8;
+
+    s->avctx       = avctx;
+    avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
     /** parse extra data */
-    if(!avctx->extradata || avctx->extradata_size < EXTRADATA1_SIZE){
+    if (!avctx->extradata || avctx->extradata_size < EXTRADATA1_SIZE) {
         av_log(avctx, AV_LOG_ERROR, "invalid extradata size\n");
-        return -1;
+        return AVERROR(EINVAL);
     }
 
     /** get frame_offset */
     s->video_base = AV_RL16(&avctx->extradata[0]);
-    s->clr_count = AV_RL32(&avctx->extradata[2]);
+    s->clr_count  = AV_RL32(&avctx->extradata[2]);
 
-    if(s->video_base >= avctx->width * avctx->height){
+    if (s->video_base >= avctx->width * avctx->height) {
         av_log(avctx, AV_LOG_ERROR, "invalid video_base\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     /** initialize palette */
-    for(i=0;i<AVPALETTE_COUNT;i++)
+    for (i = 0; i < AVPALETTE_COUNT; i++)
         s->palette[i] = AV_RB24(&avctx->extradata[6 + i * 3]);
 
     /** decode background frame if present */
     back_size = avctx->extradata_size - EXTRADATA1_SIZE;
 
-    if(back_size > 0){
-        unsigned char* back_frame = av_mallocz(avctx->width*avctx->height);
-        if(!back_frame)
-            return -1;
-        rl2_rle_decode(s,avctx->extradata + EXTRADATA1_SIZE,back_size,
-                           back_frame,avctx->width,0);
+    if (back_size > 0) {
+        uint8_t *back_frame = av_mallocz(avctx->width*avctx->height);
+        if (!back_frame)
+            return AVERROR(ENOMEM);
+        rl2_rle_decode(s, avctx->extradata + EXTRADATA1_SIZE, back_size,
+                       back_frame, avctx->width, 0);
         s->back_frame = back_frame;
     }
     return 0;
@@ -169,31 +173,27 @@
 
 
 static int rl2_decode_frame(AVCodecContext *avctx,
-                              void *data, int *data_size,
-                              AVPacket *avpkt)
+                            void *data, int *got_frame,
+                            AVPacket *avpkt)
 {
+    AVFrame *frame     = data;
     const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
+    int ret, buf_size  = avpkt->size;
     Rl2Context *s = avctx->priv_data;
 
-    if(s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    /** get buffer */
-    s->frame.reference= 0;
-    if(avctx->get_buffer(avctx, &s->frame)) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
 
     /** run length decode */
-    rl2_rle_decode(s,buf,buf_size,s->frame.data[0],s->frame.linesize[0],s->video_base);
+    rl2_rle_decode(s, buf, buf_size, frame->data[0], frame->linesize[0],
+                   s->video_base);
 
     /** make the palette available on the way out */
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
 
-    *data_size = sizeof(AVFrame);
-    *(AVFrame*)data = s->frame;
+    *got_frame = 1;
 
     /** report that the buffer was completely consumed */
     return buf_size;
@@ -209,9 +209,6 @@ static av_cold int rl2_decode_end(AVCodecContext *avctx)
 {
     Rl2Context *s = avctx->priv_data;
 
-    if(s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
     av_free(s->back_frame);
 
     return 0;
@@ -220,12 +217,12 @@ static av_cold int rl2_decode_end(AVCodecContext *avctx)
 
 AVCodec ff_rl2_decoder = {
     .name           = "rl2",
+    .long_name      = NULL_IF_CONFIG_SMALL("RL2 video"),
     .type           = AVMEDIA_TYPE_VIDEO,
-    .id             = CODEC_ID_RL2,
+    .id             = AV_CODEC_ID_RL2,
     .priv_data_size = sizeof(Rl2Context),
     .init           = rl2_decode_init,
     .close          = rl2_decode_end,
     .decode         = rl2_decode_frame,
-    .capabilities   = CODEC_CAP_DR1,
-    .long_name      = NULL_IF_CONFIG_SMALL("RL2 video"),
+    .capabilities   = AV_CODEC_CAP_DR1,
 };