X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavcodec%2Fffv1enc.c;h=f689bd98b49ac74771bb630e5f970478b0dcb559;hb=88c612e3a4d5b584e2d9f6e2541de78d67bdfb9f;hp=b0c79b932fa37f25a40b87979744f33e965e012b;hpb=e6c4ac7b5f038be56dfbb0171f5dd0cb850d9b28;p=ffmpeg

diff --git a/libavcodec/ffv1enc.c b/libavcodec/ffv1enc.c
index b0c79b932fa..f689bd98b49 100644
--- a/libavcodec/ffv1enc.c
+++ b/libavcodec/ffv1enc.c
@@ -71,7 +71,7 @@ static void find_best_state(uint8_t best_state[256][256],
                     best_len[k]      = len;
                     best_state[i][k] = j;
                 }
-            for (m = 0; m < 256; m++)
+            for (m = 1; m < 256; m++)
                 if (occ[m]) {
                     newocc[one_state[m]] += occ[m] * p;
                     newocc[256 - one_state[256 - m]] += occ[m] * (1 - p);
@@ -160,7 +160,7 @@ static inline void put_vlc_symbol(PutBitContext *pb, VlcState *const state,
     code = v ^ ((2 * state->drift + state->count) >> 31);
 #endif
 
-    av_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
+    ff_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
             state->bias, state->error_sum, state->drift, state->count, k);
 
     set_sr_golomb(pb, code, k, 12, bits);
@@ -234,7 +234,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
             }
         }
 
-        av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
+        ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                 run_count, run_index, run_mode, x,
                 (int)put_bits_count(&s->pb));
 
@@ -293,8 +293,8 @@ static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h,
     }
 }
 
-static void encode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h,
-                             int stride[3])
+static void encode_rgb_frame(FFV1Context *s, const uint8_t *src[3],
+                             int w, int h, const int stride[3])
 {
     int x, y, p, i;
     const int ring_size = s->avctx->context_model ? 3 : 2;
@@ -320,15 +320,15 @@
         for (x = 0; x < w; x++) {
             int b, g, r, av_uninit(a);
             if (lbd) {
-                unsigned v = *((uint32_t *)(src[0] + x * 4 + stride[0] * y));
+                unsigned v = *((const uint32_t *)(src[0] + x * 4 + stride[0] * y));
                 b = v & 0xFF;
                 g = (v >> 8) & 0xFF;
                 r = (v >> 16) & 0xFF;
                 a = v >> 24;
             } else {
-                b = *((uint16_t *)(src[0] + x * 2 + stride[0] * y));
-                g = *((uint16_t *)(src[1] + x * 2 + stride[1] * y));
-                r = *((uint16_t *)(src[2] + x * 2 + stride[2] * y));
+                b = *((const uint16_t *)(src[0] + x * 2 + stride[0] * y));
+                g = *((const uint16_t *)(src[1] + x * 2 + stride[1] * y));
+                r = *((const uint16_t *)(src[2] + x * 2 + stride[2] * y));
             }
 
             b -= g;
@@ -721,7 +721,12 @@ static av_cold int ffv1_encode_init(AVCodecContext *avctx)
     if ((ret = ffv1_allocate_initial_states(s)) < 0)
         return ret;
 
-    avctx->coded_frame = &s->picture;
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
     if (!s->transparency)
         s->plane_count = 2;
 
@@ -858,12 +863,12 @@ static void encode_slice_header(FFV1Context *f, FFV1Context *fs)
         put_symbol(c, state, f->plane[j].quant_table_index, 0);
         av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
     }
-    if (!f->picture.interlaced_frame)
+    if (!f->avctx->coded_frame->interlaced_frame)
         put_symbol(c, state, 3, 0);
     else
-        put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
-    put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
-    put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
+        put_symbol(c, state, 1 + !f->avctx->coded_frame->top_field_first, 0);
+    put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.num, 0);
+    put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.den, 0);
 }
 
 static int encode_slice(AVCodecContext *c, void *arg)
@@ -874,12 +879,12 @@
     int height = fs->slice_height;
     int x = fs->slice_x;
     int y = fs->slice_y;
-    AVFrame *const p = &f->picture;
+    const AVFrame *const p = f->frame;
     const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR)
                    ? (f->bits_per_raw_sample > 8) + 1
                    : 4;
 
-    if (p->key_frame)
+    if (c->coded_frame->key_frame)
         ffv1_clear_slice_state(f, fs);
     if (f->version > 2) {
         encode_slice_header(f, fs);
@@ -911,9 +916,9 @@
             encode_plane(fs, p->data[3] + ps * x + y * p->linesize[3],
                          width, height, p->linesize[3], 2);
     } else {
-        uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
-                               p->data[1] + ps * x + y * p->linesize[1],
-                               p->data[2] + ps * x + y * p->linesize[2] };
+        const uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
+                                     p->data[1] + ps * x + y * p->linesize[1],
+                                     p->data[2] + ps * x + y * p->linesize[2] };
         encode_rgb_frame(fs, planes, width, height, p->linesize);
     }
     emms_c();
@@ -926,12 +931,14 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 {
     FFV1Context *f = avctx->priv_data;
     RangeCoder *const c = &f->slice_context[0]->c;
-    AVFrame *const p = &f->picture;
+    AVFrame *const p = avctx->coded_frame;
     int used_count = 0;
     uint8_t keystate = 128;
     uint8_t *buf_p;
     int i, ret;
 
+    f->frame = pict;
+
     if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height *
                                ((8 * 2 + 1 + 1) * 4) / 8 +
                                FF_MIN_BUFFER_SIZE)) < 0) {
@@ -942,9 +949,6 @@
     ff_init_range_encoder(c, pkt->data, pkt->size);
     ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
 
-    *p = *pict;
-    p->pict_type = AV_PICTURE_TYPE_I;
-
     if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
         put_rac(c, &keystate, 1);
         p->key_frame = 1;
@@ -1054,6 +1058,13 @@
     return 0;
 }
 
+static av_cold int ffv1_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    ffv1_close(avctx);
+    return 0;
+}
+
 #define OFFSET(x) offsetof(FFV1Context, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
@@ -1076,12 +1087,13 @@ static const AVCodecDefault ffv1_defaults[] = {
 
 AVCodec ff_ffv1_encoder = {
     .name           = "ffv1",
+    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_FFV1,
     .priv_data_size = sizeof(FFV1Context),
     .init           = ffv1_encode_init,
     .encode2        = ffv1_encode_frame,
-    .close          = ffv1_close,
+    .close          = ffv1_encode_close,
     .capabilities   = CODEC_CAP_SLICE_THREADS,
     .pix_fmts       = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
@@ -1096,7 +1108,6 @@ AVCodec ff_ffv1_encoder = {
         AV_PIX_FMT_NONE
     },
-    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
     .defaults       = ffv1_defaults,
     .priv_class     = &class,
 };