best_len[k] = len;
best_state[i][k] = j;
}
- for (m = 0; m < 256; m++)
+ for (m = 1; m < 256; m++)
if (occ[m]) {
newocc[one_state[m]] += occ[m] * p;
newocc[256 - one_state[256 - m]] += occ[m] * (1 - p);
code = v ^ ((2 * state->drift + state->count) >> 31);
#endif
- av_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
+ ff_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
state->bias, state->error_sum, state->drift, state->count, k);
set_sr_golomb(pb, code, k, 12, bits);
}
}
- av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
+ ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
run_count, run_index, run_mode, x,
(int)put_bits_count(&s->pb));
}
}
-static void encode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h,
- int stride[3])
+static void encode_rgb_frame(FFV1Context *s, const uint8_t *src[3],
+ int w, int h, const int stride[3])
{
int x, y, p, i;
const int ring_size = s->avctx->context_model ? 3 : 2;
for (x = 0; x < w; x++) {
int b, g, r, av_uninit(a);
if (lbd) {
- unsigned v = *((uint32_t *)(src[0] + x * 4 + stride[0] * y));
+ unsigned v = *((const uint32_t *)(src[0] + x * 4 + stride[0] * y));
b = v & 0xFF;
g = (v >> 8) & 0xFF;
r = (v >> 16) & 0xFF;
a = v >> 24;
} else {
- b = *((uint16_t *)(src[0] + x * 2 + stride[0] * y));
- g = *((uint16_t *)(src[1] + x * 2 + stride[1] * y));
- r = *((uint16_t *)(src[2] + x * 2 + stride[2] * y));
+ b = *((const uint16_t *)(src[0] + x * 2 + stride[0] * y));
+ g = *((const uint16_t *)(src[1] + x * 2 + stride[1] * y));
+ r = *((const uint16_t *)(src[2] + x * 2 + stride[2] * y));
}
b -= g;
if ((ret = ffv1_allocate_initial_states(s)) < 0)
return ret;
- avctx->coded_frame = &s->picture;
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
+
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
if (!s->transparency)
s->plane_count = 2;
put_symbol(c, state, f->plane[j].quant_table_index, 0);
av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
}
- if (!f->picture.interlaced_frame)
+ if (!f->avctx->coded_frame->interlaced_frame)
put_symbol(c, state, 3, 0);
else
- put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
- put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
- put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
+ put_symbol(c, state, 1 + !f->avctx->coded_frame->top_field_first, 0);
+ put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.num, 0);
+ put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.den, 0);
}
static int encode_slice(AVCodecContext *c, void *arg)
int height = fs->slice_height;
int x = fs->slice_x;
int y = fs->slice_y;
- AVFrame *const p = &f->picture;
+ const AVFrame *const p = f->frame;
const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR)
? (f->bits_per_raw_sample > 8) + 1
: 4;
- if (p->key_frame)
+ if (c->coded_frame->key_frame)
ffv1_clear_slice_state(f, fs);
if (f->version > 2) {
encode_slice_header(f, fs);
encode_plane(fs, p->data[3] + ps * x + y * p->linesize[3], width,
height, p->linesize[3], 2);
} else {
- uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
- p->data[1] + ps * x + y * p->linesize[1],
- p->data[2] + ps * x + y * p->linesize[2] };
+ const uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
+ p->data[1] + ps * x + y * p->linesize[1],
+ p->data[2] + ps * x + y * p->linesize[2] };
encode_rgb_frame(fs, planes, width, height, p->linesize);
}
emms_c();
{
FFV1Context *f = avctx->priv_data;
RangeCoder *const c = &f->slice_context[0]->c;
- AVFrame *const p = &f->picture;
+ AVFrame *const p = avctx->coded_frame;
int used_count = 0;
uint8_t keystate = 128;
uint8_t *buf_p;
int i, ret;
+ f->frame = pict;
+
if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height *
((8 * 2 + 1 + 1) * 4) / 8 +
FF_MIN_BUFFER_SIZE)) < 0) {
ff_init_range_encoder(c, pkt->data, pkt->size);
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
- *p = *pict;
- p->pict_type = AV_PICTURE_TYPE_I;
-
if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
put_rac(c, &keystate, 1);
p->key_frame = 1;
return 0;
}
+/* Encoder teardown: releases the AVFrame that ffv1_encode_init stored in
+ * avctx->coded_frame (allocated with av_frame_alloc), then delegates the
+ * remaining common cleanup to ffv1_close(). Always returns 0. */
+static av_cold int ffv1_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    ffv1_close(avctx);
+    return 0;
+}
+
#define OFFSET(x) offsetof(FFV1Context, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
AVCodec ff_ffv1_encoder = {
.name = "ffv1",
+/* .long_name grouped directly after .name per current FFmpeg style
+ * (it was previously at the bottom of this initializer). */
+.long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FFV1,
.priv_data_size = sizeof(FFV1Context),
.init = ffv1_encode_init,
.encode2 = ffv1_encode_frame,
+/* .close now points at the encoder-specific wrapper so the coded_frame
+ * allocated at init time is freed before the shared ffv1_close() runs. */
- .close = ffv1_close,
+ .close = ffv1_encode_close,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE
},
- .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
.defaults = ffv1_defaults,
.priv_class = &class,
};