Dav1dData data;
int tile_threads;
+ int frame_threads;
int apply_grain;
} Libdav1dContext;
[DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
};
+static const enum AVPixelFormat pix_fmt_rgb[3] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
+};
+
static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
{
AVCodecContext *c = opaque;
av_buffer_pool_uninit(&dav1d->pool);
// Use twice the amount of required padding bytes for aligned_ptr below.
dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
- if (!dav1d->pool)
+ if (!dav1d->pool) {
+ dav1d->pool_size = 0;
return AVERROR(ENOMEM);
+ }
dav1d->pool_size = ret;
}
buf = av_buffer_pool_get(dav1d->pool);
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dSettings s;
+ int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
int res;
av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());
s.allocator.cookie = dav1d;
s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
s.allocator.release_picture_callback = libdav1d_picture_release;
- s.n_tile_threads = dav1d->tile_threads;
s.apply_grain = dav1d->apply_grain;
- s.n_frame_threads = FFMIN(c->thread_count ? c->thread_count : av_cpu_count(), DAV1D_MAX_FRAME_THREADS);
+
+ s.n_tile_threads = dav1d->tile_threads
+ ? dav1d->tile_threads
+ : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
+ s.n_frame_threads = dav1d->frame_threads
+ ? dav1d->frame_threads
+ : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
+ av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
+ s.n_frame_threads, s.n_tile_threads);
res = dav1d_open(&dav1d->c, &s);
if (res < 0)
av_buffer_unref(&buf);
}
-static void libdav1d_frame_free(void *opaque, uint8_t *data) {
- Dav1dPicture *p = opaque;
-
- dav1d_picture_unref(p);
- av_free(p);
-}
-
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dData *data = &dav1d->data;
- Dav1dPicture *p;
+ Dav1dPicture pic = { 0 }, *p = &pic;
int res;
if (!data->sz) {
return res;
}
- p = av_mallocz(sizeof(*p));
- if (!p)
- return AVERROR(ENOMEM);
-
res = dav1d_get_picture(dav1d->c, p);
if (res < 0) {
if (res == AVERROR(EINVAL))
else if (res == AVERROR(EAGAIN) && c->internal->draining)
res = AVERROR_EOF;
- av_free(p);
return res;
}
- av_assert0(p->data[0] != NULL);
+ av_assert0(p->data[0] && p->allocator_data);
- frame->buf[0] = av_buffer_create(NULL, 0, libdav1d_frame_free,
- p, AV_BUFFER_FLAG_READONLY);
+ // Requires the custom allocator set in libdav1d_init: it stores an AVBufferRef in p->allocator_data, which we re-reference here instead of copying the picture
+ frame->buf[0] = av_buffer_ref(p->allocator_data);
if (!frame->buf[0]) {
dav1d_picture_unref(p);
- av_free(p);
return AVERROR(ENOMEM);
}
frame->linesize[2] = p->stride[1];
c->profile = p->seq_hdr->profile;
- frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
+ c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
+ | p->seq_hdr->operating_points[0].minor_level;
frame->width = p->p.w;
frame->height = p->p.h;
if (c->width != p->p.w || c->height != p->p.h) {
res = ff_set_dimensions(c, p->p.w, p->p.h);
if (res < 0)
- return res;
+ goto fail;
}
switch (p->seq_hdr->chr) {
frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
+ p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
+ p->seq_hdr->pri == DAV1D_COLOR_PRI_BT709 &&
+ p->seq_hdr->trc == DAV1D_TRC_SRGB)
+ frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
+ else
+ frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
+
// match timestamps and packet size
frame->pts = frame->best_effort_timestamp = p->m.timestamp;
#if FF_API_PKT_PTS
frame->pict_type = AV_PICTURE_TYPE_SP;
break;
default:
- return AVERROR_INVALIDDATA;
+ res = AVERROR_INVALIDDATA;
+ goto fail;
}
if (p->mastering_display) {
AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
- if (!mastering)
- return AVERROR(ENOMEM);
+ if (!mastering) {
+ res = AVERROR(ENOMEM);
+ goto fail;
+ }
for (int i = 0; i < 3; i++) {
mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
}
if (p->content_light) {
AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
- if (!light)
- return AVERROR(ENOMEM);
-
+ if (!light) {
+ res = AVERROR(ENOMEM);
+ goto fail;
+ }
light->MaxCLL = p->content_light->max_content_light_level;
light->MaxFALL = p->content_light->max_frame_average_light_level;
}
- return 0;
+ res = 0;
+fail:
+ dav1d_picture_unref(p);
+ if (res < 0)
+ av_frame_unref(frame);
+ return res;
}
static av_cold int libdav1d_close(AVCodecContext *c)
#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
- { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, DAV1D_MAX_TILE_THREADS, VD },
+ { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
+ { "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
{ "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VD },
{ NULL }
};