X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavfilter%2Fvf_overlay.c;h=05e9e1b48095ea7fdb5dfeff4108fc12b32cf959;hb=b4780d03d04d402b490be21b01ec886565cabe29;hp=04cacad6358a18cf27184fa2ad3f03f52df15348;hpb=a8cedbebf163ad376abc4703b3156c44d0858404;p=ffmpeg diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 04cacad6358..05e9e1b4809 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -28,6 +28,7 @@ /* #define DEBUG */ #include "avfilter.h" +#include "formats.h" #include "libavutil/eval.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" @@ -36,7 +37,9 @@ #include "libavutil/mathematics.h" #include "libavutil/timestamp.h" #include "internal.h" +#include "bufferqueue.h" #include "drawutils.h" +#include "video.h" static const char *const var_names[] = { "main_w", "W", ///< width of the main video @@ -71,6 +74,8 @@ typedef struct { int x, y; ///< position of overlayed picture int allow_packed_rgb; + uint8_t frame_requested; + uint8_t overlay_eof; uint8_t main_is_packed_rgb; uint8_t main_rgba_map[4]; uint8_t main_has_alpha; @@ -78,7 +83,9 @@ typedef struct { uint8_t overlay_rgba_map[4]; uint8_t overlay_has_alpha; - AVFilterBufferRef *overpicref, *overpicref_next; + AVFilterBufferRef *overpicref; + struct FFBufQueue queue_main; + struct FFBufQueue queue_over; int main_pix_step[4]; ///< steps per pixel for each plane of the main output int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay @@ -96,18 +103,9 @@ static const AVOption overlay_options[] = { {NULL}, }; -static const char *overlay_get_name(void *ctx) -{ - return "overlay"; -} - -static const AVClass overlay_class = { - "OverlayContext", - overlay_get_name, - overlay_options -}; +AVFILTER_DEFINE_CLASS(overlay); -static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) +static av_cold int init(AVFilterContext *ctx, const char *args) { OverlayContext *over = ctx->priv; char *args1 = av_strdup(args); @@ -147,10 +145,9 @@ static av_cold void uninit(AVFilterContext *ctx) av_freep(&over->x_expr); av_freep(&over->y_expr); - if (over->overpicref) - avfilter_unref_buffer(over->overpicref); - if (over->overpicref_next) - avfilter_unref_buffer(over->overpicref_next); + avfilter_unref_bufferp(&over->overpicref); + ff_bufqueue_discard_all(&over->queue_main); + ff_bufqueue_discard_all(&over->queue_over); } static int query_formats(AVFilterContext *ctx) @@ -176,16 +173,16 @@ static int query_formats(AVFilterContext *ctx) AVFilterFormats *overlay_formats; if (over->allow_packed_rgb) { - main_formats = avfilter_make_format_list(main_pix_fmts_rgb); - overlay_formats = avfilter_make_format_list(overlay_pix_fmts_rgb); + main_formats = ff_make_format_list(main_pix_fmts_rgb); + overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb); } else { - main_formats = avfilter_make_format_list(main_pix_fmts_yuv); - overlay_formats = avfilter_make_format_list(overlay_pix_fmts_yuv); + main_formats = ff_make_format_list(main_pix_fmts_yuv); + overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv); } - avfilter_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats); - avfilter_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats); - avfilter_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats ); + ff_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats); + ff_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats); + ff_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats ); return 0; } @@ -247,7 +244,7 @@ static int 
config_input_overlay(AVFilterLink *inlink) ff_fill_rgba_map(over->overlay_rgba_map, inlink->format) >= 0; over->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts); - av_log(ctx, AV_LOG_INFO, + av_log(ctx, AV_LOG_VERBOSE, "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n", ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h, av_pix_fmt_descriptors[ctx->inputs[MAIN]->format].name, @@ -286,7 +283,7 @@ static int config_output(AVFilterLink *outlink) av_gcd((int64_t)tb1.num * tb2.den, (int64_t)tb2.num * tb1.den), (int64_t)tb1.den * tb2.den, INT_MAX); - av_log(ctx, AV_LOG_INFO, + av_log(ctx, AV_LOG_VERBOSE, "main_tb:%d/%d overlay_tb:%d/%d -> tb:%d/%d exact:%d\n", tb1.num, tb1.den, tb2.num, tb2.den, tb->num, tb->den, exact); if (!exact) @@ -301,52 +298,7 @@ static int config_output(AVFilterLink *outlink) static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w, int h) { - return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h); -} - -static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) -{ - AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0); - AVFilterContext *ctx = inlink->dst; - OverlayContext *over = ctx->priv; - av_unused AVFilterLink *outlink = ctx->outputs[0]; - - inlink->dst->outputs[0]->out_buf = outpicref; - outpicref->pts = av_rescale_q(outpicref->pts, ctx->inputs[MAIN]->time_base, - ctx->outputs[0]->time_base); - - if (!over->overpicref || over->overpicref->pts < outpicref->pts) { - if (!over->overpicref_next) - avfilter_request_frame(ctx->inputs[OVERLAY]); - - if (over->overpicref && over->overpicref_next && - over->overpicref_next->pts <= outpicref->pts) { - avfilter_unref_buffer(over->overpicref); - over->overpicref = over->overpicref_next; - over->overpicref_next = NULL; - } - } - - av_dlog(ctx, "main_pts:%s main_pts_time:%s", - av_ts2str(outpicref->pts), av_ts2timestr(outpicref->pts, &outlink->time_base)); - if (over->overpicref) - av_dlog(ctx, " over_pts:%s over_pts_time:%s", - av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base)); - av_dlog(ctx, "\n"); - - avfilter_start_frame(inlink->dst->outputs[0], outpicref); -} - -static void start_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *inpicref) -{ - AVFilterContext *ctx = inlink->dst; - OverlayContext *over = ctx->priv; - - inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base, - ctx->outputs[0]->time_base); - - if (!over->overpicref) over->overpicref = inpicref; - else over->overpicref_next = inpicref; + return ff_get_video_buffer(link->dst->outputs[0], perms, w, h); } // divide by 255 and round to nearest @@ -483,32 +435,173 @@ static void blend_slice(AVFilterContext *ctx, } } -static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) +static int try_start_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) { - AVFilterContext *ctx = inlink->dst; + OverlayContext *over = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *next_overpic, *outpicref; + + /* Discard obsolete overlay frames: if there is a next frame with pts is + * before the main frame, we can drop the current overlay. 
*/ + while (1) { + next_overpic = ff_bufqueue_peek(&over->queue_over, 0); + if (!next_overpic || next_overpic->pts > mainpic->pts) + break; + ff_bufqueue_get(&over->queue_over); + avfilter_unref_buffer(over->overpicref); + over->overpicref = next_overpic; + } + /* If there is no next frame and no EOF and the overlay frame is before + * the main frame, we can not know yet if it will be superseded. */ + if (!over->queue_over.available && !over->overlay_eof && + (!over->overpicref || over->overpicref->pts < mainpic->pts)) + return AVERROR(EAGAIN); + /* At this point, we know that the current overlay frame extends to the + * time of the main frame. */ + outlink->out_buf = outpicref = avfilter_ref_buffer(mainpic, ~0); + + av_dlog(ctx, "main_pts:%s main_pts_time:%s", + av_ts2str(outpicref->pts), av_ts2timestr(outpicref->pts, &outlink->time_base)); + if (over->overpicref) + av_dlog(ctx, " over_pts:%s over_pts_time:%s", + av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base)); + av_dlog(ctx, "\n"); + + ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(outpicref, ~0)); + over->frame_requested = 0; + return 0; +} + +static int try_start_next_frame(AVFilterContext *ctx) +{ + OverlayContext *over = ctx->priv; + AVFilterBufferRef *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0); + if (!next_mainpic || try_start_frame(ctx, next_mainpic) < 0) + return AVERROR(EAGAIN); + avfilter_unref_buffer(ff_bufqueue_get(&over->queue_main)); + return 0; +} + +static int try_push_frame(AVFilterContext *ctx) +{ + OverlayContext *over = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; AVFilterBufferRef *outpicref = outlink->out_buf; + + if (try_start_next_frame(ctx) < 0) + return AVERROR(EAGAIN); + outpicref = outlink->out_buf; + if (over->overpicref) + blend_slice(ctx, outpicref, over->overpicref, over->x, over->y, + over->overpicref->video->w, over->overpicref->video->h, + 0, outpicref->video->w, outpicref->video->h); + ff_draw_slice(outlink, 0, outpicref->video->h, +1); + ff_end_frame(outlink); + return 0; +} + +static void flush_frames(AVFilterContext *ctx) +{ + while (!try_push_frame(ctx)); +} + +static int start_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref) +{ + AVFilterContext *ctx = inlink->dst; OverlayContext *over = ctx->priv; + flush_frames(ctx); + inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[MAIN]->time_base, + ctx->outputs[0]->time_base); + if (try_start_frame(ctx, inpicref) < 0) { + ff_bufqueue_add(ctx, &over->queue_main, inpicref); + av_assert1(inpicref == inlink->cur_buf); + inlink->cur_buf = NULL; + } + return 0; +} + +static int draw_slice_main(AVFilterLink *inlink, int y, int h, int slice_dir) +{ + AVFilterContext *ctx = inlink->dst; + OverlayContext *over = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *outpicref = outlink->out_buf; + + if (!outpicref) + return 0; if (over->overpicref && - !(over->x >= outpicref->video->w || over->y >= outpicref->video->h || - y+h < over->y || y >= over->y + over->overpicref->video->h)) { + y + h > over->y && y < over->y + over->overpicref->video->h) { blend_slice(ctx, outpicref, over->overpicref, over->x, over->y, over->overpicref->video->w, over->overpicref->video->h, y, outpicref->video->w, h); } - avfilter_draw_slice(outlink, y, h, slice_dir); + return ff_draw_slice(outlink, y, h, slice_dir); } -static void end_frame(AVFilterLink *inlink) +static int end_frame_main(AVFilterLink *inlink) { - avfilter_end_frame(inlink->dst->outputs[0]); - 
avfilter_unref_buffer(inlink->cur_buf); + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *outpicref = outlink->out_buf; + flush_frames(ctx); + + if (!outpicref) + return 0; + return ff_end_frame(ctx->outputs[0]); +} + +static int start_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref) +{ + return 0; +} + +static int end_frame_over(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + OverlayContext *over = ctx->priv; + AVFilterBufferRef *inpicref = inlink->cur_buf; + inlink->cur_buf = NULL; + + flush_frames(ctx); + inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base, + ctx->outputs[0]->time_base); + ff_bufqueue_add(ctx, &over->queue_over, inpicref); + return try_push_frame(ctx); } -static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { } +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + OverlayContext *over = ctx->priv; + int input, ret; + + if (!try_push_frame(ctx)) + return 0; + over->frame_requested = 1; + while (over->frame_requested) { + /* TODO if we had a frame duration, we could guess more accurately */ + input = !over->overlay_eof && (over->queue_main.available || + over->queue_over.available < 2) ? + OVERLAY : MAIN; + ret = ff_request_frame(ctx->inputs[input]); + /* EOF on main is reported immediately */ + if (ret == AVERROR_EOF && input == OVERLAY) { + over->overlay_eof = 1; + if (!try_start_next_frame(ctx)) + return 0; + ret = 0; /* continue requesting frames on main */ + } + if (ret < 0) + return ret; + } + return 0; +} -static void null_end_frame(AVFilterLink *inlink) { } +static int null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) +{ + return 0; +} AVFilter avfilter_vf_overlay = { .name = "overlay", @@ -521,26 +614,27 @@ AVFilter avfilter_vf_overlay = { .query_formats = query_formats, - .inputs = (const AVFilterPad[]) {{ .name = "main", - .type = AVMEDIA_TYPE_VIDEO, - .start_frame = start_frame, - .get_video_buffer= get_video_buffer, - .config_props = config_input_main, - .draw_slice = draw_slice, - .end_frame = end_frame, - .min_perms = AV_PERM_READ, - .rej_perms = AV_PERM_REUSE2|AV_PERM_PRESERVE, }, - { .name = "overlay", - .type = AVMEDIA_TYPE_VIDEO, - .start_frame = start_frame_overlay, - .config_props = config_input_overlay, - .draw_slice = null_draw_slice, - .end_frame = null_end_frame, - .min_perms = AV_PERM_READ, - .rej_perms = AV_PERM_REUSE2, }, - { .name = NULL}}, - .outputs = (const AVFilterPad[]) {{ .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .config_props = config_output, }, - { .name = NULL}}, + .inputs = (const AVFilterPad[]) {{ .name = "main", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer= get_video_buffer, + .config_props = config_input_main, + .start_frame = start_frame_main, + .draw_slice = draw_slice_main, + .end_frame = end_frame_main, + .min_perms = AV_PERM_READ, + .rej_perms = AV_PERM_REUSE2|AV_PERM_PRESERVE, }, + { .name = "overlay", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input_overlay, + .start_frame = start_frame_over, + .draw_slice = null_draw_slice, + .end_frame = end_frame_over, + .min_perms = AV_PERM_READ, + .rej_perms = AV_PERM_REUSE2, }, + { .name = NULL}}, + .outputs = (const AVFilterPad[]) {{ .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + .request_frame = request_frame, }, + { .name = NULL}}, };
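
Note on the scheduling logic this patch introduces: the old code kept at most one "next" overlay frame (overpicref_next) and pulled it synchronously from start_frame(); the new code buffers both inputs in FFBufQueue structures and drives everything from request_frame() on the output, discarding queued overlay frames that have been superseded and waiting when it cannot yet tell whether the current overlay frame still applies (unless overlay EOF has been seen). The sketch below is a minimal, self-contained model of that pts decision, under stated assumptions: it uses hypothetical names and a fixed-size array, not the real libavfilter/FFBufQueue API, and only mirrors the spirit of try_start_frame().

/* Hypothetical, simplified model of the overlay/main pts synchronisation
 * policy (not the FFmpeg API): keep the newest overlay frame whose pts is
 * <= the main frame's pts, and blend only once no later overlay frame can
 * still supersede it (or overlay EOF was reached). */
#include <stdio.h>
#include <stdint.h>

enum { WAIT = 0, BLEND = 1 };

typedef struct Frame { int64_t pts; } Frame;

typedef struct OverQueue {
    Frame frames[16];   /* queued overlay frames, oldest first */
    int   head, count;
    int   eof;          /* set once no more overlay frames will arrive */
    Frame *cur;         /* currently active overlay frame, may be NULL */
} OverQueue;

/* Decide what to do for one main frame: advance the active overlay frame
 * past every queued frame that is not after the main pts, then either
 * blend or wait for more overlay input. */
static int sync_overlay(OverQueue *q, const Frame *main_frame)
{
    while (q->count > 0 && q->frames[q->head].pts <= main_frame->pts) {
        q->cur = &q->frames[q->head];   /* a newer frame supersedes cur */
        q->head++;
        q->count--;
    }
    /* No queued frame, no EOF, and the active overlay frame starts before
     * the main frame: a not-yet-received overlay frame might still apply. */
    if (q->count == 0 && !q->eof &&
        (!q->cur || q->cur->pts < main_frame->pts))
        return WAIT;
    return BLEND;   /* q->cur (possibly NULL) is valid for this main pts */
}

int main(void)
{
    OverQueue q = { .frames = { {0}, {40}, {80} }, .count = 3 };
    Frame m = { .pts = 50 };
    printf("action: %s, overlay pts: %lld\n",
           sync_overlay(&q, &m) == BLEND ? "blend" : "wait",
           q.cur ? (long long)q.cur->pts : -1LL);
    return 0;
}

The output-driven side of the patch follows the same idea: request_frame() keeps asking the overlay input while the main queue is non-empty or fewer than two overlay frames are buffered (and overlay EOF has not been reported), otherwise it asks the main input; an EOF on the overlay input is recorded rather than propagated, so blending continues with the last overlay frame. From the user's point of view nothing changes; an illustrative invocation (file names are placeholders) would be: ffmpeg -i main.mp4 -i logo.png -filter_complex overlay=10:10 out.mp4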