#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
char *cx, *cy, *cw, *ch;
char *ow, *oh;
char *output_format_str;
+
+ int async_depth;
+ int eof;
} VPPContext;
/*
 * AVOption table for the vpp_qsv filter.
 *
 * NOTE(review): the crop expression options are strings, so their numeric
 * min/max fields are meaningless — 0/0 replaces the old CHAR_MIN/CHAR_MAX
 * residue.  Ranges on the other string options (0..255) are likewise inert
 * for AV_OPT_TYPE_STRING and kept as-is for byte-compatibility.
 */
static const AVOption options[] = {
    /* transpose unit constants (the "transpose" option itself is declared elsewhere) */
    { "hflip", "flip horizontally", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, .flags=FLAGS, .unit = "transpose" },
    { "vflip", "flip vertically", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, .flags=FLAGS, .unit = "transpose" },

    /* crop rectangle, as expressions evaluated against the input size */
    { "cw",   "set the width crop area expression",   OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" },            0, 0, FLAGS },
    { "ch",   "set the height crop area expression",  OFFSET(ch), AV_OPT_TYPE_STRING, { .str = "ih" },            0, 0, FLAGS },
    { "cx",   "set the x crop area expression",       OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, 0, 0, FLAGS },
    { "cy",   "set the y crop area expression",       OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, 0, 0, FLAGS },

    /* output size expressions; height defaults to preserving the crop aspect ratio */
    { "w",      "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" },      0, 255, .flags = FLAGS },
    { "width",  "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" },      0, 255, .flags = FLAGS },
    { "h",      "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
    { "height", "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },

    { "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },

    /* depth of the asynchronous MFX pipeline; higher values trade latency for throughput */
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(async_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },

    { NULL }
};
param.filter_frame = NULL;
param.num_ext_buf = 0;
param.ext_buf = ext_buf;
+ param.async_depth = vpp->async_depth;
if (inlink->format == AV_PIX_FMT_QSV) {
if (!inlink->hw_frames_ctx || !inlink->hw_frames_ctx->data)
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+static int activate(AVFilterContext *ctx)
{
- int ret = 0;
- AVFilterContext *ctx = inlink->dst;
- VPPContext *vpp = inlink->dst->priv;
- AVFilterLink *outlink = ctx->outputs[0];
-
- if (vpp->qsv) {
- ret = ff_qsvvpp_filter_frame(vpp->qsv, inlink, picref);
- av_frame_free(&picref);
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ VPPContext *s =ctx->priv;
+ QSVVPPContext *qsv = s->qsv;
+ AVFrame *in = NULL;
+ int ret, status;
+ int64_t pts;
+
+ FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+ if (!s->eof) {
+ ret = ff_inlink_consume_frame(inlink, &in);
+ if (ret < 0)
+ return ret;
+
+ if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+ if (status == AVERROR_EOF) {
+ s->eof = 1;
+ }
+ }
+ }
+
+ if (qsv) {
+ if (in || s->eof) {
+ qsv->eof = s->eof;
+ ret = ff_qsvvpp_filter_frame(qsv, inlink, in);
+ av_frame_free(&in);
+
+ if (s->eof) {
+ ff_outlink_set_status(outlink, status, pts);
+ return 0;
+ }
+
+ if (qsv->got_frame) {
+ qsv->got_frame = 0;
+ return ret;
+ }
+ }
} else {
- if (picref->pts != AV_NOPTS_VALUE)
- picref->pts = av_rescale_q(picref->pts, inlink->time_base, outlink->time_base);
- ret = ff_filter_frame(outlink, picref);
+ if (in) {
+ if (in->pts != AV_NOPTS_VALUE)
+ in->pts = av_rescale_q(in->pts, inlink->time_base, outlink->time_base);
+
+ ret = ff_filter_frame(outlink, in);
+ return ret;
+ }
}
- return ret;
+ if (s->eof) {
+ ff_outlink_set_status(outlink, status, pts);
+ return 0;
+ } else {
+ FF_FILTER_FORWARD_WANTED(outlink, inlink);
+ }
+
+ return FFERROR_NOT_READY;
}
/*
 * Declare the software pixel formats accepted on input and produced on
 * output.  Uses the outcfg/incfg format lists (post-reconfiguration API).
 *
 * ff_formats_ref() handles a NULL list from ff_make_format_list() by
 * returning AVERROR(ENOMEM), so no separate allocation check is needed.
 * out_pix_fmts is defined elsewhere in this file.
 */
static int query_formats(AVFilterContext *ctx)
{
    int ret;
    static const enum AVPixelFormat in_pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_NONE
    };

    ret = ff_formats_ref(ff_make_format_list(in_pix_fmts),
                         &ctx->inputs[0]->outcfg.formats);
    if (ret < 0)
        return ret;

    return ff_formats_ref(ff_make_format_list(out_pix_fmts),
                          &ctx->outputs[0]->incfg.formats);
}
static av_cold void vpp_uninit(AVFilterContext *ctx)
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
- .filter_frame = filter_frame,
},
{ NULL }
};
{ NULL }
};
-AVFilter ff_vf_vpp_qsv = {
+const AVFilter ff_vf_vpp_qsv = {
.name = "vpp_qsv",
.description = NULL_IF_CONFIG_SMALL("Quick Sync Video VPP."),
.priv_size = sizeof(VPPContext),
.uninit = vpp_uninit,
.inputs = vpp_inputs,
.outputs = vpp_outputs,
+ .activate = activate,
.priv_class = &vpp_class,
.flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};