X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavfilter%2Fvf_sr.c;h=4360439ca6b21b4b933e4424693832f9464ce9fe;hb=a04ad248a05e7b613abe09b3bb067f555108d794;hp=6423d2ea118bc6ebd75c3105185b5d9444b6c122;hpb=fbd607dd560afe44c3b90de1e6cbe5265cac8f1e;p=ffmpeg

diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 6423d2ea118..4360439ca6b 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -29,33 +29,32 @@
 #include "formats.h"
 #include "internal.h"
 #include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
 #include "libavformat/avio.h"
 #include "libswscale/swscale.h"
-#include "dnn_interface.h"
+#include "dnn_filter_common.h"
 
 typedef struct SRContext {
     const AVClass *class;
-
-    char *model_filename;
-    DNNBackendType backend_type;
-    DNNModule *dnn_module;
-    DNNModel *model;
-    DNNData input, output;
+    DnnContext dnnctx;
     int scale_factor;
-    struct SwsContext *sws_contexts[3];
-    int sws_slice_h, sws_input_linesize, sws_output_linesize;
+    struct SwsContext *sws_uv_scale;
+    int sws_uv_height;
+    struct SwsContext *sws_pre_scale;
 } SRContext;
 
 #define OFFSET(x) offsetof(SRContext, x)
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
 static const AVOption sr_options[] = {
-    { "dnn_backend", "DNN backend used for model execution", OFFSET(backend_type), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
+    { "dnn_backend", "DNN backend used for model execution", OFFSET(dnnctx.backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
     { "native", "native backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "backend" },
 #if (CONFIG_LIBTENSORFLOW == 1)
     { "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "backend" },
 #endif
     { "scale_factor", "scale factor for SRCNN model", OFFSET(scale_factor), AV_OPT_TYPE_INT, { .i64 = 2 }, 2, 4, FLAGS },
-    { "model", "path to model file specifying network architecture and its parameters", OFFSET(model_filename), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+    { "model", "path to model file specifying network architecture and its parameters", OFFSET(dnnctx.model_filename), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+    { "input", "input name of the model", OFFSET(dnnctx.model_inputname), AV_OPT_TYPE_STRING, { .str = "x" }, 0, 0, FLAGS },
+    { "output", "output name of the model", OFFSET(dnnctx.model_outputname), AV_OPT_TYPE_STRING, { .str = "y" }, 0, 0, FLAGS },
     { NULL }
 };
 
@@ -64,34 +63,7 @@ AVFILTER_DEFINE_CLASS(sr);
 static av_cold int init(AVFilterContext *context)
 {
     SRContext *sr_context = context->priv;
-
-    sr_context->dnn_module = ff_get_dnn_module(sr_context->backend_type);
-    if (!sr_context->dnn_module){
-        av_log(context, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
-        return AVERROR(ENOMEM);
-    }
-    if (!sr_context->model_filename){
-        av_log(context, AV_LOG_ERROR, "model file for network was not specified\n");
-        return AVERROR(EIO);
-    }
-    else{
-        if (!sr_context->dnn_module->load_model) {
-            av_log(context, AV_LOG_ERROR, "load_model for network was not specified\n");
-            return AVERROR(EIO);
-        } else {
-            sr_context->model = (sr_context->dnn_module->load_model)(sr_context->model_filename);
-        }
-    }
-    if (!sr_context->model){
-        av_log(context, AV_LOG_ERROR, "could not load DNN model\n");
-        return AVERROR(EIO);
-    }
-
-    sr_context->sws_contexts[0] = NULL;
-    sr_context->sws_contexts[1] = NULL;
-    sr_context->sws_contexts[2] = NULL;
-
-    return 0;
+    return ff_dnn_init(&sr_context->dnnctx, DFT_PROCESS_FRAME, context);
 }
 
 static int query_formats(AVFilterContext *context)
@@ -110,111 +82,52 @@ static int query_formats(AVFilterContext *context)
     return ff_set_common_formats(context, formats_list);
 }
 
-static int config_props(AVFilterLink *inlink)
+static int config_output(AVFilterLink *outlink)
 {
-    AVFilterContext *context = inlink->dst;
-    SRContext *sr_context = context->priv;
-    AVFilterLink *outlink = context->outputs[0];
+    AVFilterContext *context = outlink->src;
+    SRContext *ctx = context->priv;
     DNNReturnType result;
-    int sws_src_h, sws_src_w, sws_dst_h, sws_dst_w;
+    AVFilterLink *inlink = context->inputs[0];
+    int out_width, out_height;
 
-    sr_context->input.width = inlink->w * sr_context->scale_factor;
-    sr_context->input.height = inlink->h * sr_context->scale_factor;
-    sr_context->input.channels = 1;
-
-    result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
-    if (result != DNN_SUCCESS){
-        av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
+    // have a try run in case that the dnn model resize the frame
+    result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &out_width, &out_height);
+    if (result != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
         return AVERROR(EIO);
     }
-    else{
-        if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
-            sr_context->input.width = inlink->w;
-            sr_context->input.height = inlink->h;
-            result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
-            if (result != DNN_SUCCESS){
-                av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
-                return AVERROR(EIO);
-            }
-            sr_context->scale_factor = 0;
-        }
-        outlink->h = sr_context->output.height;
-        outlink->w = sr_context->output.width;
-        sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
-                                                     sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
-                                                     0, NULL, NULL, NULL);
-        sr_context->sws_input_linesize = sr_context->input.width << 2;
-        sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
-                                                     sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
-                                                     0, NULL, NULL, NULL);
-        sr_context->sws_output_linesize = sr_context->output.width << 2;
-        if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
-            av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
-            return AVERROR(ENOMEM);
-        }
-        if (sr_context->scale_factor){
-            sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
-                                                         outlink->w, outlink->h, outlink->format,
-                                                         SWS_BICUBIC, NULL, NULL, NULL);
-            if (!sr_context->sws_contexts[0]){
-                av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
-                return AVERROR(ENOMEM);
-            }
-            sr_context->sws_slice_h = inlink->h;
-        }
-        else{
-            if (inlink->format != AV_PIX_FMT_GRAY8){
-                sws_src_h = sr_context->input.height;
-                sws_src_w = sr_context->input.width;
-                sws_dst_h = sr_context->output.height;
-                sws_dst_w = sr_context->output.width;
-                switch (inlink->format){
-                case AV_PIX_FMT_YUV420P:
-                    sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 1);
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
-                    sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 1);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
-                    break;
-                case AV_PIX_FMT_YUV422P:
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
-                    break;
-                case AV_PIX_FMT_YUV444P:
-                    break;
-                case AV_PIX_FMT_YUV410P:
-                    sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 2);
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
-                    sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 2);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
-                    break;
-                case AV_PIX_FMT_YUV411P:
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
-                    break;
-                default:
-                    av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format");
-                    return AVERROR(EIO);
-                }
-                sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
-                                                             sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
-                                                             SWS_BICUBIC, NULL, NULL, NULL);
-                if (!sr_context->sws_contexts[0]){
-                    av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
-                    return AVERROR(ENOMEM);
-                }
-                sr_context->sws_slice_h = sws_src_h;
-            }
+
+    if (inlink->w != out_width || inlink->h != out_height) {
+        //espcn
+        outlink->w = out_width;
+        outlink->h = out_height;
+        if (inlink->format != AV_PIX_FMT_GRAY8){
+            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+            int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+            int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+            int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
+            int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
+            ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
+                                               sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
+                                               SWS_BICUBIC, NULL, NULL, NULL);
+            ctx->sws_uv_height = sws_src_h;
         }
-
-        return 0;
+    } else {
+        //srcnn
+        outlink->w = out_width * ctx->scale_factor;
+        outlink->h = out_height * ctx->scale_factor;
+        ctx->sws_pre_scale = sws_getContext(inlink->w, inlink->h, inlink->format,
+                                            outlink->w, outlink->h, outlink->format,
+                                            SWS_BICUBIC, NULL, NULL, NULL);
     }
+
+    return 0;
 }
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *context = inlink->dst;
-    SRContext *sr_context = context->priv;
+    SRContext *ctx = context->priv;
     AVFilterLink *outlink = context->outputs[0];
     AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     DNNReturnType dnn_result;
@@ -225,65 +138,47 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         return AVERROR(ENOMEM);
     }
     av_frame_copy_props(out, in);
-    out->height = sr_context->output.height;
-    out->width = sr_context->output.width;
-    if (sr_context->scale_factor){
-        sws_scale(sr_context->sws_contexts[0], (const uint8_t **)in->data, in->linesize,
-                  0, sr_context->sws_slice_h, out->data, out->linesize);
-
-        sws_scale(sr_context->sws_contexts[1], (const uint8_t **)out->data, out->linesize,
-                  0, out->height, (uint8_t * const*)(&sr_context->input.data),
-                  (const int [4]){sr_context->sws_input_linesize, 0, 0, 0});
-    }
-    else{
-        if (sr_context->sws_contexts[0]){
-            sws_scale(sr_context->sws_contexts[0], (const uint8_t **)(in->data + 1), in->linesize + 1,
-                      0, sr_context->sws_slice_h, out->data + 1, out->linesize + 1);
-            sws_scale(sr_context->sws_contexts[0], (const uint8_t **)(in->data + 2), in->linesize + 2,
-                      0, sr_context->sws_slice_h, out->data + 2, out->linesize + 2);
-        }
-        sws_scale(sr_context->sws_contexts[1], (const uint8_t **)in->data, in->linesize,
-                  0, in->height, (uint8_t * const*)(&sr_context->input.data),
-                  (const int [4]){sr_context->sws_input_linesize, 0, 0, 0});
+    if (ctx->sws_pre_scale) {
+        sws_scale(ctx->sws_pre_scale,
+                  (const uint8_t **)in->data, in->linesize, 0, in->height,
+                  out->data, out->linesize);
+        dnn_result = ff_dnn_execute_model(&ctx->dnnctx, out, out);
+    } else {
+        dnn_result = ff_dnn_execute_model(&ctx->dnnctx, in, out);
     }
-    av_frame_free(&in);
-    dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model);
     if (dnn_result != DNN_SUCCESS){
-        av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
+        av_log(ctx, AV_LOG_ERROR, "failed to execute loaded model\n");
+        av_frame_free(&in);
+        av_frame_free(&out);
         return AVERROR(EIO);
     }
 
-    sws_scale(sr_context->sws_contexts[2], (const uint8_t *[4]){(const uint8_t *)sr_context->output.data, 0, 0, 0},
-              (const int[4]){sr_context->sws_output_linesize, 0, 0, 0},
-              0, out->height, (uint8_t * const*)out->data, out->linesize);
+    if (ctx->sws_uv_scale) {
+        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
+                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
+        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
+                  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
+    }
+    av_frame_free(&in);
 
     return ff_filter_frame(outlink, out);
 }
 
 static av_cold void uninit(AVFilterContext *context)
 {
-    int i;
     SRContext *sr_context = context->priv;
 
-    if (sr_context->dnn_module){
-        (sr_context->dnn_module->free_model)(&sr_context->model);
-        av_freep(&sr_context->dnn_module);
-    }
-
-    for (i = 0; i < 3; ++i){
-        if (sr_context->sws_contexts[i]){
-            sws_freeContext(sr_context->sws_contexts[i]);
-        }
-    }
+    ff_dnn_uninit(&sr_context->dnnctx);
+    sws_freeContext(sr_context->sws_uv_scale);
+    sws_freeContext(sr_context->sws_pre_scale);
 }
 
 static const AVFilterPad sr_inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_VIDEO,
-        .config_props = config_props,
         .filter_frame = filter_frame,
     },
     { NULL }
@@ -292,12 +187,13 @@ static const AVFilterPad sr_inputs[] = {
 static const AVFilterPad sr_outputs[] = {
     {
         .name = "default",
+        .config_props = config_output,
         .type = AVMEDIA_TYPE_VIDEO,
     },
     { NULL }
 };
 
-AVFilter ff_vf_sr = {
+const AVFilter ff_vf_sr = {
     .name          = "sr",
     .description   = NULL_IF_CONFIG_SMALL("Apply DNN-based image super resolution to the input."),
     .priv_size     = sizeof(SRContext),
@@ -307,6 +203,4 @@ AVFilter ff_vf_sr = {
     .inputs        = sr_inputs,
     .outputs       = sr_outputs,
     .priv_class    = &sr_class,
-    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
 };
-