for (int i = 0; i < request->task_count; ++i) {
task = request->tasks[i];
if (task->do_ioproc) {
- if (ov_model->model->pre_proc != NULL) {
- ov_model->model->pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
+ if (ov_model->model->frame_pre_proc != NULL) {
+ ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
} else {
ff_proc_from_frame_to_dnn(task->in_frame, &input, ov_model->model->func_type, ctx);
}
av_assert0(request->task_count >= 1);
for (int i = 0; i < request->task_count; ++i) {
task = request->tasks[i];
- if (task->do_ioproc) {
- if (task->ov_model->model->post_proc != NULL) {
- task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
+
+ switch (task->ov_model->model->func_type) {
+ case DFT_PROCESS_FRAME:
+ if (task->do_ioproc) {
+ if (task->ov_model->model->frame_post_proc != NULL) {
+ task->ov_model->model->frame_post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
+ } else {
+ ff_proc_from_dnn_to_frame(task->out_frame, &output, ctx);
+ }
} else {
- ff_proc_from_dnn_to_frame(task->out_frame, &output, ctx);
+ task->out_frame->width = output.width;
+ task->out_frame->height = output.height;
}
- } else {
- task->out_frame->width = output.width;
- task->out_frame->height = output.height;
+ break;
+ case DFT_ANALYTICS_DETECT:
+ if (!task->ov_model->model->detect_post_proc) {
+ av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
+ return;
+ }
+ task->ov_model->model->detect_post_proc(task->out_frame, &output, 1, task->ov_model->model->filter_ctx);
+ break;
+ default:
+ av_assert0(!"should not reach here");
+ break;
}
+
task->done = 1;
output.data = (uint8_t *)output.data
+ output.width * output.height * output.channels * get_datatype_size(output.dt);
}
// all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
- // we don't have a AVPixelFormat to descibe it, so we'll use AV_PIX_FMT_BGR24 and
+ // we don't have an AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
// ask openvino to do the conversion internally.
// the current supported SR model (frame processing) is generated from tensorflow model,
// and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.