/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN tensorflow backend implementation.
 */

#include "dnn_backend_tf.h"
#include "dnn_backend_native.h"
#include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layer_depth2space.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"
#include "dnn_backend_native_layer_maximum.h"

#include <tensorflow/c/c_api.h>

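/**
 * Per-model TensorFlow execution state: the imported graph, the session it
 * runs in, the last TF_Status, and the bound input/output operations and
 * tensors that set_input_output_tf() prepares for ff_dnn_execute_model_tf().
 */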
typedef struct TFModel{
    TF_Graph *graph;
    TF_Session *session;
    TF_Status *status;
    TF_Output input;
    TF_Tensor *input_tensor;
    TF_Output *outputs;
    TF_Tensor **output_tensors;
    uint32_t nb_output;
} TFModel;

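/* Deallocator callback installed on the TF_Buffer below; releases the serialized graph data. */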
static void free_buffer(void *data, size_t length)
{
    av_freep(&data);
}

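/**
 * Read a binary TensorFlow GraphDef from model_filename into a newly
 * allocated TF_Buffer (released through its data_deallocator). Returns NULL
 * on open, allocation, or short-read failure.
 */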
static TF_Buffer *read_graph(const char *model_filename)
{
    TF_Buffer *graph_buf;
    unsigned char *graph_data = NULL;
    AVIOContext *model_file_context;
    long size, bytes_read;

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
        return NULL;
    }

    size = avio_size(model_file_context);

    graph_data = av_malloc(size);
    if (!graph_data){
        avio_closep(&model_file_context);
        return NULL;
    }
    bytes_read = avio_read(model_file_context, graph_data, size);
    avio_closep(&model_file_context);
    if (bytes_read != size){
        av_freep(&graph_data);
        return NULL;
    }

    graph_buf = TF_NewBuffer();
    graph_buf->data = (void *)graph_data;
    graph_buf->length = size;
    graph_buf->data_deallocator = free_buffer;

    return graph_buf;
}

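/**
 * Allocate an NHWC input tensor with batch size 1, matching the DNNData
 * dimensions and data type (DNN_FLOAT or DNN_UINT8).
 */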
static TF_Tensor *allocate_input_tensor(const DNNData *input)
{
    TF_DataType dt;
    size_t size;
    int64_t input_dims[] = {1, input->height, input->width, input->channels};
    switch (input->dt) {
    case DNN_FLOAT:
        dt = TF_FLOAT;
        size = sizeof(float);
        break;
    case DNN_UINT8:
        dt = TF_UINT8;
        size = sizeof(char);
        break;
    default:
        av_assert0(!"should not reach here");
    }

    return TF_AllocateTensor(dt, input_dims, 4,
                             input_dims[1] * input_dims[2] * input_dims[3] * size);
}

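/**
 * Bind the named input and output operations of the graph, (re)allocate the
 * input tensor and the per-output tensor slots, then (re)create the session
 * and run the optional "init" operation. The input tensor's data pointer is
 * exposed through input->data so the caller can fill it before execution.
 */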
static DNNReturnType set_input_output_tf(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
    TFModel *tf_model = (TFModel *)model;
    TF_SessionOptions *sess_opts;
    const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");

    // Input operation
    tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, input_name);
    if (!tf_model->input.oper){
        return DNN_ERROR;
    }
    tf_model->input.index = 0;
    if (tf_model->input_tensor){
        TF_DeleteTensor(tf_model->input_tensor);
    }
    tf_model->input_tensor = allocate_input_tensor(input);
    if (!tf_model->input_tensor){
        return DNN_ERROR;
    }
    input->data = (float *)TF_TensorData(tf_model->input_tensor);

    // Output operation
    if (nb_output == 0)
        return DNN_ERROR;

    av_freep(&tf_model->outputs);
    tf_model->outputs = av_malloc_array(nb_output, sizeof(*tf_model->outputs));
    if (!tf_model->outputs)
        return DNN_ERROR;
    for (int i = 0; i < nb_output; ++i) {
        tf_model->outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
        if (!tf_model->outputs[i].oper){
            av_freep(&tf_model->outputs);
            return DNN_ERROR;
        }
        tf_model->outputs[i].index = 0;
    }

    if (tf_model->output_tensors) {
        for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
            if (tf_model->output_tensors[i]) {
                TF_DeleteTensor(tf_model->output_tensors[i]);
                tf_model->output_tensors[i] = NULL;
            }
        }
    }
    av_freep(&tf_model->output_tensors);
    tf_model->output_tensors = av_mallocz_array(nb_output, sizeof(*tf_model->output_tensors));
    if (!tf_model->output_tensors) {
        av_freep(&tf_model->outputs);
        return DNN_ERROR;
    }

    tf_model->nb_output = nb_output;

    if (tf_model->session){
        TF_CloseSession(tf_model->session, tf_model->status);
        TF_DeleteSession(tf_model->session, tf_model->status);
    }

    sess_opts = TF_NewSessionOptions();
    tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
    TF_DeleteSessionOptions(sess_opts);
    if (TF_GetCode(tf_model->status) != TF_OK)
    {
        return DNN_ERROR;
    }

    // Run initialization operation with name "init" if it is present in graph
    if (init_op){
        TF_SessionRun(tf_model->session, NULL,
                      NULL, NULL, 0,
                      NULL, NULL, 0,
                      &init_op, 1, NULL, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK)
        {
            return DNN_ERROR;
        }
    }

    return DNN_SUCCESS;
}

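/**
 * Import a binary GraphDef from a file into tf_model->graph, creating the
 * graph and status objects. On import failure both are freed again.
 */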
static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
{
    TF_Buffer *graph_def;
    TF_ImportGraphDefOptions *graph_opts;

    graph_def = read_graph(model_filename);
    if (!graph_def){
        return DNN_ERROR;
    }
    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();
    graph_opts = TF_NewImportGraphDefOptions();
    TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
    TF_DeleteImportGraphDefOptions(graph_opts);
    TF_DeleteBuffer(graph_def);
    if (TF_GetCode(tf_model->status) != TF_OK){
        TF_DeleteGraph(tf_model->graph);
        TF_DeleteStatus(tf_model->status);
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}

#define NAME_BUFFER_SIZE 256

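/**
 * Append one convolutional layer from the native model to the TF graph:
 * a kernel Const stored as [out, h, w, in] that is transposed (via
 * transpose_op, perm {1,2,3,0}) into the [h, w, in, out] layout Conv2D
 * expects, a VALID-padded Conv2D, a bias Const plus BiasAdd, and the
 * configured activation (Relu, Tanh or Sigmoid). *cur_op is advanced to the
 * activation output.
 */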
static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
                                    ConvolutionalParams* params, const int layer)
{
    TF_Operation *op;
    TF_OperationDescription *op_desc;
    TF_Output input;
    int64_t strides[] = {1, 1, 1, 1};
    TF_Tensor *tensor;
    int64_t dims[4];
    int dims_len;
    char name_buffer[NAME_BUFFER_SIZE];
    int32_t size;

    size = params->input_num * params->output_num * params->kernel_size * params->kernel_size;
    input.index = 0;

    snprintf(name_buffer, NAME_BUFFER_SIZE, "conv_kernel%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    dims[0] = params->output_num;
    dims[1] = params->kernel_size;
    dims[2] = params->kernel_size;
    dims[3] = params->input_num;
    dims_len = 4;
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
    memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "transpose%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Transpose", name_buffer);
    input.oper = op;
    TF_AddInput(op_desc, input);
    input.oper = transpose_op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrType(op_desc, "Tperm", TF_INT32);
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "conv2d%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Conv2D", name_buffer);
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrIntList(op_desc, "strides", strides, 4);
    TF_SetAttrString(op_desc, "padding", "VALID", 5);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "conv_biases%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    dims[0] = params->output_num;
    dims_len = 1;
    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
    memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "bias_add%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "BiasAdd", name_buffer);
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "activation%d", layer);
    switch (params->activation){
    case RELU:
        op_desc = TF_NewOperation(tf_model->graph, "Relu", name_buffer);
        break;
    case TANH:
        op_desc = TF_NewOperation(tf_model->graph, "Tanh", name_buffer);
        break;
    case SIGMOID:
        op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);
        break;
    default:
        return DNN_ERROR;
    }
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}

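/**
 * Append a DepthToSpace operation (pixel-shuffle upscaling by
 * params->block_size) after *cur_op.
 */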
static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
                                              DepthToSpaceParams *params, const int layer)
{
    TF_OperationDescription *op_desc;
    TF_Output input;
    char name_buffer[NAME_BUFFER_SIZE];

    snprintf(name_buffer, NAME_BUFFER_SIZE, "depth_to_space%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "DepthToSpace", name_buffer);
    input.oper = *cur_op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrInt(op_desc, "block_size", params->block_size);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}

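/**
 * Append a MirrorPad operation in SYMMETRIC mode, feeding it a 4x2 Const
 * of per-dimension paddings taken from the native layer parameters.
 */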
static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
                                   LayerPadParams *params, const int layer)
{
    TF_Operation *op;
    TF_Tensor *tensor;
    TF_OperationDescription *op_desc;
    TF_Output input;
    int32_t *pads;
    int64_t pads_shape[] = {4, 2};

    char name_buffer[NAME_BUFFER_SIZE];
    snprintf(name_buffer, NAME_BUFFER_SIZE, "pad%d", layer);

    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_INT32);
    tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 * sizeof(int32_t));
    pads = (int32_t *)TF_TensorData(tensor);
    pads[0] = params->paddings[0][0];
    pads[1] = params->paddings[0][1];
    pads[2] = params->paddings[1][0];
    pads[3] = params->paddings[1][1];
    pads[4] = params->paddings[2][0];
    pads[5] = params->paddings[2][1];
    pads[6] = params->paddings[3][0];
    pads[7] = params->paddings[3][1];
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    // use a per-layer node name so graphs with more than one pad layer do not collide
    snprintf(name_buffer, NAME_BUFFER_SIZE, "mirror_pad%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "MirrorPad", name_buffer);
    input.oper = *cur_op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    TF_SetAttrType(op_desc, "Tpaddings", TF_INT32);
    TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}

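/**
 * Append an element-wise Maximum against a scalar Const holding
 * params->val.y, clamping the activations from below.
 */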
static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
                                       DnnLayerMaximumParams *params, const int layer)
{
    TF_Operation *op;
    TF_Tensor *tensor;
    TF_OperationDescription *op_desc;
    TF_Output input;
    float *y;

    char name_buffer[NAME_BUFFER_SIZE];
    snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum/y%d", layer);

    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    tensor = TF_AllocateTensor(TF_FLOAT, NULL, 0, TF_DataTypeSize(TF_FLOAT));
    y = (float *)TF_TensorData(tensor);
    *y = params->val.y;
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum%d", layer);
    op_desc = TF_NewOperation(tf_model->graph, "Maximum", name_buffer);
    input.oper = *cur_op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    input.oper = op;
    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}

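/**
 * Fallback path: load a model in FFmpeg's native format and rebuild it as a
 * TensorFlow graph, layer by layer, between a Placeholder "x" (NHWC, batch 1,
 * dynamic spatial size) and an Identity "y" output node.
 */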
static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
{
    int32_t layer;
    TF_OperationDescription *op_desc;
    TF_Operation *op;
    TF_Operation *transpose_op;
    TF_Tensor *tensor;
    TF_Output input;
    int32_t *transpose_perm;
    int64_t transpose_perm_shape[] = {4};
    int64_t input_shape[] = {1, -1, -1, -1};
    DNNReturnType layer_add_res;
    DNNModel *native_model = NULL;
    ConvolutionalNetwork *conv_network;

    native_model = ff_dnn_load_model_native(model_filename);
    if (!native_model){
        return DNN_ERROR;
    }

    conv_network = (ConvolutionalNetwork *)native_model->model;
    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();

#define CLEANUP_ON_ERROR(tf_model) \
    { \
        TF_DeleteGraph(tf_model->graph); \
        TF_DeleteStatus(tf_model->status); \
        return DNN_ERROR; \
    }

    op_desc = TF_NewOperation(tf_model->graph, "Placeholder", "x");
    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
    TF_SetAttrShape(op_desc, "shape", input_shape, 4);
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        CLEANUP_ON_ERROR(tf_model);
    }

    op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
    TF_SetAttrType(op_desc, "dtype", TF_INT32);
    tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
    transpose_perm = (int32_t *)TF_TensorData(tensor);
    transpose_perm[0] = 1;
    transpose_perm[1] = 2;
    transpose_perm[2] = 3;
    transpose_perm[3] = 0;
    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        CLEANUP_ON_ERROR(tf_model);
    }
    transpose_op = TF_FinishOperation(op_desc, tf_model->status);

    for (layer = 0; layer < conv_network->layers_num; ++layer){
        switch (conv_network->layers[layer].type){
        case DLT_INPUT:
            layer_add_res = DNN_SUCCESS;
            break;
        case DLT_CONV2D:
            layer_add_res = add_conv_layer(tf_model, transpose_op, &op,
                                           (ConvolutionalParams *)conv_network->layers[layer].params, layer);
            break;
        case DLT_DEPTH_TO_SPACE:
            layer_add_res = add_depth_to_space_layer(tf_model, &op,
                                                     (DepthToSpaceParams *)conv_network->layers[layer].params, layer);
            break;
        case DLT_MIRROR_PAD:
            layer_add_res = add_pad_layer(tf_model, &op,
                                          (LayerPadParams *)conv_network->layers[layer].params, layer);
            break;
        case DLT_MAXIMUM:
            layer_add_res = add_maximum_layer(tf_model, &op,
                                              (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
            break;
        default:
            CLEANUP_ON_ERROR(tf_model);
        }

        if (layer_add_res != DNN_SUCCESS){
            CLEANUP_ON_ERROR(tf_model);
        }
    }

    op_desc = TF_NewOperation(tf_model->graph, "Identity", "y");
    input.oper = op;
    input.index = 0;
    TF_AddInput(op_desc, input);
    TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK){
        CLEANUP_ON_ERROR(tf_model);
    }

    ff_dnn_free_model_native(&native_model);

    return DNN_SUCCESS;
}

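/**
 * Load a model for the TF backend: first try it as a TensorFlow GraphDef,
 * then fall back to converting a native-format model. The returned DNNModel
 * carries the TFModel context and the set_input_output callback.
 *
 * Typical use from a filter, as a sketch (the model path, node name "x" and
 * output name list are illustrative, not fixed by this backend):
 *     DNNModel *model = ff_dnn_load_model_tf("srcnn.pb");
 *     model->set_input_output(model->model, &input, "x", output_names, 1);
 *     ff_dnn_execute_model_tf(model, outputs, 1);
 *     ff_dnn_free_model_tf(&model);
 */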
DNNModel *ff_dnn_load_model_tf(const char *model_filename)
{
    DNNModel *model = NULL;
    TFModel *tf_model = NULL;

    model = av_malloc(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    tf_model = av_mallocz(sizeof(TFModel));
    if (!tf_model){
        av_freep(&model);
        return NULL;
    }

    if (load_tf_model(tf_model, model_filename) != DNN_SUCCESS){
        if (load_native_model(tf_model, model_filename) != DNN_SUCCESS){
            av_freep(&tf_model);
            av_freep(&model);

            return NULL;
        }
    }

    model->model = (void *)tf_model;
    model->set_input_output = &set_input_output_tf;

    return model;
}

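/**
 * Run the session once and describe up to nb_output outputs in-place: height,
 * width and channels come from the NHWC tensor dims, data points into the
 * output tensor, and dt is stored directly from TF_TensorType(), which relies
 * on the DNNDataType enum values matching their TF_DataType counterparts.
 */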
DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
{
    TFModel *tf_model = (TFModel *)model->model;
    uint32_t nb = FFMIN(nb_output, tf_model->nb_output);
    if (nb == 0)
        return DNN_ERROR;

    av_assert0(tf_model->output_tensors);
    for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
        if (tf_model->output_tensors[i]) {
            TF_DeleteTensor(tf_model->output_tensors[i]);
            tf_model->output_tensors[i] = NULL;
        }
    }

    TF_SessionRun(tf_model->session, NULL,
                  &tf_model->input, &tf_model->input_tensor, 1,
                  tf_model->outputs, tf_model->output_tensors, nb,
                  NULL, 0, NULL, tf_model->status);

    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }

    for (uint32_t i = 0; i < nb; ++i) {
        outputs[i].height = TF_Dim(tf_model->output_tensors[i], 1);
        outputs[i].width = TF_Dim(tf_model->output_tensors[i], 2);
        outputs[i].channels = TF_Dim(tf_model->output_tensors[i], 3);
        outputs[i].data = TF_TensorData(tf_model->output_tensors[i]);
        outputs[i].dt = TF_TensorType(tf_model->output_tensors[i]);
    }

    return DNN_SUCCESS;
}

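/**
 * Release everything owned by the TF backend: graph, session, status, the
 * input tensor, any remaining output tensors, the output arrays, and the
 * model itself.
 */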
void ff_dnn_free_model_tf(DNNModel **model)
{
    TFModel *tf_model;

    if (*model){
        tf_model = (TFModel *)(*model)->model;
        if (tf_model->graph){
            TF_DeleteGraph(tf_model->graph);
        }
        if (tf_model->session){
            TF_CloseSession(tf_model->session, tf_model->status);
            TF_DeleteSession(tf_model->session, tf_model->status);
        }
        if (tf_model->status){
            TF_DeleteStatus(tf_model->status);
        }
        if (tf_model->input_tensor){
            TF_DeleteTensor(tf_model->input_tensor);
        }
        if (tf_model->output_tensors) {
            for (uint32_t i = 0; i < tf_model->nb_output; ++i) {
                if (tf_model->output_tensors[i]) {
                    TF_DeleteTensor(tf_model->output_tensors[i]);
                    tf_model->output_tensors[i] = NULL;
                }
            }
        }
        av_freep(&tf_model->outputs);
        av_freep(&tf_model->output_tensors);
        av_freep(&tf_model);
        av_freep(model);
    }
}