# Copyright (c) 2019 Guo Yejun
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
import tensorflow as tf
import numpy as np
import struct

import convert_header as header
__all__ = ['convert_from_tensorflow']

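# An Operand describes one data buffer referenced by the converted layers:
# its name, data type, 4-D dims, and whether it is used as an input, an
# output, or both. Each Operand gets a global index in creation order.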
class Operand(object):
    IOTYPE_INPUT = 1
    IOTYPE_OUTPUT = 2
    IOTYPE_INTERMEDIATE = IOTYPE_INPUT | IOTYPE_OUTPUT
    DTYPE_FLOAT = 1
    DTYPE_UINT8 = 4
    index = 0

    def __init__(self, name, dtype, dims):
        self.name = name
        self.dtype = dtype
        self.dims = dims
        self.iotype = 0
        self.used_count = 0
        self.index = Operand.index
        Operand.index = Operand.index + 1
        self.iotype2str = {Operand.IOTYPE_INPUT: 'in', Operand.IOTYPE_OUTPUT: 'out', Operand.IOTYPE_INTERMEDIATE: 'inout'}
        self.dtype2str = {Operand.DTYPE_FLOAT: 'DT_FLOAT', Operand.DTYPE_UINT8: 'DT_UINT8'}

    def add_iotype(self, iotype):
        self.iotype = self.iotype | iotype
        if iotype == Operand.IOTYPE_INPUT:
            self.used_count = self.used_count + 1

    def __str__(self):
        return "{}: (name: {}, iotype: {}, dtype: {}, dims: {}, used_count: {})".format(self.index,
                            self.name, self.iotype2str[self.iotype], self.dtype2str[self.dtype],
                            self.dims, self.used_count)

    def __lt__(self, other):
        return self.index < other.index

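# TFConverter walks a frozen TensorFlow graph_def, folds Identity nodes,
# groups the nodes that form conv2d/dense sub-blocks, and serializes the
# supported layers and their operands into the native model file format
# declared in convert_header.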
class TFConverter:
    def __init__(self, graph_def, nodes, outfile, dump4tb):
        self.graph_def = graph_def
        self.nodes = nodes
        self.outfile = outfile
        self.dump4tb = dump4tb
        self.layer_number = 0
        self.output_names = []
        self.name_node_dict = {}
        self.edges = {}
        self.conv_activations = {'Relu':0, 'Tanh':1, 'Sigmoid':2, 'None':3, 'LeakyRelu':4}
        self.conv_paddings = {'VALID':0, 'SAME':1}
        self.pool_paddings = {'VALID':0, 'SAME':1}
        self.converted_nodes = set()
        self.conv2d_scope_names = set()
        self.conv2d_scopename_inputname_dict = {}
        self.dense_scope_names = set()
        self.dense_scopename_inputname_dict = {}
        self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4,
                        'MathBinary':5, 'MathUnary':6, 'AvgPool':7, 'MatMul':8}
        self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4, 'FloorMod':5}
        self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
                            'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
                            'Acosh':11, 'Atanh':12, 'Ceil':13, 'Floor':14, 'Round':15}
        self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
        self.name_operand_dict = {}

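    # Look up (or create) the Operand for 'name', record how it is used
    # (input/output), and return its index. dtype falls back from the 'dtype'
    # attribute to 'T'; dims come from the 'shape' attribute when present.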
    def add_operand(self, name, type):
        node = self.name_node_dict[name]
        if name not in self.name_operand_dict:
            dtype = node.attr['dtype'].type
            if dtype == 0:
                dtype = node.attr['T'].type
            dims = [-1, -1, -1, -1]
            if 'shape' in node.attr:
                dims[0] = node.attr['shape'].shape.dim[0].size
                dims[1] = node.attr['shape'].shape.dim[1].size
                dims[2] = node.attr['shape'].shape.dim[2].size
                dims[3] = node.attr['shape'].shape.dim[3].size
            operand = Operand(name, dtype, dims)
            self.name_operand_dict[name] = operand
        self.name_operand_dict[name].add_iotype(type)
        return self.name_operand_dict[name].index

    def dump_for_tensorboard(self):
        graph = tf.get_default_graph()
        tf.import_graph_def(self.graph_def, name="")
        tf.summary.FileWriter('/tmp/graph', graph)
        print('graph saved, run "tensorboard --logdir=/tmp/graph" to see it')

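    # Collect the kernel, bias, dilation_rate and activation nodes that make
    # up a conv2d sub-block; dilation and activation may be absent (None).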
    def get_conv2d_params(self, conv2d_scope_name):
        knode = self.name_node_dict[conv2d_scope_name + '/kernel']
        bnode = self.name_node_dict[conv2d_scope_name + '/bias']

        if conv2d_scope_name + '/dilation_rate' in self.name_node_dict:
            dnode = self.name_node_dict[conv2d_scope_name + '/dilation_rate']
        else:
            dnode = None

        # the BiasAdd name may have been changed into the output name,
        # if activation is None and BiasAdd.next is the last op, which is Identity
        if conv2d_scope_name + '/BiasAdd' in self.edges:
            anode = self.edges[conv2d_scope_name + '/BiasAdd'][0]
            if anode.op not in self.conv_activations:
                anode = None
        else:
            anode = None
        return knode, bnode, dnode, anode

    def get_dense_params(self, dense_scope_name):
        knode = self.name_node_dict[dense_scope_name + '/kernel']
        bnode = self.name_node_dict.get(dense_scope_name + '/bias')
        # the BiasAdd name may have been changed into the output name,
        # if activation is None and BiasAdd.next is the last op, which is Identity
        anode = None
        if dense_scope_name + '/BiasAdd' in self.edges:
            anode = self.edges[dense_scope_name + '/BiasAdd'][0]
            if anode.op not in self.conv_activations:
                anode = None
        return knode, bnode, anode

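    # A conv2d layer is serialized as a uint32 header
    # [op code, dilation, padding, activation, in_channels, out_channels,
    #  filter_height, has_bias], followed by the kernel (transposed so that
    # out_channels is the leading dimension), the raw bias bytes, and finally
    # the input/output operand indices.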
    def dump_complex_conv2d_to_file(self, node, f):
        assert(node.op == 'Conv2D')
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)

        scope_name = TFConverter.get_scope_name(node.name)
        #knode for kernel, bnode for bias, dnode for dilation, anode for activation
        knode, bnode, dnode, anode = self.get_conv2d_params(scope_name)

        if dnode is not None:
            dilation = struct.unpack('i', dnode.attr['value'].tensor.tensor_content[0:4])[0]
        else:
            dilation = 1

        if anode is not None:
            activation = anode.op
        else:
            activation = 'None'

        padding = node.attr['padding'].s.decode("utf-8")
        # conv2d with dilation > 1 generates tens of nodes that are not easy to parse, so use this trick.
        if dilation > 1 and scope_name + '/stack' in self.name_node_dict:
            if self.name_node_dict[scope_name + '/stack'].op == "Const":
                padding = 'SAME'
        padding = self.conv_paddings[padding]

        ktensor = knode.attr['value'].tensor
        filter_height = ktensor.tensor_shape.dim[0].size
        filter_width = ktensor.tensor_shape.dim[1].size
        in_channels = ktensor.tensor_shape.dim[2].size
        out_channels = ktensor.tensor_shape.dim[3].size
        kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
        kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
        kernel = np.transpose(kernel, [3, 0, 1, 2])

        has_bias = 1
        np.array([self.op2code[node.op], dilation, padding, self.conv_activations[activation], in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
        kernel.tofile(f)

        btensor = bnode.attr['value'].tensor
        if btensor.tensor_shape.dim[0].size == 1:
            bias = struct.pack("f", btensor.float_val[0])
        else:
            bias = btensor.tensor_content
        f.write(bias)

        input_name = self.conv2d_scopename_inputname_dict[scope_name]
        input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)

        if anode is not None:
            output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
        else:
            output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

    def dump_dense_to_file(self, node, f):
        assert(node.op == 'MatMul')
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)

        scope_name = TFConverter.get_scope_name(node.name)
        #knode for kernel, bnode for bias, anode for activation
        knode, bnode, anode = self.get_dense_params(scope_name.split('/')[0])

        if bnode is not None:
            has_bias = 1
            btensor = bnode.attr['value'].tensor
            if btensor.tensor_shape.dim[0].size == 1:
                bias = struct.pack("f", btensor.float_val[0])
            else:
                bias = btensor.tensor_content
        else:
            has_bias = 0

        if anode is not None:
            activation = anode.op
        else:
            activation = 'None'

        ktensor = knode.attr['value'].tensor
        in_channels = ktensor.tensor_shape.dim[0].size
        out_channels = ktensor.tensor_shape.dim[1].size
        if in_channels * out_channels == 1:
            kernel = np.float32(ktensor.float_val[0])
        else:
            kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
        kernel = kernel.reshape(in_channels, out_channels)
        kernel = np.transpose(kernel, [1, 0])

        np.array([self.op2code[node.op], self.conv_activations[activation], in_channels, out_channels, has_bias], dtype=np.uint32).tofile(f)
        kernel.tofile(f)
        if has_bias:
            f.write(bias)

        input_name = self.dense_scopename_inputname_dict[scope_name.split('/')[0]]
        input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)

        if anode is not None:
            output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
        else:
            if bnode is not None:
                output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
            else:
                output_operand_index = self.add_operand(self.edges[scope_name+'/concat_1'][0].name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

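    # Handle a Conv2D node whose kernel is a plain Const input (tf.nn.conv2d
    # called directly): no bias, no dilation sub-block, no fused activation.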
    def dump_simple_conv2d_to_file(self, node, f):
        assert(node.op == 'Conv2D')
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)

        node0 = self.name_node_dict[node.input[0]]
        node1 = self.name_node_dict[node.input[1]]
        if node0.op == 'Const':
            knode = node0
            input_name = node.input[1]
        else:
            knode = node1
            input_name = node.input[0]

        ktensor = knode.attr['value'].tensor
        filter_height = ktensor.tensor_shape.dim[0].size
        filter_width = ktensor.tensor_shape.dim[1].size
        in_channels = ktensor.tensor_shape.dim[2].size
        out_channels = ktensor.tensor_shape.dim[3].size
        if filter_height * filter_width * in_channels * out_channels == 1:
            kernel = np.float32(ktensor.float_val[0])
        else:
            kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
        kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
        kernel = np.transpose(kernel, [3, 0, 1, 2])

        has_bias = 0
        dilation = 1
        padding = node.attr['padding'].s.decode("utf-8")
        np.array([self.op2code[node.op], dilation, self.conv_paddings[padding], self.conv_activations['None'],
                  in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
        kernel.tofile(f)

        input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

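    # The remaining simple ops follow the same pattern: write the op code and
    # its parameters, then the input and output operand indices.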
    def dump_depth2space_to_file(self, node, f):
        assert(node.op == 'DepthToSpace')
        self.layer_number = self.layer_number + 1
        block_size = node.attr['block_size'].i
        np.array([self.op2code[node.op], block_size], dtype=np.uint32).tofile(f)
        self.converted_nodes.add(node.name)
        input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

    def dump_mirrorpad_to_file(self, node, f):
        assert(node.op == 'MirrorPad')
        self.layer_number = self.layer_number + 1
        mode = node.attr['mode'].s
        mode = self.mirrorpad_mode[mode.decode("utf-8")]
        np.array([self.op2code[node.op], mode], dtype=np.uint32).tofile(f)
        pnode = self.name_node_dict[node.input[1]]
        self.converted_nodes.add(pnode.name)
        paddings = pnode.attr['value'].tensor.tensor_content
        f.write(paddings)
        self.converted_nodes.add(node.name)
        input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

    def dump_maximum_to_file(self, node, f):
        assert(node.op == 'Maximum')
        self.layer_number = self.layer_number + 1
        ynode = self.name_node_dict[node.input[1]]
        y = ynode.attr['value'].tensor.float_val[0]
        np.array([self.op2code[node.op]], dtype=np.uint32).tofile(f)
        np.array([y], dtype=np.float32).tofile(f)
        self.converted_nodes.add(node.name)
        input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

    def dump_mathbinary_to_file(self, node, f):
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)
        i0_node = self.name_node_dict[node.input[0]]
        i1_node = self.name_node_dict[node.input[1]]
        np.array([self.op2code['MathBinary'], self.mathbin2code[node.op]], dtype=np.uint32).tofile(f)
        if i0_node.op == 'Const':
            scalar = i0_node.attr['value'].tensor.float_val[0]
            np.array([1], dtype=np.uint32).tofile(f)  # broadcast: 1
            np.array([scalar], dtype=np.float32).tofile(f)
            np.array([0], dtype=np.uint32).tofile(f)  # broadcast: 0
            input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
        elif i1_node.op == 'Const':
            scalar = i1_node.attr['value'].tensor.float_val[0]
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
            np.array([1], dtype=np.uint32).tofile(f)
            np.array([scalar], dtype=np.float32).tofile(f)
        else:
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([output_operand_index], dtype=np.uint32).tofile(f)

    def dump_mathunary_to_file(self, node, f):
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)
        i0_node = self.name_node_dict[node.input[0]]
        np.array([self.op2code['MathUnary'], self.mathun2code[node.op]], dtype=np.uint32).tofile(f)
        input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
        np.array([input_operand_index], dtype=np.uint32).tofile(f)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([output_operand_index], dtype=np.uint32).tofile(f)

    def dump_avg_pool_to_file(self, node, f):
        assert(node.op == 'AvgPool')
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)
        node0 = self.name_node_dict[node.input[0]]
        strides = node.attr['strides']

        # TensorFlow does not support pooling strides in the batch dimension and
        # the current native NN does not support pooling strides in the channel
        # dimension, hence the assert()s here.
        assert(strides.list.i[1] == strides.list.i[2])
        assert(strides.list.i[0] == 1)
        assert(strides.list.i[3] == 1)
        strides = strides.list.i[1]
        filter_node = node.attr['ksize']
        input_name = node.input[0]

        # TensorFlow does not support pooling ksize in the batch and channel dimensions.
        assert(filter_node.list.i[0] == 1)
        assert(filter_node.list.i[3] == 1)
        filter_height = filter_node.list.i[1]
        filter_width = filter_node.list.i[2]

        padding = node.attr['padding'].s.decode("utf-8")
        np.array([self.op2code[node.op], strides, self.pool_paddings[padding], filter_height],
                 dtype=np.uint32).tofile(f)

        input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

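    # Dispatch every node that has not been converted yet to the dumper that
    # matches its op; nodes inside a conv2d/dense scope are only emitted via
    # their scope's Conv2D/MatMul node.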
    def dump_layers_to_file(self, f):
        for node in self.nodes:
            if node.name in self.converted_nodes:
                continue

            # conv2d with dilation generates very complex nodes, so handle it specially
            if self.in_conv2d_scope(node.name):
                if node.op == 'Conv2D':
                    self.dump_complex_conv2d_to_file(node, f)
                continue
            if self.in_dense_scope(node.name):
                if node.op == 'MatMul':
                    self.dump_dense_to_file(node, f)
                continue

            if node.op == 'Conv2D':
                self.dump_simple_conv2d_to_file(node, f)
                continue
            if node.name in self.output_names:
                input_name = self.id_different_scope_dict[node.name]
                if TFConverter.get_scope_name(input_name) != TFConverter.get_scope_name(node.name):
                    continue
            if node.op == 'AvgPool':
                self.dump_avg_pool_to_file(node, f)
            elif node.op == 'DepthToSpace':
                self.dump_depth2space_to_file(node, f)
            elif node.op == 'MirrorPad':
                self.dump_mirrorpad_to_file(node, f)
            elif node.op == 'Maximum':
                self.dump_maximum_to_file(node, f)
            elif node.op in self.mathbin2code:
                self.dump_mathbinary_to_file(node, f)
            elif node.op in self.mathun2code:
                self.dump_mathunary_to_file(node, f)

    def dump_operands_to_file(self, f):
        operands = sorted(self.name_operand_dict.values())
        for operand in operands:
            #print('{}'.format(operand))
            np.array([operand.index, len(operand.name)], dtype=np.uint32).tofile(f)
            f.write(operand.name.encode('utf-8'))
            np.array([operand.iotype, operand.dtype], dtype=np.uint32).tofile(f)
            np.array(operand.dims, dtype=np.uint32).tofile(f)

    def dump_to_file(self):
        with open(self.outfile, 'wb') as f:
            f.write(header.str.encode('utf-8'))
            np.array([header.major, header.minor], dtype=np.uint32).tofile(f)
            self.dump_layers_to_file(f)
            self.dump_operands_to_file(f)
            np.array([self.layer_number, len(self.name_operand_dict)], dtype=np.uint32).tofile(f)

    def generate_name_node_dict(self):
        for node in self.nodes:
            self.name_node_dict[node.name] = node

    def generate_output_names(self):
        used_names = []
        for node in self.nodes:
            for input in node.input:
                used_names.append(input)

        for node in self.nodes:
            if node.name not in used_names:
                self.output_names.append(node.name)

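    # Fold Identity nodes: graph outputs keep the Identity name, all other
    # Identity nodes are removed and their consumers rewired to the
    # Identity's input.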
    def remove_identity(self):
        self.id_different_scope_dict = {}
        id_nodes = []
        id_dict = {}
        for node in self.nodes:
            if node.op == 'Identity':
                name = node.name
                input = node.input[0]
                id_nodes.append(node)
                # do not change the output name
                if name in self.output_names:
                    self.name_node_dict[input].name = name
                    self.name_node_dict[name] = self.name_node_dict[input]
                    del self.name_node_dict[input]
                    self.id_different_scope_dict[name] = input
                else:
                    id_dict[name] = input

        for idnode in id_nodes:
            self.nodes.remove(idnode)

        for node in self.nodes:
            for i in range(len(node.input)):
                input = node.input[i]
                if input in id_dict:
                    node.input[i] = id_dict[input]

    def generate_edges(self):
        for node in self.nodes:
            for input in node.input:
                if input in self.edges:
                    self.edges[input].append(node)
                else:
                    self.edges[input] = [node]

    @staticmethod
    def get_scope_name(name):
        index = name.rfind('/')
        if index == -1:
            return ""
        return name[0:index]

    def in_conv2d_scope(self, name):
        inner_scope = TFConverter.get_scope_name(name)
        if inner_scope == "":
            return False
        for scope in self.conv2d_scope_names:
            index = inner_scope.find(scope)
            if index == 0:
                return True
        return False

    def in_dense_scope(self, name):
        inner_scope = TFConverter.get_scope_name(name)
        if inner_scope == "":
            return False
        for scope in self.dense_scope_names:
            index = inner_scope.find(scope)
            if index == 0:
                return True
        return False

    def generate_sub_block_op_scope_info(self):
        # mostly, conv2d/dense is a sub-block in the graph; get its scope name
        for node in self.nodes:
            if node.op == 'Conv2D':
                scope = TFConverter.get_scope_name(node.name)
                # for the case tf.nn.conv2d is called directly
                if scope == '':
                    continue
                # for the case tf.nn.conv2d is called within a scope
                if scope + '/kernel' not in self.name_node_dict:
                    continue
                self.conv2d_scope_names.add(scope)
            elif node.op == 'MatMul':
                scope = TFConverter.get_scope_name(node.name)
                # for the case tf.nn.dense is called directly
                if scope == '':
                    continue
                # for the case tf.nn.dense is called within a scope
                if scope + '/kernel' not in self.name_node_dict and scope.split('/Tensordot')[0] + '/kernel' not in self.name_node_dict:
                    continue
                self.dense_scope_names.add(scope.split('/Tensordot')[0])

        # get the input name to the conv2d/dense sub-block
        for node in self.nodes:
            scope = TFConverter.get_scope_name(node.name)
            if scope in self.conv2d_scope_names:
                if node.op == 'Conv2D' or node.op == 'Shape':
                    for inp in node.input:
                        if TFConverter.get_scope_name(inp) != scope:
                            self.conv2d_scopename_inputname_dict[scope] = inp
            elif scope in self.dense_scope_names:
                if node.op == 'MatMul' or node.op == 'Shape':
                    for inp in node.input:
                        if TFConverter.get_scope_name(inp) != scope:
                            self.dense_scopename_inputname_dict[scope] = inp
            elif scope.split('/Tensordot')[0] in self.dense_scope_names:
                if node.op == 'Transpose':
                    for inp in node.input:
                        if TFConverter.get_scope_name(inp).find(scope) < 0 and TFConverter.get_scope_name(inp).find(scope.split('/')[0]) < 0:
                            self.dense_scopename_inputname_dict[scope.split('/Tensordot')[0]] = inp

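    # Conversion pipeline: build the lookup tables, simplify the graph,
    # optionally dump it for TensorBoard, then write the model file.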
    def run(self):
        self.generate_name_node_dict()
        self.generate_output_names()
        self.remove_identity()
        self.generate_edges()
        self.generate_sub_block_op_scope_info()

        if self.dump4tb:
            self.dump_for_tensorboard()

        self.dump_to_file()

def convert_from_tensorflow(infile, outfile, dump4tb):
    with open(infile, 'rb') as f:
        # read the file in .proto format
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        nodes = graph_def.node

    converter = TFConverter(graph_def, nodes, outfile, dump4tb)
    converter.run()
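
# Minimal usage sketch: this module is normally imported by a driver script
# (e.g. convert.py) rather than run directly, and the file names below are
# placeholders only.
#
#   convert_from_tensorflow('model.pb', 'model.model', False)
#
# This reads the frozen TensorFlow graph from 'model.pb' and writes the
# converted native model to 'model.model'.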