# Copyright (c) 2019 Guo Yejun
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================

import tensorflow as tf
import numpy as np
import sys, struct

__all__ = ['convert_from_tensorflow']

class TFConverter:
    def __init__(self, graph_def, nodes, outfile):
        self.graph_def = graph_def
        self.nodes = nodes
        self.outfile = outfile
        self.layer_number = 0
        self.output_names = []
        self.name_node_dict = {}
        self.edges = {}
        self.conv_activations = {'Relu':0, 'Tanh':1, 'Sigmoid':2, 'LeakyRelu':4}
        self.conv_paddings = {'VALID':0, 'SAME':1}
        self.converted_nodes = set()
        self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3}
        self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
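
    # Descriptive note (added comment, not in the upstream file): the integer codes
    # in the tables above are the values actually serialized into the native model
    # file by the dump_*_to_file() methods below, so they are expected to stay in
    # sync with the corresponding definitions on the C side of FFmpeg's native DNN
    # backend and should not be changed here alone.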

    def dump_for_tensorboard(self):
        graph = tf.get_default_graph()
        tf.import_graph_def(self.graph_def, name="")
        # tensorboard --logdir=/tmp/graph
        tf.summary.FileWriter('/tmp/graph', graph)

    def get_conv2d_params(self, node):
        knode = self.name_node_dict[node.input[1]]
        bnode = None
        activation = 'None'
        next = self.edges[node.name][0]
        if next.op == 'BiasAdd':
            self.converted_nodes.add(next.name)
            bnode = self.name_node_dict[next.input[1]]
            next = self.edges[next.name][0]
        if next.op in self.conv_activations:
            self.converted_nodes.add(next.name)
            activation = next.op
        return knode, bnode, activation

    def dump_conv2d_to_file(self, node, f):
        assert(node.op == 'Conv2D')
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)
        knode, bnode, activation = self.get_conv2d_params(node)

        dilation = node.attr['dilations'].list.i[0]
        padding = node.attr['padding'].s
        padding = self.conv_paddings[padding.decode("utf-8")]

        ktensor = knode.attr['value'].tensor
        filter_height = ktensor.tensor_shape.dim[0].size
        filter_width = ktensor.tensor_shape.dim[1].size
        in_channels = ktensor.tensor_shape.dim[2].size
        out_channels = ktensor.tensor_shape.dim[3].size
        kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
        kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
        kernel = np.transpose(kernel, [3, 0, 1, 2])

        np.array([self.op2code[node.op], dilation, padding, self.conv_activations[activation], in_channels, out_channels, filter_height], dtype=np.uint32).tofile(f)
        kernel.tofile(f)

        btensor = bnode.attr['value'].tensor
        if btensor.tensor_shape.dim[0].size == 1:
            bias = struct.pack("f", btensor.float_val[0])
        else:
            bias = btensor.tensor_content
        f.write(bias)
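
    # Layout of a Conv2D record as written by dump_conv2d_to_file() above
    # (added descriptive comment, derived from the code itself):
    #   7 x uint32:  op code (1), dilation, padding, activation, in_channels,
    #                out_channels, kernel size (only filter_height is stored,
    #                so square kernels are assumed)
    #   float32[out_channels * filter_height * filter_width * in_channels]:
    #                kernel weights, transposed from TensorFlow's HWIO layout
    #                to OHWI
    #   float32 bias values (a single packed float if the bias tensor has one
    #                element, otherwise the raw bias tensor bytes)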

    def dump_depth2space_to_file(self, node, f):
        assert(node.op == 'DepthToSpace')
        self.layer_number = self.layer_number + 1
        block_size = node.attr['block_size'].i
        np.array([self.op2code[node.op], block_size], dtype=np.uint32).tofile(f)
        self.converted_nodes.add(node.name)

    def dump_mirrorpad_to_file(self, node, f):
        assert(node.op == 'MirrorPad')
        self.layer_number = self.layer_number + 1
        mode = node.attr['mode'].s
        mode = self.mirrorpad_mode[mode.decode("utf-8")]
        np.array([self.op2code[node.op], mode], dtype=np.uint32).tofile(f)
        pnode = self.name_node_dict[node.input[1]]
        self.converted_nodes.add(pnode.name)
        paddings = pnode.attr['value'].tensor.tensor_content
        f.write(paddings)
        self.converted_nodes.add(node.name)
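
    # For reference (added comment, derived from the two methods above):
    #   DepthToSpace record: 2 x uint32 -> op code (2), block_size
    #   MirrorPad record:    2 x uint32 -> op code (3), pad mode, followed by the
    #                        raw bytes of the paddings constant (typically int32
    #                        [before, after] pairs, one pair per dimension)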

    def generate_layer_number(self):
        # In the current hard-coded implementation the layer count is the first field
        # written to the native model file, but it is not known until all nodes have
        # been visited, so do a dry-run dump to a scratch file just to count the
        # layers. This will be refined later.
        with open('/tmp/tmp.model', 'wb') as f:
            self.dump_layers_to_file(f)
        self.converted_nodes.clear()

    def dump_layers_to_file(self, f):
        for node in self.nodes:
            if node.name in self.converted_nodes:
                continue
            if node.op == 'Conv2D':
                self.dump_conv2d_to_file(node, f)
            elif node.op == 'DepthToSpace':
                self.dump_depth2space_to_file(node, f)
            elif node.op == 'MirrorPad':
                self.dump_mirrorpad_to_file(node, f)

    def dump_to_file(self):
        self.generate_layer_number()
        with open(self.outfile, 'wb') as f:
            np.array([self.layer_number], dtype=np.uint32).tofile(f)
            self.dump_layers_to_file(f)
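
    # Overall native model file layout (added comment, derived from dump_to_file()):
    #   uint32: number of layers
    #   followed by one record per supported layer, in graph order, using the
    #   per-op formats described above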

    def generate_name_node_dict(self):
        for node in self.nodes:
            self.name_node_dict[node.name] = node

    def generate_output_names(self):
        used_names = []
        for node in self.nodes:
            for input in node.input:
                used_names.append(input)

        for node in self.nodes:
            if node.name not in used_names:
                self.output_names.append(node.name)

    def remove_identity(self):
        id_nodes = []
        id_dict = {}
        for node in self.nodes:
            if node.op == 'Identity':
                name = node.name
                input = node.input[0]
                id_nodes.append(node)
                # do not change the output name
                if name in self.output_names:
                    self.name_node_dict[input].name = name
                    self.name_node_dict[name] = self.name_node_dict[input]
                    del self.name_node_dict[input]
                else:
                    id_dict[name] = input

        for idnode in id_nodes:
            self.nodes.remove(idnode)

        for node in self.nodes:
            for i in range(len(node.input)):
                input = node.input[i]
                if input in id_dict:
                    node.input[i] = id_dict[input]
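
    # Example (added comment, hypothetical node names): given
    #   Conv2D 'conv' -> Identity 'output'
    # the Identity node is dropped. If 'output' is a graph output, the producer
    # 'conv' is renamed to 'output' so the model keeps its original output name;
    # otherwise the Identity is simply removed and any consumers of 'output' are
    # rewired to read from 'conv'.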

    def generate_edges(self):
        for node in self.nodes:
            for input in node.input:
                if input in self.edges:
                    self.edges[input].append(node)
                else:
                    self.edges[input] = [node]

    def run(self):
        self.generate_name_node_dict()
        self.generate_output_names()
        self.remove_identity()
        self.generate_edges()

        # uncomment to inspect the graph visually with tensorboard
        #self.dump_for_tensorboard()

        self.dump_to_file()


def convert_from_tensorflow(infile, outfile):
    with open(infile, 'rb') as f:
        # read the frozen graph in serialized GraphDef (.pb) format
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        nodes = graph_def.node

    converter = TFConverter(graph_def, nodes, outfile)
    converter.run()
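

if __name__ == '__main__':
    # Illustrative usage sketch (added, not part of the upstream file): convert a
    # frozen TensorFlow graph into the native model format consumed by FFmpeg's
    # DNN native backend. The file names below are placeholders; in the FFmpeg
    # tree the conversion is normally driven by a separate wrapper script.
    convert_from_tensorflow('model.pb', 'model.model')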