TFLite: dumping the model structure, tensors, and nodes; feeding input data, running inference, and reading the results

1. Model structure obtained when training with Keras

   ...: model.summary()
   ...: 
Model: "mnist_model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
img (InputLayer)             [(None, 784)]             0         
_________________________________________________________________
dense (Dense)                (None, 64)                50240     (64*784 + 64)
_________________________________________________________________
dense_1 (Dense)              (None, 64)                4160      (64*64 + 64)
_________________________________________________________________
dense_2 (Dense)              (None, 10)                650       (64*10 + 10)
=================================================================
Total params: 55,050
Trainable params: 55,050
Non-trainable params: 0
_________________________________________________________________


Convert the .h5 model to .pb and then inspect it with TensorBoard

https://github.com/amir-abdi/keras_to_tensorflow/blob/master/keras_to_tensorflow.py

The script currently has issues; going through its Python code should be enough to sort them out.

Inspecting the interpreter by printing logs

See lite/optional_debug_tools.h and lite/optional_debug_tools.cc:

  // Prints a dump of what tensors and what nodes are in the interpreter.
  void PrintInterpreterState(Interpreter* interpreter) {
    printf("Interpreter has %zu tensors and %zu nodes\n",
           interpreter->tensors_size(), interpreter->nodes_size());
    printf("Inputs:");
    PrintIntVector(interpreter->inputs());  // a vector<int> of tensor indices
    printf("Outputs:");
    PrintIntVector(interpreter->outputs());
    printf("\n");
    for (int tensor_index = 0; tensor_index < interpreter->tensors_size();
         tensor_index++) {
      TfLiteTensor* tensor = interpreter->tensor(tensor_index);
      printf("Tensor %3d %-20s %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
             tensor->name, TensorTypeName(tensor->type),
             AllocTypeName(tensor->allocation_type), tensor->bytes,
             (static_cast<float>(tensor->bytes) / (1 << 20)));
      PrintTfLiteIntVector(tensor->dims);
    }
    printf("\n");
    for (int node_index = 0; node_index < interpreter->nodes_size();
         node_index++) {
      const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
          interpreter->node_and_registration(node_index);
      const TfLiteNode& node = node_and_reg->first;
      const TfLiteRegistration& reg = node_and_reg->second;
      if (reg.custom_name != nullptr) {
        printf("Node %3d Operator Custom Name %s\n", node_index, reg.custom_name);
      } else {
        printf("Node %3d Operator Builtin Code %3d\n", node_index, reg.builtin_code);
      }   
      printf("  Inputs:");
      PrintTfLiteIntVector(node.inputs);
      printf("  Outputs:");
      PrintTfLiteIntVector(node.outputs);
    }
  }
 

  void PrintTfLiteIntVector(const TfLiteIntArray* v) {                                                                                                                                                      
    if (!v) {
      printf(" (null)\n");
      return;
    }
    for (int k = 0; k < v->size; k++) { 
      printf(" %d", v->data[k]); //data的Index表示轴的索引,每个轴上元素的形状(个数)就是data的内容
    } 
    printf("\n");
  }
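
As a reference for how this dump is produced, a minimal driver that loads a converted .tflite file and calls PrintInterpreterState might look like the sketch below (the file name mnist_model.tflite is just a placeholder):

    #include <cstdio>
    #include <memory>

    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"
    #include "tensorflow/lite/optional_debug_tools.h"

    int main() {
      // Load the converted flatbuffer model (file name is a placeholder).
      auto model = tflite::FlatBufferModel::BuildFromFile("mnist_model.tflite");
      if (!model) return 1;

      // Build an interpreter with the builtin op resolver and allocate tensors.
      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      tflite::InterpreterBuilder(*model, resolver)(&interpreter);
      if (!interpreter || interpreter->AllocateTensors() != kTfLiteOk) return 1;

      // Dump tensors and nodes, producing output like the logs below.
      tflite::PrintInterpreterState(interpreter.get());
      return 0;
    }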

What the model file contains

In [1]: import tensorflow as tf
   ...: from tensorflow import keras
   ...: from tensorflow.keras import layers
   ...: 
   ...: inputs = keras.Input(shape=(784,), name='img')
   ...: x = layers.Dense(64, activation='relu')(inputs)
   ...: x = layers.Dense(64, activation='relu')(x)
   ...: outputs = layers.Dense(10, activation='softmax')(x)
   ...: 

In [2]: model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')
   ...: model.summary()
 

Layer (type)                 Output Shape              Param #   
=================================================================
img (InputLayer)             [(None, 784)]             0         
_________________________________________________________________
dense (Dense)                (None, 64)                50240     (64*784 + 64)
_________________________________________________________________
dense_1 (Dense)              (None, 64)                4160      (64*64 + 64)
_________________________________________________________________
dense_2 (Dense)              (None, 10)                650       (64*10 + 10)

 Interpreter

Interpreter has 17 tensors and 4 nodes

Tensors

Inputs: 1
Outputs: 0

Inputs/Outputs above are tensor indices: the model input is tensor 1 (img) and the model output is tensor 0 (Identity). Tensors marked kTfLiteMmapRo hold weights/biases mapped read-only from the model file, while kTfLiteArenaRw tensors are activations allocated in the interpreter's scratch arena.

Tensor   0 Identity             kTfLiteFloat32  kTfLiteArenaRw         40 bytes ( 0.0 MB)  1 10
Tensor   1 img                  kTfLiteFloat32  kTfLiteArenaRw       3136 bytes ( 0.0 MB)  1 784
Tensor   2 mnist_model/dense/MatMul/ReadVariableOp/transpose kTfLiteFloat32   kTfLiteMmapRo     200704 bytes ( 0.2 MB)  64 784
Tensor   3 mnist_model/dense/MatMul_bias kTfLiteFloat32   kTfLiteMmapRo        256 bytes ( 0.0 MB)  64
Tensor   4 mnist_model/dense/Relu kTfLiteFloat32  kTfLiteArenaRw        256 bytes ( 0.0 MB)  1 64
Tensor   5 mnist_model/dense_1/MatMul/ReadVariableOp/transpose kTfLiteFloat32   kTfLiteMmapRo      16384 bytes ( 0.0 MB)  64 64
Tensor   6 mnist_model/dense_1/MatMul_bias kTfLiteFloat32   kTfLiteMmapRo        256 bytes ( 0.0 MB)  64
Tensor   7 mnist_model/dense_1/Relu kTfLiteFloat32  kTfLiteArenaRw        256 bytes ( 0.0 MB)  1 64
Tensor   8 mnist_model/dense_2/BiasAdd kTfLiteFloat32  kTfLiteArenaRw         40 bytes ( 0.0 MB)  1 10
Tensor   9 mnist_model/dense_2/MatMul/ReadVariableOp/transpose kTfLiteFloat32   kTfLiteMmapRo       2560 bytes ( 0.0 MB)  10 64
Tensor  10 mnist_model/dense_2/MatMul_bias kTfLiteFloat32   kTfLiteMmapRo         40 bytes ( 0.0 MB)  10
Tensor  11 (null)               kTfLiteNoType  kTfLiteMemNone          0 bytes ( 0.0 MB)  (null)
Tensor  12 (null)               kTfLiteNoType  kTfLiteMemNone          0 bytes ( 0.0 MB)  (null)
Tensor  13 (null)               kTfLiteNoType  kTfLiteMemNone          0 bytes ( 0.0 MB)  (null)
Tensor  14 (null)               kTfLiteNoType  kTfLiteMemNone          0 bytes ( 0.0 MB)  (null)
Tensor  15 (null)               kTfLiteNoType  kTfLiteMemNone          0 bytes ( 0.0 MB)  (null)
Tensor  16 (null)               kTfLiteNoType  kTfLiteMemNone          0 bytes ( 0.0 MB)  (null)

Nodes

Node   0 Operator Builtin Code   9 FULLY_CONNECTED
  Inputs: 1 2 3 : input / weights / bias (a FULLY_CONNECTED op reads the activations, the weight matrix, and the bias as three separate tensors, which is why each node has three inputs)
  Outputs: 4
Node   1 Operator Builtin Code   9 FULLY_CONNECTED
  Inputs: 4 5 6    : input/weight/bias
  Outputs: 7
Node   2 Operator Builtin Code   9 FULLY_CONNECTED
  Inputs: 7 9 10: input/weight/bias
  Outputs: 8
Node   3 Operator Builtin Code  25 SOFTMAX
  Inputs: 8
  Outputs: 0

How to set the input and run inference

Take a simple float feature_test[784] = {}; as the example input.

1. Get the input tensor

TfLiteTensor* input = interpreter->tensor(interpreter->inputs()[0]);
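
Before writing anything into it, it can help to sanity-check the tensor's type and shape; a small sketch (for this model the input should be kTfLiteFloat32 with dims 1 x 784, as seen in the dump above):

    if (input->type != kTfLiteFloat32) {
      printf("unexpected input type\n");
    }
    // dims->data[k] is the size of axis k; expect 1 and 784 here.
    for (int k = 0; k < input->dims->size; ++k) {
      printf(" %d", input->dims->data[k]);
    }
    printf("\n");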

2. Where is the tensor's data stored?

typedef struct {
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // ... (other fields such as dims, bytes, name, allocation_type omitted)
} TfLiteTensor;

// A union of pointers that points to memory for a given tensor.
typedef union {
  int* i32;
  int64_t* i64;
  float* f;
  char* raw;
  const char* raw_const;
  uint8_t* uint8;
  bool* b;
  int16_t* i16;
  TfLiteComplex64* c64;
} TfLitePtrUnion;

2.1 If the data is uint8

    const uint8_t* yes_features_data = g_yes_f2e59fea_nohash_1_data;                     
    for (int i = 0; i < input->bytes; ++i) {
      input->data.uint8[i] = yes_features_data[i];
    }
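
Since input->bytes is already the total size in bytes, the same byte-for-byte copy can also be written as a single memcpy (needs <cstring>):

    memcpy(input->data.uint8, yes_features_data, input->bytes);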

2.2 If the data is float

    for (int i = 0; i < input->bytes / sizeof(float); ++i) {  // note: a float is 4 bytes, so divide the byte count by sizeof(float)
      input->data.f[i] = feature_test[i];
    }

Getting the results after inference

    // Read the output buffer (10 softmax scores for this model)
    TfLiteTensor* output = interpreter->tensor(interpreter->outputs()[0]);
    for (int i = 0; i < output->bytes / sizeof(float); i++) {
      printf("%f, ", output->data.f[i]);
    }
    printf("\n");