Object detection with object_detection, Part 3: Freezing the Model and Testing
Copy export_inference_graph.py from D:\Tensorflow\toy_detector\slim into the toy_detector root directory, then run the following in cmd:
python export_inference_graph.py --input_type image_tensor --pipeline_config_path ssd_mobilenet_v1_toy.config --trained_checkpoint_prefix train/model.ckpt-1244 --output_directory train/frozen_toydetection_graph.pb
Freezing the model needs only four arguments: 1. the input type, where image_tensor is the default; 2. the config file; 3. the checkpoint prefix; 4. the output directory. The 1244 here corresponds to my last saved checkpoint; change it to match your own training run.
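If you are not sure which checkpoint number is the newest one, a quick way to check (a minimal sketch, assuming your checkpoints are saved under the train directory) is:

import tensorflow as tf

# Prints the newest checkpoint prefix, e.g. train\model.ckpt-1244
print(tf.train.latest_checkpoint('train'))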
The frozen_toydetection_graph.pb folder is produced once the export succeeds; expanded, it contains the following:
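For reference, export_inference_graph.py typically writes these files into the output directory (the exact contents can vary slightly between versions of the Object Detection API):

checkpoint
frozen_inference_graph.pb
model.ckpt.data-00000-of-00001
model.ckpt.index
model.ckpt.meta
pipeline.config
saved_model/ (containing saved_model.pb and a variables folder)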
frozen_inference_graph.pb is the final frozen model we want.
Now read in an image with OpenCV and test the model. The code is as follows:
import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
class TOD(object):
    def __init__(self):
        self.PATH_TO_CKPT = r'D:\Tensorflow\toy_detector\train\frozen_toydetection_graph.pb\frozen_inference_graph.pb'
        self.PATH_TO_LABELS = r'D:\Tensorflow\toy_detector\toy_label_map.pbtxt'
        self.NUM_CLASSES = 1
        self.detection_graph = self._load_model()
        self.category_index = self._load_label_map()
    def _load_model(self):
        # Load the frozen graph (.pb) into a new tf.Graph
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        return detection_graph
    def _load_label_map(self):
        # Build the category index (class id -> class name) from the label map
        label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(
            label_map,
            max_num_classes=self.NUM_CLASSES,
            use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        return category_index
    def detect(self, image):
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph) as sess:
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image, axis=0)
                image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
                classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    self.category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)
        cv2.namedWindow("detection", cv2.WINDOW_NORMAL)
        cv2.imshow("detection", image)
        cv2.waitKey(0)
if __name__ == '__main__':
    # Put image.jpg in the toy_detector directory
    image = cv2.imread('image.jpg')
    detector = TOD()
    detector.detect(image)
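One caveat worth knowing (an assumption on my part, not something verified in the original post for this model): cv2.imread returns images in BGR channel order, while the Object Detection API models are generally trained on RGB images. If the detection scores look lower than expected, converting the image before detection is an easy thing to try:

image = cv2.imread('image.jpg')
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # BGR -> RGB for the detector
detector = TOD()
detector.detect(image_rgb)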
The result: the toy is detected successfully! Even though training had not finished, the intermediate checkpoint still gives a usable model.