In TensorFlow 1.x, given a frozen pb file (graph with weights baked in), you can fetch the output of any layer with get_tensor_by_name.
import os

import cv2
import numpy as np
import tensorflow as tf

# Force CPU execution
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

graph_path = "./model.pb"
image_path = "./lvds1901.JPG"
input_tensor_name = "input_tensor:0"
tensor_name = "loss/inference/encode/resize_images/ResizeBilinear"
file_path = "./net_output.txt"
HEIGHT = 256
WIDTH = 256
VGG_MEAN = [103.939, 116.779, 123.68]  # per-channel BGR means, matching cv2's channel order

with tf.Graph().as_default():
    # Load the frozen graph (weights are baked into the pb file)
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(graph_path, "rb") as fid:
        serialized_graph = fid.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name="")

    # Preprocess: resize to the network input size and subtract the mean
    image = cv2.imread(image_path)
    image = cv2.resize(image, (WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)
    image_np = np.array(image, dtype=np.float32)
    image_np = image_np - VGG_MEAN
    image_np_expanded = np.expand_dims(image_np, axis=0)

    with tf.Session() as sess:
        graph_ops = tf.get_default_graph().get_operations()  # all ops in the graph, handy for locating layer names
        # Look up the intermediate tensor and the input placeholder by name
        output_tensor = tf.get_default_graph().get_tensor_by_name(tensor_name + ":0")
        image_tensor = tf.get_default_graph().get_tensor_by_name(input_tensor_name)
        output = sess.run(output_tensor, feed_dict={image_tensor: image_np_expanded})

        # Dump the layer output in NCHW order, ten values per line
        with open(file_path, "w") as ftxt:
            values = output.transpose(0, 3, 1, 2).flatten()
            for count, value in enumerate(values):
                if count % 10 == 0 and count != 0:
                    ftxt.write("\n")
                ftxt.write(str(value) + ",")
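To pick the right value for tensor_name in the first place, you can enumerate the imported graph. Below is a minimal sketch (assuming the same ./model.pb as above) that prints every operation name; appending ":0" to the chosen operation name addresses that operation's first output tensor, which is the form get_tensor_by_name expects.

# Sketch: list operation names in the frozen graph to find the layer you want.
# Assumes ./model.pb exists; the path is the same one used above.
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("./model.pb", "rb") as fid:
    graph_def.ParseFromString(fid.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    for op in graph.get_operations():
        print(op.name)  # pass op.name + ":0" to get_tensor_by_name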