# [TensorFlow] 기초 개념 및 수행 결과 _ 4. 저장한 weight를 restore 해서 사용하기

## tensorflow 저장했던 weight를 restore(읽어오기) 해서 사용하기

출처

### 저장된 친구들을 불러오자

In [2]:
import tensorflow as tf
In [3]:
input_data = [[1,5,3,7,8,10,12],
              [5,8,10,3,9,7,1]]
label_data = [[0,0,0,1,0],
              [1,0,0,0,0]]
​
INPUT_SIZE = 7
HIDDEN_SIZE_1 = 10
HIDDEN_SIZE_2 = 8
CLASSES = 5
In [4]:
x = tf.placeholder(tf.float32, shape = [None, INPUT_SIZE], name="x" )
y = tf.placeholder(tf.float32, shape = [None, CLASSES], name="y" )
In [5]:
tensor_map = {x : input_data, y : label_data}

### name 을 이용하여 이름을 명시 해 주는 것이 좋다 !!

In [6]:
weight_hidden_1 = tf.Variable( tf.truncated_normal(shape=[INPUT_SIZE, HIDDEN_SIZE_1]) , dtype=tf.float32, name="weight_hidden_1" )
bias_hidden_1 = tf.Variable( tf.zeros(shape=[HIDDEN_SIZE_1]) , dtype=tf.float32, name = "bias_hidden_1" )
​
weight_hidden_2 = tf.Variable( tf.truncated_normal(shape=[HIDDEN_SIZE_1, HIDDEN_SIZE_2]) , dtype=tf.float32, name="weight_hidden_2" )
bias_hidden_2 = tf.Variable( tf.zeros(shape=[HIDDEN_SIZE_2]) , dtype=tf.float32, name = "bias_hidden_2" )
​
weight_output = tf.Variable( tf.truncated_normal(shape=[HIDDEN_SIZE_2, CLASSES]) , dtype=tf.float32, name="weight_output" )
bias_output = tf.Variable( tf.zeros(shape=[CLASSES]) , dtype=tf.float32, name = "bias_output" )
In [7]:
param_list = [weight_hidden_1, bias_hidden_1, weight_hidden_2, bias_hidden_2, weight_output, bias_output ]
saver = tf.train.Saver(param_list)
In [8]:
hidden_1 = tf.matmul( x , weight_hidden_1 , name="hidden_1" ) + bias_hidden_1
hidden_2 = tf.matmul( hidden_1 , weight_hidden_2, name="hidden_2" ) + bias_hidden_2
answer = tf.matmul( hidden_2 , weight_output, name="answer" ) + bias_output
In [9]:
sess = tf.Session()
# init = tf.global_variables_initializer()
# sess.run(init)

### restore 함수를 사용해보자 !!

global_variables_initializer 가 필요 없음 !! (불러오니까)

In [10]:
saver.restore(sess, './tensorflow_live.ckpt')
result = sess.run(y , tensor_map)
print (result)
[[ 0.  0.  0.  1.  0.]
[ 1.  0.  0.  0.  0.]]