TensorFlow for Machine Intelligence, Part 4: Machine Learning Fundamentals

Posted by CopperDong on 2018-05-25

Code: https://github.com/backstopmedia/tensorflowbook.git

1. A Brief Introduction to Supervised Learning


The high-level, general training loop for a dataflow graph: a common approach is to split the raw dataset in two, using 70% of the samples for training and the remaining 30% for evaluation.
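
A minimal sketch of this split (not from the book's repo), assuming the raw samples are already loaded into NumPy arrays X and y:

import numpy as np

def train_test_split(X, y, train_fraction=0.7, seed=0):
    # Shuffle the sample indices, then cut them 70/30.
    rng = np.random.RandomState(seed)
    indices = rng.permutation(len(X))
    cutoff = int(len(X) * train_fraction)
    return (X[indices[:cutoff]], y[indices[:cutoff]],
            X[indices[cutoff:]], y[indices[cutoff:]])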

2. Saving Training Checkpoints

   Checkpoints let a long training run survive interruptions such as a sudden power failure: variables are saved periodically and restored when training restarts.
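
A minimal sketch of the save/restore pattern (TensorFlow 1.x); train_op and training_steps are assumed to be defined as in the full examples below:

import os
import tensorflow as tf

saver = tf.train.Saver()
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    initial_step = 0
    # Resume from the latest checkpoint in the working directory, if one exists.
    ckpt = tf.train.get_checkpoint_state(os.getcwd())
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = int(ckpt.model_checkpoint_path.rsplit('-', 1)[1])
    for step in range(initial_step, training_steps):
        sess.run([train_op])
        if step % 1000 == 0:
            saver.save(sess, 'my-model', global_step=step)
    saver.save(sess, 'my-model', global_step=training_steps)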

3. Linear Regression

   The goal is to find the linear function that best fits the data:

   y(x1, x2, ... , xk) = w1*x1 + w2*x2 + ... + wk * xk + b

   In matrix (or tensor) form:

       Y = X*W^T + b, where X = (x1, x2, ..., xk) and W = (w1, w2, ..., wk)

   How to measure the loss: use the total squared error,

       loss = Sum((Y - Y_predicted)^2)

def loss(X, Y):
    Y_predicted = inference(X)
    return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))

Dataset: http://people.sc.fsu.edu/~jburkardt/datasets/regression/x09.txt

import tensorflow as tf
W = tf.Variable(tf.zeros([2,1]), name="weights")
b = tf.Variable(0., name="bias")
def inference(X):  # compute the inference model's output on the data X
    return tf.matmul(X, W) + b
def loss(X, Y):
    Y_predicted = inference(X)
    return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))
def inputs():
    weight_age = [ [84,46],[73,20],[65,52],[70,30],[76,57],
                   [69,25],[63,28],[72,36],[79,57],[75,44],
                   [27,24],[89,31],[65,52],[57,23],[59,60],
                   [69,48],[60,34],[79,51],[75,50],[82,34],
                   [59,46],[67,23],[85,37],[55,40],[63,30] ]
    blood_fat_content = [354, 190, 405, 263, 451, 302, 288, 
                         385, 402, 365, 209, 290, 346, 254,
                         395, 434, 220, 374, 308, 220, 311,
                         181, 274, 303, 244]
    return tf.to_float(weight_age), tf.to_float(blood_fat_content)
def train(total_loss):
    learning_rate = 0.0000001
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
def evaluate(sess, X, Y):
    print(sess.run(inference([[80., 25.]])))  # ~ 303
    print(sess.run(inference([[65., 25.]])))  # ~ 256
saver = tf.train.Saver()

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    X, Y = inputs()
    total_loss = loss(X, Y)
    train_op = train(total_loss)
    
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])
        if step % 10 == 0:
            print "loss: ", sess.run([total_loss])
            #saver.save(sess, 'my-model', global_step=step)
            
    evaluate(sess, X, Y)
    
    coord.request_stop()
    coord.join(threads)
    saver.save(sess, 'my-model', global_step=training_steps)

4. Logistic Regression

     The sigmoid function lets the model answer yes/no questions (e.g., is this email spam?).


     Loss function: the logistic function outputs the probability that the answer is "yes", so with squared error the loss is the probability the model assigned to the wrong answer, squared.

     Using the cross-entropy loss function is more effective: the closer the output is to the expected value, the smaller the cross-entropy.
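
A small NumPy sketch (illustrative, not from the book's repo) comparing the two losses on a single predicted probability p when the true label is 1:

import numpy as np

def squared_error(p, y):
    return (p - y) ** 2

def cross_entropy(p, y, eps=1e-12):
    # Standard binary cross-entropy; eps guards against log(0).
    return -(y * np.log(p + eps) + (1 - y) * np.log(1 - p + eps))

for p in (0.9, 0.5, 0.1):
    print(p, squared_error(p, 1.0), cross_entropy(p, 1.0))
# As p moves away from the true label, cross-entropy grows without bound,
# while squared error never exceeds 1, which is why cross-entropy gives a
# stronger training signal for confidently wrong predictions.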


    Dataset: https://www.kaggle.com/c/titanic/data

import tensorflow as tf
import os
import time
W = tf.Variable(tf.zeros([5, 1]), name="weights")
b = tf.Variable(0., name="bias")

def combine_inputs(X):
    return tf.matmul(X, W) + b

def inference(X):
    return tf.sigmoid(combine_inputs(X))

def loss(X, Y):
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=combine_inputs(X), labels=Y))

def read_csv(batch_size, file_name, record_defaults):
    filename_queue = tf.train.string_input_producer([os.path.join(os.getcwd(), file_name)])
    
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)
    
    decoded = tf.decode_csv(value, record_defaults=record_defaults)
    return tf.train.shuffle_batch(decoded, batch_size=batch_size, 
                                  capacity=batch_size*50, min_after_dequeue=batch_size)

def inputs():
    passenger_id, survived, pclass, name, sex, age, sibsp, parch, ticket, fare, cabin, embarked = \
       read_csv(100, "train.csv", [[0.0], [0.0], [0], [""], [""], [0.0], [0.0], [0.0], [""], [0.0], [""], [""]])
    is_first_class = tf.to_float(tf.equal(pclass, [1]))
    is_second_class = tf.to_float(tf.equal(pclass, [2]))
    is_third_class = tf.to_float(tf.equal(pclass, [3]))
    
    gender = tf.to_float(tf.equal(sex, ["female"]))
    
    features = tf.transpose(tf.stack([is_first_class, is_second_class, is_third_class, gender, age]))
    survived = tf.reshape(survived, [100, 1])
    return features, survived
def train(total_loss):
    learning_rate = 0.01
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

def evaluate(sess, X, Y):
    predicted = tf.cast(inference(X) > 0.5, tf.float32)
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))))
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    X, Y = inputs()
    
    total_loss = loss(X, Y)
    train_op = train(total_loss)
    
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])
        if step % 10 == 0:
            print "loss: ", sess.run([total_loss])
    evaluate(sess, X, Y)
    
    time.sleep(5)  # give the queue-runner threads a moment before shutdown
    
    coord.request_stop()
    coord.join(threads)

5. Softmax Classification

     To answer questions that have multiple possible options, use softmax.
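
A quick numerical sketch of what softmax computes: each logit is exponentiated, and the results are normalized so the C outputs form a probability distribution over the classes.

import numpy as np

def softmax(logits):
    # Subtract the max for numerical stability; the result is unchanged.
    shifted = logits - np.max(logits)
    exps = np.exp(shifted)
    return exps / np.sum(exps)

print(softmax(np.array([2.0, 1.0, 0.1])))  # ~ [0.659, 0.242, 0.099], sums to 1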


     Dataset: https://archive.ics.uci.edu/ml/datasets/Iris, with 4 features and 3 possible output classes.

import tensorflow as tf
import os
import sys
import time
W = tf.Variable(tf.zeros([4,3]), name="weights")
b = tf.Variable(tf.zeros([3]), name="bias")

def combine_inputs(X):
    return tf.matmul(X, W) + b

def inference(X):
    return tf.nn.softmax(combine_inputs(X))

def loss(X, Y):
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=combine_inputs(X), labels=Y))

def read_csv(batch_size, file_name, record_defaults):
    filename_queue = tf.train.string_input_producer([os.path.dirname(os.path.abspath(sys.argv[0])) + "/" + file_name])
    
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)
    
    decoded = tf.decode_csv(value, record_defaults=record_defaults)
    return tf.train.shuffle_batch(decoded, 
                                  batch_size=batch_size, 
                                  capacity=batch_size*50, 
                                  min_after_dequeue=batch_size)

def inputs():
    sepal_length, sepal_width, petal_length, petal_width, label = \
    read_csv(100, "iris.data", [[0.0], [0.0], [0.0], [0.0], [""]])

    label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.stack([
        tf.equal(label, ["Iris-setosa"]),
        tf.equal(label, ["Iris-versicolor"]),
        tf.equal(label, ["Iris-virginica"]),
    ]))))
    
    features = tf.transpose(tf.stack([sepal_length, sepal_width, petal_length, petal_width]))
    return features, label_number
def train(total_loss):
    learning_rate = 0.01
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

def evaluate(sess, X, Y):
    predicted = tf.cast(tf.argmax(inference(X), 1), tf.int32)
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))))
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    X, Y = inputs()
    
    total_loss = loss(X, Y)
    train_op = train(total_loss)
    
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])
        if step % 10 == 0:
            print "loss: ", sess.run([total_loss])
    evaluate(sess, X, Y)
    
    time.sleep(5)  # give the queue-runner threads a moment before shutdown
    
    coord.request_stop()
    coord.join(threads)

6. Multilayer Neural Networks

    Linear regression and logistic regression are both essentially a single neuron: a weighted sum of the inputs passed through an activation function (the identity or the sigmoid, respectively).

    Softmax classification is a network of C such neurons, one per output class.

    A network for the XOR operation: XOR is not linearly separable, so a single neuron cannot compute it, but adding a hidden layer makes it learnable (see the sketch below).
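
A minimal sketch (not from the book's repo) of a one-hidden-layer network that learns XOR, in the same TensorFlow 1.x style as the examples above; convergence depends on the random initialization, so a different seed or learning rate may be needed:

import tensorflow as tf

X = tf.constant([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
Y = tf.constant([[0.], [1.], [1.], [0.]])

# Hidden layer of 2 sigmoid neurons, then a single sigmoid output neuron.
W1 = tf.Variable(tf.random_normal([2, 2]), name="hidden_weights")
b1 = tf.Variable(tf.zeros([2]), name="hidden_bias")
W2 = tf.Variable(tf.random_normal([2, 1]), name="output_weights")
b2 = tf.Variable(tf.zeros([1]), name="output_bias")

hidden = tf.sigmoid(tf.matmul(X, W1) + b1)
output = tf.sigmoid(tf.matmul(hidden, W2) + b2)

loss = tf.reduce_mean(tf.squared_difference(Y, output))
train_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for step in range(5000):
        sess.run(train_op)
    print(sess.run(output))  # should approach [[0], [1], [1], [0]]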

7. Gradient Descent and Error Backpropagation

    Gradient descent searches for extrema (minima) of the loss function. The gradients themselves can be obtained with the tf.gradients method.

    Backpropagation is an efficient technique for computing gradients in a dataflow graph.
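
A small sketch of tf.gradients on a toy loss; the variable names here are illustrative only:

import tensorflow as tf

w = tf.Variable(3.0, name="w")
loss = tf.square(w - 1.0)          # loss = (w - 1)^2
grad = tf.gradients(loss, [w])[0]  # symbolic d(loss)/dw = 2 * (w - 1)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    print(sess.run(grad))  # 4.0, since 2 * (3.0 - 1.0) = 4.0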
