Support Vector Machines with TensorFlow

4.1 Using a Linear Support Vector Machine

What is the support vector machine algorithm?
A support vector machine (SVM) is a binary classification algorithm. Its basic idea is to find a line (or hyperplane) that linearly separates the two classes. We assume the binary targets are -1 or 1, instead of the 0 or 1 targets used in earlier chapters. Many lines can separate the two classes, but we define the best linear classifier as the one that separates them with the largest margin.
To implement a soft-margin SVM in TensorFlow, we need a special loss function:

$$
\text{loss} = \frac{1}{n}\sum_{i=1}^{n}\max\bigl(0,\ 1 - y_i\,(A \cdot x_i - b)\bigr) + \alpha\,\lVert A \rVert^{2}
$$

where A is the slope vector, b is the intercept, x_i is the input vector, y_i is the actual class (-1 or 1), and α is the regularization parameter of the soft-margin classifier.
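As a quick sanity check on the formula above (not part of the original recipe), the same loss can be evaluated directly with NumPy; the sample points and the values of A, b, and alpha below are made up purely for illustration:

    import numpy as np

    # Hypothetical values, chosen only to illustrate the soft-margin loss formula.
    A = np.array([[1.0], [-2.0]])            # slope vector (2 features)
    b = 0.5                                   # intercept
    alpha = 0.1                               # regularization strength
    X = np.array([[5.1, 0.2], [6.4, 1.5]])    # two sample points (sepal length, petal width)
    y = np.array([[1.0], [-1.0]])             # labels in {-1, +1}

    margins = y * (X.dot(A) - b)              # y_i * (A . x_i - b)
    hinge = np.maximum(0.0, 1.0 - margins)    # hinge loss per point
    loss = hinge.mean() + alpha * np.sum(A ** 2)
    print(loss)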

  1. Still using the iris dataset, import the required libraries

    import matplotlib.pyplot as plt
    import numpy as np
    import tensorflow as tf
    from sklearn import datasets
    
  2. Create a session and load the dataset

    sess = tf.Session()
    iris = datasets.load_iris()
    x_vals = np.array([[x[0], x[3]] for x in iris.data])
    y_vals = np.array([1 if y==0 else -1 for y in iris.target ])
    
  3. Split the dataset into training and test sets

    train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
    test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
    x_vals_train = x_vals[train_indices]
    x_vals_test = x_vals[test_indices]
    y_vals_train = y_vals[train_indices]
    y_vals_test = y_vals[test_indices]
    
    
  4. Set the batch size and declare the placeholders and model variables

    batch_size = 25
    
    x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
    A = tf.Variable(tf.random_normal(shape=[2, 1]))
    b = tf.Variable(tf.random_normal(shape=[1, 1]))
    
  5. Declare the model output

    model_output = tf.subtract(tf.matmul(x_data, A), b)
    
  6. Declare the loss function

    l2_norm = tf.reduce_sum(tf.square(A))
    alpha = tf.constant([0.1])
    classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
    loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
    
  7. Declare the prediction and accuracy functions

    prediction = tf.sign(model_output)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
    
  8. Declare the optimizer and initialize the model

    my_optimize = tf.train.GradientDescentOptimizer(0.01)
    train_step = my_optimize.minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)
    
  9. Run the training loop

    loss_vec = []
    train_accuracy = []
    test_accuracy = []
    for i in range(2000):
        rand_index = np.random.choice(len(x_vals_train), size=batch_size)
        rand_x = x_vals_train[rand_index]
        rand_y = np.transpose([y_vals_train[rand_index]])
        sess.run(train_step, feed_dict={x_data:rand_x, y_target:rand_y})
        temp_loss = sess.run(loss, feed_dict={x_data:rand_x, y_target:rand_y})
        loss_vec.append(temp_loss)
        train_acc_temp = sess.run(accuracy, feed_dict={x_data:x_vals_train, y_target:np.transpose([y_vals_train])})
        train_accuracy.append(train_acc_temp)
        test_acc_temp = sess.run(accuracy, feed_dict={x_data:x_vals_test, y_target:np.transpose([y_vals_test])})
        test_accuracy.append(test_acc_temp)
        if(i+1)%50==0:
            print("Step #" + str(i+1) + " A = "+ str(sess.run(A)) + " b = " + str(sess.run(b)))
            print("Loss = "+ str(temp_loss))
    
  10. Extract the coefficients and plot the results

    [[a1], [a2]] = sess.run(A)
    [[b]] = sess.run(b)
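    # The learned boundary satisfies a1*(sepal length) + a2*(petal width) - b = 0, so
    # solving for sepal length gives slope = -a2/a1 and intercept = b/a1, which are
    # used below to draw the separating line over petal width.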
    slope = -a2/a1
    y_intercept = b/a1
    
    x1_vals = [d[1] for d in x_vals]
    best_fit = []
    for i in x1_vals:
        best_fit.append(slope*i+y_intercept)
    
    setosa_x = [d[1] for i,d in enumerate(x_vals) if y_vals[i] == 1]
    setosa_y = [d[0] for i,d in enumerate(x_vals) if y_vals[i] == 1]
    not_setosa_x = [d[1] for i,d in enumerate(x_vals) if y_vals[i] == -1]
    not_setosa_y = [d[0] for i,d in enumerate(x_vals) if y_vals[i] == -1]
    # Linear SVM fit
    plt.plot(setosa_x, setosa_y, 'o', label='I. setosa')
    plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non - setosa')
    plt.plot(x1_vals, best_fit, 'r-', label='Linear Separator', linewidth=3)
    plt.ylim([0, 10])
    plt.legend(loc='lower right')
    plt.title('Sepal Length vs Petal Width')
    plt.xlabel('Petal Width')
    plt.ylabel('Sepal Length')
    plt.show()
    # Accuracies on the training and test sets
    plt.plot(train_accuracy, 'k-', label='Training Accuracy')
    plt.plot(test_accuracy, 'r--', label='Test Accuracy')
    plt.title("Train and Test Set Accuracies")
    plt.xlabel('Generation')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower right')
    plt.show()
    # Loss curve
    plt.plot(loss_vec, 'k-')
    plt.title('Loss per Generation')
    plt.xlabel('Generation')
    plt.ylabel("Loss")
    plt.show()
    
    ![Linear SVM separator: sepal length vs. petal width](https://img-blog.csdnimg.cn/34fd361192e842b29a07e9a15d9117d9.png)

    (Figure: train and test set accuracies per generation)
    (Figure: loss per generation)

4.2 Reduction to Linear Regression

A support vector machine can also be used to fit a linear regression.
The same maximum-margin idea carries over to regression: instead of maximizing the margin that separates two classes, we maximize the width of a margin that contains as many of the data points (x, y) as possible. Here we fit a line to the iris sepal length as a function of petal width.
The corresponding loss function is similar to

$$
\text{loss} = \frac{1}{n}\sum_{i=1}^{n}\max\bigl(0,\ \lvert y_i - (A \cdot x_i + b) \rvert - \varepsilon\bigr)
$$

where ε is half the width of the margin, so a data point lying inside that margin contributes zero loss.
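A minimal NumPy illustration of this ε-insensitive loss (the points, slope, and intercept below are invented for the example): residuals smaller than ε contribute nothing, larger residuals are penalized linearly.

    import numpy as np

    epsilon = 0.5
    slope, intercept = 1.0, 4.5               # hypothetical fitted line
    x = np.array([0.2, 1.3, 2.5])             # petal widths
    y = np.array([5.0, 6.5, 5.9])             # sepal lengths
    residuals = np.abs(y - (slope * x + intercept))
    loss = np.maximum(0.0, residuals - epsilon).mean()
    print(loss)                               # points inside the epsilon tube contribute 0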

  1. Import the required libraries, load the data, and split the dataset

    import matplotlib.pyplot as plt
    import numpy as np
    import tensorflow as tf
    from sklearn import datasets

    sess = tf.Session()
    iris = datasets.load_iris()
    x_vals = np.array([x[3] for x in iris.data])
    y_vals = np.array([y[0] for y in iris.data])
    train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
    test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
    x_vals_train = x_vals[train_indices]
    x_vals_test = x_vals[test_indices]
    y_vals_train = y_vals[train_indices]
    y_vals_test = y_vals[test_indices]
    
  2. Declare the batch size, placeholders, and variables, and create the linear model

    batch_size = 25
    x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
    y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
    
    A = tf.Variable(tf.random_normal(shape=[1, 1]))
    b = tf.Variable(tf.random_normal(shape=[1, 1]))
    
    model_out = tf.add(tf.matmul(x_data, A), b)
    
  3. Declare the loss function

    epsilon = tf.constant([0.5])
    loss = tf.reduce_mean(tf.maximum(0., tf.subtract(tf.abs(tf.subtract(model_out, y_target)), epsilon)))
    
  4. Create the optimizer and initialize the variables

    my_optimizer = tf.train.GradientDescentOptimizer(0.075)
    train_step = my_optimizer.minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)
    
  5. Run the training loop

    train_loss = []
    test_loss = []
    for i in range(200):
        rand_index = np.random.choice(len(x_vals_train), size=batch_size)
        rand_x = np.transpose([x_vals_train[rand_index]])
        rand_y = np.transpose([y_vals_train[rand_index]])
        sess.run(train_step, feed_dict={x_data:rand_x, y_target:rand_y})
        temp_train_loss = sess.run(loss, feed_dict={x_data:np.transpose([x_vals_train]), y_target:np.transpose([y_vals_train])})
        train_loss.append(temp_train_loss)
        temp_test_loss = sess.run(loss, feed_dict={x_data:np.transpose([x_vals_test]), y_target:np.transpose([y_vals_test])})
        test_loss.append(temp_test_loss)
        if (i+1)%25==0:
            print("Step #" + str(i+1) + " A = "+ str(sess.run(A)) + " b = " + str(sess.run(b)))
            print("Train Loss = " + str(temp_train_loss))
            print("Test Loss = " +str(temp_test_loss))
    


  6. Extract the coefficients and plot the results

    [[slope]] = sess.run(A)
    [[y_intercept]] = sess.run(b)
    [width] = sess.run(epsilon)
    best_fit = []
    best_fit_upper = []
    best_fit_lower = []
    for i in x_vals:
        best_fit.append(slope*i+y_intercept)
        best_fit_upper.append(slope*i+y_intercept + width)
        best_fit_lower.append(slope*i+y_intercept - width)
    
    plt.plot(x_vals, y_vals, 'o', label='Data Points')
    plt.plot(x_vals, best_fit, 'r-', label='SVM Regression Line', linewidth=3)
    plt.plot(x_vals, best_fit_upper, 'r--', linewidth=2)
    plt.plot(x_vals, best_fit_lower, 'r--', linewidth=2)
    plt.ylim([0, 10])
    plt.legend(loc='lower right')
    plt.title('Sepal Length vs Petal Width')
    plt.xlabel('Petal Width')
    plt.ylabel('Sepal Length')
    plt.show()
    
    plt.plot(train_loss, 'k-', label='Training Set Loss')
    plt.plot(test_loss, 'r--', label='Test Set Loss')
    plt.title("L2 Loss per Generation")
    plt.xlabel('Generation')
    plt.ylabel('L2 Loss')
    plt.legend(loc='upper right')
    plt.show()
    
    

    (Figure: SVM regression line with the ε margin, sepal length vs. petal width)

    (Figure: training and test set loss per generation)

4.3 Using Kernel Functions in TensorFlow

The SVM of the previous section separates linearly separable data, but it can also separate nonlinearly separable data if we change the kernel used in the SVM loss function.
In this section we generate a dataset of two concentric circles, which no linear separator can split, and separate the two classes with a Gaussian (nonlinear) kernel.
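As a rough illustration of what the kernel computes (the two points below are made up), the Gaussian kernel turns the squared distance between two samples into a similarity in (0, 1], so nearby points score close to 1 and distant points close to 0:

    import numpy as np

    def rbf_kernel(x1, x2, gamma=50.0):
        """Gaussian (RBF) kernel: exp(-gamma * ||x1 - x2||^2)."""
        return np.exp(-gamma * np.sum((x1 - x2) ** 2))

    inner = np.array([0.05, 0.02])   # a point near the inner circle
    outer = np.array([0.95, 0.10])   # a point near the outer circle
    print(rbf_kernel(inner, inner))  # 1.0 for identical points
    print(rbf_kernel(inner, outer))  # ~0 for points far apart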

  1. Import the modules and create a graph session

    from tensorflow.python.framework import ops
    ops.reset_default_graph()
    sess = tf.Session()
    
  2. Generate the simulated data

    (x_vals, y_vals) = datasets.make_circles(n_samples=500, factor=.5, noise=.1)
    y_vals = np.array([1 if y==1 else -1 for y in y_vals])
    class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i] == 1]
    class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i] == 1]
    class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i] == -1]
    class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i] == -1]
    
    
  3. Declare the batch size and placeholders, and create the model variable

    batch_size = 500
    x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
    prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    b = tf.Variable(tf.random_normal(shape=[1, batch_size]))
    
  4. Create the Gaussian kernel

    gamma = tf.constant(-50.0)
    dist = tf.reduce_sum(tf.square(x_data), 1)
    dist = tf.reshape(dist, [-1, 1])
    sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))),tf.transpose(dist))
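    # sq_dists holds ||xi - xj||^2 for every pair in the batch, via the expansion
    # ||xi||^2 - 2*xi.xj + ||xj||^2; the next line turns it into the RBF kernel
    # matrix exp(gamma * ||xi - xj||^2), with gamma negative.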
    my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
    
  5. Create the loss function

    model_output = tf.matmul(b, my_kernel)
    first_term = tf.reduce_sum(b)
    b_vec_cross = tf.matmul(tf.transpose(b), b)
    y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
    second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
    loss = tf.negative(tf.subtract(first_term, second_term))
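    # Note: this is the negative of a simplified kernel SVM dual objective,
    #   sum_i b_i - sum_{i,j} b_i * b_j * y_i * y_j * K(x_i, x_j),
    # so running gradient descent on `loss` approximately maximizes that dual.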
    
  6. Create the prediction and accuracy functions

    rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
    pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
    pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
    prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target), b),pred_kernel)
    prediction = tf.sign(prediction_output - tf.reduce_mean(prediction_output))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))
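    # pred_kernel compares every training point with every prediction point; subtracting
    # the mean of prediction_output before taking the sign is a rough stand-in for the
    # bias term in this formulation.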
    
  7. Create the optimizer and initialize the variables

    my_optimize = tf.train.GradientDescentOptimizer(0.001)
    train_step = my_optimize.minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)
    
  8. Run the training loop

    loss_vec = []
    batch_accuracy = []
    for i in range(500):
        rand_index = np.random.choice(len(x_vals), size=batch_size)
        rand_x = x_vals[rand_index]
        rand_y = np.transpose([y_vals[rand_index]])
        sess.run(train_step, feed_dict={x_data: rand_x, y_target:rand_y})
        temp_loss = sess.run(loss, feed_dict={x_data:rand_x, y_target:rand_y})
        loss_vec.append(temp_loss)
        acc_temp = sess.run(accuracy, feed_dict={x_data:rand_x, y_target:rand_y, prediction_grid:rand_x})
        batch_accuracy.append(acc_temp)
        if(i+1)%100==0:
            print("Step #" + str(i+1))
            print("Loss = " + str(temp_loss))
    
  9. Plot the results

    x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
    y_min, y_max = x_vals[:, 1].min() -1, x_vals[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    [grid_predictions] = sess.run(prediction, feed_dict={x_data:x_vals, y_target:np.transpose([y_vals]), prediction_grid:grid_points})
    grid_predictions = grid_predictions.reshape(xx.shape)
    
    plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8)
    plt.plot(class1_x, class1_y, 'ro', label='Class 1')
    plt.plot(class2_x, class2_y, 'kx', label='Class -1')
    plt.legend(loc='lower right')
    plt.ylim([-1.5, 1.5])
    plt.xlim([-1.5, 1.5])
    plt.show()
    
    plt.plot(batch_accuracy, 'k-', label='Accuracy')
    plt.title("Batch Accuracy")
    plt.xlabel("Generation")
    plt.ylabel("Accuracy")
    plt.legend(loc='lower right')
    plt.show()
    
    plt.plot(loss_vec, 'k-')
    plt.title("Loss per Generation")
    plt.xlabel('Generation')
    plt.ylabel("Loss")
    plt.show()
    

    (Figure: Gaussian kernel SVM decision boundary on the concentric-circles data)
    (Figure: batch accuracy per generation)
    (Figure: loss per generation)

4.4 Implementing a Nonlinear SVM with TensorFlow

Here we use a nonlinear kernel on a real dataset: the Gaussian kernel SVM from the previous section is applied to the iris data to build a classifier for the I. setosa species.

  1. Import the modules and create a session

    import tensorflow as tf
    from sklearn import datasets
    import numpy as np
    import matplotlib.pyplot as plt
    sess = tf.Session()
    
  2. Load and prepare the dataset

    iris = datasets.load_iris()
    x_vals = np.array([[x[0], x[3]] for x in iris.data])
    y_vals = np.array([1 if y==0 else -1 for y in iris.target])
    class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i] == 1]
    class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i] == 1]
    class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i] == -1]
    class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i] == -1]
    
  3. Declare the batch size, placeholders, and model variable

    batch_size = 150
    
    x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
    prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    
    b = tf.Variable(tf.random_normal(shape=[1, batch_size]))
    
  4. Declare the Gaussian kernel and the loss function

    gamma = tf.constant(-10.0)
    dist = tf.reduce_sum(tf.square(x_data), 1)
    dist = tf.reshape(dist, [-1, 1])
    sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
    my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
    model_output = tf.matmul(b, my_kernel)
    first_term = tf.reduce_sum(b)
    b_vec_cross = tf.matmul(tf.transpose(b), b)
    y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
    second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
    loss = tf.negative(tf.subtract(first_term, second_term))
    
  5. Declare the prediction kernel and the accuracy function

    rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
    pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
    pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
    prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target), b), pred_kernel)
    prediction = tf.sign(prediction_output - tf.reduce_mean(prediction_output))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))
    
  6. Declare the optimizer and initialize the variables

    my_optimize = tf.train.GradientDescentOptimizer(0.01)
    train_step = my_optimize.minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)
    
  7. Run the training loop

    loss_vec = []
    batch_accuracy = []
    for i in range(1500):
        rand_index = np.random.choice(len(x_vals), size=batch_size)
        rand_x = x_vals[rand_index]
        rand_y = np.transpose([y_vals[rand_index]])
        sess.run(train_step, feed_dict={x_data:rand_x, y_target:rand_y})
        temp_loss = sess.run(loss, feed_dict={x_data:rand_x, y_target:rand_y})
        loss_vec.append(temp_loss)
        acc_temp = sess.run(accuracy, feed_dict={x_data:rand_x,y_target:rand_y, prediction_grid:rand_x})
        batch_accuracy.append(acc_temp)
        if(i+1)%50==0:
            print("Step #" + str(i+1))
            print("Loss = "+ str(temp_loss))
    
     ![Training output](https://img-blog.csdnimg.cn/5a2336dbfd5a48b682b157b715343559.png)
    
  8. Create a prediction grid

    x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
    y_min, y_max = x_vals[:, 1].min() -1, x_vals[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    [grid_predictions] = sess.run(prediction, feed_dict={x_data:x_vals, y_target:np.transpose([y_vals]), prediction_grid:grid_points})
    grid_predictions = grid_predictions.reshape(xx.shape)
    
  9. Plot the results

    plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8)
    plt.plot(class1_x, class1_y, 'ro', label="I.setosa")
    plt.plot(class2_x, class2_y, 'kx', label='Non-setosa')
    plt.xlabel("Sepal Length")
    plt.ylabel("Petal Width")
    plt.legend(loc='lower right')
    plt.ylim([-0.5, 3.0])
    plt.xlim([4, 8])
    plt.show()
    

    (Figure: Gaussian kernel SVM decision region for I. setosa vs. non-setosa)

4.5 Implementing a Multiclass SVM with TensorFlow

The SVM algorithm was originally designed for binary classification, but several strategies extend it to multiple classes; the main ones are one-vs-one and one-vs-rest.
One-vs-one: build a binary classifier for every pair of classes, and predict the class that collects the most votes. With many classes this requires n!/((n-2)!2!) = n(n-1)/2 classifiers, so the computational cost grows quickly.
One-vs-rest: build one classifier per class; the predicted class is the one with the largest SVM margin.
This section uses the one-vs-rest strategy.
The iris dataset contains three species, I. setosa, I. versicolor, and I. virginica, so we create three Gaussian kernel SVMs and predict with the one that responds most strongly, as sketched below.
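A minimal sketch of the one-vs-rest decision rule (the scores below are invented): each of the three classifiers scores every sample, and the class whose classifier responds most strongly is the prediction.

    import numpy as np

    # Hypothetical decision values from the three one-vs-rest classifiers
    # (rows: setosa, versicolor, virginica; columns: samples).
    scores = np.array([[ 1.2, -0.7],
                       [-0.3,  0.4],
                       [-0.9,  0.9]])
    predicted_class = np.argmax(scores, axis=0)
    print(predicted_class)   # [0 2]: first sample -> setosa, second -> virginica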

  1. Import the libraries and create the graph session

    import tensorflow as tf
    from sklearn import datasets
    import numpy as np
    import matplotlib.pyplot as plt
    
    sess = tf.Session()
    
  2. Load the data and separate the targets for each class

    iris = datasets.load_iris()
    x_vals = np.array([[x[0], x[3]] for x in iris.data])
    y_vals1 = np.array([1 if y == 0 else -1 for y in iris.target])
    y_vals2 = np.array([1 if y == 1 else -1 for y in iris.target])
    y_vals3 = np.array([1 if y == 2 else -1 for y in iris.target])
    y_vals = np.array([y_vals1, y_vals2, y_vals3])
    class1_x = [x[0] for i, x in enumerate(x_vals) if iris.target[i] == 0]
    class1_y = [x[1] for i, x in enumerate(x_vals) if iris.target[i] == 0]
    class2_x = [x[0] for i, x in enumerate(x_vals) if iris.target[i] == 1]
    class2_y = [x[1] for i, x in enumerate(x_vals) if iris.target[i] == 1]
    class3_x = [x[0] for i, x in enumerate(x_vals) if iris.target[i] == 2]
    class3_y = [x[1] for i, x in enumerate(x_vals) if iris.target[i] == 2]
    
  3. Declare the batch size, placeholders, and variables

    batch_size = 50
    
    x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    y_target = tf.placeholder(shape=[3, None], dtype=tf.float32)
    prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    
    b= tf.Variable(tf.random_normal(shape=[3, batch_size]))
    
  4. Declare the Gaussian kernel

    gamma = tf.constant(-15.0)
    dist = tf.reduce_sum(tf.square(x_data), 1)
    dist = tf.reshape(dist, [-1, 1])
    sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
    my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
    
  5. Expand the matrix dimensions for batched multiplication of the targets

    def reshape_matmul(mat):
        v1 = tf.expand_dims(mat, 1)
        v2 = tf.reshape(v1, [3, batch_size, 1])
        return tf.matmul(v2, v1)
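    # For y_target of shape [3, batch_size], reshape_matmul returns a tensor of shape
    # [3, batch_size, batch_size] whose k-th slice is the outer product y_k * y_k^T,
    # i.e. the y_i * y_j term of the k-th one-vs-rest classifier's dual loss.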
    
  6. Compute the dual loss function

    model_output = tf.matmul(b, my_kernel)
    first_term = tf.reduce_sum(b)
    b_vec_cross = tf.matmul(tf.transpose(b), b)
    y_target_cross = reshape_matmul(y_target)
    
    second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)), [1, 2])
    loss = tf.reduce_sum(tf.negative(tf.subtract(first_term, second_term)))
    
  7. Create the prediction kernel

    rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
    pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
    pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
    
  8. Create the prediction function from the prediction kernel

    prediction_output = tf.matmul(tf.multiply(y_target, b), pred_kernel)
    prediction = tf.argmax(prediction_output - tf.expand_dims(tf.reduce_mean(prediction_output, 1), 1), 0)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.argmax(y_target, 0)), tf.float32))
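    # Each row of prediction_output holds one one-vs-rest classifier's decision values;
    # after centering each row, tf.argmax over the class dimension picks the classifier
    # with the strongest response as the predicted class.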
    
  9. Declare the optimizer and initialize the variables

    my_optimizer = tf.train.GradientDescentOptimizer(0.01)
    train_step = my_optimizer.minimize(loss)
    init = tf.global_variables_initializer()
    sess.run(init)
    
  10. Run the training loop

    loss_vec = []
    accuracy_vec = []
    for i in range(100):
        rand_index = np.random.choice(len(x_vals), size=batch_size)
        rand_x = x_vals[rand_index]
        rand_y = y_vals[:, rand_index]
        _, loss_temp, acc_temp = sess.run([train_step, loss, accuracy], feed_dict={x_data:rand_x, y_target:rand_y, prediction_grid:rand_x})
        loss_vec.append(loss_temp)
        accuracy_vec.append(acc_temp)
        if (i+1) %20 ==0:
            print("Step #" + str(i+1))
            print("Loss = " + str(loss_temp))
    
  11. Create a prediction grid over the data points and run the prediction function

    x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
    y_min, y_max = x_vals[:, 1].min() -1, x_vals[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    grid_predictions = sess.run(prediction, feed_dict={x_data:rand_x, y_target:rand_y, prediction_grid:grid_points})
    grid_predictions = grid_predictions.reshape(xx.shape)
    
    
  12. Plot the training results

    plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8)
    plt.plot(class1_x, class1_y, 'ro', label="I. setosa")
    plt.plot(class2_x, class2_y, 'rx', label='I. versicolor')
    plt.plot(class3_x, class3_y, 'gv', label='I. virginica')
    plt.xlabel('Sepal Length')
    plt.ylabel('Petal Width')
    plt.show()
    

    (Figure: multiclass SVM decision regions on the iris data)
