Machine Learning, Chapter 5: Support Vector Machines
Reference: the author's Jupyter Notebook, "Chapter 5 – Support Vector Machines".

A Support Vector Machine (SVM) is a powerful and versatile machine learning model, capable of performing linear and nonlinear classification, regression, and even outlier detection. It is one of the most popular models in machine learning, and anyone interested in the field should have it in their toolbox. SVMs are particularly well suited to classifying complex small- and medium-sized datasets.
- Saving the figures
from __future__ import division, print_function, unicode_literals
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os

np.random.seed(42)
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "images"
CHAPTER_ID = "traininglinearmodels"

def save_fig(fig_id, tight_layout=True):
    path = os.path.join(PROJECT_ROOT_DIR, CHAPTER_ID, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=600)
Linear SVM Classification
- Load the iris dataset, scale the features, and then train a linear SVM model (using the LinearSVC class with C=0.1 and the hinge loss function, introduced shortly) to detect Iris-Virginica flowers. (Note: a minimal sketch of that LinearSVC model is given first; the code block that follows instead trains a hard-margin SVC(kernel="linear", C=float("inf")) on the Setosa/Versicolor subset, to contrast a large-margin classifier with several poor models.)
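A minimal sketch of the model described above, assuming C=0.1 as stated in the text; the pipeline layout and the variable names (x_iris, y_virginica, virginica_clf) are illustrative, not the book's exact code:

from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
import numpy as np

iris = datasets.load_iris()
x_iris = iris["data"][:, (2, 3)]                        # petal length, petal width
y_virginica = (iris["target"] == 2).astype(np.float64)  # 1.0 for Iris-Virginica

virginica_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("linear_svc", LinearSVC(C=0.1, loss="hinge")),
])
virginica_clf.fit(x_iris, y_virginica)

print(virginica_clf.predict([[5.5, 1.7]]))  # a large flower, expected to be classified as Virginica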
from sklearn.svm import SVC
from sklearn import datasets

iris = datasets.load_iris()
x = iris["data"][:, (2, 3)]  # petal length, petal width
y = iris["target"]

setosa_or_versicolor = (y == 0) | (y == 1)
x = x[setosa_or_versicolor]
y = y[setosa_or_versicolor]

# SVM classifier model (hard margin: C set to infinity)
svm_clf = SVC(kernel="linear", C=float("inf"))
print(svm_clf.fit(x, y))

# Bad models for comparison
x0 = np.linspace(0, 5.5, 200)
pred_1 = 5 * x0 - 20
pred_2 = x0 - 1.8
pred_3 = 0.1 * x0 + 0.5

def plot_svc_decision_boundary(svm_clf, xmin, xmax):
    w = svm_clf.coef_[0]
    b = svm_clf.intercept_[0]
    # At the decision boundary, w0*x0 + w1*x1 + b = 0
    # => x1 = -w0/w1 * x0 - b/w1
    x0 = np.linspace(xmin, xmax, 200)
    decision_boundary = -w[0]/w[1] * x0 - b/w[1]
    margin = 1/w[1]
    gutter_up = decision_boundary + margin
    gutter_down = decision_boundary - margin
    svs = svm_clf.support_vectors_
    plt.scatter(svs[:, 0], svs[:, 1], s=180, facecolors='#FFAAAA')
    plt.plot(x0, decision_boundary, "k-", linewidth=2)
    plt.plot(x0, gutter_up, "k--", linewidth=2)
    plt.plot(x0, gutter_down, "k--", linewidth=2)

plt.figure(figsize=(12, 2.7))

plt.subplot(121)
plt.plot(x0, pred_1, "g--", linewidth=2)
plt.plot(x0, pred_2, "m-", linewidth=2)
plt.plot(x0, pred_3, "r-", linewidth=2)
plt.plot(x[:, 0][y==1], x[:, 1][y==1], "bs", label="Iris-Versicolor")
plt.plot(x[:, 0][y==0], x[:, 1][y==0], "yo", label="Iris-Setosa")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 5.5, 0, 2])

plt.subplot(122)
plot_svc_decision_boundary(svm_clf, 0, 5.5)
plt.plot(x[:, 0][y==1], x[:, 1][y==1], "bs")
plt.plot(x[:, 0][y==0], x[:, 1][y==0], "yo")
plt.xlabel("Petal length", fontsize=14)
plt.axis([0, 5.5, 0, 2])

save_fig("large_margin_classification_plot")  # poor margins vs. large margin comparison
plt.show()
Nonlinear SVM Classification
- Adding features to make the dataset linearly separable
x1d = np.linspace(-4, 4, 9).reshape(-1, 1)
x2d = np.c_[x1d, x1d**2]
y = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])

plt.figure(figsize=(11, 4))

plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.plot(x1d[:, 0][y==0], np.zeros(4), "bs")
plt.plot(x1d[:, 0][y==1], np.zeros(5), "g^")
plt.gca().get_yaxis().set_ticks([])
plt.xlabel(r"$x_1$", fontsize=20)
plt.axis([-4.5, 4.5, -0.2, 0.2])

plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(x2d[:, 0][y==0], x2d[:, 1][y==0], "bs")
plt.plot(x2d[:, 0][y==1], x2d[:, 1][y==1], "g^")
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
plt.gca().get_yaxis().set_ticks([0, 4, 8, 12, 16])
plt.plot([-4.5, 4.5], [6.5, 6.5], "r--", linewidth=3)
plt.axis([-4.5, 4.5, -1, 17])

plt.subplots_adjust(right=1)

save_fig("higher_dimensions_plot", tight_layout=False)
plt.show()
- To implement this idea with Scikit-Learn, build a pipeline: a PolynomialFeatures transformer, followed by a StandardScaler, and then a LinearSVC. Let's test it on the moons dataset.
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.svm import LinearSVC
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline

x, y = make_moons(n_samples=100, noise=0.15, random_state=42)

def plot_dataset(x, y, axes):
    plt.plot(x[:, 0][y==0], x[:, 1][y==0], "bs")
    plt.plot(x[:, 0][y==1], x[:, 1][y==1], "g^")
    plt.axis(axes)
    plt.grid(True, which='both')
    plt.xlabel(r"$x_1$", fontsize=20)
    plt.ylabel(r"$x_2$", fontsize=20, rotation=0)

#plot_dataset(x, y, [-1.5, 2.5, -1, 1.5])
#plt.show()

polynomial_svm_clf = Pipeline([
    ("poly_features", PolynomialFeatures(degree=3)),
    ("scaler", StandardScaler()),
    ("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(x, y)

def plot_predictions(clf, axes):
    x0s = np.linspace(axes[0], axes[1], 100)
    x1s = np.linspace(axes[2], axes[3], 100)
    x0, x1 = np.meshgrid(x0s, x1s)
    x_grid = np.c_[x0.ravel(), x1.ravel()]
    y_pred = clf.predict(x_grid).reshape(x0.shape)
    y_decision = clf.decision_function(x_grid).reshape(x0.shape)
    plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.2)
    plt.contourf(x0, x1, y_decision, cmap=plt.cm.brg, alpha=0.1)

plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(x, y, [-1.5, 2.5, -1, 1.5])

save_fig("moons_polynomial_svc_plot")
plt.show()
- Polynomial kernel
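As a quick reference (this formula is standard for scikit-learn's polynomial kernel, not part of the original post), SVC(kernel="poly", degree=d, coef0=r) computes

$K(\mathbf{a}, \mathbf{b}) = \left(\gamma\, \mathbf{a}^{\mathsf{T}} \mathbf{b} + r\right)^{d}$

so coef0 (r) controls how much the model is influenced by high-degree terms relative to low-degree ones, which is what the d=3, r=1 versus d=10, r=100 comparison below illustrates.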
from sklearn.svm import SVC

poly_kernel_svm_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("svm_clf", SVC(kernel="poly", degree=3, coef0=1, C=5))
])
poly_kernel_svm_clf.fit(x, y)
#print(poly_kernel_svm_clf.fit(x, y))

poly100_kernel_svm_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("svm_clf", SVC(kernel="poly", degree=10, coef0=100, C=5))
])
poly100_kernel_svm_clf.fit(x, y)
#print(poly100_kernel_svm_clf.fit(x, y))

plt.figure(figsize=(11, 4))

plt.subplot(121)
plot_predictions(poly_kernel_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(x, y, [-1.5, 2.5, -1, 1.5])
plt.title(r"$d=3, r=1, C=5$", fontsize=18)

plt.subplot(122)
plot_predictions(poly100_kernel_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(x, y, [-1.5, 2.5, -1, 1.5])
plt.title(r"$d=10, r=100, C=5$", fontsize=18)

save_fig("moons_kernelized_polynomial_svc_plot")
plt.show()
- Adding similarity features
def gaussian_rbf(x, landmark, gamma):
    return np.exp(-gamma * np.linalg.norm(x - landmark, axis=1)**2)

gamma = 0.3

x1s = np.linspace(-4.5, 4.5, 200).reshape(-1, 1)
x2s = gaussian_rbf(x1s, -2, gamma)
x3s = gaussian_rbf(x1s, 1, gamma)

xk = np.c_[gaussian_rbf(x1d, -2, gamma), gaussian_rbf(x1d, 1, gamma)]
yk = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])

plt.figure(figsize=(11, 4))

plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.scatter(x=[-2, 1], y=[0, 0], s=150, alpha=0.5, c="red")
plt.plot(x1d[:, 0][yk==0], np.zeros(4), "bs")
plt.plot(x1d[:, 0][yk==1], np.zeros(5), "g^")
plt.plot(x1s, x2s, "g--")
plt.plot(x1s, x3s, "b:")
plt.gca().get_yaxis().set_ticks([0, 0.25, 0.5, 0.75, 1])
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"Similarity", fontsize=14)
plt.annotate(r'$\mathbf{x}$',
             xy=(x1d[3, 0], 0),
             xytext=(-0.5, 0.20),
             ha="center",
             arrowprops=dict(facecolor='black', shrink=0.1),
             fontsize=18,
            )
plt.text(-2, 0.9, "$x_2$", ha="center", fontsize=20)
plt.text(1, 0.9, "$x_3$", ha="center", fontsize=20)
plt.axis([-4.5, 4.5, -0.1, 1.1])

plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(xk[:, 0][yk==0], xk[:, 1][yk==0], "bs")
plt.plot(xk[:, 0][yk==1], xk[:, 1][yk==1], "g^")
plt.xlabel(r"$x_2$", fontsize=20)
plt.ylabel(r"$x_3$  ", fontsize=20, rotation=0)
plt.annotate(r'$\phi\left(\mathbf{x}\right)$',
             xy=(xk[3, 0], xk[3, 1]),
             xytext=(0.65, 0.50),
             ha="center",
             arrowprops=dict(facecolor='black', shrink=0.1),
             fontsize=18,
            )
plt.plot([-0.1, 1.1], [0.57, -0.1], "r--", linewidth=3)
plt.axis([-0.1, 1.1, -0.1, 1.1])

plt.subplots_adjust(right=1)

#save_fig("kernel_method_plot")
#plt.show()
- Gaussian RBF
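The similarity measure evaluated below is the Gaussian RBF centered on a landmark $\ell$,

$\phi_{\gamma}(\mathbf{x}, \ell) = \exp\left(-\gamma\, \lVert \mathbf{x} - \ell \rVert^{2}\right)$

which is exactly what the gaussian_rbf() function above computes for the two landmarks $x_1 = -2$ and $x_1 = 1$.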
x1_example = x1d[3, 0]
for landmark in (-2, 1):
    k = gaussian_rbf(np.array([[x1_example]]), np.array([[landmark]]), gamma)
    print("Phi({}, {}) = {}".format(x1_example, landmark, k))
- Trying the Gaussian RBF kernel with the SVC class
rbf_kernel_svm_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("svm_clf", SVC(kernel="rbf", gamma=5, C=0.001))
])
rbf_kernel_svm_clf.fit(x, y)
print(rbf_kernel_svm_clf.fit(x, y))

from sklearn.svm import SVC

gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)

svm_clfs = []
for gamma, C in hyperparams:
    rbf_kernel_svm_clf = Pipeline([
        ("scaler", StandardScaler()),
        ("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
    ])
    rbf_kernel_svm_clf.fit(x, y)
    svm_clfs.append(rbf_kernel_svm_clf)

plt.figure(figsize=(11, 7))

for i, svm_clf in enumerate(svm_clfs):
    plt.subplot(221 + i)
    plot_predictions(svm_clf, [-1.5, 2.5, -1, 1.5])
    plot_dataset(x, y, [-1.5, 2.5, -1, 1.5])
    gamma, C = hyperparams[i]
    plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)

# SVM classifiers with an RBF kernel
save_fig("moons_rbf_svc_plot")
plt.show()
SVM Regression
- Linear SVM regression
np.random.seed(42)
m = 50
x = 2 * np.random.rand(m, 1)
y = (4 + 3 * x + np.random.randn(m, 1)).ravel()

from sklearn.svm import LinearSVR

svm_reg = LinearSVR(epsilon=1.5, random_state=42)
svm_reg.fit(x, y)

svm_reg1 = LinearSVR(epsilon=1.5, random_state=42)
svm_reg2 = LinearSVR(epsilon=0.5, random_state=42)
svm_reg1.fit(x, y)
svm_reg2.fit(x, y)

def find_support_vectors(svm_reg, x, y):
    y_pred = svm_reg.predict(x)
    off_margin = (np.abs(y - y_pred) >= svm_reg.epsilon)
    return np.argwhere(off_margin)

svm_reg1.support_ = find_support_vectors(svm_reg1, x, y)
svm_reg2.support_ = find_support_vectors(svm_reg2, x, y)

eps_x1 = 1
eps_y_pred = svm_reg1.predict([[eps_x1]])

def plot_svm_regression(svm_reg, x, y, axes):
    x1s = np.linspace(axes[0], axes[1], 100).reshape(100, 1)
    y_pred = svm_reg.predict(x1s)
    plt.plot(x1s, y_pred, "k-", linewidth=2, label=r"$\hat{y}$")
    plt.plot(x1s, y_pred + svm_reg.epsilon, "k--")
    plt.plot(x1s, y_pred - svm_reg.epsilon, "k--")
    plt.scatter(x[svm_reg.support_], y[svm_reg.support_], s=180, facecolors='#FFAAAA')
    plt.plot(x, y, "bo")
    plt.xlabel(r"$x_1$", fontsize=18)
    plt.legend(loc="upper left", fontsize=18)
    plt.axis(axes)

plt.figure(figsize=(9, 4))

plt.subplot(121)
plot_svm_regression(svm_reg1, x, y, [0, 2, 3, 11])
plt.title(r"$\epsilon = {}$".format(svm_reg1.epsilon), fontsize=18)
plt.ylabel(r"$y$", fontsize=18, rotation=0)
#plt.plot([eps_x1, eps_x1], [eps_y_pred, eps_y_pred - svm_reg1.epsilon], "k-", linewidth=2)
plt.annotate(
    '', xy=(eps_x1, eps_y_pred), xycoords='data',
    xytext=(eps_x1, eps_y_pred - svm_reg1.epsilon),
    textcoords='data', arrowprops={'arrowstyle': '<->', 'linewidth': 1.5}
)
plt.text(0.91, 5.6, r"$\epsilon$", fontsize=20)

plt.subplot(122)
plot_svm_regression(svm_reg2, x, y, [0, 2, 3, 11])
plt.title(r"$\epsilon = {}$".format(svm_reg2.epsilon), fontsize=18)

save_fig("svm_regression_plot")  # linear SVM regression, comparing two epsilon margins
plt.show()
- SVM regression with a 2nd-degree polynomial kernel
np.random.seed(42)
m = 100
x = 2 * np.random.rand(m, 1) - 1
y = (0.2 + 0.1 * x + 0.5 * x**2 + np.random.randn(m, 1)/10).ravel()

# The SVR class is the regression equivalent of SVC, and LinearSVR is the regression
# equivalent of LinearSVC. LinearSVR scales linearly with the size of the training set
# (like LinearSVC), whereas SVR gets much slower as the training set grows (like SVC).
from sklearn.svm import SVR

svm_poly_reg1 = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="auto")
svm_poly_reg2 = SVR(kernel="poly", degree=2, C=0.01, epsilon=0.1, gamma="auto")
svm_poly_reg1.fit(x, y)
svm_poly_reg2.fit(x, y)

plt.figure(figsize=(9, 4))

plt.subplot(121)
plot_svm_regression(svm_poly_reg1, x, y, [-1, 1, 0, 1])
plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg1.degree, svm_poly_reg1.C, svm_poly_reg1.epsilon), fontsize=18)
plt.ylabel(r"$y$", fontsize=18, rotation=0)

plt.subplot(122)
plot_svm_regression(svm_poly_reg2, x, y, [-1, 1, 0, 1])
plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg2.degree, svm_poly_reg2.C, svm_poly_reg2.epsilon), fontsize=18)

save_fig("svm_with_polynomial_kernel_plot")  # SVM regression with a 2nd-degree polynomial kernel
plt.show()
- The decision function on the iris dataset
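For context (standard SVM notation, not from the original post): the decision function plotted below is

$h(\mathbf{x}) = \mathbf{w}^{\mathsf{T}} \mathbf{x} + b$

and the model predicts the positive class (Iris-Virginica) when $h(\mathbf{x}) \ge 0$, the negative class otherwise; the dashed lines at $h = \pm 1$ mark the edges of the margin.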
from sklearn import datasets
from mpl_toolkits.mplot3d import Axes3D

# Load the iris data first: petal length/width, Iris-Virginica vs. the rest
iris = datasets.load_iris()
x = iris["data"][:, (2, 3)]  # petal length, petal width
y = (iris["target"] == 2).astype(np.float64)  # Iris-Virginica

scaler = StandardScaler()
svm_clf1 = LinearSVC(C=1, loss="hinge", random_state=42)
svm_clf2 = LinearSVC(C=100, loss="hinge", random_state=42)

scaled_svm_clf1 = Pipeline([
    ("scaler", scaler),
    ("linear_svc", svm_clf1),
])
scaled_svm_clf2 = Pipeline([
    ("scaler", scaler),
    ("linear_svc", svm_clf2),
])
scaled_svm_clf1.fit(x, y)
scaled_svm_clf2.fit(x, y)

# Convert to unscaled parameters
b1 = svm_clf1.decision_function([-scaler.mean_ / scaler.scale_])
b2 = svm_clf2.decision_function([-scaler.mean_ / scaler.scale_])
w1 = svm_clf1.coef_[0] / scaler.scale_
w2 = svm_clf2.coef_[0] / scaler.scale_
svm_clf1.intercept_ = np.array([b1])
svm_clf2.intercept_ = np.array([b2])
svm_clf1.coef_ = np.array([w1])
svm_clf2.coef_ = np.array([w2])

# Find support vectors (LinearSVC does not do this automatically)
t = y * 2 - 1
support_vectors_idx1 = (t * (x.dot(w1) + b1) < 1).ravel()
support_vectors_idx2 = (t * (x.dot(w2) + b2) < 1).ravel()
svm_clf1.support_vectors_ = x[support_vectors_idx1]
svm_clf2.support_vectors_ = x[support_vectors_idx2]

def plot_3d_decision_function(ax, w, b, x1_lim=[4, 6], x2_lim=[0.8, 2.8]):
    x1_in_bounds = (x[:, 0] > x1_lim[0]) & (x[:, 0] < x1_lim[1])
    x_crop = x[x1_in_bounds]
    y_crop = y[x1_in_bounds]
    x1s = np.linspace(x1_lim[0], x1_lim[1], 20)
    x2s = np.linspace(x2_lim[0], x2_lim[1], 20)
    x1, x2 = np.meshgrid(x1s, x2s)
    xs = np.c_[x1.ravel(), x2.ravel()]
    df = (xs.dot(w) + b).reshape(x1.shape)
    m = 1 / np.linalg.norm(w)
    boundary_x2s = -x1s*(w[0]/w[1]) - b/w[1]
    margin_x2s_1 = -x1s*(w[0]/w[1]) - (b-1)/w[1]
    margin_x2s_2 = -x1s*(w[0]/w[1]) - (b+1)/w[1]
    ax.plot_surface(x1s, x2, np.zeros_like(x1), color="b", alpha=0.2, cstride=100, rstride=100)
    ax.plot(x1s, boundary_x2s, 0, "k-", linewidth=2, label=r"$h=0$")
    ax.plot(x1s, margin_x2s_1, 0, "k--", linewidth=2, label=r"$h=\pm 1$")
    ax.plot(x1s, margin_x2s_2, 0, "k--", linewidth=2)
    ax.plot(x_crop[:, 0][y_crop==1], x_crop[:, 1][y_crop==1], 0, "g^")
    ax.plot_wireframe(x1, x2, df, alpha=0.3, color="k")
    ax.plot(x_crop[:, 0][y_crop==0], x_crop[:, 1][y_crop==0], 0, "bs")
    ax.axis(x1_lim + x2_lim)
    ax.text(4.5, 2.5, 3.8, "Decision function $h$", fontsize=15)
    ax.set_xlabel(r"Petal length", fontsize=15)
    ax.set_ylabel(r"Petal width", fontsize=15)
    ax.set_zlabel(r"$h = \mathbf{w}^T \mathbf{x} + b$", fontsize=18)
    ax.legend(loc="upper left", fontsize=16)

fig = plt.figure(figsize=(11, 6))
ax1 = fig.add_subplot(111, projection='3d')
plot_3d_decision_function(ax1, w=svm_clf2.coef_[0], b=svm_clf2.intercept_[0])

#save_fig("iris_3d_plot")  # the decision function on the iris dataset
#plt.show()
- The smaller the weight vector, the larger the margin
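In the 1D illustration below, the decision function reaches $\pm 1$ at $x_1 = \pm 1 / w_1$, so the margin is $2 / w_1$ wide (in general, $2 / \lVert \mathbf{w} \rVert$): halving $w_1$ doubles the margin.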
def plot_2d_decision_function(w, b, ylabel=True, x1_lim=[-3, 3]):
    x1 = np.linspace(x1_lim[0], x1_lim[1], 200)
    y = w * x1 + b
    m = 1 / w
    plt.plot(x1, y)
    plt.plot(x1_lim, [1, 1], "k:")
    plt.plot(x1_lim, [-1, -1], "k:")
    plt.axhline(y=0, color='k')
    plt.axvline(x=0, color='k')
    plt.plot([m, m], [0, 1], "k--")
    plt.plot([-m, -m], [0, -1], "k--")
    plt.plot([-m, m], [0, 0], "k-o", linewidth=3)
    plt.axis(x1_lim + [-2, 2])
    plt.xlabel(r"$x_1$", fontsize=16)
    if ylabel:
        plt.ylabel(r"$w_1 x_1$  ", rotation=0, fontsize=16)
    plt.title(r"$w_1 = {}$".format(w), fontsize=16)

plt.figure(figsize=(12, 3.2))
plt.subplot(121)
plot_2d_decision_function(1, 0)
plt.subplot(122)
plot_2d_decision_function(0.5, 0, ylabel=False)
save_fig("small_w_large_margin_plot")
plt.show()

from sklearn.svm import SVC
from sklearn import datasets

iris = datasets.load_iris()
x = iris["data"][:, (2, 3)]  # petal length, petal width
y = (iris["target"] == 2).astype(np.float64)  # Iris-Virginica

svm_clf = SVC(kernel="linear", C=1)
svm_clf.fit(x, y)
svm_clf.predict([[5.3, 1.3]])
#print(svm_clf.predict([[5.3, 1.3]]))

# Hinge loss
t = np.linspace(-2, 4, 200)
h = np.where(1 - t < 0, 0, 1 - t)  # max(0, 1-t)

plt.figure(figsize=(5, 2.8))
plt.plot(t, h, "b-", linewidth=2, label="$max(0, 1 - t)$")
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.yticks(np.arange(-1, 2.5, 1))
plt.xlabel("$t$", fontsize=16)
plt.axis([-2, 4, -1, 2.5])
plt.legend(loc="upper right", fontsize=16)
save_fig("hinge_plot")
plt.show()