Table of Contents
Preface
I. Dataset Introduction
II. House Price Prediction Code
    1. Import Libraries
    2. Data
    3. Gradient Descent
Summary

Preface
Notes from studying the gradient descent algorithm.
I. Dataset Introduction
The Boston housing dataset: 506 samples, each with 13 numeric features and a median house price target. It is used here for linear regression prediction.
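A caveat for anyone reproducing this: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2 over ethical concerns about one of its features, so the code in this post needs scikit-learn < 1.2. Under that assumption, a minimal sketch to load and inspect the data:

from sklearn.datasets import load_boston  # requires scikit-learn < 1.2

data = load_boston()
print(data.data.shape)      # (506, 13): 506 neighborhoods, 13 numeric features
print(data.target.shape)    # (506,): median home value, in $1000s
print(data.feature_names)   # ['CRIM' 'ZN' 'INDUS' ... 'LSTAT']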
II. House Price Prediction Code
1. Import Libraries
from sklearn.linear_model import LinearRegression as LR
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston as boston
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import numpy as np
from sklearn.metrics import mean_squared_error

2. Data
def preprocess():
    # get the dataset of boston
    X = boston().data
    y = boston().target
    name_data = boston().feature_names
    # draw the figure of the relationship between each feature and the price
    plt.figure(figsize=(20, 20))
    for i in range(len(X[0])):
        plt.subplot(5, 3, i + 1)
        plt.scatter(X[:, i], y, s=20)
        plt.title(name_data[i])
    plt.show()
    # optionally drop the weakly correlated features
    # X = np.delete(X, [0, 1, 3, 4, 6, 7, 8, 9, 11], axis=1)
    # normalization: min-max scale each feature
    for i in range(len(X[0])):
        X[:, i] = (X[:, i] - X[:, i].min()) / (X[:, i].max() - X[:, i].min())
    # split into test and train
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, y, test_size=0.3, random_state=10)
    return Xtrain, Xtest, Ytrain, Ytest, X

def lr(Xtrain, Xtest, Ytrain, Ytest, if_figure):
    # use scikit-learn's closed-form LinearRegression as a baseline
    reg = LR().fit(Xtrain, Ytrain)
    y_pred = reg.predict(Xtest)
    loss = mean_squared_error(Ytest, y_pred)
    print("*************LR*****************")
    print("w\t {}".format(reg.coef_))
    print("b\t {:.4f}".format(reg.intercept_))
    # draw the figure of predict results
    if if_figure:
        plt.figure(figsize=(14, 6), dpi=80)
        plt.plot(range(len(Ytest)), Ytest, c="blue", label="real")
        plt.plot(range(len(y_pred)), y_pred, c="red", linestyle=":", label="predict")
        plt.title("predict results from raw LR")
        plt.legend()
        plt.show()
    return loss

3. Gradient Descent
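A quick note on where the update rule in the code below comes from; this is standard stochastic gradient descent, stated here for completeness. For one sample with prediction ŷ = w·x + b, take the per-sample loss L = ½(y − ŷ)². Differentiating gives

∂L/∂w = −(y − ŷ)·x
∂L/∂b = −(y − ŷ)

which is exactly what the grad helper returns (the ½ cancels the 2 from the square; any remaining constant factor is absorbed by the learning rate). Each sample then steps the parameters as w ← w − η·∂L/∂w and b ← b − η·∂L/∂b, with η the learning rate.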
def gradDescent(Xtrain, Xtest, Ytrain, Ytest, X, if_figure, rate):
    # gradient of the per-sample squared error with respect to w and b
    def grad(y, yp, X):
        grad_w = (y - yp) * (-X)
        grad_b = (y - yp) * (-1)
        return [grad_w, grad_b]

    # training parameters
    epoch_train = 100
    learning_rate = rate
    w = np.random.normal(0.0, 1.0, (1, len(X[0])))
    b = 0.0
    loss_train = []
    loss_test = []
    for epoch in range(epoch_train + 1):
        loss1 = 0
        for i in range(len(Xtrain)):
            yp = w.dot(Xtrain[i]) + b
            # accumulate the training loss
            err = Ytrain[i] - yp
            loss1 += err ** 2
            # update w and b
            gw = grad(Ytrain[i], yp, Xtrain[i])[0]
            gb = grad(Ytrain[i], yp, Xtrain[i])[1]
            w = w - learning_rate * gw
            b = b - learning_rate * gb
        # record the mean training loss
        loss_train.append(loss1 / len(Xtrain))
        loss11 = 0
        for i in range(len(Xtest)):
            yp2 = w.dot(Xtest[i]) + b
            err2 = Ytest[i] - yp2
            loss11 += err2 ** 2
        # record the mean test loss
        loss_test.append(loss11 / len(Xtest))
        # shuffle the data between epochs
        Xtrain, Ytrain = shuffle(Xtrain, Ytrain)
    # draw the figure of loss
    if if_figure:
        plt.figure()
        plt.title("figure of loss")
        plt.plot(range(len(loss_train)), loss_train, c="blue", linestyle=":", label="train")
        plt.plot(range(len(loss_test)), loss_test, c="red", label="test")
        plt.legend()
        plt.show()
    # draw figure of predict results
    if if_figure:
        Predict_value = []
        for i in range(len(Xtest)):
            Predict_value.append(w.dot(Xtest[i]) + b)
        plt.figure()
        plt.title("predict results from gradDescent")
        plt.plot(range(len(Xtest)), Ytest, c="blue", label="real")
        plt.plot(range(len(Xtest)), Predict_value, c="red", linestyle=":", label="predict")
        plt.legend()
        plt.show()
    return loss_test[-1], w, b

def test():
    if_figure = True
    Xtrain, Xtest, Ytrain, Ytest, X = preprocess()
    loss_lr = lr(Xtrain, Xtest, Ytrain, Ytest, if_figure)
    loss_gd, w, b = gradDescent(Xtrain, Xtest, Ytrain, Ytest, X, if_figure, 0.01)
    print("*************GD*****************")
    print("w\t: {}".format(w))
    print("b\t: {}".format(b))
    print("************loss****************")
    print("lr\t: %.4f" % loss_lr)
    print("gd\t: %.4f" % loss_gd)

def searchRate():
    # sweep learning rates and keep the model with the lowest test loss
    if_figure = False
    Xtrain, Xtest, Ytrain, Ytest, X = preprocess()
    loss_grad = []
    w_grad = []
    b_grad = []
    rates = list(np.arange(0.001, 0.05, 0.001))
    epoch = 1
    for rate in rates:
        loss, w, b = gradDescent(Xtrain, Xtest, Ytrain, Ytest, X, if_figure, rate)
        loss_grad.append(loss[0])
        w_grad.append(w)
        b_grad.append(b)
        print("epoch %d: %.4f" % (epoch, loss_grad[-1]))
        epoch += 1
    plt.figure()
    plt.plot(rates, loss_grad)
    plt.title("loss under different rate")
    plt.show()
    loss_grad_min = min(loss_grad)
    position = loss_grad.index(loss_grad_min)
    w = w_grad[position]
    b = b_grad[position]
    rate = rates[position]
    loss_lr = lr(Xtrain, Xtest, Ytrain, Ytest, if_figure)
    print("*************GD*****************")
    print("w\t: {}".format(w))
    print("b\t: {}".format(b))
    print("rate: %.3f" % rate)
    print("************loss****************")
    print("lr\t: %.4f" % loss_lr)
    print("gd\t: %.4f" % loss_grad_min)

data = boston()
Xtrain, Xtest, Ytrain, Ytest, X = preprocess()
lr(Xtrain, Xtest, Ytrain, Ytest, True)
test()
searchRate()
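As a cross-check on the per-sample loop above, the same model can be trained with full-batch gradient descent in a few vectorized NumPy lines. The sketch below is self-contained on synthetic data; the function name, data, and hyperparameters are illustrative, not part of the original code:

import numpy as np

def batch_gradient_descent(X, y, lr=0.5, epochs=2000):
    # full-batch gradient descent on J(w, b) = mean((y - Xw - b)^2) / 2
    n, d = X.shape
    w = np.zeros(d)
    b = 0.0
    for _ in range(epochs):
        err = y - (X.dot(w) + b)     # residuals, shape (n,)
        w += lr * X.T.dot(err) / n   # dJ/dw = -X^T err / n
        b += lr * err.mean()         # dJ/db = -mean(err)
    return w, b

rng = np.random.default_rng(0)
X = rng.random((200, 3))
y = X.dot(np.array([2.0, -1.0, 0.5])) + 0.3 + 0.01 * rng.standard_normal(200)
w, b = batch_gradient_descent(X, y)
print(w, b)  # should approach [2.0, -1.0, 0.5] and 0.3

Because every sample is used in each step, the loss decreases smoothly and there is no need to shuffle between epochs; the per-sample (stochastic) version above trades that smoothness for cheaper, noisier updates.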
Summary

This exercise gave me a deeper understanding of the gradient descent algorithm.