This article shows how to implement multiple linear regression with gradient descent in Python. The walkthrough is fairly detailed, and readers who are interested can use it as a reference.

The details are as follows.
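For reference, here is a quick sketch of the hypothesis, cost function, and update rule that the code below implements (standard multiple linear regression with four features; the formulas are written out here only to make the code easier to follow):

$$h_\theta(x) = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \theta_3 x_3 + \theta_4 x_4$$

$$J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)^2$$

$$\theta_j \leftarrow \theta_j - \frac{\alpha}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)}, \qquad x_0^{(i)} = 1$$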


import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Read the data from csv (raw string so the Windows backslashes are not treated as escapes)
pga = pd.read_csv(r"D:\python3\data\Test.csv")

# Normalize each column: (x - mean) / std
pga.AT = (pga.AT - pga.AT.mean()) / pga.AT.std()
pga.V = (pga.V - pga.V.mean()) / pga.V.std()
pga.AP = (pga.AP - pga.AP.mean()) / pga.AP.std()
pga.RH = (pga.RH - pga.RH.mean()) / pga.RH.std()
pga.PE = (pga.PE - pga.PE.mean()) / pga.PE.std()
def cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # Initialize the cost
    J = 0
    # The number of observations
    m = len(x1)
    # Loop through each observation
    for i in range(m):
        # Compute the hypothesis
        h = theta0 + x1[i] * theta1 + x2[i] * theta2 + x3[i] * theta3 + x4[i] * theta4
        # Add the squared error to the cost
        J += (h - y[i]) ** 2
    # Average and normalize the cost
    J /= (2 * m)
    return J
# Partial derivatives of the cost with respect to each parameter
def partial_cost_theta4(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    h = theta0 + x1 * theta1 + x2 * theta2 + x3 * theta3 + x4 * theta4
    diff = (h - y) * x4
    # All feature columns have the same length, so x2.shape[0] is the number of observations m
    partial = diff.sum() / (x2.shape[0])
    return partial

def partial_cost_theta3(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    h = theta0 + x1 * theta1 + x2 * theta2 + x3 * theta3 + x4 * theta4
    diff = (h - y) * x3
    partial = diff.sum() / (x2.shape[0])
    return partial

def partial_cost_theta2(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    h = theta0 + x1 * theta1 + x2 * theta2 + x3 * theta3 + x4 * theta4
    diff = (h - y) * x2
    partial = diff.sum() / (x2.shape[0])
    return partial

def partial_cost_theta1(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    h = theta0 + x1 * theta1 + x2 * theta2 + x3 * theta3 + x4 * theta4
    diff = (h - y) * x1
    partial = diff.sum() / (x2.shape[0])
    return partial

# Partial derivative of the cost in terms of theta0
def partial_cost_theta0(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    h = theta0 + x1 * theta1 + x2 * theta2 + x3 * theta3 + x4 * theta4
    diff = (h - y)
    partial = diff.sum() / (x2.shape[0])
    return partial
def gradient_descent(x1, x2, x3, x4, y, alpha=0.1, theta0=0, theta1=0, theta2=0, theta3=0, theta4=0):
    max_epochs = 1000  # Maximum number of iterations
    counter = 0        # Initialize a counter for the current iteration
    c = cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)  # Initial cost
    costs = [c]        # Store the cost of every update
    # Set a convergence threshold to find where the cost function is minimized.
    # When the difference between the previous cost and the current cost
    # is less than this value, we say the parameters have converged.
    convergence_thres = 0.000001
    cprev = c + 10
    theta0s = [theta0]
    theta1s = [theta1]
    theta2s = [theta2]
    theta3s = [theta3]
    theta4s = [theta4]
    # Stop updating when the costs converge or we hit the maximum number of iterations
    while (np.abs(cprev - c) > convergence_thres) and (counter < max_epochs):
        cprev = c
        # Alpha times the partial derivative is our update (the step)
        update0 = alpha * partial_cost_theta0(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update1 = alpha * partial_cost_theta1(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update2 = alpha * partial_cost_theta2(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update3 = alpha * partial_cost_theta3(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update4 = alpha * partial_cost_theta4(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        # Update all thetas at the same time.
        # We want to compute the slopes at the same set of hypothesised parameters,
        # so we apply the updates only after finding all the partial derivatives.
        # -= descends the gradient; += would ascend it.
        theta0 -= update0
        theta1 -= update1
        theta2 -= update2
        theta3 -= update3
        theta4 -= update4
        # Store the thetas
        theta0s.append(theta0)
        theta1s.append(theta1)
        theta2s.append(theta2)
        theta3s.append(theta3)
        theta4s.append(theta4)
        # Compute the new cost with the updated parameters
        c = cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        # Store the current cost
        costs.append(c)
        counter += 1  # Count the iteration
    # Return the fitted thetas and the recorded costs
    # return {'theta0': theta0, 'theta1': theta1, 'theta2': theta2, 'theta3': theta3, 'theta4': theta4, "costs": costs}
    return {'costs': costs}
print("costs =", gradient_descent(pga.AT, pga.V,pga.AP,pga.RH,pga.PE)['costs'])
descend = gradient_descent(pga.AT, pga.V,pga.AP,pga.RH,pga.PE, alpha=.01)
plt.scatter(range(len(descend["costs"])), descend["costs"])
plt.show()損失函數(shù)隨迭代次數(shù)變換圖:
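As a side note, the same algorithm can be written in vectorized form with NumPy, which replaces the five separate partial-derivative functions with one matrix product. This is only a minimal sketch, assuming the normalized pga DataFrame from above; the name gradient_descent_vec and the column stacking are illustrative, not part of the original code.

def gradient_descent_vec(X, y, alpha=0.01, max_epochs=1000, tol=1e-6):
    m = X.shape[0]
    Xb = np.c_[np.ones(m), X]      # prepend a column of ones for theta0
    theta = np.zeros(Xb.shape[1])  # one theta per column, including the intercept
    costs = []
    cprev = np.inf
    for _ in range(max_epochs):
        h = Xb @ theta                       # hypothesis for every sample at once
        c = ((h - y) ** 2).sum() / (2 * m)   # same cost as the loop version
        costs.append(c)
        if abs(cprev - c) < tol:             # same convergence test
            break
        cprev = c
        grad = Xb.T @ (h - y) / m            # all partial derivatives in one product
        theta -= alpha * grad                # simultaneous update of every theta
    return theta, costs

# Usage with the same columns as the loop-based version:
# X = pga[["AT", "V", "AP", "RH"]].values
# theta, costs = gradient_descent_vec(X, pga.PE.values, alpha=0.01)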

That covers how to implement multiple linear regression with gradient descent in Python. Hopefully the material above is helpful and lets you learn something new. If you found the article useful, feel free to share it so more people can see it.