Python from scratch: gradient descent, multiple linear regression
컴닥
2023. 1. 21. 22:01
Gradient descent is one of the methods for finding the slope (weights) and intercept (bias) of a hypothesis function. Algorithms that do this are called optimizers.
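For reference (my notation, not spelled out in the original post): with hypothesis \hat{y} = a_1 x_1 + a_2 x_2 + b and n samples, the mean squared error and its partial derivatives, which the a1_diff, a2_diff, and b_diff lines below implement, are

E = \frac{1}{n} \sum_{i=1}^{n} (y_i - \hat{y}_i)^2
\frac{\partial E}{\partial a_1} = \frac{2}{n} \sum_{i=1}^{n} -x_{1,i}\,(y_i - \hat{y}_i)
\frac{\partial E}{\partial a_2} = \frac{2}{n} \sum_{i=1}^{n} -x_{2,i}\,(y_i - \hat{y}_i)
\frac{\partial E}{\partial b} = \frac{2}{n} \sum_{i=1}^{n} -(y_i - \hat{y}_i)

Each parameter is then updated as a \leftarrow a - \text{lr} \cdot \partial E / \partial a.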
In pure Python:
x1 = [2, 4, 6, 8]
x2 = [0, 4, 2, 3]
y = [81, 93, 91, 97]

a1 = a2 = b = 0  # weights and bias, initialized to zero
lr = 0.01        # learning rate
epochs = 2001

for i in range(epochs):
    # current predictions of the hypothesis a1*x1 + a2*x2 + b
    pred_y = [a1 * each_x1 + a2 * each_x2 + b for each_x1, each_x2 in zip(x1, x2)]
    error = [each_y - each_pred for each_y, each_pred in zip(y, pred_y)]
    # partial derivatives of the MSE loss with respect to a1, a2, b
    a1_diff = 2 / len(x1) * sum(-each_x1 * each_error for each_x1, each_error in zip(x1, error))
    a2_diff = 2 / len(x1) * sum(-each_x2 * each_error for each_x2, each_error in zip(x2, error))
    b_diff = 2 / len(x1) * sum(-each for each in error)
    # gradient descent update
    a1 -= lr * a1_diff
    a2 -= lr * a2_diff
    b -= lr * b_diff
    if i % 100 == 0:
        print(i, a1, a2, b)

final_pred_y = [a1 * each_x1 + a2 * each_x2 + b for each_x1, each_x2 in zip(x1, x2)]
print(final_pred_y)
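Not in the original post, but as a quick usage sketch: once trained, the learned coefficients can predict a new point, e.g. the same (7, 4) sample used in the TensorFlow example below.

# hypothetical new sample (7, 4), mirroring the TensorFlow prediction below
new_x1, new_x2 = 7, 4
print(a1 * new_x1 + a2 * new_x2 + b)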
With NumPy:
import numpy as np

x1 = np.array([2, 4, 6, 8])
x2 = np.array([0, 4, 2, 3])
y = np.array([81, 93, 91, 97])

a1 = a2 = b = 0  # weights and bias, initialized to zero
lr = 0.01        # learning rate
epochs = 2001

for i in range(epochs):
    pred_y = a1 * x1 + a2 * x2 + b   # vectorized predictions
    error = y - pred_y
    # partial derivatives of the MSE loss with respect to a1, a2, b
    a1_diff = 2 / len(x1) * np.sum(-x1 * error)
    a2_diff = 2 / len(x1) * np.sum(-x2 * error)
    b_diff = 2 / len(x1) * np.sum(-error)
    # gradient descent update
    a1 -= lr * a1_diff
    a2 -= lr * a2_diff
    b -= lr * b_diff
    if i % 100 == 0:
        print(i, a1, a2, b)

final_pred_y = a1 * x1 + a2 * x2 + b
print(final_pred_y)
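As a sanity check (my addition, not in the original post), the coefficients gradient descent converges to can be compared against the exact least-squares solution that NumPy computes in closed form:

# design matrix with a column of ones for the bias term
X = np.column_stack([x1, x2, np.ones(len(x1))])
coef, *_ = np.linalg.lstsq(X, y, rcond=None)
print(coef)  # [a1, a2, b] from the exact least-squares fit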
With TensorFlow:
import numpy as np
from tensorflow import keras

# each row is one sample: [x1, x2]
x = np.array([[2, 0], [4, 4], [6, 2], [8, 3]])
y = np.array([81, 93, 91, 97])

model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2, activation='linear'))  # one linear output unit
model.compile(optimizer='sgd', loss='mse')  # plain SGD on the MSE loss, as above
model.fit(x, y, epochs=2001)

print(model.predict(np.array([[7, 4]])))  # predict for a new sample (7, 4)
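To compare with the hand-rolled versions, the weights Keras learned can be read back with model.get_weights(), a standard Keras API; for a single Dense layer it returns the kernel and bias arrays.

weights, bias = model.get_weights()
print(weights.flatten(), bias)  # should be close to a1, a2 and b above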