gpt4 book ai didi

python - python中逻辑回归GD的实现

转载 作者:行者123 更新时间:2023-11-30 09:09:43 25 4
gpt4 key购买 nike

我正在 python 中使用正则损失函数实现逻辑回归,如下所示: enter image description here

但是梯度算法效果不好。请先阅读粗体文字!只需将代码逐个单元格地粘贴即可

import numpy as np, scipy as sp, sklearn as sl
from scipy import special as ss
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.datasets import make_classification
import theano.tensor as T

这是损失函数:(scipy 的作用是“剪辑”1 附近的对数参数)

def lossf(w, X, y, l1, l2):
w.resize((w.shape[0],1))
y.resize((y.shape[0],1))

lossf1 = np.sum(ss.log1p(1 + ss.expm1(np.multiply(-y, np.dot(X, w)))))
lossf2 = l2 * (np.dot(np.transpose(w), w))
lossf3 = l1 * sum(abs(w))
lossf = np.float(lossf1 + lossf2 + lossf3)
return lossf

这是梯度函数:(??这里有问题??-见结尾)

def gradf(w, X, y, l1, l2):
w.resize((w.shape[0],1))
y.resize((y.shape[0],1))

gradw1 = l2 * 2 * w
gradw2 = l1 * np.sign(w)
gradw3 = np.multiply(-y,(2 + ss.expm1(np.multiply(-y, np.dot(X, w)))))
gradw3 = gradw3 / (2 + (ss.expm1((np.multiply(-y, np.dot(X, w))))))
gradw3 = np.sum(np.multiply(gradw3, X), axis=0)
gradw3.resize(gradw3.shape[0],1)
gradw = gradw1 + gradw2 + gradw3
gradw.resize(gradw.shape[0],)
return np.transpose(gradw)

这是我的 LR 类:

class LR(ClassifierMixin, BaseEstimator):
def __init__(self, lr=0.0001, l1=0.1, l2=0.1, num_iter=100, verbose=0):
self.l1 = l1
self.l2 = l2
self.w = None
self.lr = lr
self.verbose = verbose
self.num_iter = num_iter

def fit(self, X, y):
n, d = X.shape
self.w = np.zeros(shape=(d,))
for i in range(self.num_iter):
g = gradf(self.w, X, y, self.l1, self.l2)
g.resize((g.shape[0],1))
self.w = self.w - g
print "Loss: ", lossf(self.w, X, y, self.l1, self.l2)
return self

def predict_proba(self, X):
probs = 1/(2 + ss.expm1(np.dot(-X, self.w)))
return probs

def predict(self, X):
probs = self.predict_proba(X)
probs = np.sign(2 * probs - 1)
probs.resize((probs.shape[0],))
return probs

以下是测试:

X, y = make_classification(n_features=100, n_samples=100)
y = 2 * (y - 0.5)
clf = LR(lr=0.000001, l1=0.1, l2=0.1, num_iter=10, verbose=0)
clf = clf.fit(X, y)
yp = clf.predict(X)
yp.resize((100,1))
accuracy = int(sum(y == yp))/len(y)

哎呀。这不收敛。但如果我用 theno 替换我的 gradw3:

gradw3 = get_gradw3(w,X,y)

where:
w,X,y = T.matrices("wXy")
logloss = T.sum(T.log1p(1 + T.expm1(-y* T.dot(X, w))))
get_gradw3 = theano.function([w,X,y],T.grad(logloss,w).reshape(w.shape))

它收敛到 100% 的准确度。这意味着,我的 gradw3 实现错误,但我找不到错误。贪婪地寻求帮助!

最佳答案

事实上,我终于成功了。我不知道关键的更改到底是什么,但以下是我的更改摘录:

  • 将所有 np.multiply 替换为 *

  • 学习率和调节器降低

  • np.nan_to_num 应用于指数

所以这是最终的代码:

def lossf(w, X, y, l1, l2):
w.resize((w.shape[0],1))
y.resize((y.shape[0],1))

lossf1 = np.sum(ss.log1p(1 + np.nan_to_num(ss.expm1(-y * np.dot(X, w)))))
lossf2 = l2 * (np.dot(np.transpose(w), w))
lossf3 = l1 * sum(abs(w))
lossf = np.float(lossf1 + lossf2 + lossf3)
return lossf

def gradf(w, X, y, l1, l2):
w.resize((w.shape[0],1))
y.resize((y.shape[0],1))

gradw1 = l2 * 2 * w
gradw2 = l1 * np.sign(w)
gradw3 = -y * (1 + np.nan_to_num(ss.expm1(-y * np.dot(X, w))))
gradw3 = gradw3 / (2 + np.nan_to_num(ss.expm1(-y * np.dot(X, w))))
gradw3 = np.sum(gradw3 * X, axis=0)
gradw3.resize(gradw3.shape[0],1)
gradw = gradw1 + gradw2 + gradw3
gradw.resize(gradw.shape[0],)
return np.transpose(gradw)
class LR(ClassifierMixin, BaseEstimator):
def __init__(self, lr=0.000001, l1=0.1, l2=0.1, num_iter=100, verbose=0):
self.l1 = l1
self.l2 = l2
self.w = None
self.lr = lr
self.verbose = verbose
self.num_iter = num_iter

def fit(self, X, y):
n, d = X.shape
self.w = np.zeros(shape=(d,))
for i in range(self.num_iter):
print "\n", "Iteration ", i
g = gradf(self.w, X, y, self.l1, self.l2)
g.resize((g.shape[0],1))
self.w = self.w - g
print "Loss: ", lossf(self.w, X, y, self.l1, self.l2)
return self

def predict_proba(self, X):
probs = 1/(2 + ss.expm1(np.dot(-X, self.w)))
return probs

def predict(self, X):
probs = self.predict_proba(X)
probs = np.sign(2 * probs - 1)
probs.resize((probs.shape[0],))
return probs

关于python - python中逻辑回归GD的实现,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/42784242/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com