
python - Optimizing four parameters with scipy.optimize.fmin_l_bfgs_b in Python, getting an error


I am writing an active learning algorithm using the L-BFGS algorithm from scipy.optimize. I need to optimize four parameters: alpha, beta, W, and gamma.

However, it does not work; the error is

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=np.array(alpha,beta,W,gamma), fprime=func_grad)
ValueError: only 2 non-keyword arguments accepted

Note that in the last line of the code, x0 is the initial guess for the four parameters. If I change it to x0=np.array((alpha,beta,W,gamma),dtype=float), I get the error

ValueError: setting an array element with a sequence.

I am not sure why these errors occur.

from sys import argv
import numpy as np
import scipy as sp
import pandas as pd
import scipy.stats as sps

num_labeler = 3
num_instance = 5

X = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4],[5,5,5,5]])
Z = np.array([1,0,1,0,1])
Y = np.array([[1,0,1],[0,1,0],[0,0,0],[1,1,1],[1,0,0]])

W = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
gamma = np.ones(5)
alpha = np.ones(4)
beta = 1


def log_p_y_xz(yit, zi, sigmati):  # log P(y_it | x_i, z_i)
    return np.log(sps.norm(zi, sigmati).pdf(yit))  # tested

def log_p_z_x(alpha, beta, xi):  # log P(z_i=1 | x_i)
    return -np.log(1 + np.exp(-np.dot(alpha, xi) - beta))  # tested

def sigma_eta_ti(xi, w_t, gamma_t):  # (1 + exp(-w_t x_i - gamma_t))^-1
    return 1 / (1 + np.exp(-np.dot(xi, w_t) - gamma_t))  # tested

def df_alpha(X, Y, Z, W, alpha, beta, gamma):  # df/dalpha
    return np.sum((2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)*np.exp(-np.dot(alpha,X[i])-beta)*X[i]/(1+np.exp(-np.dot(alpha,X[i])-beta))**2 for i in range(num_instance))

def df_beta(X, Y, Z, W, alpha, beta, gamma):  # df/dbeta
    return np.sum((2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)*np.exp(-np.dot(alpha,X[i])-beta)/(1+np.exp(-np.dot(alpha,X[i])-beta))**2 for i in range(num_instance))

def df_w(X, Y, Z, W, alpha, beta, gamma):  # df/dsigma * dsigma/dw
    return np.sum(np.sum((-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))*(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i]+(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i] for t in range(num_labeler)) for i in range(num_instance))

def df_gamma(X, Y, Z, W, alpha, beta, gamma):  # df/dsigma * dsigma/dgamma
    return np.sum(np.sum((-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))*(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))+(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))) for t in range(num_labeler)) for i in range(num_instance))

def func(para, *args):
    # the function to minimize
    # parameters
    alpha = para[0]  # alpha should be an array
    beta = para[1]
    W = para[2]
    gamma = para[3]
    # args
    X = args[0]
    Y = args[1]
    Z = args[2]
    return np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range(num_instance))


def func_grad(para, *args):
    # para has 4 values
    alpha = para[0]  # alpha should be an array
    beta = para[1]
    W = para[2]
    gamma = para[3]
    # args
    X = args[0]
    Y = args[1]
    Z = args[2]
    # gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])


optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=np.array(alpha,beta,W,gamma), fprime=func_grad)

Best Answer

scipy's optimization routines can only optimize a one-dimensional vector of parameters. It looks like you are trying to optimize a tuple of values that mixes scalars, vectors, and matrices.

What you need to do is flatten all the relevant parameter values into a one-dimensional array, and then have your func unpack them and set them appropriately.
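This is also why both of your attempts at building x0 failed: np.array(alpha,beta,W,gamma) passes four positional arguments to np.array, which accepts at most two (the data and the dtype), hence "only 2 non-keyword arguments accepted"; and np.array((alpha,beta,W,gamma),dtype=float) asks NumPy to build a rectangular array out of pieces with different shapes, hence "setting an array element with a sequence". Here is a minimal sketch of the flatten/unpack pattern, using the shapes from your snippet (alpha has length 4, beta is a scalar, W is 3x4, gamma has length 5):

import numpy as np

alpha = np.ones(4)    # shape (4,)
beta = 1.0            # scalar
W = np.ones((3, 4))   # shape (3, 4)
gamma = np.ones(5)    # shape (5,)

# Flatten everything into a single 1D vector: 4 + 1 + 12 + 5 = 22 entries.
x0 = np.concatenate([np.ravel(alpha), np.ravel(beta), np.ravel(W), np.ravel(gamma)])
assert x0.shape == (22,)

# Unpacking is just slicing and reshaping; it recovers the original shapes.
alpha2, beta2 = x0[0:4], x0[4]
W2, gamma2 = x0[5:17].reshape(3, 4), x0[17:22]
assert np.allclose(W2, W)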


Edit: I'll go ahead and create a convenience function to extract your parameters; for example:

def get_params(para):
    # extract parameters from the 1D parameter vector
    assert len(para) == 22
    alpha = para[0:4]
    beta = para[4]
    W = para[5:17].reshape(3, 4)
    gamma = para[17:]
    return alpha, beta, gamma, W

def func(para, *args):
    # the function to minimize
    # parameters
    alpha, beta, gamma, W = get_params(para)

    # args
    X = args[0]
    Y = args[1]
    Z = args[2]
    return np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range(num_instance))


def func_grad(para, *args):
    # para have 4 values
    alpha, beta, gamma, W = get_params(para)

    # args
    X = args[0]
    Y = args[1]
    Z = args[2]
    # gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])


x0 = np.concatenate([np.ravel(alpha), np.ravel(beta), np.ravel(W), np.ravel(gamma)])
optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=x0, fprime=func_grad)

It won't run as-is, because your func() and func_grad() require additional arguments that are not specified in the code snippet, but this change fixes the specific problem you asked about.
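For completeness, a hedged sketch of what those remaining fixes might look like: fmin_l_bfgs_b accepts an args tuple that it forwards to both func and fprime, and fprime must return a flat gradient with one entry per element of x0, so the gradient pieces need to be concatenated rather than returned as a ragged array (func_grad_flat is just an illustrative name):

# Sketch only: assumes df_alpha, df_beta, df_w, df_gamma are adjusted so
# that the concatenated gradient has exactly 22 entries, matching x0.
def func_grad_flat(para, *args):
    X, Y, Z = args
    alpha, beta, gamma, W = get_params(para)
    return np.concatenate([
        np.ravel(df_alpha(X, Y, Z, W, alpha, beta, gamma)),
        np.ravel(df_beta(X, Y, Z, W, alpha, beta, gamma)),
        np.ravel(df_w(X, Y, Z, W, alpha, beta, gamma)),
        np.ravel(df_gamma(X, Y, Z, W, alpha, beta, gamma)),
    ])

# args is forwarded to both func and fprime by fmin_l_bfgs_b.
optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(
    func, x0=x0, fprime=func_grad_flat, args=(X, Y, Z))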

A similar question can be found on Stack Overflow: https://stackoverflow.com/questions/33383895/
