我正在研究吴安德教授的深度学习课程的作业2。我必须计算逻辑回归的成本和梯度(dw,db)。对于我的代码出了什么问题,我一无所知。各种错误接after而至。我正在附上代码。
谢谢
import numpy as np
def propagate(w, b, X, Y):
m = X.shape[1]
X=X.reshape((X.shape[0]*X.shape[1]), 1)
f=X.shape[0]*X.shape[1]
g=int(f-(w.shape[0]*w.shape[1]))
w=np.array(np.append(w, np.zeros(g)))
w=w.reshape(f, 1)
assert(w.shape == (f, 1))
#Sigmoid function
z=(np.dot(w.T, X) + b)
A=1/(1 + np.exp(-z))
# cost calculation
cost = -(1/m)*np.sum(np.multiply(Y, np.log(A)) + np.multiply((1-Y), np.log(1-A)), axis=1)
dw =(1/m)*np.dot((A-Y).T, X)
db = (1/m)*(np.sum(A-Y))
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw, "db": db}
return grads, cost
w, b, X, Y = np.array([[1.], [2.]]), 2, np.array([[1., 2., -1.],[3., 4., -3.2]]),
np.array([[1, 0, 1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))