I'm currently working through Stanford's CS231n course. While implementing the softmax_loss function, I found that writing it in a fully vectorized form is not easy, especially the dW term. Below is my code. Could someone optimize it? It would be much appreciated.
import numpy as np

def softmax_loss_vectorized(W, X, y, reg):
    loss = 0.0
    dW = np.zeros_like(W)
    num_train = X.shape[0]
    num_classes = W.shape[1]

    scores = X.dot(W)
    # Shift each row by its max for numerical stability.
    scores -= np.max(scores, axis=1)[:, np.newaxis]
    exp_scores = np.exp(scores)
    sum_exp_scores = np.sum(exp_scores, axis=1)
    correct_class_score = scores[range(num_train), y]

    loss = np.sum(np.log(sum_exp_scores)) - np.sum(correct_class_score)

    # Normalize rows to get softmax probabilities.
    exp_scores = exp_scores / sum_exp_scores[:, np.newaxis]

    # Maybe this loop can be rewritten as matrix operations?
    for i in range(num_train):
        dW += exp_scores[i] * X[i][:, np.newaxis]
        dW[:, y[i]] -= X[i]

    loss /= num_train
    loss += 0.5 * reg * np.sum(W * W)
    dW /= num_train
    dW += reg * W

    return loss, dW
Answer 0 (score: 0)
Below is a vectorized implementation. But I would suggest you spend a bit more time and try to arrive at the solution yourself. The idea is to construct a matrix holding all the softmax values, and to subtract 1 from the entries at the correct classes.
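To make that idea concrete, here is a minimal sketch on made-up toy data (the 2x3 scores matrix and labels are purely illustrative, not part of the answer): the gradient of the loss with respect to the scores is just the softmax matrix with 1 subtracted at each row's correct class.

import numpy as np

# Toy data (illustrative only): 2 samples, 3 classes.
scores = np.array([[1.0, 2.0, 0.5],
                   [0.2, 0.1, 3.0]])
y = np.array([1, 2])          # correct class for each sample

exp_scores = np.exp(scores - scores.max(axis=1, keepdims=True))
softmax = exp_scores / exp_scores.sum(axis=1, keepdims=True)

# dL/dscores: the softmax probabilities, minus 1 at the correct classes.
dscores = softmax.copy()
dscores[np.arange(len(y)), y] -= 1
print(dscores)                # each row sums to 0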
def softmax_loss_vectorized(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)
    scores -= np.max(scores)  # shift for numerical stability
    correct_scores = scores[np.arange(num_train), y]

    # Compute the softmax of the correct scores in bulk, and average their negative logs.
    exponents = np.exp(scores)
    sums_per_row = np.sum(exponents, axis=1)
    softmax_array = np.exp(correct_scores) / sums_per_row
    information_array = -np.log(softmax_array)
    loss = np.mean(information_array)

    # Compute the softmax over the whole scores matrix, which gives the
    # coefficients of the rows of X. Their linear combination is,
    # algebraically, the dot product with X transposed.
    all_softmax_matrix = (exponents.T / sums_per_row).T
    grad_coeff = np.zeros_like(scores)
    grad_coeff[np.arange(num_train), y] = -1
    grad_coeff += all_softmax_matrix
    dW = np.dot(X.T, grad_coeff) / num_train

    # Regularization
    loss += 0.5 * reg * np.sum(W * W)
    dW += reg * W

    return loss, dW
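As a rough sanity check (not part of the original answer), either version can be compared against a numerical gradient on small random data; the shapes and seed below are arbitrary:

import numpy as np

np.random.seed(0)
D, C, N = 5, 4, 10             # arbitrary small dimensions
W = 0.01 * np.random.randn(D, C)
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
reg = 0.1

loss, dW = softmax_loss_vectorized(W, X, y, reg)

# Numerical gradient check on a few random entries of W.
h = 1e-5
for _ in range(5):
    i, j = np.random.randint(D), np.random.randint(C)
    W[i, j] += h
    loss_plus, _ = softmax_loss_vectorized(W, X, y, reg)
    W[i, j] -= 2 * h
    loss_minus, _ = softmax_loss_vectorized(W, X, y, reg)
    W[i, j] += h
    numeric = (loss_plus - loss_minus) / (2 * h)
    print(numeric, dW[i, j])   # the two values should nearly match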