I am implementing Andrew Ng's machine learning course in Python, and for Exercise 4 in Week 5 I am referencing the code below. What I don't understand is why np.trace() is needed in the final output. I find it hard to visualize what is happening to the matrices.
    import numpy as np
    from scipy.optimize import minimize
    import scipy.io
    import matplotlib.pyplot as plt

    def sigmoid(z):
        # Logistic sigmoid; defined in an earlier part of the exercise.
        return 1 / (1 + np.exp(-z))

    data_dict = scipy.io.loadmat('ex4_orig_octave/ex4data1.mat')
    X = data_dict['X']
    y = data_dict['y'].ravel()

    M = X.shape[0]
    N = X.shape[1]
    L = 26  # = number of nodes in the hidden layer (including bias node)
    K = len(np.unique(y))

    X = np.hstack((np.ones((M, 1)), X))

    # One-hot encode the labels (digits 1..10 map to columns 0..9).
    Y = np.zeros((M, K), dtype='uint8')
    for i, row in enumerate(Y):
        Y[i, y[i] - 1] = 1

    weights_dict = scipy.io.loadmat('ex4_orig_octave/ex4weights.mat')
    theta_1 = weights_dict['Theta1']
    theta_2 = weights_dict['Theta2']
    nn_params_saved = np.concatenate((theta_1.flatten(), theta_2.flatten()))

    def nn_cost_function(nn_params, X, Y, M, N, L, K):
        """Python version of nnCostFunction.m after completing 'Part 1'."""
        # Unroll the parameter vector.
        theta_1 = nn_params[:(L - 1) * (N + 1)].reshape(L - 1, N + 1)
        theta_2 = nn_params[(L - 1) * (N + 1):].reshape(K, L)

        # Calculate activations in the second layer.
        a_2 = sigmoid(theta_1.dot(X.T))

        # Add the second layer's bias node.
        a_2_p = np.vstack((np.ones(M), a_2))

        # Calculate the activation of the third layer.
        a_3 = sigmoid(theta_2.dot(a_2_p))

        # Calculate the cost function.
        cost = 1 / M * np.trace(- Y.dot(np.log(a_3)) - (1 - Y).dot(np.log(1 - a_3)))

        return cost

    cost_saved = nn_cost_function(nn_params_saved, X, Y, M, N, L, K)

    print('Cost at parameters (loaded from ex4weights): %.6f' % cost_saved)
    print('(this value should be about 0.287629)')
Answer:
The operation 1/M * np.trace() is computing the average cost over a batch of size M:
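To see what the trace is doing: Y.dot(np.log(a_3)) is an (M, M) matrix whose diagonal entry i is the inner product of row i of Y (the one-hot label of example i) with column i of np.log(a_3) (the log-activations for example i). The off-diagonal entries pair labels and activations from different examples, so taking the trace keeps exactly the per-example costs and sums them. A minimal check on toy shapes (the small sizes here are illustrative, not from the original post):

    import numpy as np

    M, K = 4, 3                                   # toy batch of 4 examples, 3 classes
    Y = np.eye(K)[np.random.randint(K, size=M)]   # one-hot labels, shape (M, K)
    a_3 = np.random.uniform(0.01, 0.99, (K, M))   # fake activations, shape (K, M)

    # Diagonal entry i of Y @ log(a_3) is example i's cross-entropy term,
    # so the trace equals the element-wise sum over matching entries.
    trace_form = np.trace(Y.dot(np.log(a_3)))
    elementwise_form = np.sum(Y * np.log(a_3.T))

    assert np.isclose(trace_form, elementwise_form)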
A slightly less readable but significantly faster equivalent is np.sum(np.sum(np.multiply(Y, np.log(a_3.T)), axis=1), axis=0), given Y.shape == (M, K) and a_3.shape == (K, M):
    import numpy as np
    import timeit

    # Fresh random matrices on every call, matching the shapes above.
    Y = lambda: np.random.uniform(size=(5000, 10))   # (M, K)
    a3 = lambda: np.random.uniform(size=(10, 5000))  # (K, M)

    timeit.timeit('import numpy as np; np.trace(Y().dot(a3()))',
                  number=10, globals=globals())
    # 0.5633535870001651

    timeit.timeit('import numpy as np; np.sum(np.sum(np.multiply(Y(), a3().T), axis=1), axis=0)',
                  number=10, globals=globals())
    # 0.013223066000136896
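The gap exists because the trace form materializes the full (M, M) product, roughly O(M²·K) work, only to read back its M diagonal entries, while the element-wise form does O(M·K) work. As a side note not from the original answer, np.einsum can express the diagonal-only contraction directly:

    import numpy as np

    Y = np.random.uniform(size=(5000, 10))   # (M, K)
    a3 = np.random.uniform(size=(10, 5000))  # (K, M)

    # 'ik,ki->' contracts row i of Y with column i of a3 and sums over i,
    # i.e. it evaluates trace(Y @ a3) without forming the (M, M) product.
    total = np.einsum('ik,ki->', Y, a3)

    assert np.isclose(total, np.trace(Y.dot(a3)))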