I've been reading Bishop's machine learning book and trying to implement the backpropagation algorithm for a neural network, but it can't find a solution. The code is below; I've split it into the network code and the testing code.
import numpy as np
from collections import namedtuple
import matplotlib.pyplot as plt
import scipy.optimize as opt

# Network code

def tanh(x):
    return np.tanh(x)

def dtanh(x):
    return 1 - np.tan(x)**2

def identity(x):
    return x

def unpack_weights(w, D, M, K):
    """
    len(w) = (D + 1)*M + (M + 1)*K, where
    D = number of inputs, excluding bias
    M = number of hidden units, excluding bias
    K = number of output units
    """
    UnpackedWeights = namedtuple("UpackedWeights", ["wHidden", "wOutput"])
    cutoff = M*(D + 1)
    wHidden = w[:cutoff].reshape(M, D + 1)
    wOutput = w[cutoff:].reshape(K, M + 1)
    return UnpackedWeights(wHidden=wHidden, wOutput=wOutput)

def compute_output(x, weights, fcnHidden=tanh, fcnOutput=identity):
    NetworkResults = namedtuple("NetworkResults", ["hiddenAct", "hiddenOut", "outputAct", "outputOut"])
    xBias = np.vstack((1., x))
    hiddenAct = weights.wHidden.dot(xBias)
    hiddenOut = np.vstack((1., fcnHidden(hiddenAct)))
    outputAct = weights.wOutput.dot(hiddenOut)
    outputOut = fcnOutput(outputAct)
    return NetworkResults(hiddenAct=hiddenAct, hiddenOut=hiddenOut,
                          outputAct=outputAct, outputOut=outputOut)

def backprop(t, x, M, fcnHidden=tanh, fcnOutput=identity, dFcnHidden=dtanh):
    maxIter = 10000
    learningRate = 0.2
    N, K = t.shape
    N, D = x.shape
    nParams = (D + 1)*M + (M + 1)*K
    w0 = np.random.uniform(-0.1, 0.1, nParams)

    for _ in xrange(maxIter):
        sse = 0.
        for n in xrange(N):
            weights = unpack_weights(w0, D, M, K)

            # Compute net output
            netResults = compute_output(x=x[n].reshape(-1, 1), weights=weights,
                                        fcnHidden=fcnHidden, fcnOutput=fcnOutput)

            # Compute derivatives of error function wrt wOutput
            outputDelta = netResults.outputOut - t[n].reshape(K, 1)
            outputDerivs = outputDelta.dot(netResults.hiddenOut.T)

            # Compute derivatives of error function wrt wHidden
            hiddenDelta = dFcnHidden(netResults.hiddenAct)*(weights.wOutput[:, 1:].T.dot(outputDelta))
            xBias = np.vstack((1., x[n].reshape(-1, 1)))
            hiddenDerivs = hiddenDelta.dot(xBias.T)

            delErr = np.hstack((np.ravel(hiddenDerivs), np.ravel(outputDerivs)))
            w1 = w0 - learningRate*delErr
            w0 = w1
            sse += np.sum(outputDelta**2)
    return w0

# Testing code

def generate_test_data():
    D, M, K, N = 1, 3, 1, 25
    x = np.sort(np.random.uniform(-1., 1., (N, D)), axis=0)
    t = 1.0 + x**2
    return D, M, K, N, x, t

def test_backprop():
    D, M, K, N, x, t = generate_test_data()
    return backprop(t, x, M)

def scipy_solution(t, x, D, M, K, N, method="BFGS"):
    def obj_fn(w):
        weights = unpack_weights(w, D, M, K)
        err = 0
        for n in xrange(N):
            netOut = compute_output(x[n], weights=weights)
            err += (netOut.outputOut[0, 0] - t[n])**2
        return err
    w0 = np.random.uniform(-1, 1, (D + 1)*M + (M + 1)*K)
    return opt.minimize(obj_fn, w0, method=method)
When I use the optimization module in scipy (i.e., the scipy_solution() function) to find the network weights, the sum of squared errors gets very close to zero and the network's output looks like the data I generated. When I use my backpropagation function, the sum of squared errors gets stuck between 2.0 and 3.0, and the network output looks almost linear. Furthermore, when I feed the weights from the scipy solution into my backpropagation function as starting values, it still fails to find the right solution.
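One way to narrow down a discrepancy like this is to check the analytic gradient against a numerical one. Below is a minimal sketch of such a check, assuming the network code above is in scope; the sse_half and analytic_grad helpers are written only for this check and are not part of the code above.

import numpy as np
import scipy.optimize as opt

def sse_half(w, t, x, D, M, K):
    # 0.5 * sum-of-squares error: the objective whose gradient the backprop formulas compute
    weights = unpack_weights(w, D, M, K)
    err = 0.0
    for n in range(x.shape[0]):
        out = compute_output(x[n].reshape(-1, 1), weights).outputOut
        err += 0.5*np.sum((out - t[n].reshape(K, 1))**2)
    return err

def analytic_grad(w, t, x, D, M, K):
    # Gradient assembled with the same formulas as the inner loop of backprop()
    weights = unpack_weights(w, D, M, K)
    grad = np.zeros_like(w)
    for n in range(x.shape[0]):
        res = compute_output(x[n].reshape(-1, 1), weights)
        outputDelta = res.outputOut - t[n].reshape(K, 1)
        outputDerivs = outputDelta.dot(res.hiddenOut.T)
        hiddenDelta = dtanh(res.hiddenAct)*(weights.wOutput[:, 1:].T.dot(outputDelta))
        xBias = np.vstack((1., x[n].reshape(-1, 1)))
        hiddenDerivs = hiddenDelta.dot(xBias.T)
        grad += np.hstack((np.ravel(hiddenDerivs), np.ravel(outputDerivs)))
    return grad

D, M, K, N, x, t = generate_test_data()
w = np.random.uniform(-0.1, 0.1, (D + 1)*M + (M + 1)*K)
numeric = opt.approx_fprime(w, sse_half, 1e-6, t, x, D, M, K)
analytic = analytic_grad(w, t, x, D, M, K)
# Expect a noticeable discrepancy here while dtanh uses np.tan
print(np.max(np.abs(numeric - analytic)))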
I've been stuck on this for a few days, so I'd really appreciate any advice. Thanks.
Answer:
def dtanh(x): return 1 - np.tan(x)**2
should be
def dtanh(x): return 1 - np.tanh(x)**2
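The derivative of tanh(x) is 1 - tanh(x)**2, whereas 1 - tan(x)**2 is unrelated, so every hidden-layer gradient was scaled by the wrong factor. A quick finite-difference check of dtanh alone makes the difference visible (a minimal sketch; the grid and step size are arbitrary):

import numpy as np

def dtanh_wrong(x):
    return 1 - np.tan(x)**2

def dtanh_right(x):
    return 1 - np.tanh(x)**2

# Central-difference derivative of tanh for comparison
x = np.linspace(-2, 2, 5)
h = 1e-6
numeric = (np.tanh(x + h) - np.tanh(x - h)) / (2*h)

print(np.max(np.abs(numeric - dtanh_right(x))))  # close to zero
print(np.max(np.abs(numeric - dtanh_wrong(x))))  # clearly not zero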