Why does dropout lower my model's accuracy?

If I train without dropout, the code below reaches about 95% accuracy. With dropout enabled, accuracy drops to 11%.

The network is built with NumPy. I use a neural-network class that holds several layer objects. The last layer uses a sigmoid activation; the remaining layers use ReLU. Here is the code:

import numpy as np
import idx2numpy as idx
import matplotlib.pyplot as plt

np.random.seed(0)

img = r"C:\Users\Aaditya\OneDrive\Documents\ML\train-image"
lbl = r'C:\Users\Aaditya\OneDrive\Documents\ML\train-labels-idx1-ubyte'
t_lbl = r'C:\Users\Aaditya\OneDrive\Documents\ML\t10k-labels.idx1-ubyte'
t_img = r'C:\Users\Aaditya\OneDrive\Documents\ML\t10k-images.idx3-ubyte'

image = idx.convert_from_file(img)
iput = np.reshape(image, (60000, 784)) / 255
otput = np.eye(10)[idx.convert_from_file(lbl)]
test_image = idx.convert_from_file(t_img)
test_input = np.reshape(test_image, (10000, 784)) / 255
test_output = idx.convert_from_file(t_lbl)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def tanh(x):
    return np.tanh(x)

def relu(x):
    return np.where(x > 0, x, 0)

def reluprime(x):
    return (x > 0).astype(x.dtype)

def sigmoid_prime(x):
    return sigmoid(x) * (1 - sigmoid(x))

def tanh_prime(x):
    return 1 - tanh(x)**2

class Layer_Dense:
    def __init__(self, n_inputs, n_neurons, activation="sigmoid", keep_prob=1):
        self.n_neurons = n_neurons
        if activation == "sigmoid":
            self.activation = sigmoid
            self.a_prime = sigmoid_prime
        elif activation == "tanh":
            self.activation = tanh
            self.a_prime = tanh_prime
        else:
            self.activation = relu
            self.a_prime = reluprime
        self.keep_prob = keep_prob
        self.weights = np.random.randn(n_inputs, n_neurons) * 0.1
        self.biases = np.random.randn(1, n_neurons) * 0.1

    def cal_output(self, input, train=False):
        output = np.array(np.dot(input, self.weights) + self.biases, dtype="float128")
        if train == True:
            D = np.random.randn(1, self.n_neurons)
            self.D = (D > self.keep_prob).astype(int)
            output = output * self.D
        return output

    def forward(self, input):
        return self.activation(self.cal_output(input))

    def back_propagate(self, delta, ap, lr=1, keep_prob=1):
        dz = delta
        self.weights -= 0.001 * lr * (np.dot(ap.T, dz) * self.D)
        self.biases -= 0.001 * lr * (np.sum(dz, axis=0, keepdims=True) * self.D)
        return np.multiply(np.dot(dz, self.weights.T), (1 - ap**2))

class Neural_Network:
    def __init__(self, input, output):
        self.input = input
        self.output = output
        self.layers = []

    def Add_layer(self, n_neurons, activation="relu", keepprob=1):
        if len(self.layers) != 0:
            newL = Layer_Dense(self.layers[-1].n_neurons, n_neurons, activation, keep_prob=keepprob)
        else:
            newL = Layer_Dense(self.input.shape[1], n_neurons, activation, keep_prob=keepprob)
        self.layers.append(newL)

    def predict(self, input):
        output = input
        for layer in self.layers:
            output = layer.forward(output)
        return output

    def cal_zs(self, input):
        self.activations = []
        self.activations.append(input)
        output = input
        for layer in self.layers:
            z = layer.cal_output(output, train=True)
            activation = layer.activation(z)
            self.activations.append(activation)
            output = activation

    def train(self, input=None, output=None, lr=10):
        if input is None:
            input = self.input
            output = self.output
        if len(input) > 1000:
            indices = np.arange(input.shape[0])
            np.random.shuffle(indices)
            input = input[indices]
            output = output[indices]
            for _ in range(100):
                self.lr = lr
                for i in range(int(len(input) / 100)):
                    self.lr *= 0.99
                    self.train(input[i*100:i*100+100], output[i*100:i*100+100], self.lr)
            return
        self.cal_zs(input)
        for i in range(1, len(self.layers) + 1):
            if i == 1:
                delta = self.activations[-1] - output
                self.delta = self.layers[-1].back_propagate(delta, self.activations[-2], lr)
            else:
                self.delta = self.layers[-i].back_propagate(self.delta, self.activations[-i-1], lr)

    def MSE(self):
        predict = self.predict(self.input)
        error = (predict - self.output)**2
        mse = sum(sum(error))
        print(mse)

    def Logloss(self):
        predict = self.predict(self.input)
        error = np.multiply(self.output, np.log(predict)) + np.multiply(1 - self.output, np.log(1 - predict))
        logloss = -1 * sum(sum(error))
        print(logloss)

    def accuracy(self):
        predict = self.predict(test_input)
        prediction = np.argmax(predict, axis=1)
        correct = np.mean(prediction == test_output)
        print(correct * 100)

    # def train(self,input,output):

model = Neural_Network(iput, otput)
# model.Add_layer(4)
model.Add_layer(64)
model.Add_layer(16)
model.Add_layer(10, "sigmoid")
lrc = 6
for _ in range(10):
    model.accuracy()
    model.Logloss()
    model.train(lr=lrc)
model.accuracy()

I am using the MNIST database, linked here.


Answer:

One likely reason is that you are dropping far too many neurons. In the code below,

D = np.random.randn(1, self.n_neurons)
self.D = (D > self.keep_prob).astype(int)

the first line draws D from a standard normal distribution, so most of its values are below 1 (and many below 0). Since self.keep_prob is 1, the comparison D > self.keep_prob is False for most entries, and the majority of neurons get dropped. That is consistent with the 11% accuracy you see: on ten digit classes it is essentially random guessing, which is what you would expect when the layers are mostly silenced.
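You can verify the drop rate directly. For a standard normal, only about 16% of draws exceed 1, so with keep_prob = 1 roughly 84% of the neurons are zeroed on every training pass:

import numpy as np

np.random.seed(0)
D = np.random.randn(1, 100000)
mask = (D > 1).astype(int)    # the mask the question's code builds
print(mask.mean())            # ~0.159: only ~16% of neurons survive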

Try making the following change:

self.D = (D < self.keep_prob).astype(int)
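Note that even with the corrected comparison, a mask drawn from randn only loosely tracks the intended keep probability. The standard "inverted dropout" pattern instead draws the mask from a uniform distribution, so each neuron survives with probability exactly keep_prob, and rescales by 1/keep_prob so the expected activation matches the mask-free forward pass used at test time. A minimal sketch (the function name and signature are illustrative, not from the original code):

import numpy as np

def inverted_dropout(a, keep_prob):
    # Uniform draws in [0, 1): P(u < keep_prob) equals keep_prob,
    # so each neuron is kept with exactly that probability.
    mask = (np.random.rand(*a.shape) < keep_prob).astype(a.dtype)
    # Dividing by keep_prob keeps E[output] equal to the no-dropout
    # activations, so prediction needs no extra scaling.
    return a * mask / keep_prob, mask

With keep_prob = 1 this becomes a no-op, matching the intuition that dropout, when disabled, should reproduce your 95% baseline.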
