编辑:将输入压缩在0到1之间,每个数据集每个神经元的输出大约为0.5。
似乎在训练后,无论我输入什么数据集,输出总是1。但是,如果我将学习率从正值改为负值,或者反之,输出总是0。
LN = -0.05def Matrix(numI, numO): matrix = [] for i in range(0, numO): matrix.append([]) for c in range(0, numI): if c > numI: rw = random.random() matrix[i].append(rw) else: rw = random.random() matrix[i].append(rw) return matrixclass Neuralnetwork: def __init__(self, numI, numO): self.Output_layer = Output_layer(numI, numO) self.Feed_forward = self.Output_layer.Feed_forward def train(self, t_inputs, t_targets): for n in range(len(self.Output_layer.Neurons)): self.Output_layer.new_weight(t_inputs, t_targets, n)class Output_layer: def __init__(self, numI, numO): self.Bias = 1 self.Matrix = Matrix(numI, numO) self.Neurons = [] for o in range(numO): self.Neurons.append(Neuron(self.Matrix, o)) def Feed_forward(self, inputs): outputs = [] for i in self.Neurons: outputs.append(i.Output(inputs, self.Bias)) print(outputs) def new_weight(self, t_inputs, t_targets, a): for aw in range(len(self.Neurons[a].Weights)): totalsw = [] totalsb = [] for i in range(len(t_inputs)): pd_c_wrt_output = 2 * (self.Neurons[a].Output(t_inputs[i], self.Bias) - t_targets[i][a]) pd_output_wrt_net = self.Neurons[a].Output(t_inputs[i], self.Bias) * (1 - self.Neurons[a].Output(t_inputs[i], self.Bias)) pd_net_wrt_weight = t_inputs[aw][aw] pd_c_wrt_weight = pd_c_wrt_output * pd_output_wrt_net * pd_net_wrt_weight totalsw.append(pd_c_wrt_weight) pd_c_wrt_output = 2 * (self.Neurons[a].Output(t_inputs[i], self.Bias) - t_targets[i][a]) pd_output_wrt_net = self.Neurons[a].Output(t_inputs[i], self.Bias) * (1 - self.Neurons[a].Output(t_inputs[i], self.Bias)) pd_net_wrt_bias = 1 pd_c_wrt_bias = pd_c_wrt_output * pd_output_wrt_net * pd_net_wrt_bias totalsb.append(pd_c_wrt_bias) pd_weight = sum(totalsw) pd_bias = sum(totalsb) self.Neurons[a].Weights[aw] -= LN * pd_weight self.Bias -= LN * pd_biasclass Neuron: def __init__(self, matrix, index_of_M): self.Weights = matrix[index_of_M] def Weighted_sum(self, weights, inputs, bias): ind = 0 weightedI = [] for i in weights: output = i * inputs[ind] weightedI.append(output) ind += 1 list = sum(weightedI) + bias return list def Sigmoid(self, prediction): e = math.exp(-prediction) prediction = 1 / (1 + e) return round(prediction, 8) def Output(self, inputs, bias): output = self.Sigmoid(self.Weighted_sum(self.Weights, inputs, bias)) return outputnn = Neuralnetwork(2, 2)nn.Feed_forward([10, 20])for i in range(100000): nn.train([[10, 20], [15, 30], [8, 16], [3, 9], [6, 18], [2, 6]], [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])`
在我的第一个神经网络中运行良好。实在找不到错误所在。我尝试了不同的方法,比如将new_weight方法放入Neuron类中,改变输入和输出的数量等。
回答:
尝试将权重值设置为随机。这将有助于打破对称性。同时将偏置设置为1。你有两个输出类别。因此,我建议你使用均方误差作为损失函数,并使用梯度下降优化器。还可以将学习率设置为0.001或0.01左右。
你可以在这里了解更多。