How to plot gradient descent with plotly

I am trying to reproduce something similar to the code below, but when I use the data from this link, https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv, I get an error. I think it is because of the shape of the data, but I am not sure exactly how to change it.

Any help with this would be greatly appreciated.

Here is my code:

from IPython.core.display import HTML
import plotly
import plotly.graph_objects as go
import noise
import numpy as np
import matplotlib
from mpl_toolkits.mplot3d import axes3d
%matplotlib inline
import pandas as pd

data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')
z = data

from numpy.lib.stride_tricks import as_strided

def sliding_window(arr, window_size):
    """Construct a sliding window view of the array"""
    arr = np.asarray(arr)
    window_size = int(window_size)
    if arr.ndim != 2:
        raise ValueError("need 2-D input")
    if not (window_size > 0):
        raise ValueError("need a positive window size")
    shape = (arr.shape[0] - window_size + 1,
             arr.shape[1] - window_size + 1,
             window_size, window_size)
    if shape[0] <= 0:
        shape = (1, shape[1], arr.shape[0], shape[3])
    if shape[1] <= 0:
        shape = (shape[0], 1, shape[2], arr.shape[1])
    strides = (arr.shape[1]*arr.itemsize, arr.itemsize,
               arr.shape[1]*arr.itemsize, arr.itemsize)
    return as_strided(arr, shape=shape, strides=strides)

def cell_neighbours(arr, i, j, d):
    """Return d-th neighbours of cell (i, j)"""
    w = sliding_window(arr, 2*d+1)
    ix = np.clip(i - d, 0, w.shape[0]-1)
    jx = np.clip(j - d, 0, w.shape[1]-1)
    i0 = max(0, i - d - ix)
    j0 = max(0, j - d - jx)
    i1 = w.shape[2] - max(0, d - i + ix)
    j1 = w.shape[3] - max(0, d - j + jx)
    return w[ix, jx][i0:i1, j0:j1].ravel()

from dataclasses import dataclass

@dataclass
class descent_step:
    """Class for storing each step taken in gradient descent"""
    value: float
    x_index: float
    y_index: float

def gradient_descent_3d(array, x_start, y_start, steps=50, step_size=1, plot=False):
    # Initial point to start gradient descent at
    step = descent_step(array[y_start][x_start], x_start, y_start)

    # Store each step taken in gradient descent in a list
    step_history = []
    step_history.append(step)

    # Plot 2D representation of array with starting point as a red marker
    if plot:
        matplotlib.pyplot.imshow(array, origin='lower', cmap='terrain')
        matplotlib.pyplot.plot(x_start, y_start, 'ro')
    current_x = x_start
    current_y = y_start

    # Loop through specified number of steps of gradient descent to take
    for i in range(steps):
        prev_x = current_x
        prev_y = current_y

        # Extract array of neighbouring cells around current step location with size nominated
        neighbours = cell_neighbours(array, current_y, current_x, step_size)

        # Locate minimum in array (steepest slope from current point)
        next_step = neighbours.min()
        indices = np.where(array == next_step)

        # Update current point to now be the next point after stepping
        current_x, current_y = (indices[1][0], indices[0][0])
        step = descent_step(array[current_y][current_x], current_x, current_y)

        step_history.append(step)

        # Plot each step taken as a black line to the current point nominated by a red marker
        if plot:
            matplotlib.pyplot.plot([prev_x, current_x], [prev_y, current_y], 'k-')
            matplotlib.pyplot.plot(current_x, current_y, 'ro')

        # If step is to the same location as previously, this infers convergence and end loop
        if prev_y == current_y and prev_x == current_x:
            print(f"Converged in {i} steps")
            break
    return next_step, step_history

np.random.seed(42)
global_minimum = z.min()
indices = np.where(z == global_minimum)
print(f"Target: {global_minimum} @ {indices}")

step_size = 0
found_minimum = 99999

# Random starting point
start_x = np.random.randint(0, 50)
start_y = np.random.randint(0, 50)

# Increase step size until convergence on global minimum
while found_minimum != global_minimum:
    step_size += 1
    found_minimum, steps = gradient_descent_3d(z, start_x, start_y, step_size=step_size, plot=False)
print(f"Optimal step size {step_size}")
found_minimum, steps = gradient_descent_3d(z, start_x, start_y, step_size=step_size, plot=True)
print(f"Steps: {steps}")

def multiDimenDist(point1, point2):
    # find the difference between the two points, it's really the same as below
    deltaVals = [point2[dimension] - point1[dimension] for dimension in range(len(point1))]
    runningSquared = 0
    # because the Pythagorean theorem works for any dimension we can just use that
    for coOrd in deltaVals:
        runningSquared += coOrd**2
    return runningSquared**(1/2)

def findVec(point1, point2, unitSphere=False):
    # setting unitSphere to True scales the vector down to a sphere with radius one, instead of its original length
    finalVector = [0 for coOrd in point1]
    for dimension, coOrd in enumerate(point1):
        # finding total difference for that co-ordinate (x, y, z, ...)
        deltaCoOrd = point2[dimension] - coOrd
        # adding total difference
        finalVector[dimension] = deltaCoOrd
    if unitSphere:
        totalDist = multiDimenDist(point1, point2)
        unitVector = []
        for dimen in finalVector:
            unitVector.append(dimen / totalDist)
        return unitVector
    else:
        return finalVector

def generate_3d_plot(step_history):
    # Initialise empty lists for markers
    step_markers_x = []
    step_markers_y = []
    step_markers_z = []
    step_markers_u = []
    step_markers_v = []
    step_markers_w = []

    for index, step in enumerate(step_history):
        step_markers_x.append(step.x_index)
        step_markers_y.append(step.y_index)
        step_markers_z.append(step.value)

        # If we haven't reached the final step, calculate the vector between the current step and the next step
        if index < len(steps) - 1:
            vec1 = [step.x_index, step.y_index, step.value]
            vec2 = [steps[index+1].x_index, steps[index+1].y_index, steps[index+1].value]
            result_vector = findVec(vec1, vec2)
            step_markers_u.append(result_vector[0])
            step_markers_v.append(result_vector[1])
            step_markers_w.append(result_vector[2])
        else:
            step_markers_u.append(0.1)
            step_markers_v.append(0.1)
            step_markers_w.append(0.1)

    # Cones at each marker show the direction of the step, Scatter3d shows the red line between points, Surface shows the terrain
    fig = go.Figure(data=[
        go.Cone(
            x=step_markers_x,
            y=step_markers_y,
            z=step_markers_z,
            u=step_markers_u,
            v=step_markers_v,
            w=step_markers_w,
            sizemode="absolute",
            sizeref=2,
            anchor='tail'),
        go.Scatter3d(
            x=step_markers_x,
            y=step_markers_y,
            z=step_markers_z,
            mode='lines',
            line=dict(
                color='red',
                width=2
            )),
        go.Surface(colorscale=terrain, z=world, opacity=0.5)])

    # Z axis is limited to the extent of the terrain array
    fig.update_layout(
        title='Gradient Descent Steps',
        scene=dict(zaxis=dict(range=[world.min(), world.max()]))
    )
    return fig

# Generate 3D plot from previous random starting location
fig = generate_3d_plot(steps)
HTML(plotly.offline.plot(fig, filename='random_starting_point_3d_gradient_descent.html', include_plotlyjs='cdn'))

Answer:

The error occurs because found_minimum is an int while global_minimum is a pandas Series. I believe the tutorial you are following assumes the data has been loaded as a NumPy array, but it never states this explicitly.
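
You can see the mismatch directly; here is a minimal sketch against the same CSV (it uses nothing beyond what your code already loads):

import pandas as pd

data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')

print(type(data.min()))             # pandas Series of per-column minima
print(type(data.to_numpy().min()))  # a single NumPy scalar
# Comparing a plain number with a Series gives a boolean Series rather than a single
# True/False, which is why the tutorial's while-loop condition breaks on a DataFrame.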

So z = data.to_numpy() solves one problem but exposes another: the tutorial's dataset is 50×50 while yours is 25×25. Simply changing the bounds of the random starting point looks like a tempting fix, but in practice it does not work well; the dataset is too small for this gradient-descent implementation to converge properly.
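
A quick shape check makes the second problem concrete (a minimal sketch, assuming data is the DataFrame loaded above):

import numpy as np

z = data.to_numpy()
print(z.shape)                      # (25, 25) for this CSV, while the tutorial assumes (50, 50)
start_x = np.random.randint(0, 50)  # the tutorial's random start can therefore index outside a 25x25 array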

To get around this, I padded out your dataset to manufacture a 50×50 one:

data_arr = data.to_numpy()
double_arr = np.append(data_arr, 1.5*data_arr + 50, axis=0)
quad_arr = np.append(double_arr, 1.5*double_arr + 50, axis=1)
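
As a sanity check, the two np.append calls double the array along each axis (assuming data_arr starts out as the 25×25 array above):

print(data_arr.shape)    # (25, 25)
print(double_arr.shape)  # (50, 25) - a scaled, offset copy stacked below the original
print(quad_arr.shape)    # (50, 50) - another scaled, offset copy stacked to the right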

Using this quad_arr throughout the code, and updating the plotly colorscale to go.Surface(colorscale='Earth'), the result looks like this:

[figure: gradient_descent_terrain]

The full, reproducible code is below:

from IPython.core.display import HTML
import plotly
import plotly.graph_objects as go
import noise
import numpy as np
import matplotlib
from mpl_toolkits.mplot3d import axes3d
%matplotlib inline
import pandas as pd

data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')
data_arr = data.to_numpy()
double_arr = np.append(data_arr, 1.5*data_arr + 50, axis=0)
quad_arr = np.append(double_arr, 1.5*double_arr + 50, axis=1)
z = quad_arr

matplotlib.pyplot.imshow(z, origin='lower', cmap='terrain')

# Find maximum value index in numpy array
indices = np.where(z == z.max())
max_z_x_location, max_z_y_location = (indices[1][0], indices[0][0])
matplotlib.pyplot.plot(max_z_x_location, max_z_y_location, 'ro', markersize=15)

# Find minimum value index in numpy array
indices = np.where(z == z.min())
min_z_x_location, min_z_y_location = (indices[1][0], indices[0][0])
matplotlib.pyplot.plot(min_z_x_location, min_z_y_location, 'yo', markersize=15)

from numpy.lib.stride_tricks import as_strided

def sliding_window(arr, window_size):
    """Construct a sliding window view of the array"""
    arr = np.asarray(arr)
    window_size = int(window_size)
    if arr.ndim != 2:
        raise ValueError("need 2-D input")
    if not (window_size > 0):
        raise ValueError("need a positive window size")
    shape = (arr.shape[0] - window_size + 1,
             arr.shape[1] - window_size + 1,
             window_size, window_size)
    if shape[0] <= 0:
        shape = (1, shape[1], arr.shape[0], shape[3])
    if shape[1] <= 0:
        shape = (shape[0], 1, shape[2], arr.shape[1])
    strides = (arr.shape[1]*arr.itemsize, arr.itemsize,
               arr.shape[1]*arr.itemsize, arr.itemsize)
    return as_strided(arr, shape=shape, strides=strides)

def cell_neighbours(arr, i, j, d):
    """Return d-th neighbours of cell (i, j)"""
    w = sliding_window(arr, 2*d+1)
    ix = np.clip(i - d, 0, w.shape[0]-1)
    jx = np.clip(j - d, 0, w.shape[1]-1)
    i0 = max(0, i - d - ix)
    j0 = max(0, j - d - jx)
    i1 = w.shape[2] - max(0, d - i + ix)
    j1 = w.shape[3] - max(0, d - j + jx)
    return w[ix, jx][i0:i1, j0:j1].ravel()

from dataclasses import dataclass

@dataclass
class descent_step:
    """Class for storing each step taken in gradient descent"""
    value: float
    x_index: float
    y_index: float

def gradient_descent_3d(array, x_start, y_start, steps=50, step_size=1, plot=False):
    # Initial point to start gradient descent at
    step = descent_step(array[y_start][x_start], x_start, y_start)

    # Store each step taken in gradient descent in a list
    step_history = []
    step_history.append(step)

    # Plot 2D representation of array with starting point as a red marker
    if plot:
        matplotlib.pyplot.imshow(array, origin='lower', cmap='terrain')
        matplotlib.pyplot.plot(x_start, y_start, 'ro')
    current_x = x_start
    current_y = y_start

    # Loop through specified number of steps of gradient descent to take
    for i in range(steps):
        prev_x = current_x
        prev_y = current_y

        # Extract array of neighbouring cells around current step location with size nominated
        neighbours = cell_neighbours(array, current_y, current_x, step_size)

        # Locate minimum in array (steepest slope from current point)
        next_step = neighbours.min()
        indices = np.where(array == next_step)

        # Update current point to now be the next point after stepping
        current_x, current_y = (indices[1][0], indices[0][0])
        step = descent_step(array[current_y][current_x], current_x, current_y)

        step_history.append(step)

        # Plot each step taken as a black line to the current point nominated by a red marker
        if plot:
            matplotlib.pyplot.plot([prev_x, current_x], [prev_y, current_y], 'k-')
            matplotlib.pyplot.plot(current_x, current_y, 'ro')

        # If step is to the same location as previously, this infers convergence and end loop
        if prev_y == current_y and prev_x == current_x:
            print(f"Converged in {i} steps")
            break
    return next_step, step_history

np.random.seed(42)
global_minimum = z.min()
indices = np.where(z == global_minimum)
print(f"Target: {global_minimum} @ {indices}")

step_size = 0
found_minimum = 99999

# Random starting point
start_x = np.random.randint(0, 50)
start_y = np.random.randint(0, 50)

# Increase step size until convergence on global minimum
print('==========================')
print(found_minimum)
print(global_minimum)
print('==========================')
while found_minimum != global_minimum:
    step_size += 1
    try:
        found_minimum, steps = gradient_descent_3d(z, start_x, start_y, step_size=step_size, plot=True)
    except ValueError:
        pass
print(f"Optimal step size {step_size}")
found_minimum, steps = gradient_descent_3d(z, start_x, start_y, step_size=step_size, plot=True)
print(f"Steps: {steps}")

def multiDimenDist(point1, point2):
    # find the difference between the two points, it's really the same as below
    deltaVals = [point2[dimension] - point1[dimension] for dimension in range(len(point1))]
    runningSquared = 0
    # because the Pythagorean theorem works for any dimension we can just use that
    for coOrd in deltaVals:
        runningSquared += coOrd**2
    return runningSquared**(1/2)

def findVec(point1, point2, unitSphere=False):
    # setting unitSphere to True scales the vector down to a sphere with radius one, instead of its original length
    finalVector = [0 for coOrd in point1]
    for dimension, coOrd in enumerate(point1):
        # finding total difference for that co-ordinate (x, y, z, ...)
        deltaCoOrd = point2[dimension] - coOrd
        # adding total difference
        finalVector[dimension] = deltaCoOrd
    if unitSphere:
        totalDist = multiDimenDist(point1, point2)
        unitVector = []
        for dimen in finalVector:
            unitVector.append(dimen / totalDist)
        return unitVector
    else:
        return finalVector

def generate_3d_plot(step_history):
    # Initialise empty lists for markers
    step_markers_x = []
    step_markers_y = []
    step_markers_z = []
    step_markers_u = []
    step_markers_v = []
    step_markers_w = []

    for index, step in enumerate(step_history):
        step_markers_x.append(step.x_index)
        step_markers_y.append(step.y_index)
        step_markers_z.append(step.value)

        # If we haven't reached the final step, calculate the vector between the current step and the next step
        if index < len(steps) - 1:
            vec1 = [step.x_index, step.y_index, step.value]
            vec2 = [steps[index+1].x_index, steps[index+1].y_index, steps[index+1].value]
            result_vector = findVec(vec1, vec2)
            step_markers_u.append(result_vector[0])
            step_markers_v.append(result_vector[1])
            step_markers_w.append(result_vector[2])
        else:
            step_markers_u.append(0.1)
            step_markers_v.append(0.1)
            step_markers_w.append(0.1)

    # Cones at each marker show the direction of the step, Scatter3d shows the red line between points, Surface shows the terrain
    fig = go.Figure(data=[
        go.Cone(
            x=step_markers_x,
            y=step_markers_y,
            z=step_markers_z,
            u=step_markers_u,
            v=step_markers_v,
            w=step_markers_w,
            sizemode="absolute",
            sizeref=2,
            anchor='tail'),
        go.Scatter3d(
            x=step_markers_x,
            y=step_markers_y,
            z=step_markers_z,
            mode='lines',
            line=dict(
                color='red',
                width=2
            )),
        go.Surface(colorscale='Earth', z=quad_arr, opacity=0.5)])

    # Z axis is limited to the extent of the terrain array
    fig.update_layout(
        title='Gradient Descent Steps',
        scene=dict(zaxis=dict(range=[quad_arr.min(), quad_arr.max()]))
    )
    return fig

# Generate 3D plot from previous random starting location
fig = generate_3d_plot(steps)
HTML(plotly.offline.plot(fig, filename='random_starting_point_3d_gradient_descent.html', include_plotlyjs='cdn'))
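
As a side note, if you are working in a notebook, recent versions of plotly can also display the figure directly instead of writing out an HTML file; a minimal alternative to the last line (assuming a standard Jupyter setup):

fig.show()  # renders the interactive figure inline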
