Problem 1: The Basic Workflow of Machine Learning (18)

The tail of the Problem 1 training script, continued from the previous part: checkpointing the best model, redrawing the fit during training, then exporting the error curve and the training GIF.

```python
    # Track the best epoch and checkpoint the corresponding weights
    if torch.abs(loss) < best_loss:
        best_loss = torch.abs(loss).item()
        best_epoch = epoch
        torch.save(myNet.state_dict(), 'new_best_fitting.mdl')

    # Redraw the current fit every 10 epochs
    if epoch % 10 == 0:
        plt.ion()  # turn on interactive mode
        plt.close('all')
        myNet.load_state_dict(torch.load('new_best_fitting.mdl'))
        with torch.no_grad():
            pred = myNet(x_data)
        plt.plot(x_data, y_data, label='real')
        plt.plot(x_data, pred, label='predict')
        plt.title("Training times:" + str(epoch))
        plt.legend()
        plt.savefig('./Training_process/Fitting_' + str(epoch) + '.png')
        if epoch == best_epoch:
            plt.savefig('Best_fitting.png')
        # plt.show()
        # plt.pause(0.01)

print('=' * 55)
print('Training finished'.center(55))
print('-' * 55)
print('Best epoch:', best_epoch, 'best loss:', best_loss)
plt.close('all')
plt.ioff()  # turn off interactive mode
plt.title('Error curve')
plt.xlabel('loss vs. epochs')
plt.ylabel('loss')
plt.plot(range(0, epochs + 1), Loss_list, label='Loss')
plt.savefig('Error_curve_Fitting.png')
# plt.show()
print('Saved the best-fit plot: see "Best_fitting.png"')
print('Saved the error-curve plot: see "Error_curve_Fitting.png"')
print('-' * 55)
print('Generating an animation of the training process')
image2gif.image2gif('Fitting')
print('=' * 55)
```
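Both scripts in this post import `image2gif` from a local `Dynamic_drawing` module that is never listed. Below is a minimal sketch of what such a helper might look like; it is not the author's code, and it assumes the frames were saved as `./Training_process/<prefix>_<epoch>.png` and that the third-party `imageio` package (v2 API) is installed.

```python
# Hypothetical stand-in for the Dynamic_drawing.image2gif helper (not the
# author's code). Assumes frames named './Training_process/<prefix>_<epoch>.png'.
import glob
import os

import imageio.v2 as imageio  # classic imageio v2 API


def image2gif(prefix):
    # Collect the saved frames and sort them numerically by epoch.
    frames = sorted(
        glob.glob('./Training_process/' + prefix + '_*.png'),
        key=lambda p: int(os.path.splitext(os.path.basename(p))[0].split('_')[-1]),
    )
    # Stack the frames into a single animated GIF, 0.1 s per frame.
    images = [imageio.imread(f) for f in frames]
    imageio.mimsave(prefix + '.gif', images, duration=0.1)
```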
III. Ordinary Differential Equations

Problem description

Consider the following ordinary differential equation, written as a residual:
$$f(x)=\frac{d\phi}{dx}+\left(x+\frac{1+3x^2}{1+x+x^3}\right)\phi-x^3-2x-x^2\,\frac{1+3x^2}{1+x+x^3},\qquad x\in(0,2),$$
with $f(x)=0$ on $(0,2)$ and the initial condition $\phi(0)=1$ (enforced in the code below through the penalty term $(\phi(0)-1)^2$).

The ANN solution is compared below with the exact solution $\phi(x)=\frac{e^{-\frac{1}{2}x^2}}{1+x+x^3}+x^2$:
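As a quick sanity check (not part of the original post), the exact solution can be verified symbolically. The sketch below uses sympy to confirm that this $\phi$ makes the residual $f(x)$ vanish identically and satisfies $\phi(0)=1$:

```python
# Sanity check (not in the original post): verify the exact solution symbolically.
import sympy as sp

x = sp.symbols('x')
phi = sp.exp(-x**2 / 2) / (1 + x + x**3) + x**2  # candidate exact solution
q = (1 + 3 * x**2) / (1 + x + x**3)              # shorthand appearing in f(x)

# ODE residual: f(x) = dphi/dx + (x + q)*phi - x^3 - 2x - q*x^2
f = sp.diff(phi, x) + (x + q) * phi - x**3 - 2 * x - q * x**2

print(sp.simplify(f))   # expected: 0 (residual vanishes identically)
print(phi.subs(x, 0))   # expected: 1 (initial condition holds)
```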
Program source code
```python
# Author: Leo 刘
# Environment: macOS Big Sur
# Created: 2021/5/21 3:07 PM
# Email: 517093978@qq.com
# @Software: PyCharm
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import autograd

from Dynamic_drawing import image2gif  # local helper that turns saved frames into a GIF

# Build the input set: 100 points in [0, 2]
x_data = np.linspace(0, 2, 100, endpoint=True)[:, np.newaxis]
x_data = torch.tensor(x_data).float()
# print(x_data.size())
# Known analytic solution, used for comparison
y_data = torch.exp(-0.5 * x_data ** 2) / (1 + x_data + x_data ** 3) + x_data ** 2


# Define the neural network
class Net(torch.nn.Module):
    # Arguments: number of input features, hidden units, and outputs
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()  # required by PyTorch
        # Input layer -> hidden layer
        self.input = torch.nn.Linear(n_feature, n_hidden)
        # Hidden layer -> hidden layer
        self.hidden = torch.nn.Linear(n_hidden, n_hidden)
        # Hidden layer -> output layer
        self.predict = torch.nn.Linear(n_hidden, n_output)

    # Forward pass
    def forward(self, input):
        # input = torch.relu(self.hidden(input))  # piecewise-linear alternative
        input = torch.tanh(self.input(input))  # nonlinear activation
        input = torch.tanh(self.hidden(input))
        input = self.predict(input)
        return input


# Initialize the network
print('Network structure:')
myNet = Net(1, 10, 1)
print(myNet)

# Set up the optimizer
optimizer = torch.optim.Adam(myNet.parameters(), lr=0.05)
best_loss, best_epoch = 100, 0
epochs = 2000  # number of training epochs

input = x_data
input.requires_grad_()  # track gradients with respect to the input
Loss_list = []

print('Start training:')
for epoch in range(epochs + 1):
    output = myNet(input)
    # d(phi)/dx via automatic differentiation
    grads = autograd.grad(outputs=output, inputs=input,
                          grad_outputs=torch.ones_like(output),
                          create_graph=True, retain_graph=True, only_inputs=True)[0]
    lq = (1 + 3 * (input ** 2)) / (1 + input + input ** 3)
    # Squared ODE residual f(x)^2 at every collocation point
    t_loss = (grads + (input + lq) * output - input ** 3 - 2 * input - lq * input * input) ** 2
    # Mean squared residual plus the initial-condition penalty (phi(0) = 1)
    loss = torch.mean(t_loss) + (output[0] - 1) ** 2
    Loss_list.append(loss.item() / len(x_data))  # .item() detaches the value for plotting later
    optimizer.zero_grad()  # clear gradients
    loss.backward()        # backpropagate
    optimizer.step()       # update parameters
    torch.save(myNet.state_dict(), 'new_best_ODE.mdl')
    # Log the loss
    if epoch % 100 == 0:
        print('epoch:', epoch, 'loss:', loss.item())
    # Track the best epoch over the last fifth of training
    if epoch > int(4 * epochs / 5):
        if torch.abs(loss) < best_loss:
            best_loss = torch.abs(loss).item()
            best_epoch = epoch
```
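The original listing is cut off at this point; the remaining plotting and GIF-export tail presumably mirrors the Problem 1 script above. Once training finishes, the saved checkpoint can be evaluated against the analytic solution. A minimal sketch, assuming `myNet`, `x_data`, and `y_data` from the listing are still in scope and `new_best_ODE.mdl` exists:

```python
# Evaluation sketch (not in the original post): reload the checkpoint saved by
# the training loop and compare the network output with the analytic solution.
myNet.load_state_dict(torch.load('new_best_ODE.mdl'))
with torch.no_grad():
    pred = myNet(x_data)

max_err = torch.max(torch.abs(pred - y_data)).item()
print('max |ANN - exact| =', max_err)

plt.figure()
plt.plot(x_data.detach(), y_data, label='exact')  # x_data requires grad, so detach before plotting
plt.plot(x_data.detach(), pred, label='ANN')
plt.legend()
plt.savefig('ODE_comparison.png')
```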