
    if torch.abs(loss) < best_loss:
        best_loss = torch.abs(loss).item()
        best_epoch = epoch
        torch.save(myNet.state_dict(), 'new_best_ODE.mdl')
    # continuously plot the current best fit
    if epoch % 1 == 0:
        plt.ion()  # turn on interactive mode
        plt.close('all')
        myNet.load_state_dict(torch.load('new_best_ODE.mdl'))
        with torch.no_grad():
            x_data = np.linspace(0, 2, 100, endpoint=True)[:, np.newaxis]
            x_data = torch.tensor(x_data).float()
            pred = myNet(x_data)
        plt.plot(x_data, y_data, label='real')
        plt.plot(x_data, pred, label='predict')
        plt.title("Training times:" + str(epoch))
        plt.legend()
        plt.savefig('./Training_process/ODE_' + str(epoch) + '.png')
        if epoch == best_epoch:
            plt.savefig('Best_ODE.png')
        # plt.show()
        # plt.pause(0.01)

print('=' * 55)
print('Training finished'.center(55))
print('-' * 55)
print('Best epoch:', best_epoch, 'best loss:', best_loss)
plt.close('all')
plt.ioff()  # turn off interactive mode
plt.title('Error curve')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.plot(range(0, epochs + 1), Loss_list, label='Loss')
plt.savefig('Error_curve_ODE.png')
# plt.show()
print('The best-solution figure has been saved; open "Best_ODE.png" to view it')
print('The error-curve figure has been saved; open "Error_curve_ODE.png" to view it')
print('-' * 55)
print('Generating an animation of the training process')
image2gif.image2gif('ODE')
print('=' * 55)
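The Dynamic_drawing module imported by these scripts is the author's own helper, and its source is not included in the article. As a point of reference only, a minimal stand-in with the same image2gif.image2gif(name) call, assuming the imageio package and the ./Training_process/<name>_<epoch>.png frame naming used above (the 0.1 s frame duration is an illustrative choice), could look like this:

# Hypothetical stand-in for Dynamic_drawing.image2gif (not the author's code)
import glob
import re
import imageio.v2 as imageio

def image2gif(name):
    # collect the saved frames and sort them numerically by epoch
    frames = glob.glob('./Training_process/{}_*.png'.format(name))
    frames.sort(key=lambda p: int(re.search(r'_(\d+)\.png$', p).group(1)))
    # assemble the frames into <name>.gif at 0.1 s per frame
    imageio.mimsave('{}.gif'.format(name),
                    [imageio.imread(f) for f in frames], duration=0.1)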
IV. Partial Differential Equations
Problem description
Consider the Poisson equation:
$$-\Delta u(x) = 1, \quad x \in \Omega,$$
$$u(x) = 0, \quad x \in \partial\Omega,$$
where $\Omega = (-1,1) \times (-1,1) \setminus [0,1) \times \{0\}$. The numerical results obtained by solving this problem with the DRM (Deep Ritz Method) algorithm are as follows:
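The DRM does not discretize the PDE directly. Instead it minimizes the equivalent variational (Ritz) energy, enforcing the Dirichlet condition with a boundary penalty; with $f \equiv 1$, the functional implemented by the loss function in the script below is

$$E[u] = \int_\Omega \Big( \frac{1}{2}\,|\nabla u(x)|^2 - u(x) \Big)\,dx + \beta \int_{\partial\Omega} u(x)^2 \, ds,$$

where the code uses the penalty weight $\beta = 500$ and approximates both integrals by Monte Carlo averages over randomly sampled interior and boundary points.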
Program source code
# Developer: Leo Liu
# Environment: macOS Big Sur
# Created: 2021/8/1 10:45 PM
# Email: 517093978@qq.com
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim, autograd
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from Dynamic_drawing import image2gif

# Try to solve the poisson equation:
'''
Solve the following PDE
-\Delta u(x) = 1, x\in \Omega,
u(x) = 0, x\in \partial \Omega
\Omega = (-1,1) * (-1,1) \ [0,1) * {0}
'''


# Activation function proposed in the DRM paper
# (it can produce outliers, so the tanh activation is used instead)
class PowerReLU(nn.Module):
    """Implements sigma(x)^(power).
    Applies a power of the rectified linear unit element-wise.

    NOTE: inplace may not be working.
    Can set inplace for inplace operation if desired.
    BUT I don't think it is working now.

    INPUT:
        x -- size (N,*) tensor where * is any number of additional dimensions
    OUTPUT:
        y -- size (N,*)
    """

    def __init__(self, inplace=False, power=3):
        super(PowerReLU, self).__init__()
        self.inplace = inplace
        self.power = power

    def forward(self, input):
        y = F.relu(input, inplace=self.inplace)
        return torch.pow(y, self.power)


# Define a residual block
class Block(nn.Module):
    """
    Implementation of the block used in the Deep Ritz paper.

    Parameters:
        in_N  -- dimension of the input
        width -- number of nodes in the interior middle layer
        out_N -- dimension of the output
        phi   -- activation function used
    """

    def __init__(self, in_N, width, out_N, phi=PowerReLU()):
        super(Block, self).__init__()
        # create the necessary linear layers
        self.L1 = nn.Linear(in_N, width)
        self.L2 = nn.Linear(width, out_N)
        # choose appropriate activation function
        # (tanh is used instead of the phi argument; see the note above)
        self.phi = nn.Tanh()

    def forward(self, x):
        # two linear layers with activation, plus the identity shortcut
        return self.phi(self.L2(self.phi(self.L1(x)))) + x


# Stack residual blocks into a complete residual network
class drrnn(nn.Module):
    """
    drrnn -- Deep Ritz Residual Neural Network

    Implements a network with the architecture used in the
    Deep Ritz method paper.

    Parameters:
        in_N  -- input dimension
        out_N -- output dimension
        m     -- width of layers that form blocks
        depth -- number of blocks to be stacked
        phi   -- the activation function
    """

    def __init__(self, in_N, m, out_N, depth=4, phi=PowerReLU()):
        super(drrnn, self).__init__()
        # set parameters
        self.in_N = in_N
        self.m = m
        self.out_N = out_N
        self.depth = depth
        self.phi = nn.Tanh()
        # list for holding all the blocks
        self.stack = nn.ModuleList()
        # add first layer to list
        self.stack.append(nn.Linear(in_N, m))
        # add middle blocks to list
        for i in range(depth):
            self.stack.append(Block(m, m, m))
        # add output linear layer
        self.stack.append(nn.Linear(m, out_N))

    def forward(self, x):
        # pass the input through every layer in order
        for i in range(len(self.stack)):
            x = self.stack[i](x)
        return x


# Interior sampling
def get_interior_points(N=512, d=2):
    """
    Randomly sample N points from the interior of [-1,1]^d.
    """
    return torch.rand(N, d) * 2 - 1


# Boundary sampling
def get_boundary_points(N=32):
    """
    Randomly sample N points from each piece of the boundary
    (the slit plus the four edges of the square).
    """
    index = torch.rand(N, 1)
    index1 = torch.rand(N, 1) * 2 - 1
    xb1 = torch.cat((index, torch.zeros_like(index)), dim=1)       # the slit [0,1) x {0}
    xb2 = torch.cat((index1, torch.ones_like(index1)), dim=1)      # top edge y = 1
    xb3 = torch.cat((index1, torch.full_like(index1, -1)), dim=1)  # bottom edge y = -1
    xb4 = torch.cat((torch.ones_like(index1), index1), dim=1)      # right edge x = 1
    xb5 = torch.cat((torch.full_like(index1, -1), index1), dim=1)  # left edge x = -1
    xb = torch.cat((xb1, xb2, xb3, xb4, xb5), dim=0)
    return xb


# Initialize the network parameters
def weights_init(m):
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.xavier_normal_(m.weight)
        nn.init.constant_(m.bias, 0.0)


epochs = 50000  # number of training epochs
in_N = 2
m = 10
out_N = 1
# print(torch.cuda.is_available())
# prefer the GPU for training (the original line selected 'cpu' in both branches)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# instantiate the residual network model
model = drrnn(in_N, m, out_N).to(device)
# Apply the weights_init initialization method to submodules
model.apply(weights_init)
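# --- Optional sanity check (added for illustration; not part of the original script) ---
# drrnn maps a batch of 2-D points to scalars: Linear(2->10), four residual
# Blocks (10->10), then Linear(10->1); get_boundary_points(N=32) returns
# 5 * 32 = 160 points (the slit plus the four edges).
assert model(get_interior_points(N=4).to(device)).shape == (4, 1)
assert get_boundary_points().shape == (160, 2)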
# Use the Adam optimizer with an initial learning rate (lr) of 3e-3
optimizer = optim.Adam(model.parameters(), lr=3e-3)
print('Network architecture:')
print(model)
best_loss, best_epoch = 1000, 0
Loss_list = []
# start training
print('Start training:')
for epoch in range(epochs + 1):
    # generate the data sets
    xr = get_interior_points()
    xb = get_boundary_points()
    xr = xr.to(device)
    xb = xb.to(device)
    # track gradients with respect to the interior points xr
    xr.requires_grad_()
    output_r = model(xr)
    output_b = model(xb)
    # gradient of the network output with respect to xr
    grads = autograd.grad(outputs=output_r, inputs=xr,
                          grad_outputs=torch.ones_like(output_r),
                          create_graph=True, retain_graph=True, only_inputs=True)[0]
    # loss_r is the Ritz variational energy, loss_b the boundary penalty,
    # and loss the total loss
    loss_r = 0.5 * torch.sum(torch.pow(grads, 2), dim=1) - output_r
    loss_r = torch.mean(loss_r)
    loss_b = torch.mean(torch.pow(output_b, 2))
    loss = loss_r + 500 * loss_b
    Loss_list.append(loss.item() / (len(xr) + len(xb)))
    optimizer.zero_grad()  # zero the accumulated gradients
    loss.backward()        # backpropagate the error
    optimizer.step()       # update the network parameters (weights and biases)
    torch.save(model.state_dict(), 'new_best_Deep_Ritz.mdl')  # save the model
    if epoch % 100 == 0:
        print('epoch:', epoch, 'loss:', loss.item(), 'loss_r:', loss_r.item(),
              'loss_b:', (500 * loss_b).item())
    if epoch > int(4 * epochs / 5):
        if torch.abs(loss) < best_loss: