
Fitting a Polynomial with PyTorch


Fitting the parameters of the polynomial y = a·x² + b·x + c by gradient descent, with the coefficients a, b, c as trainable tensors:

# coding=utf-8
import torch
import numpy as np

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()  # initialize nn.Module internals
        # self.a = torch.rand(1, dtype=torch.float32, requires_grad=True)  # a
        # self.b = torch.rand(1, dtype=torch.float32, requires_grad=True)  # b
        # self.c = torch.rand(1, dtype=torch.float32, requires_grad=True)  # c
        self.a = torch.tensor([1.0], dtype=torch.float32,  requires_grad=True)
        self.b = torch.tensor([1.0], dtype=torch.float32,  requires_grad=True)
        self.c = torch.tensor([10.0], dtype=torch.float32, requires_grad=True)
        self.__parameters = dict(a=self.a, b=self.b, c=self.c)
        self.___gpu = False

    def cuda(self):
        if not self.___gpu:
            self.a = self.a.cuda().detach().requires_grad_(True)
            self.b = self.b.cuda().detach().requires_grad_(True)
            self.c = self.c.cuda().detach().requires_grad_(True)
            self.__parameters = dict(a=self.a, b=self.b, c=self.c) 
            self.___gpu = True
        return self

    def cpu(self):
        if self.___gpu:
            self.a = self.a.cpu().detach().requires_grad_(True)
            self.b = self.b.cpu().detach().requires_grad_(True)
            self.c = self.c.cpu().detach().requires_grad_(True)
            self.__parameters = dict(a=self.a, b=self.b, c=self.c)
            self.___gpu = False
        return self

    def forward(self, inputs):
        return self.a * inputs ** 2  + self.b * inputs + self.c

    def parameters(self):
        for name, param in self.__parameters.items():
            yield param


def main():
    x = np.linspace(1, 50, 1000)
    a, b, c = 2, 1, 13
    y = a * x ** 2 + b * x + c
    x_ref = torch.from_numpy(x.astype(np.float32))
    y_ref = torch.from_numpy(y.astype(np.float32))

    net = Net()
    if torch.cuda.is_available():
        x_ref = x_ref.cuda()  # move the tensors, not the numpy arrays
        y_ref = y_ref.cuda()
        net = net.cuda()

    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0005)
    loss_op = torch.nn.MSELoss(reduction='sum')

    for i in range(1, 100001):
        y_out = net(x_ref)  # calling the module invokes forward()
        loss = loss_op(y_ref, y_out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_numpy = loss.cpu().detach().numpy()
        if i % 2000 == 0:  # 1000
            a = net.a.cpu().detach().numpy()
            b = net.b.cpu().detach().numpy()
            c = net.c.cpu().detach().numpy()
            print(i, loss_numpy, a, b, c)
        if loss_numpy < 0.0001:  # 0.00001
            a = net.a.cpu().detach().numpy()
            b = net.b.cpu().detach().numpy()
            c = net.c.cpu().detach().numpy()
            print(a, b, c)
            return  # converged; stop training

if __name__ == '__main__':
    main()
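For comparison, the manual bookkeeping above (the hand-rolled parameters(), cuda() and cpu()) can be avoided by registering the coefficients as torch.nn.Parameter, in which case nn.Module handles all of it automatically. A minimal sketch of the same fit (class and function names here are illustrative, not from the original post):

# coding=utf-8
import torch

class PolyNet(torch.nn.Module):  # illustrative name
    def __init__(self):
        super().__init__()
        # nn.Parameter tensors are registered automatically, so the built-in
        # parameters(), cuda() and cpu() of nn.Module just work
        self.a = torch.nn.Parameter(torch.tensor([1.0]))
        self.b = torch.nn.Parameter(torch.tensor([1.0]))
        self.c = torch.nn.Parameter(torch.tensor([10.0]))

    def forward(self, x):
        return self.a * x ** 2 + self.b * x + self.c

def fit():
    x = torch.linspace(1, 50, 1000)
    y = 2 * x ** 2 + 1 * x + 13  # ground truth: a=2, b=1, c=13
    net = PolyNet()
    if torch.cuda.is_available():
        x, y, net = x.cuda(), y.cuda(), net.cuda()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0005)
    loss_op = torch.nn.MSELoss(reduction='sum')
    for i in range(1, 100001):
        loss = loss_op(net(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 2000 == 0:
            print(i, loss.item(), net.a.item(), net.b.item(), net.c.item())
        if loss.item() < 0.0001:
            break
    return net.a.item(), net.b.item(), net.c.item()

if __name__ == '__main__':
    print(fit())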

 

Record of earlier buggy, non-standard code. The main problems, flagged with BUG comments below: __init__ never calls super().__init__(); forward passes a torch tensor through np.real, which fails for tensors that require grad (and would in any case cut the autograd graph); and torch.linalg.eigvals returns complex, unsorted eigenvalues, so the output cannot reliably be compared band-by-band against the reference data.

 

# coding=utf-8
import torch
import numpy as np

class Net(torch.nn.Module):
    def __init__(self):
        # BUG: super().__init__() is never called, so nn.Module is not initialized
        self.LL = torch.tensor([-20], dtype=torch.float32,  requires_grad=True)
        self.MM = torch.tensor([-10], dtype=torch.float32,  requires_grad=True)
        self.NN = torch.tensor([-20], dtype=torch.float32, requires_grad=True)
        self.__parameters = dict(LL=self.LL, MM=self.MM, NN=self.NN) 
        self.___gpu = False

    def cuda(self):
        if not self.___gpu:
            self.LL = self.LL.cuda().detach().requires_grad_(True)
            self.MM = self.MM.cuda().detach().requires_grad_(True)
            self.NN = self.NN.cuda().detach().requires_grad_(True)
            self.__parameters = dict(LL=self.LL, MM=self.MM, NN=self.NN) 
            self.___gpu = True
        return self

    def cpu(self):
        if self.___gpu:
            self.LL = self.LL.cpu().detach().requires_grad_(True)
            self.MM = self.MM.cpu().detach().requires_grad_(True)
            self.NN = self.NN.cpu().detach().requires_grad_(True)
            self.__parameters = dict(LL=self.LL, MM=self.MM, NN=self.NN)
            self.___gpu = False
        return self

    def forward(self, k_data):
        Ev = -0.5747985
        N = len(k_data)
        E_out = torch.zeros(N,3, dtype=torch.float32)
        for i in range(N):
            Ham = torch.zeros(3,3, dtype=torch.float32)
            kx, ky, kz = k_data[i][0], k_data[i][1], k_data[i][2]
            Ham[0][0] = Ev + self.LL*kx**2 + self.MM*(ky**2 + kz**2)
            Ham[1][1] = Ev + self.LL*ky**2 + self.MM*(kx**2 + kz**2)
            Ham[2][2] = Ev + self.LL*kz**2 + self.MM*(kx**2 + ky**2)
            Ham[0][1], Ham[0][2] = self.NN*kx*ky, self.NN*kx*kz
            Ham[1][0], Ham[1][2] = self.NN*kx*ky, self.NN*ky*kz
            Ham[2][0], Ham[2][1] = self.NN*kx*kz, self.NN*ky*kz
            Eig = torch.linalg.eigvals(Ham)  # BUG: complex, unsorted eigenvalues
            # BUG: np.real on a tensor that requires grad fails, and mixing NumPy
            # in would cut the autograd graph anyway; use torch.real(Eig) instead
            E_out[i,:] = np.real(Eig)
        return E_out

    def parameters(self):
        for name, param in self.__parameters.items():
            yield param

def reference():
    k_data = np.load("kpoints.npy")
    E_data = np.load("bands.npy")
    E_data = np.transpose(E_data)

    k_data = k_data[269:339]
    E_data = E_data[269:339]
    return k_data, E_data


def main():
    k_data, E_data = reference()
    k_data = torch.from_numpy(k_data.astype(np.float32))
    E_ref = torch.from_numpy(E_data.astype(np.float32))

    net = Net()
    if torch.cuda.is_available():
        k_data = k_data.cuda()
        E_ref = E_ref.cuda()
        net = net.cuda()

    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0005)
    loss_op = torch.nn.MSELoss(reduction='sum')

    for i in range(1, 100001, 1):
        E_out = net.forward(k_data)
        loss = loss_op(E_ref, E_out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_numpy = loss.cpu().detach().numpy()
        if i % 100 == 0:  # 1000
            LL = net.LL.cpu().detach().numpy()
            MM = net.MM.cpu().detach().numpy()
            NN = net.NN.cpu().detach().numpy()
            print(i, loss_numpy, LL, MM, NN)
        if loss_numpy < 0.0001:  # 0.00001
            LL = net.LL.cpu().detach().numpy()
            MM = net.MM.cpu().detach().numpy()
            NN = net.NN.cpu().detach().numpy()
            print(LL, MM, NN)
            exit()

if __name__ == '__main__':
    main()
    # k_data, E_data = reference()
    # print(k_data)
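For reference, a corrected sketch of the forward pass (assuming the same kpoints.npy / bands.npy inputs): everything stays in torch, and torch.linalg.eigvalsh is used instead of eigvals, since the Hamiltonian is real symmetric; eigvalsh returns real eigenvalues in ascending order and lets gradients flow back to LL, MM and NN.

    def forward(self, k_data):
        Ev = -0.5747985
        N = len(k_data)
        E_out = torch.zeros(N, 3, dtype=torch.float32)
        for i in range(N):
            kx, ky, kz = k_data[i][0], k_data[i][1], k_data[i][2]
            # build the 3x3 symmetric Hamiltonian for this k-point
            Ham = torch.zeros(3, 3, dtype=torch.float32)
            Ham[0, 0] = Ev + self.LL * kx ** 2 + self.MM * (ky ** 2 + kz ** 2)
            Ham[1, 1] = Ev + self.LL * ky ** 2 + self.MM * (kx ** 2 + kz ** 2)
            Ham[2, 2] = Ev + self.LL * kz ** 2 + self.MM * (kx ** 2 + ky ** 2)
            Ham[0, 1] = Ham[1, 0] = self.NN * kx * ky
            Ham[0, 2] = Ham[2, 0] = self.NN * kx * kz
            Ham[1, 2] = Ham[2, 1] = self.NN * ky * kz
            # real symmetric matrix: eigvalsh yields real, ascending eigenvalues
            # and keeps the computation on the autograd graph
            E_out[i, :] = torch.linalg.eigvalsh(Ham)
        return E_out

Note that the reference bands would also need to be sorted in ascending order per k-point for the band-by-band MSE against these eigenvalues to be meaningful.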

 

From: https://www.cnblogs.com/ghzhan/p/16791262.html
