首页 > 其他分享 >深度学习-->线性回归模型

深度学习-->线性回归模型

时间:2023-07-28 17:24:21浏览次数:36  
标签:loss -- train nd label num 深度 线性 data

# Linear regression, implemented from scratch with MXNet NDArray.

# Create a synthetic dataset.
from mxnet import ndarray as nd
from mxnet import autograd as ad

num_input = 2
num_examples = 1000

# Ground-truth parameters used to generate the data; training should recover them.
true_w = [2, -3.4]
true_b = 4.2

x = nd.random_normal(shape=(num_examples, num_input))
y = true_w[0] * x[:, 0] + true_w[1] * x[:, 1] + true_b
y += .01 * nd.random_normal(shape=y.shape)  # add small Gaussian noise to the targets
print(x[0:10], y[0:10])
# Data loading
import random

batch_size = 10


def data_iter():
    """Yield (features, labels) minibatches of size `batch_size` from x/y in random order."""
    order = list(range(num_examples))
    random.shuffle(order)  # random order => stochastic gradient descent
    for start in range(0, num_examples, batch_size):
        chosen = nd.array(order[start:min(start + batch_size, num_examples)])
        yield nd.take(x, chosen), nd.take(y, chosen)


# Sanity check: iterate once over the data and print every batch.
for data, label in data_iter():
    print(data, label)
# Count how many batches one full pass yields.
n = 0
for data, label in data_iter():
    n = n + 1
print(n)  # 100 batches: 1000 examples at 10 per batch

# Initialise the model parameters: random weights, zero bias.
w = nd.random_normal(shape=(num_input, 1))
b = nd.zeros((1,))
params = [w, b]
# Attach gradient buffers so autograd can record gradients for w and b,
# which the training loop below updates in place.
for param in params:
    param.attach_grad()


# Model: a single linear layer, y = x @ w + b.
def net(x):
    return nd.dot(x, w) + b


print(net(data))  # smoke-test on the last batch left over from the loop above


# Loss: squared error between prediction and target.
# The reshape guards against yhat being a column while y is a row (or vice
# versa) -- without it, broadcasting would silently produce a matrix of
# differences instead of element-wise pairs.
def square_loss(yhat, y):
    aligned = y.reshape(yhat.shape)
    return (yhat - aligned) ** 2


# Optimiser: one in-place stochastic-gradient-descent step per parameter,
# driving the loss down along the negative gradient.
def sgd(params, lr):
    for param in params:
        stepped = param - lr * param.grad
        param[:] = stepped  # slice-assign so the update happens in place


# Train the model.
epochs = 5  # number of full passes over the dataset
learning_rate = 0.001
for e in range(epochs):
    total_loss = 0
    for data, label in data_iter():
        with ad.record():  # record the forward pass so backward() can compute gradients
            output = net(data)
            loss = square_loss(output, label)

        loss.backward()
        sgd(params, learning_rate)  # gradient step on w and b
        total_loss += nd.sum(loss).asscalar()  # asscalar() converts the 1-element NDArray to a Python float

    print("Epoch %d,average loss: %f" % (e, total_loss / num_examples))  # mean loss per example this epoch

print(true_w, w)  # compare the learned parameters with the generating ones
print(true_b, b)

 更为简便的写法(使用 gluon 库):

from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon

num_input = 2
num_examples = 1000

# Same synthetic linear dataset as the scratch version above.
true_w = [2, -3.4]
true_b = 4.2

x = nd.random_normal(shape=(num_examples, num_input))
y = true_w[0] * x[:, 0] + true_w[1] * x[:, 1] + true_b
y += 0.01 * nd.random_normal(shape=y.shape)

# Data loading via gluon's Dataset/DataLoader utilities.

batch_size = 10
dataset = gluon.data.ArrayDataset(x, y)
data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True)

# Print one batch to sanity-check the loader.
for data, label in data_iter:
    print(data, label)
    break

# Model: a Sequential container (a stack of layers, initially empty) holding
# one Dense(1) layer -- the same single linear layer as the scratch version.
net = gluon.nn.Sequential()
net.add(gluon.nn.Dense(1))
# Fixed: the original used print(f"net:", net) -- the f-prefix was useless
# (no placeholders; ruff F541). The printed output is unchanged.
print("net:", net)

# Initialise the parameters (shapes are inferred lazily at the first forward pass).
net.initialize()
# Loss function.
square_loss = gluon.loss.L2Loss()
# Optimiser: plain SGD over all of the network's parameters.
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
# Training loop.
epochs = 5
for e in range(epochs):
    total_loss = 0
    for data, label in data_iter:
        with autograd.record():
            output = net(data)
            loss = square_loss(output, label)
        loss.backward()
        trainer.step(batch_size)  # step() normalises the summed gradient by the batch size
        total_loss += nd.sum(loss).asscalar()
    print("Epoch %d,average loss: %f" % (e, total_loss / num_examples))

# Compare the learned layer's weight/bias with the generating values.
dense = net[0]
print(true_w, dense.weight.data())
print(true_b, dense.bias.data())

 图片识别算法(softmax 回归):

 

# Multi-class classification (softmax regression) on FashionMNIST.
from mxnet import gluon
from mxnet import ndarray as nd
import matplotlib.pyplot as plt


def transform(data, label):
    # Scale pixel values from [0, 255] into [0, 1]; cast labels to float32.
    return data.astype('float32') / 255, label.astype('float32')


mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)

data, label = mnist_train[0]
print("example shape:", data.shape, 'label', label)  # each example is height x width x channel


# Render a row of images with matplotlib.
def show_images(images):
    """Display each 28x28 image in `images` side by side, with axes hidden."""
    count = images.shape[0]
    _, axes = plt.subplots(1, count, figsize=(15, 15))
    for axis, image in zip(axes, images):
        axis.imshow(image.reshape((28, 28)).asnumpy())
        axis.axes.get_xaxis().set_visible(False)
        axis.axes.get_yaxis().set_visible(False)
    plt.show()


def get_text_label(label):
    """Map an iterable of numeric FashionMNIST class ids to human-readable names."""
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    texts = []
    for class_id in label:
        texts.append(names[int(class_id)])
    return texts


# Show the first nine training examples with their text labels.
data, label = mnist_train[0:9]
show_images(data)
print(get_text_label(label))

# Data loading.
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)

# Initialise the model parameters.
# As in linear regression, each sample is flattened to a vector: the input is a
# 28x28 image, so the input vector has 28*28 = 784 entries (one per pixel).
# With 10 classes, the single-layer network outputs 10 values, so the softmax
# regression weights are 784x10 and the bias is 1x10.
num_input = 784
num_output = 10

w = nd.random_normal(shape=(num_input, num_output))
b = nd.random_normal(shape=num_output)  # NOTE(review): bias is randomly initialised; zeros are more conventional

params = [w, b]
# Attach gradient buffers for autograd.
for param in params:
    param.attach_grad()
# 定义模型

from mxnet import nd


# Softmax, computed row-wise: rows are samples, columns are class scores.
# Exponentiating makes every entry positive; dividing each row by its row sum
# (sum over axis=1, keepdims=True so the shapes broadcast) makes each row a
# valid probability distribution -- non-negative and summing to 1.
def softmax(x):
    """Return the row-wise softmax of a 2-D NDArray."""
    exponentiated = nd.exp(x)
    row_totals = exponentiated.sum(axis=1, keepdims=True)
    return exponentiated / row_totals


# Sanity check: softmax output is non-negative and each row sums to 1.
x = nd.random_normal(shape=(2, 5))
x_prob = softmax(x)

print(x)
print(x_prob)
print(x_prob.sum(axis=1))


def net(x):
    """Flatten each image to a 784-vector, apply the linear layer, softmax the scores."""
    flattened = x.reshape((-1, num_input))
    scores = nd.dot(flattened, w) + b
    return softmax(scores)


# Loss: cross-entropy. nd.pick selects, per row, the predicted probability of
# the true class y; the loss is its negative log.
def cross_entropy(yhat, y):
    log_probs = nd.log(yhat)
    return -nd.pick(log_probs, y)


# Accuracy: fraction of samples whose highest-scoring class equals the label.
# argmax picks the predicted class per row; mean averages the 0/1 matches.
def accuracy(output, label):
    predictions = output.argmax(axis=1)
    return nd.mean(predictions == label).asscalar()


def evaluate_accuracy(data_iterator, net):
    """Return the average per-batch accuracy of `net` over `data_iterator`."""
    total = 0.
    for batch, labels in data_iterator:
        total += accuracy(net(batch), labels)
    return total / len(data_iterator)  # len() of a DataLoader is its batch count


# Report the accuracy of the untrained model (should be near chance, ~0.1).
# Fixed: the original evaluated twice -- a bare call whose result was discarded,
# then a second call inside print(). One evaluation suffices.
print(evaluate_accuracy(test_data, net))

# 训练模型

import sys

sys.path.append('..')
from mxnet import autograd


# Optimiser: one in-place SGD step for every parameter.
def sgd(params, lr):
    for param in params:
        stepped = param - lr * param.grad
        param[:] = stepped  # slice-assign so the update happens in place


learning_rate = .1

for epoch in range(10):
    train_acc = 0.
    train_loss = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = cross_entropy(output, label)
        loss.backward()
        # The batch loss gradients are summed, so scale the rate by batch size.
        sgd(params, learning_rate / batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)
    test_acc = evaluate_accuracy(test_data, net)
    # BUG FIX: evaluate_accuracy already returns an average, so test_acc must
    # not be divided by len(train_data) again (the original printed a bogus,
    # near-zero test accuracy).
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))

# Finally, classify a few test images and compare against the true labels.

data, label = mnist_test[0:9]
show_images(data)
print('true labels')
print(get_text_label(label))

predicted_labels = net(data).argmax(axis=1)
print('predicted labels')
print(get_text_label(predicted_labels.asnumpy()))

 

# Multi-layer perceptron.

# Fetch the data.
import d2lzh
from mxnet import autograd

batch_size = 256
train_data, test_data = d2lzh.load_data_fashion_mnist(batch_size)

# Model parameters.
from mxnet import ndarray as nd

num_input = 28 * 28
num_output = 10

num_hidden = 256  # size of the single hidden layer added between input and output
weight_scale = .01  # standard deviation for the random weight initialisation

w1 = nd.random_normal(shape=(num_input, num_hidden), scale=weight_scale)
b1 = nd.zeros(num_hidden)

w2 = nd.random_normal(shape=(num_hidden, num_output), scale=weight_scale)
b2 = nd.zeros(num_output)

params = [w1, b1, w2, b2]

# Attach gradient buffers for autograd.
for param in params:
    param.attach_grad()


# Activation function.

def relu(x):
    """Rectified linear unit: element-wise max(x, 0) -- negatives become zero."""
    clamped = nd.maximum(x, 0)
    return clamped


# Model: flatten -> hidden layer with ReLU -> linear output layer (logits).

def net(x):
    flattened = x.reshape((-1, num_input))
    hidden = relu(nd.dot(flattened, w1) + b1)
    return nd.dot(hidden, w2) + b2


# Softmax and cross-entropy, fused into a single gluon loss.

from mxnet import gluon

softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()


def accuracy(y_hat, y):
    """Fraction of rows whose arg-max prediction equals the (float32-cast) label."""
    hits = y_hat.argmax(axis=1) == y.astype('float32')
    return hits.mean().asscalar()


# Training.

learning_rate = .5

for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)

        loss.backward()
        # d2lzh.sgd takes the batch size and normalises the step internally.
        d2lzh.sgd(params, learning_rate, batch_size)

        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)

    test_acc = d2lzh.evaluate_accuracy(test_data, net)
    # BUG FIX: evaluate_accuracy already returns an average accuracy (cf. the
    # sibling implementation above), so do not divide it by len(train_data)
    # again as the original did -- that printed a bogus, near-zero value.
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))
 

标签:loss,--,train,nd,label,num,深度,线性,data
From: https://www.cnblogs.com/o-Sakurajimamai-o/p/17582378.html

相关文章

  • 腾讯云 CODING 成为首批 TISC 企业级平台工程综合能力要求标准贡献单位
    点击链接了解详情今天,由中国信通院主办的“2023数字生态发展大会”暨中国信通院“铸基计划”年中会议在京召开。本次大会全面总结了“铸基计划”上半年度工作成果,帮助行业解析数字化转型发展趋势,链接供给侧和需求侧企业,以期推动我国数字生态高质量发展。在会上,腾讯云CODING......
  • CTFer成长记录——CTF之Web专题·bugku—备份是个好习惯
    一、题目链接https://ctf.bugku.com/challenges/detail/id/83.html?id=83二、解法步骤  打开网站,发现是一串字符串:    解码:[空密码]/[EmptyString],无结果。题目提示“备份是个好习惯”,那么尝试访问index.php.bak和index.php.swp,这两个文件,看看存不存在。于是在index.......
  • 1
      ......
  • 多用户登录,提取token,传入下一个请求
     1.添加默认元件 2.添加临界部分控制器(有执行的顺序,先登录,后请求) 3.登录了提取token 4.下个接口加上token作为头管理器  配置持续时间: ......
  • 鸟哥Linux私房菜学习记录day4
    第九章vim程序编辑器简易执行范例替换 :n1,n2s/word1/word2/g   :1,$s/word1/word2/g(c)(确认)删除:x向后删除一个字符,X向前删除一个字符,nx向后连续删n个字符(n)dd删除(剪切)光标所在的那一行nyy复制光标所在的那n行nG:移动到第n行u恢复前一个操作ctrl+r重做上一个动作.......
  • CKEditor上传图片word
    ​ 1.编辑器修改(可选)1.1在 ueditor/config.json 中添加代码块    /* 上传word配置 */    "wordActionName":"wordupload",/* 执行上传视频的action名称 */    "wordFieldName":"upfile",/* 提交的视频表单名称 */    "wordPathFormat":"/p......
  • 深度学习刷SOTA的trick
    作者:GordonLeehttps://www.zhihu.com/question/540433389/answer/2549775065 1.R-Drop:两次前向+KLloss约束2.MLM:在领域语料上用mlm进一步预训练(Post-training)3.EFL:少样本下,把分类问题转为匹配问题,把输入构造为NSP任务形式.4.混合精度fp16:加快训练速度,提高训练......
  • 使用Go调用Powershell加域
    packagemainimport( "fmt" "github.com/go-ldap/ldap/v3" "github.com/mozillazg/go-pinyin" "os" "os/exec" "strings" "time")funcChineseToAbbreviation(chinesestring)string{......
  • 电子病历归档管理系统标准化实施
     总体设计   01404......
  • 335. Self Crossing (Hard)
    Description335.SelfCrossing(Hard)Youaregivenanarrayofintegersdistance.Youstartatthepoint(0,0)onanX-Yplane,andyoumovedistance[0]meterstothenorth,thendistance[1]meterstothewest,distance[2]meterstothesouth,distance[......