
The MNIST dataset, a hand-written BP network, and gradient derivation

Posted: 2022-10-23 21:11:35

import random
import struct

import numpy as np
import pandas as pd


def load_labels(file):
    with open(file, "rb") as f:
        data = f.read()

    magic_number, num_samples = struct.unpack(">ii", data[:8])
    if magic_number != 2049:  # 0x00000801
        print(f"magic number mismatch {magic_number} != 2049")
        return None

    labels = np.array(list(data[8:]))
    return labels


def load_images(file):
    with open(file, "rb") as f:
        data = f.read()

    magic_number, num_samples, image_width, image_height = struct.unpack(">iiii", data[:16])
    if magic_number != 2051:  # 0x00000803
        print(f"magic number mismatch {magic_number} != 2051")
        return None

    image_data = np.asarray(list(data[16:]), dtype=np.uint8).reshape(num_samples, -1)
    return image_data


def one_hot(labels, classes):
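    # e.g. one_hot(np.array([2, 0]), classes=3) -> [[0, 0, 1], [1, 0, 0]]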
    n = len(labels)
    output = np.zeros((n, classes), dtype=np.int32)
    for row, label in enumerate(labels):
        output[row, label] = 1
    return output


class Dataset:
    def __init__(self, images, labels):
        self.images = images
        self.labels = labels

    # fetch a single item:  dataset = Dataset(...),  dataset[index]
    def __getitem__(self, index):
        return self.images[index], self.labels[index]

    # number of samples in the dataset
    def __len__(self):
        return len(self.images)


class DataLoaderIterator:
    def __init__(self, dataloader):
        self.dataloader = dataloader
        self.cursor = 0
        self.indexs = list(range(self.dataloader.count_data))  # indices 0 .. count_data - 1
        if self.dataloader.shuffle:
            # shuffle the sample order once per epoch
            random.shuffle(self.indexs)

    # stack one sample's fields onto the running batch container
    def merge_to(self, container, b):
        if len(container) == 0:
            for index, data in enumerate(b):
                if isinstance(data, np.ndarray):
                    container.append(data)
                else:
                    container.append(np.array([data], dtype=type(data)))
        else:
            for index, data in enumerate(b):
                container[index] = np.vstack((container[index], data))
        return container

    def __next__(self):
        if self.cursor >= self.dataloader.count_data:
            raise StopIteration()

        batch_data = []
        remain = min(self.dataloader.batch_size, self.dataloader.count_data - self.cursor)  # actual size of this batch (the last one may be smaller)
        for n in range(remain):
            index = self.indexs[self.cursor]
            data = self.dataloader.dataset[index]
            batch_data = self.merge_to(batch_data, data)
            self.cursor += 1
        return batch_data


class DataLoader:

    # shuffle: randomize the sample order each epoch
    def __init__(self, dataset, batch_size, shuffle):
        self.dataset = dataset
        self.shuffle = shuffle
        self.count_data = len(dataset)
        self.batch_size = batch_size

    def __iter__(self):
        return DataLoaderIterator(self)
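
# A minimal usage sketch for the Dataset/DataLoader pair above (my own toy
# example, commented out; the real MNIST arrays are built further below):
#   loader = DataLoader(Dataset(np.zeros((6, 784)), np.zeros((6, 10))),
#                       batch_size=4, shuffle=False)
#   for images, labels in loader:
#       print(images.shape, labels.shape)  # (4, 784) (4, 10), then (2, 784) (2, 10)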


class Module:
    def __init__(self, name):
        self.name = name

    def __call__(self, *args):
        return self.forward(*args)


class Initializer:
    def __init__(self, name):
        self.name = name

    def __call__(self, *args):
        return self.apply(*args)


class Parameter:
    def __init__(self, value):
        self.value = value
        self.delta = np.zeros(value.shape)

    def zero_grad(self):
        self.delta[...] = 0


class LinearLayer(Module):
    def __init__(self, input_feature, output_feature):
        super().__init__("Linear")
        self.input_feature = input_feature
        self.output_feature = output_feature
        self.weights = Parameter(np.zeros((input_feature, output_feature)))
        self.bias = Parameter(np.zeros((1, output_feature)))

        # initialize the weights with a Gaussian
        initer = GaussInitializer(0, 1.0)
        initer.apply(self.weights.value)

    def forward(self, x):
        self.x_save = x.copy()
        return x @ self.weights.value + self.bias.value
    # Gradient derivation for C = A @ B with upstream gradient G = dL/dC:
    #   dL/dB = A.T @ G   (here A is the saved input x, B is the weight matrix)
    #   dL/dA = G @ B.T   (gradient propagated to the previous layer)
    #   dL/db = sum(G, axis=0), since the bias is broadcast across the batch
    def backward(self, G):
        self.weights.delta = self.x_save.T @ G
        self.bias.delta[...] = np.sum(G, 0)  # in-place copy into the existing buffer
        return G @ self.weights.value.T


class GaussInitializer(Initializer):
    # where :math:`\mu` is the mean and :math:`\sigma` the standard
    # deviation. The square of the standard deviation, :math:`\sigma^2`,
    # is called the variance.
    def __init__(self, mu, sigma):
        super().__init__("GaussInitializer")
        self.mu = mu
        self.sigma = sigma

    def apply(self, value):
        value[...] = np.random.normal(self.mu, self.sigma, value.shape)
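

# A finite-difference check of LinearLayer.backward (my own sketch; the helper
# name and the quadratic test loss L = 0.5 * sum(y ** 2) are arbitrary choices).
# For that loss dL/dy = y, so passing y as G should make the analytic weight
# deltas match the numeric ones.
def check_linear_gradient():
    np.random.seed(0)
    layer = LinearLayer(4, 3)
    x = np.random.randn(2, 4)
    y = layer.forward(x)
    layer.backward(y)  # fills layer.weights.delta analytically
    eps = 1e-6
    w = layer.weights.value
    numeric = np.zeros_like(w)
    for i in range(w.shape[0]):
        for j in range(w.shape[1]):
            w[i, j] += eps
            loss_pos = 0.5 * np.sum(layer.forward(x) ** 2)
            w[i, j] -= 2 * eps
            loss_neg = 0.5 * np.sum(layer.forward(x) ** 2)
            w[i, j] += eps
            numeric[i, j] = (loss_pos - loss_neg) / (2 * eps)
    assert np.allclose(numeric, layer.weights.delta, atol=1e-4)

# check_linear_gradient()  # uncomment to verify the derivation above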


class ReLULayer(Module):
    def __init__(self, inplace=True):
        super().__init__("ReLU")
        self.inplace = inplace

    def forward(self, x):
        self.negative_position = x < 0
        if not self.inplace:
            x = x.copy()

        x[self.negative_position] = 0
        return x

    def backward(self, G):
        if not self.inplace:
            G = G.copy()

        G[self.negative_position] = 0
        return G


class SigmoidCrossEntropyLayer(Module):
    def __init__(self):
        super().__init__("CrossEntropyLoss")

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def forward(self, x, label_onehot):
        eps = 1e-4
        self.label_onehot = label_onehot
        self.predict = self.sigmoid(x)
        self.predict = np.clip(self.predict, a_max=1 - eps, a_min=eps)  # clip to avoid log(0)
        self.batch_size = self.predict.shape[0]
        # binary cross entropy over all classes, averaged over the batch
        return -np.sum(label_onehot * np.log(self.predict) + (1 - label_onehot) *
                       np.log(1 - self.predict)) / self.batch_size
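
    # Derivation note (standard result): with p = sigmoid(x) and the binary
    # cross entropy above, dL/dp * dp/dx telescopes to dL/dx = (p - y) / N,
    # which is exactly what backward() returns.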

    def backward(self):
        return (self.predict - self.label_onehot) / self.batch_size


class SoftmaxCrossEntropyLayer(Module):
    def __init__(self):
        super().__init__("CrossEntropyLoss")

    def softmax(self, x):
        # subtract the per-row max before exponentiating for numerical stability
        ex = np.exp(x - np.max(x, axis=1, keepdims=True))
        return ex / np.sum(ex, axis=1, keepdims=True)

    def forward(self, x, label_onehot):
        eps = 1e-4
        self.label_onehot = label_onehot
        self.predict = self.softmax(x)
        self.predict = np.clip(self.predict, a_max=1 - eps, a_min=eps)  # clip to avoid log(0)
        self.batch_size = self.predict.shape[0]
        # categorical cross entropy, which is the loss whose gradient w.r.t.
        # the logits is the (predict - label_onehot) / batch_size in backward()
        return -np.sum(label_onehot * np.log(self.predict)) / self.batch_size

    def backward(self):
        return (self.predict - self.label_onehot) / self.batch_size
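
    # Derivation note (standard result): with p = softmax(x) and
    # L = -sum(y * log(p)) / N, the softmax Jacobian contracts with the
    # log-loss gradient to give dL/dx = (p - y) / N, as returned above.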


class Model(Module):
    def __init__(self, num_feature, num_hidden, num_classes):
        super().__init__("Model")
        self.input_to_hidden = LinearLayer(num_feature, num_hidden)
        self.relu = ReLULayer()
        self.hidden_to_output = LinearLayer(num_hidden, num_classes)

    def forward(self, x):
        x = self.input_to_hidden(x)
        x = self.relu(x)
        x = self.hidden_to_output(x)
        return x
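
    # backward applies the chain rule in reverse layer order, threading the
    # upstream gradient G through each layer's backward()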

    def backward(self, G):
        G = self.hidden_to_output.backward(G)
        G = self.relu.backward(G)
        G = self.input_to_hidden.backward(G)
        return G


class Optimizer:
    def __init__(self, name, model, lr):
        self.name = name
        self.model = model
        self.lr = lr

        layers = []
        self.params = []
        for attr in model.__dict__:
            layer = model.__dict__[attr]
            if isinstance(layer, Module):
                layers.append(layer)

        for layer in layers:
            for attr in layer.__dict__:
                layer_param = layer.__dict__[attr]
                if isinstance(layer_param, Parameter):
                    self.params.append(layer_param)

    def zero_grad(self):
        for param in self.params:
            param.zero_grad()

    def set_lr(self, lr):
        self.lr = lr


class Adam(Optimizer):
    def __init__(self, model, lr=1e-3, beta1=0.9, beta2=0.999, momentum=0.1):
        super().__init__("Adam", model, lr)
        self.momentum = momentum
        self.beta1 = beta1
        self.beta2 = beta2
        self.t = 0

        for param in self.params:
            param.m = 0
            param.v = 0
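
    # Adam (Kingma & Ba, 2014): m and v are exponential moving averages of the
    # gradient and its square; dividing by (1 - beta ** t) corrects their bias
    # toward zero during the first steps. The momentum argument above is not
    # used by this implementation.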

    def step(self):
        eps = 1e-8
        self.t += 1
        for param in self.params:
            g = param.delta
            param.m = self.beta1 * param.m + (1 - self.beta1) * g
            param.v = self.beta2 * param.v + (1 - self.beta2) * g ** 2
            mt_ = param.m / (1 - self.beta1 ** self.t)
            vt_ = param.v / (1 - self.beta2 ** self.t)
            param.value -= self.lr * mt_ / (np.sqrt(vt_) + eps)


def estimate_val(predict, gt_labels, classes, loss_func):
    plabel = predict.argmax(1)
    positive = plabel == gt_labels
    total_images = predict.shape[0]
    accuracy = np.sum(positive) / total_images
    return accuracy, loss_func(predict, one_hot(gt_labels, classes))

val_labels = load_labels("E:/杜老师课程/dataset/t10k-labels-idx1-ubyte")  # 10000,
val_images = load_images("E:/杜老师课程/dataset/t10k-images-idx3-ubyte")  # 10000, 784
numdata = val_images.shape[0]  # 10000
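# scale pixels to [-0.5, 0.5] and append a constant 1 column as an extra input feature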
val_images = np.hstack((val_images / 255 - 0.5, np.ones((numdata, 1))))  # 10000, 785
val_pd = pd.DataFrame(val_labels, columns=["label"])

train_labels = load_labels("E:/杜老师课程/dataset/train-labels-idx1-ubyte")  # 60000,
train_images = load_images("E:/杜老师课程/dataset/train-images-idx3-ubyte")  # 60000, 784
numdata = train_images.shape[0]  # 60000
train_images = np.hstack((train_images / 255 - 0.5, np.ones((numdata, 1))))  # 60000, 785
train_pd = pd.DataFrame(train_labels, columns=["label"])

classes = 10  # number of output classes
batch_size = 32  # samples per batch
epochs = 20  # stopping criterion: make at most 20 passes over the full training set
lr = 1e-3
train_data = DataLoader(Dataset(train_images, one_hot(train_labels, classes)), batch_size, shuffle=True)

model = Model(train_images.shape[1], 256, classes)
loss_func = SigmoidCrossEntropyLayer()
optim = Adam(model, lr)
iters = 0

for epoch in range(epochs):
    for index, (image, label) in enumerate(train_data):
        predict = model(image)
        loss = loss_func(predict, label)
        # optim.zero_grad()
        G = loss_func.backward()
        model.backward(G)
        optim.step()
        iters += 1

        if iters % 1000 == 0:
            print(f"Iter {iters}, {epoch} / {epochs}, Loss {loss:.3f}, LR {lr:g}")
    val_accuracy, val_loss = estimate_val(model(val_images), val_labels, classes, loss_func)
    print(f"Val set, Accuracy: {val_accuracy:.3f}, Loss: {val_loss:.3f}")

From: https://www.cnblogs.com/xiaoruirui/p/16819535.html
