首页 > 其他分享 >神经网络中间层输出

神经网络中间层输出

时间:2024-11-02 18:47:09浏览次数:3  
标签:输出 kernel inplace padding stride 神经网络 中间层 True size

测试中间层输出

import torch
import numpy as np
from PIL import Image
from torchvision import transforms, models
import matplotlib.cm
from torchinfo import summary
import copy
import cv2
import matplotlib.pyplot as plt

# Pick the compute device once; fall back to CPU when CUDA is unavailable.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Global buffers filled by the forward hook, in call order:
# the hooked module's class, its forward inputs, and its outputs.
module_name = []
p_in = []
p_out = []


def hook_fn(module, inputs, outputs):
    """Forward hook: record the module's class plus its inputs and outputs."""
    print(module)
    module_name.append(module.__class__)
    p_in.append(inputs)
    p_out.append(outputs)


# Load a pretrained detection model and register the forward hook on the RPN
# classification head, so its inputs/outputs are captured during inference.
model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.rpn.head.cls_logits.register_forward_hook(hook_fn) # faster_rcnn

## Retinanet
# model = models.detection.retinanet_resnet50_fpn(pretrained=True)
# model.head.classification_head.cls_logits.register_forward_hook(hook_fn)

## SSD300 VGG16
# model = models.detection.ssd300_vgg16(pretrained=True)
# model.head.classification_head.register_forward_hook(hook_fn)

# print(summary(model, (1,3,300,300), verbose=1))

# Load one input image; keep an untouched copy for later visualization.
img_file = "elephant_1280p.jpg"
img = Image.open(img_file)
ori_img = img.copy()

# Preprocessing: to tensor + ImageNet mean/std normalization.
transform = transforms.Compose([
    # transforms.Resize((416,416)),
    transforms.ToTensor(),
    transforms.Normalize(
        [0.485, 0.456, 0.406],
        [0.229, 0.224, 0.225]
    )
])

img = transform(img).unsqueeze(0)  # add a batch dimension

model.to(device)
img = img.to(device) # torch.Size([1, 3, 720, 1280])

# Run inference in eval mode; the hook fires during this forward pass and
# fills module_name / p_in / p_out.
model.eval()
with torch.no_grad():
    model(img)

# Feature visualization
def show_feature_map(img_src, conv_features):
    """Visualize a conv layer's output as a heatmap overlaid on the source image.

    img_src: path of the source image file
    conv_features: conv output tensor of shape [b, c, h, w]
    """
    # Fix: honor the img_src parameter (the original ignored it and re-opened
    # the global img_file).
    img = Image.open(img_src).convert('RGB')
    conv_features = conv_features.cpu()

    heat = conv_features.squeeze(0)       # drop the batch dim -> (C, H, W)
    heatmap = torch.mean(heat, dim=0)     # average over channels -> (H, W)
    # heatmap = torch.max(heat,dim=1).values.squeeze()

    heatmap = heatmap.numpy()
    heatmap = np.maximum(heatmap, 0)               # keep positive activations only
    heatmap /= (np.max(heatmap) + 1e-12)           # min-max normalize; eps guards an all-zero map
    # Resize to the source image size; PIL .size is (width, height), which
    # matches cv2.resize's (dsize) argument order.
    heatmap = cv2.resize(heatmap, (img.size[0], img.size[1]))
    heatmap = np.uint8(255 * heatmap)              # scale to 0-255 for 8-bit color channels
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HSV)
    plt.imshow(heatmap)
    plt.show()
    # heatmap = np.array(Image.fromarray(heatmap).convert('L'))
    # Blend with the original image; reverse channels because cv2 uses BGR.
    superimg = heatmap * 0.4 + np.array(img)[:, :, ::-1]
    # Fix: the blend can exceed 255 -- clip and cast before writing to disk.
    superimg = np.clip(superimg, 0, 255).astype(np.uint8)
    cv2.imwrite('./superimg.jpg', superimg)
    # Show the blended result.
    img_ = np.array(Image.open('./superimg.jpg').convert('RGB'))
    plt.imshow(img_)
    plt.show()

# Dispatch on the model's printed name (which starts with the class name)
# and visualize the hooked layer for each architecture.
model_str = model.__str__()

if model_str.startswith('SSD'):
    for k in range(len(module_name)):
        # Fix: iterate this call's inputs (p_in[k]), not always the first call's.
        for j in range(len(p_in[k][0])):
            print(p_in[k][0][j].shape)
            print(p_out[k].shape)
            show_feature_map(img_file, p_in[k][0][j])
            # show_feature_map(img_file, torch.sigmoid(p_out[k]))
            print()

if model_str.startswith('RetinaNet'): # retinanet
    for k in range(len(module_name)):  # one entry per feature-map scale
        print(p_in[k][0].shape)
        print(p_out[k].shape)
        # show_feature_map(img_file, p_in[k][j])
        show_feature_map(img_file, torch.sigmoid(p_out[k]))
        print()

if model_str.startswith('FasterRCNN'): # FasterRCNN
    for k in range(len(module_name)):
        print(p_in[k][0].shape)
        print(p_out[k].shape)
        # show_feature_map(img_file, p_in[k][0])
        show_feature_map(img_file, torch.sigmoid(p_out[k]))
        print()

print(summary(model, (1,3,300,300), verbose=1))

测试hook

import torch
import torch.nn as nn
import torch.nn.functional as F
 
class LeNet(nn.Module):
    """LeNet-5 style CNN: two conv+pool stages followed by three FC layers."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)   # conv #1 -> relu -> pool #2
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)   # conv #3 -> relu -> pool
        x = x.view(x.size(0), -1)                    # flatten for the FC head
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
 
# Storage for intermediate activations captured by the hook.
features = []


def hook(module, input, output):
    """Forward hook: stash a detached copy of the hooked module's output.

    For net.conv2 this is the result of self.conv2(out) inside forward (#3);
    clone+detach so the stored tensor is independent of the autograd graph.
    """
    features.append(output.clone().detach())
 
 
net = LeNet()  # instantiate the model
print(net)

sample = torch.randn(2, 3, 32, 32)  # random input batch
hook_handle = net.conv2.register_forward_hook(hook)  # capture conv2's intermediate result
_ = net(sample)  # the forward pass triggers the hook

print(features[0].size())  # the stored conv2 activation, i.e. self.conv2(out)
hook_handle.remove()  # always remove hooks when done


"""
import torch 
from torch.autograd import Variable 
 
 
def print_grad(grad):
    print('grad is \n',grad)
 
x = Variable(torch.randn(2, 1), requires_grad=True)
## x = torch.rand(2,1,requires_grad=True) #  等效
print('x value is \n',x)
y = x+3
print('y value is \n',y)
z = torch.mean(torch.pow(y, 1/2))
lr = 1e-3
 
y.register_hook(print_grad) 

z.backward() # 梯度求解

print(x.grad.data)

x.data -= lr*x.grad.data
print('new x is\n',x)
"""

测试net

import torch.nn as nn
 
class AlexNet(nn.Module):
    """AlexNet variant with halved channel widths (48/128/192 vs the paper's 96/256/384).

    Args:
        num_classes: size of the final classification layer.
        init_weights: when True, apply Kaiming (conv) / normal (linear) init.
    """

    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),  # input[3, 224, 224]  output[48, 55, 55]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # output[48, 27, 27]
            nn.Conv2d(48, 128, kernel_size=5, padding=2),           # output[128, 27, 27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # output[128, 13, 13]
            nn.Conv2d(128, 192, kernel_size=3, padding=1),          # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),          # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),          # output[128, 13, 13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # output[128, 6, 6]
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)            # [N, 3, 224, 224] -> [N, 128, 6, 6]
        # FIX: flatten to [N, 128*6*6] before the Linear head; without this the
        # first nn.Linear(4608, ...) receives a 4-D tensor and raises.
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Kaiming init suits ReLU activations; biases start at zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
 
if __name__ == '__main__':
    model = AlexNet()

    # Demonstrate the different module/parameter iterators of nn.Module.
    print('model children: ')
    for child in model.children():
        print(child)
    print("\n")

    print('model modules: ')
    for sub in model.modules():
        print(sub)
    print("\n")

    print('model named children: ')
    for name, child in model.named_children():
        print(f'name: {name}, module: {child}')
    print("\n")

    print('model named modules: ')
    for name, sub in model.named_modules():
        print(f'name: {name}, module: {sub}')
    print("\n")

    print('parameters: ')
    for param in model.parameters():
        print(f'parameter: {param.shape}')
    print("\n")

    print('model named parameters: ')
    for name, param in model.named_parameters():
        print(f'name: {name}, parameter: {param.shape}')


"""
model children: 
Sequential(
  (0): Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
  (1): ReLU(inplace=True)
  (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (3): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (4): ReLU(inplace=True)
  (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (6): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (7): ReLU(inplace=True)
  (8): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (9): ReLU(inplace=True)
  (10): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (11): ReLU(inplace=True)
  (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
)
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=4608, out_features=2048, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=2048, out_features=2048, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=2048, out_features=1000, bias=True)
)

model modules: 
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Dropout(p=0.5, inplace=False)
    (1): Linear(in_features=4608, out_features=2048, bias=True)
    (2): ReLU(inplace=True)
    (3): Dropout(p=0.5, inplace=False)
    (4): Linear(in_features=2048, out_features=2048, bias=True)
    (5): ReLU(inplace=True)
    (6): Linear(in_features=2048, out_features=1000, bias=True)
  )
)
Sequential(
  (0): Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
  (1): ReLU(inplace=True)
  (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (3): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (4): ReLU(inplace=True)
  (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (6): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (7): ReLU(inplace=True)
  (8): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (9): ReLU(inplace=True)
  (10): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (11): ReLU(inplace=True)
  (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
)
Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
ReLU(inplace=True)
MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
ReLU(inplace=True)
MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
ReLU(inplace=True)
Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
ReLU(inplace=True)
Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
ReLU(inplace=True)
MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=4608, out_features=2048, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=2048, out_features=2048, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=2048, out_features=1000, bias=True)
)
Dropout(p=0.5, inplace=False)
Linear(in_features=4608, out_features=2048, bias=True)
ReLU(inplace=True)
Dropout(p=0.5, inplace=False)
Linear(in_features=2048, out_features=2048, bias=True)
ReLU(inplace=True)
Linear(in_features=2048, out_features=1000, bias=True)

model named children: 
name: features, module: Sequential(
  (0): Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
  (1): ReLU(inplace=True)
  (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (3): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (4): ReLU(inplace=True)
  (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (6): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (7): ReLU(inplace=True)
  (8): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (9): ReLU(inplace=True)
  (10): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (11): ReLU(inplace=True)
  (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
)
name: classifier, module: Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=4608, out_features=2048, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=2048, out_features=2048, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=2048, out_features=1000, bias=True)
)

model named modules: 
name: , module: AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Dropout(p=0.5, inplace=False)
    (1): Linear(in_features=4608, out_features=2048, bias=True)
    (2): ReLU(inplace=True)
    (3): Dropout(p=0.5, inplace=False)
    (4): Linear(in_features=2048, out_features=2048, bias=True)
    (5): ReLU(inplace=True)
    (6): Linear(in_features=2048, out_features=1000, bias=True)
  )
)
name: features, module: Sequential(
  (0): Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
  (1): ReLU(inplace=True)
  (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (3): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (4): ReLU(inplace=True)
  (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (6): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (7): ReLU(inplace=True)
  (8): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (9): ReLU(inplace=True)
  (10): Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (11): ReLU(inplace=True)
  (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
)
name: features.0, module: Conv2d(3, 48, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
name: features.1, module: ReLU(inplace=True)
name: features.2, module: MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
name: features.3, module: Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
name: features.4, module: ReLU(inplace=True)
name: features.5, module: MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
name: features.6, module: Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
name: features.7, module: ReLU(inplace=True)
name: features.8, module: Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
name: features.9, module: ReLU(inplace=True)
name: features.10, module: Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
name: features.11, module: ReLU(inplace=True)
name: features.12, module: MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
name: classifier, module: Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=4608, out_features=2048, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=2048, out_features=2048, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=2048, out_features=1000, bias=True)
)
name: classifier.0, module: Dropout(p=0.5, inplace=False)
name: classifier.1, module: Linear(in_features=4608, out_features=2048, bias=True)
name: classifier.2, module: ReLU(inplace=True)
name: classifier.3, module: Dropout(p=0.5, inplace=False)
name: classifier.4, module: Linear(in_features=2048, out_features=2048, bias=True)
name: classifier.5, module: ReLU(inplace=True)
name: classifier.6, module: Linear(in_features=2048, out_features=1000, bias=True)

parameters: 
parameter: torch.Size([48, 3, 11, 11])
parameter: torch.Size([48])
parameter: torch.Size([128, 48, 5, 5])
parameter: torch.Size([128])
parameter: torch.Size([192, 128, 3, 3])
parameter: torch.Size([192])
parameter: torch.Size([192, 192, 3, 3])
parameter: torch.Size([192])
parameter: torch.Size([128, 192, 3, 3])
parameter: torch.Size([128])
parameter: torch.Size([2048, 4608])
parameter: torch.Size([2048])
parameter: torch.Size([2048, 2048])
parameter: torch.Size([2048])
parameter: torch.Size([1000, 2048])
parameter: torch.Size([1000])

model named parameters: 
name: features.0.weight, parameter: torch.Size([48, 3, 11, 11])
name: features.0.bias, parameter: torch.Size([48])
name: features.3.weight, parameter: torch.Size([128, 48, 5, 5])
name: features.3.bias, parameter: torch.Size([128])
name: features.6.weight, parameter: torch.Size([192, 128, 3, 3])
name: features.6.bias, parameter: torch.Size([192])
name: features.8.weight, parameter: torch.Size([192, 192, 3, 3])
name: features.8.bias, parameter: torch.Size([192])
name: features.10.weight, parameter: torch.Size([128, 192, 3, 3])
name: features.10.bias, parameter: torch.Size([128])
name: classifier.1.weight, parameter: torch.Size([2048, 4608])
name: classifier.1.bias, parameter: torch.Size([2048])
name: classifier.4.weight, parameter: torch.Size([2048, 2048])
name: classifier.4.bias, parameter: torch.Size([2048])
name: classifier.6.weight, parameter: torch.Size([1000, 2048])
name: classifier.6.bias, parameter: torch.Size([1000])
"""

标签:输出,kernel,inplace,padding,stride,神经网络,中间层,True,size
From: https://www.cnblogs.com/sgqmax/p/18522317

相关文章

  • 常用的神经网络实现
    VGG16fromtorchimportnnclassVGG(nn.Module):"""一共6个版本,最常用VGG16VGG采用五组卷积,三个全连接,最后用Softmax分类VGG显著特点:每次经过池化层maxpool后特征图尺寸减小一倍,通道数增加一倍(最后一个池化层除外)"""def__init__(self,num_class......
  • 神经网络工具nn
    实现神经网络torch将张量转换为torch.cuda.TensorFloat并在GPU上进行计算torch.autograd构建计算图并自动获取梯度torch.nn具有共享层和损失函数的神经网络库torch.optim通用优化算法神经网络基本结构网络层:神经网络的基本模型网络模型:层构成的网络损失函数:参数学习的......
  • ResNet 残差神经网络
    文章目录一、什么是ResNet?二、残差结构(ResidualStructure)三、BatchNormalization(BN----批归一化)一、什么是ResNet?ResNet网络是在2015年由微软实验室中的何凯明等几位大神提出,斩获当年ImageNet竞赛中分类任务第一名,目标检测第一名。获得COCO数据集中目标检测第......
  • 机器学习入门基础----白板推导笔记输出
    为了能够建立知识学习后输出体系,开设这个系列,旨在通过记录博客输出学习到的机器学习内容,笔者所学为B站upshuhuai008白板推导系列,记录可能比不上原创,也可能有没理解不严谨的地方,请善意指正。感兴趣的可以去看UP白板-------------------------------------------------------------......
  • 适用FPGA的小型神经网络:加速边缘智能的新篇章
    在人工智能(AI)技术日新月异的今天,神经网络作为其核心驱动力,正逐步渗透到各个行业与领域。然而,传统的神经网络模型往往受限于计算资源和功耗,难以在边缘设备上实现高效运行。现场可编程门阵列(FPGA)作为一种高性能、低功耗的硬件加速器,为小型神经网络的部署提供了理想的平台。本文将深......
  • 7-2 输出闰年 分数 10
    输出21世纪中截止某个年份以来的所有闰年年份。注意:闰年的判别条件是该年年份能被4整除但不能被100整除、或者能被400整除。输入格式:输入在一行中给出21世纪的某个截止年份。输出格式:逐行输出满足条件的所有闰年年份,即每个年份占一行。输入若非21世纪的年份则输出"Invalidy......
  • STM32(4)输出比较和输入捕获
    OC输出比较OC(OutputCompare)输出比较输出比较可以通过比较CNT与CCR寄存器值的关系,来对输出电平进行置1、置0或翻转的操作,用于输出一定频率和占空比的PWM波形每个高级定时器和通用定时器都拥有4个输出比较通道高级定时器的前3个通道额外拥有死区生成和互补输出的功能 输......
  • 功率因数对输出电压纹波的影响
    功率因数对输出电压纹波的影响低功率因数的输入电源会增加电网的负担,同时也会影响DC-DC变换器的性能。低功率因数会导致输入电流与输入电压之间的相位差增大,从而增加DC-DC变换器的损耗。这些损耗会转化为热量,从而影响DC-DC变换器的工作温度。如果工作温度过高,DC-DC变换器......
  • 模拟大模型训练时,单双精度输出不一致?从而加剧幻觉?或导致幻觉?
        下面是Python代码。就同样的随机数据,分别在单精度、双精度下做模拟训练与预测,最后比较它们预测的值,发现不一致。    大家看看,代码是否有问题?importnumpyasnpimporttensorflowastffromtensorflow.keras.layersimportDense,LSTMfromtensorfl......
  • 项目实战:Qt+OpenCV仿射变换工具v1.1.0(支持打开图片、输出棋盘角点、调整偏移点、导出
    需求  1.打开图片;  2.矫正识别角点;  3.opencv摄像头操作子线程处理;  4.支持设置棋盘格的行列角点数; 背景  深入研究图像拼接细分支算法,产出的效果查看工具,验证算法单步思路。 相关博客  《项目实战:Qt+Opencv相机标定工具v1.3.0(支持打开摄像头、视......