import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
class ModulatedAttLayer(nn.Module):
    # (Unchanged code; an illustrative sketch of the layer body follows)
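    # NOTE: this body is a minimal sketch, assuming an embedded-Gaussian
    # self-attention (non-local) block whose forward pass returns the attended
    # features together with the attention map. It is a stand-in for the
    # omitted ModulatedAttLayer definition, not the original implementation.
    def __init__(self, in_channels, reduction=2, mode='embedded_gaussian'):
        super(ModulatedAttLayer, self).__init__()
        assert mode == 'embedded_gaussian', 'only the embedded_gaussian mode is sketched here'
        self.inter_channels = in_channels // reduction
        # 1x1 convolutions producing the query / key / value embeddings
        self.theta = nn.Conv2d(in_channels, self.inter_channels, kernel_size=1)
        self.phi = nn.Conv2d(in_channels, self.inter_channels, kernel_size=1)
        self.g = nn.Conv2d(in_channels, self.inter_channels, kernel_size=1)
        # Project the attended features back to the input channel count
        self.out_conv = nn.Conv2d(self.inter_channels, in_channels, kernel_size=1)

    def forward(self, x):
        b, c, h, w = x.size()
        # Flatten the spatial dimensions: (b, inter_channels, h*w)
        theta = self.theta(x).view(b, self.inter_channels, -1)  # queries
        phi = self.phi(x).view(b, self.inter_channels, -1)      # keys
        g = self.g(x).view(b, self.inter_channels, -1)          # values
        # Embedded-Gaussian affinity: softmax over pairwise dot products, shape (b, h*w, h*w)
        attention = torch.softmax(torch.bmm(theta.transpose(1, 2), phi), dim=-1)
        # Aggregate the values with the attention weights and restore the spatial layout
        out = torch.bmm(g, attention.transpose(1, 2)).view(b, self.inter_channels, h, w)
        out = self.out_conv(out) + x  # residual connection
        return out, attention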
class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.mod_att = ModulatedAttLayer(in_channels=64, reduction=2, mode='embedded_gaussian')
        # Pool to a fixed 7x7 grid so the flattened size matches the fully connected
        # layer regardless of the input resolution
        self.pool = nn.AdaptiveAvgPool2d((7, 7))
        self.fc = nn.Linear(64 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x, attention_maps = self.mod_att(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        # Return the attention maps alongside the logits so they can be inspected later
        return x, attention_maps
# Dummy data loader for demonstration purposes
# CIFAR-10 is kept at its native 32x32: the pairwise spatial attention scales with (H*W)^2,
# so resizing to 224x224 would make the attention map prohibitively large.
transform = transforms.Compose([transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root='./data', train=True, download=True, transform=transform),
    batch_size=32, shuffle=True, num_workers=4)
# Move the model and data to GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SimpleCNN().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training loop
criterion = nn.CrossEntropyLoss()
for epoch in range(5):
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)  # Move data to the GPU
        optimizer.zero_grad()
        output, _ = model(data)  # The attention maps are not needed for the loss
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item()}')
# Testing the attention mechanism
model.eval()
with torch.no_grad():
    test_data, _ = next(iter(train_loader))
    test_data = test_data.to(device)  # Move data to the GPU
    test_output, attention_maps = model(test_data)
# Visualize or analyze the attention maps as needed, for example as sketched below
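One way to inspect the attention is to look at how much weight each spatial position receives on average. The snippet below is a minimal sketch, assuming the attention map has shape (batch, H*W, H*W) with 32x32 feature maps as in the illustrative ModulatedAttLayer above; matplotlib is an extra dependency introduced only for this visualization.

import matplotlib.pyplot as plt

# Average attention received by each spatial position, over the batch and all query positions
avg_attention = attention_maps.mean(dim=(0, 1)).reshape(32, 32).cpu().numpy()
plt.imshow(avg_attention, cmap='viridis')
plt.colorbar()
plt.title('Average spatial attention')
plt.show()

A CPU-only variant of the same example, without the explicit device handling, follows.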
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Assuming you have a simple CNN model
class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.mod_att = ModulatedAttLayer(in_channels=64, reduction=2, mode='embedded_gaussian')
        # Pool to a fixed 7x7 grid so the flattened size matches the fully connected
        # layer regardless of the input resolution
        self.pool = nn.AdaptiveAvgPool2d((7, 7))
        self.fc = nn.Linear(64 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x, attention_maps = self.mod_att(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        # Return the attention maps alongside the logits so they can be inspected later
        return x, attention_maps
# Dummy data loader for demonstration purposes
transform = transforms.Compose([transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root='./data', train=True, download=True, transform=transform),
    batch_size=32, shuffle=True, num_workers=4)
# Initialize the model and optimizer
model = SimpleCNN()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training loop
criterion = nn.CrossEntropyLoss()
for epoch in range(5):
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output, _ = model(data)  # The attention maps are not needed for the loss
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item()}')
# Testing the attention mechanism
model.eval()
with torch.no_grad():
    test_data, _ = next(iter(train_loader))
    test_output, attention_maps = model(test_data)
# Visualize or analyze the attention maps as needed
From: https://www.cnblogs.com/ZarkY/p/17902068.html