import math
import torch
from torch import nn
from d2l import torch as d2l
# Masked softmax operation
#@save
def masked_softmax(X, valid_lens):
    """Perform the softmax operation by masking elements on the last axis."""
    # X: 3D tensor, valid_lens: 1D or 2D tensor
    if valid_lens is None:
        # dim=-1: the last axis
        return nn.functional.softmax(X, dim=-1)
    else:
        shape = X.shape
        # print('shape', shape)
        if valid_lens.dim() == 1:
            # Repeat the elements of a tensor along a given dimension.
            # torch.repeat_interleave(input, repeats, dim=None) -> Tensor
            #   1) input (torch.Tensor): the input tensor
            #   2) repeats (int or torch.Tensor): the number of repetitions
            #      for each element
            #   3) dim (int): the dimension along which to repeat. By default
            #      dim=None, which flattens the input to a vector, repeats
            #      each element `repeats` times, and returns the result.
            # print('valid_lens0', valid_lens)
            valid_lens = torch.repeat_interleave(valid_lens, shape[1])
            # print('valid_lens1', valid_lens)
        else:
            # Flatten the 2D lengths into a single vector
            # print('valid_lens0', valid_lens)
            valid_lens = valid_lens.reshape(-1)
            # print('valid_lens1', valid_lens)
        # Masked elements on the last axis are replaced with a very large
        # negative value (-1e6), so their softmax output becomes 0
        X = d2l.sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
                              value=-1e6)
        # Example of sequence_mask:
        # X = torch.tensor([[1, 2, 3], [4, 5, 6]])
        # sequence_mask(X, torch.tensor([1, 2]))
        # tensor([[1, 0, 0],
        #         [4, 5, 0]])
        return nn.functional.softmax(X.reshape(shape), dim=-1)
# How the function works
# print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3])))
# print(masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]])))
"""
shape torch.Size([2, 2, 4])
valid_lens0 tensor([2, 3])
valid_lens1 tensor([2, 2, 3, 3])
tensor([[[0.3190, 0.6810, 0.0000, 0.0000],
[0.4617, 0.5383, 0.0000, 0.0000]],
[[0.2563, 0.3015, 0.4423, 0.0000],
[0.3518, 0.3026, 0.3456, 0.0000]]])
shape torch.Size([2, 2, 4])
valid_lens0 tensor([[1, 3],
[2, 4]])
valid_lens1 tensor([1, 3, 2, 4])
tensor([[[1.0000, 0.0000, 0.0000, 0.0000],
[0.2313, 0.3962, 0.3725, 0.0000]],
[[0.3576, 0.6424, 0.0000, 0.0000],
[0.2277, 0.2360, 0.3160, 0.2203]]])
"""
# Additive attention
#@save
class AdditiveAttention(nn.Module):
    """Additive attention"""
    def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
        super(AdditiveAttention, self).__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens):
        # valid_lens: how many key-value pairs are actually used
        queries, keys = self.W_q(queries), self.W_k(keys)
        # queries.shape: (batch_size, num_queries, num_hiddens),
        #   e.g. torch.Size([2, 1, 8])
        # keys.shape: (batch_size, num_kv_pairs, num_hiddens),
        #   e.g. torch.Size([2, 10, 8])
        # After dimension expansion:
        # queries: (batch_size, num_queries, 1, num_hiddens)
        # keys:    (batch_size, 1, num_kv_pairs, num_hiddens)
        # features: (batch_size, num_queries, num_kv_pairs, num_hiddens)
        # Summation by broadcasting:
        # queries.unsqueeze(2): torch.Size([2, 1, 1, 8])
        # keys.unsqueeze(1):    torch.Size([2, 1, 10, 8])
        features = queries.unsqueeze(2) + keys.unsqueeze(1)
        # features.shape: torch.Size([2, 1, 10, 8])
        features = torch.tanh(features)
        # self.w_v has a single output, so the last dimension is removed
        # from the shape.
        # scores.shape: (batch_size, num_queries, num_kv_pairs)
        # self.w_v(features).shape: torch.Size([2, 1, 10, 1])
        scores = self.w_v(features).squeeze(-1)
        # scores.shape: torch.Size([2, 1, 10])
        # Mask out the padded positions
        self.attention_weights = masked_softmax(scores, valid_lens)
        # attention_weights: (batch_size, num_queries, num_kv_pairs),
        #   here torch.Size([2, 1, 10])
        # values: (batch_size, num_kv_pairs, value_size),
        #   here torch.Size([2, 10, 4])
        # Output: (batch_size, num_queries, value_size),
        #   here torch.Size([2, 1, 4])
        return torch.bmm(self.dropout(self.attention_weights), values)
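# For reference, the scoring function implemented above is
# a(q, k) = w_v^T tanh(W_q q + W_k k). The helper below is a hypothetical
# per-pair sketch (not part of the original code) that computes one scalar
# score, mirroring what the batched broadcast does for every query-key pair.
def additive_score_sketch(q, k, W_q, W_k, w_v):
    """Score a single query vector q against a single key vector k."""
    return torch.dot(w_v, torch.tanh(W_q @ q + W_k @ k))

# q_vec, k_vec = torch.randn(20), torch.randn(2)
# Wq, Wk, wv = torch.randn(8, 20), torch.randn(8, 2), torch.randn(8)
# print(additive_score_sketch(q_vec, k_vec, Wq, Wk, wv))  # a scalar score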
# Demonstrate the AdditiveAttention class above
# (2, 1, 20) -> (batch_size, num_queries, query_size)
# (2, 10, 2) -> (batch_size, num_kv_pairs, key_size)
queries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))
# Mini-batch of values; the two value matrices are identical
# (2, 10, 4) -> (batch_size, num_kv_pairs, value_size)
values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(
    2, 1, 1)
valid_lens = torch.tensor([2, 6])

attention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8,
                              dropout=0.1)
attention.eval()
print(attention(queries, keys, values, valid_lens))
print('attention(queries, keys, values, valid_lens).shape',
      attention(queries, keys, values, valid_lens).shape)
"""
tensor([[[ 2.0000, 3.0000, 4.0000, 5.0000]],
[[10.0000, 11.0000, 12.0000, 13.0000]]], grad_fn=<BmmBackward0>)
(batch_size, num_query, value_size)
attention(queries, keys, values, valid_lens).shape torch.Size([2, 1, 4])
"""
# Attention weights
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
                  xlabel='Keys', ylabel='Queries')
# d2l.plt.show()
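# Sanity check (a hypothetical addition, not in the original demo): since all
# ten keys are identical, every score is equal, so masked_softmax spreads the
# weight uniformly over the first valid_len positions and the output is just
# the mean of those value rows.
print(values[0, :2].mean(dim=0))  # tensor([2., 3., 4., 5.])
print(values[1, :6].mean(dim=0))  # tensor([10., 11., 12., 13.])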
# Scaled dot-product attention
#@save
class DotProductAttention(nn.Module):
    """Scaled dot-product attention"""
    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    # Both queries and keys are vectors of length d (query_size == key_size)
    # queries: (batch_size, num_queries, d)
    # keys:    (batch_size, num_kv_pairs, d)
    # values:  (batch_size, num_kv_pairs, value_size)
    # valid_lens: (batch_size,) or (batch_size, num_queries)
    def forward(self, queries, keys, values, valid_lens=None):
        d = queries.shape[-1]
        # transpose(1, 2) swaps the last two dimensions of keys
        scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
        self.attention_weights = masked_softmax(scores, valid_lens)
        # values: (batch_size, num_kv_pairs, value_size)
        return torch.bmm(self.dropout(self.attention_weights), values)
# Demonstrate the DotProductAttention class above
# (batch_size, num_queries, query_size)
queries = torch.normal(0, 1, (2, 1, 2))
attention = DotProductAttention(dropout=0.5)
attention.eval()
print(attention(queries, keys, values, valid_lens))
# Uniform attention weights
d2l.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
                  xlabel='Keys', ylabel='Queries')
# d2l.plt.show()
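# Manual re-computation (a sketch added here, not in the original): in eval
# mode dropout is a no-op, so the module's output should match a hand-rolled
# masked_softmax over the scaled dot-product scores.
d = queries.shape[-1]
manual_scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
manual_weights = masked_softmax(manual_scores, valid_lens)
print(torch.allclose(torch.bmm(manual_weights, values),
                     attention(queries, keys, values, valid_lens)))  # True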