从0开始学pytorch【4】--维度变换、拼接与拆分
- 学习内容:
- 维度变换:
- 张量拆分与拼接:
- 小结
学习内容:
维度变换、张量拆分与拼接
维度变换:
1、view
import torch

# view() reshapes a tensor without copying data and without touching the original.
a = torch.rand(4, 1, 28, 28)
print(a.shape)
flat = a.view(4, 28 * 28)      # flatten each 1x28x28 image into a 784-vector
print(flat)
print(a.shape)                 # a itself is unchanged
b = a.view(4, 28, -1)          # -1 lets PyTorch infer the remaining size
b.view(4, 28, 28, -1).shape
'''
torch.Size([4, 1, 28, 28])
tensor([[0.2516, 0.4973, 0.2032, ..., 0.1892, 0.5932, 0.9167],
[0.9366, 0.3864, 0.8891, ..., 0.3008, 0.7179, 0.5442],
[0.5551, 0.8872, 0.8677, ..., 0.3314, 0.7326, 0.2640],
[0.2746, 0.3667, 0.0543, ..., 0.9942, 0.4714, 0.9417]])
torch.Size([4, 1, 28, 28])
torch.Size([4, 28, 28, 1])
'''
2、unsqueeze
插入维度(以增加中括号实现,不改变数据个数多少)
# unsqueeze(dim) inserts a size-1 dimension at the given index;
# negative indices count from the end. The element count is unchanged.
print(a.shape)
(a.unsqueeze(0).shape, a.unsqueeze(-1).shape, a.unsqueeze(4).shape,
 a.unsqueeze(-4).shape, a.unsqueeze(-5).shape)
'''
torch.Size([4, 1, 28, 28])
(torch.Size([1, 4, 1, 28, 28]),
torch.Size([4, 1, 28, 28, 1]),
torch.Size([4, 1, 28, 28, 1]),
torch.Size([4, 1, 1, 28, 28]),
torch.Size([1, 4, 1, 28, 28]))
'''
直观理解: dim=0 在最外层插入新维度(包住所有元素), dim=-1 在最里层插入(包住每个元素)
# A 1-D tensor becomes a column vector with unsqueeze(-1)
# and a row vector with unsqueeze(0).
a = torch.tensor([1.2, 2.3])
a.unsqueeze(-1), a.unsqueeze(0)
'''
(tensor([[1.2000],
[2.3000]]),
tensor([[1.2000, 2.3000]]))
'''
维度增加
# Broadcasting in action: a (1, 32, 1, 1) bias stretches to match
# a (4, 32, 14, 14) feature map during addition.
bias = torch.rand(1, 32, 1, 1)
f = torch.rand(4, 32, 14, 14)
print((f + bias).shape)
# Rebuild the same (1, 32, 1, 1) shape from a flat (32,) vector
# by inserting size-1 dims one at a time.
bias = torch.rand(32).unsqueeze(1).unsqueeze(2).unsqueeze(0)
(f + bias).shape
'''
torch.Size([4, 32, 14, 14])
torch.Size([4, 32, 14, 14])
'''
squeeze,维度删减,体现在中括号的减少,元素个数不变
# squeeze() removes size-1 dimensions; the element count never changes.
b = torch.rand(1, 32, 1, 1)
print(b.squeeze().shape)     # no argument: drop every size-1 dim
print(b.squeeze(0).shape)    # drop dim 0
print(b.squeeze(-1).shape)   # drop the last dim
print(b.squeeze(1).shape)    # dim 1 has size 32, so this is a no-op
b.squeeze(-4).shape
'''
torch.Size([32])
torch.Size([32, 1, 1])
torch.Size([1, 32, 1])
torch.Size([1, 32, 1, 1])
torch.Size([32, 1, 1])
'''
expand,逻辑上复制,实际上内存不改变
# expand() broadcasts size-1 dims to a larger size without allocating
# new memory; -1 means "keep this dimension as is".
a = torch.rand(4, 32, 14, 14)
b = torch.randn(1, 32, 1, 1)
print(a.shape, b.shape)
expanded = b.expand(4, 32, 14, 14)
kept = b.expand(-1, 32, -1, -1)
expanded.shape, kept.shape, b.shape
'''
torch.Size([4, 32, 14, 14]) torch.Size([1, 32, 1, 1])
(torch.Size([4, 32, 14, 14]),
torch.Size([1, 32, 1, 1]),
torch.Size([1, 32, 1, 1]))
'''
repeat, 复制
# repeat() really copies data; each argument is a per-dim multiplier
# (a repeat count), NOT a target size like expand().
print(b.shape, 32 * 32)
b.repeat(4, 32, 1, 1).shape, b.repeat(4, 1, 1, 1).shape, b.repeat(4, 1, 32, 32).shape
'''
torch.Size([1, 32, 1, 1]) 1024
(torch.Size([4, 1024, 1, 1]),
torch.Size([4, 32, 1, 1]),
torch.Size([4, 32, 32, 32]))
'''
t转置
# .t() transposes a 2-D matrix (only defined for tensors of <= 2 dims).
a = torch.randn(3, 4)
a.shape, a.t().shape
'''
(torch.Size([3, 4]), torch.Size([4, 3]))
'''
transpose维度交换
# transpose(i, j) swaps exactly two dimensions of a tensor of any rank.
a = torch.rand(4, 3, 30, 32)
a.shape, a.transpose(1, 3).shape
'''
(torch.Size([4, 3, 30, 32]), torch.Size([4, 32, 30, 3]))
'''
contiguous() 将转置后的非连续张量复制为连续内存, 是必要步骤: transpose 后张量内存不再连续, 直接调用 view 会报错
# transpose() returns a non-contiguous tensor; contiguous() copies it into
# contiguous memory so that view() can be applied afterwards.
a = torch.rand(4, 3, 30, 32)
flat = a.transpose(1, 3).contiguous().view(4, 3 * 30 * 32)
# a1: reshape the flattened transposed data straight back -> data is scrambled.
a1 = flat.view(4, 3, 30, 32)
# a2: first restore the transposed shape, then transpose back -> identical to a.
a2 = flat.view(4, 32, 30, 3).transpose(1, 3)
print(a1.shape, a2.shape)
print(torch.all(torch.eq(a, a1)), torch.all(torch.eq(a, a1)).int())
print(torch.all(torch.eq(a, a2)), torch.all(torch.eq(a, a2)).int())
'''
torch.Size([4, 3, 30, 32]) torch.Size([4, 3, 30, 32])
tensor(False) tensor(0, dtype=torch.int32)
tensor(True) tensor(1, dtype=torch.int32)
'''
permute, 按照序号,直接调换各维度位置
# permute() reorders all dimensions at once by their indices.
a = torch.randn(4, 3, 28, 30)
a.shape, a.permute(3, 0, 2, 1).shape
'''
(torch.Size([4, 3, 28, 30]), torch.Size([30, 4, 28, 3]))
'''
broadcasting, 同 tensorflow 的 broadcast_to: 从最后一个(最里层)维度开始对齐并扩张。
# torch.broadcast_to aligns shapes starting from the trailing dimension,
# just like NumPy / TensorFlow broadcasting.
x = torch.tensor([1, 2, 3])
print(x, x.shape, torch.broadcast_to(x, (6, 3)))
'''
tensor([1, 2, 3]) torch.Size([3]) tensor([[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
'''
torch.broadcast_tensors 返回两个值,变换后的两个值
x = torch.rand(4, 2)
bias = torch.randn(1, 2)
# broadcast_tensors returns a tuple: every input broadcast to the common shape.
bias1 = torch.broadcast_tensors(bias, x)
x, bias, bias1
'''
(tensor([[0.5489, 0.0084],
[0.2405, 0.0769],
[0.2159, 0.5728],
[0.2529, 0.7946]]),
tensor([[0.3781, 0.9359]]),
(tensor([[0.3781, 0.9359],
[0.3781, 0.9359],
[0.3781, 0.9359],
[0.3781, 0.9359]]),
tensor([[0.5489, 0.0084],
[0.2405, 0.0769],
[0.2159, 0.5728],
[0.2529, 0.7946]])))
'''
张量拆分与拼接:
cat拼接
# torch.cat joins tensors along an existing dim; all other dims must match.
a = torch.rand(4, 32, 8)
b = torch.rand(5, 32, 8)
print(torch.cat([a, b], dim=0).shape)      # 4 + 5 along dim 0
a1 = torch.rand(4, 3, 32, 32)
a2 = torch.rand(5, 3, 32, 32)
print(torch.cat([a1, a2], dim=0).shape)
a1 = torch.rand(4, 1, 32, 32)
a2 = torch.rand(4, 6, 32, 32)
print(torch.cat([a1, a2], dim=1).shape)    # 1 + 6 along dim 1
a1 = torch.rand(4, 3, 32, 32)
a2 = torch.rand(4, 3, 32, 3)
print(torch.cat([a1, a2], dim=-1).shape)   # negative dims count from the end
'''
torch.Size([9, 32, 8])
torch.Size([9, 3, 32, 32])
torch.Size([4, 7, 32, 32])
torch.Size([4, 3, 32, 35])
'''
stack, 在指定位置新增一个维度来区分各张量, 要求所有张量形状完全一致
# cat merges along an existing dim; stack inserts a brand-new dim instead,
# so both tensors must have exactly the same shape.
a1 = torch.rand(4, 3, 16, 32)
a2 = torch.randn(4, 3, 16, 32)
print(torch.cat([a1, a2], dim=2).shape)     # 16 + 16 -> (4, 3, 32, 32)
print(torch.stack([a1, a2], dim=2).shape)   # new size-2 dim -> (4, 3, 2, 16, 32)
a = torch.rand(32, 8)
b = torch.rand(32, 8)
torch.stack([a, b], dim=0).shape
'''
torch.Size([4, 3, 32, 32])
torch.Size([4, 3, 2, 16, 32])
torch.Size([2, 32, 8])
'''
split 按照维度和元素个数 切割
# split cuts a tensor along a dim, either into given chunk lengths (a list)
# or into equal chunks of a given length (an int).
a = torch.rand(32, 8)
b = torch.rand(32, 8)
c = torch.stack([a, b], dim=0)
print(c.shape)
aa, bb = c.split([1, 1], dim=0)     # explicit lengths 1 and 1
print(aa.shape, bb.shape)
aa, bb = c.split([7, 1], dim=-1)    # lengths 7 and 1 along the last dim
print(aa.shape, bb.shape)
aa, bb = c.split(1, dim=0)          # equal chunks of length 1
print(aa.shape, bb.shape)
c = torch.cat([a, b], dim=0)
print(c.shape)
aa, bb = c.split([1, 63], dim=0)
print(aa.shape, bb.shape)
'''
torch.Size([2, 32, 8])
torch.Size([1, 32, 8]) torch.Size([1, 32, 8])
torch.Size([2, 32, 7]) torch.Size([2, 32, 1])
torch.Size([1, 32, 8]) torch.Size([1, 32, 8])
torch.Size([64, 8])
torch.Size([1, 8]) torch.Size([63, 8])
'''
chunk 按块数切割; 另附 torch.full 填充与对数函数示例
# chunk(n, dim) splits a tensor into n equal parts along the given dim.
c = torch.randn(2, 32, 8)
print(c.shape)
aa, bb = c.chunk(2, dim=0)
print(aa.shape, bb.shape)
# Use a float fill value: torch.full([4], 10) infers an int64 tensor on
# modern PyTorch, and torch.log/log2/log10 rejected integer tensors on
# several releases; 10. guarantees the float result shown below.
a = torch.full([4], 10.)
print(torch.log10(a),torch.log(a),torch.log2(a))
'''
torch.Size([2, 32, 8])
torch.Size([1, 32, 8]) torch.Size([1, 32, 8])
tensor([1., 1., 1., 1.]) tensor([2.3026, 2.3026, 2.3026, 2.3026]) tensor([3.3219, 3.3219, 3.3219, 3.3219])
'''
小结