PyTorch – Matrix Operations
1. Matrix creation
A Tensor is the basic object of computation in PyTorch; it can be viewed as a multi-dimensional matrix whose elements all share one data type. In practice a Tensor behaves much like a NumPy ndarray, and the two can be freely converted back and forth; the difference is that a Tensor additionally supports GPU acceleration.
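For instance, the conversion mentioned above is a one-liner in each direction (a minimal sketch; note that from_numpy shares memory with the source array on the CPU):
import torch
import numpy as np

a = np.ones((2, 2))
t = torch.from_numpy(a) # ndarray -> Tensor (shares memory on the CPU)
b = t.numpy()           # Tensor -> ndarray (also shares memory)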
1.1 Creating an uninitialized matrix
x = torch.empty(2, 2)
print(x)
Output:
tensor([[-1.5151e-28,  1.8553e-42],
        [ 0.0000e+00,  0.0000e+00]])
1.2 Randomly initialized matrix
x = torch.randn(2, 2)
Output:
tensor([[-0.0755, 0.3629],
        [-0.2084, 0.4353]]) # values follow a standard normal distribution
1.3 Zero matrix
x = torch.zeros((2, 2), dtype=int) # dtype sets the data type; Python's int maps to torch.int64
Output:
tensor([[0, 0],
        [0, 0]])
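The other constant constructors follow the same pattern; a quick sketch for reference:
y = torch.zeros(2, 2, dtype=torch.int64) # the idiomatic spelling of dtype=int
o = torch.ones(2, 2)                     # all-ones matrix
z = torch.zeros_like(o)                  # zeros with the same shape and dtype as o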
1.4 Initializing directly from data
x = torch.tensor([2.5,3.5])
1.5 Initializing over a range
x = torch.range(1, 10, 2) # the three arguments are start, end, and step
Output:
tensor([1., 3., 5., 7., 9.]) # the dtype is float
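Note that torch.range is deprecated; torch.arange is the recommended replacement. It excludes the end point and defaults to an integer dtype, so the equivalent call looks slightly different (a sketch):
x = torch.arange(1, 10, 2)                      # tensor([1, 3, 5, 7, 9]), dtype int64
x = torch.arange(1, 10, 2, dtype=torch.float32) # float version, matching the output above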
2. Tensor operations
2.1 Absolute value
x = torch.randn(2, 3).cuda(0) # random tensor, moved to the GPU for computation
print(x)
b = torch.abs(x)
print(b)
Output:
tensor([[ 0.4277,  0.6996, -0.4395],
        [ 1.4346, -0.5341, -0.6967]], device='cuda:0')
tensor([[0.4277, 0.6996, 0.4395],
        [1.4346, 0.5341, 0.6967]], device='cuda:0')
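The .cuda(0) calls used throughout this section fail on a machine without a GPU. A more portable idiom, sketched here, is to pick the device once and create tensors directly on it:
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
x = torch.randn(2, 3, device=device) # created directly on the target device
b = torch.abs(x)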
2.2 Matrix addition
a = torch.randn(2, 3).cuda(0)
print(a)
b = torch.randn(2, 3).cuda(0)
print(b)
c = torch.add(a, b)
print(c)
Output:
tensor([[ 0.1433,  1.0482,  0.0479],
        [-0.4808,  0.1263, -3.5381]], device='cuda:0')
tensor([[ 0.2173, -1.4631, -1.9016],
        [ 1.1502, -1.1447,  1.1478]], device='cuda:0')
tensor([[ 0.3605, -0.4149, -1.8537],
        [ 0.6693, -1.0184, -2.3903]], device='cuda:0')
e = torch.add(c, 10) # adding a scalar to a matrix
Output:
tensor([[10.3605,  9.5851,  8.1463],
        [10.6693,  8.9816,  7.6097]], device='cuda:0')
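torch.add also has an operator form and an in-place variant; a quick sketch:
c = a + b   # same as torch.add(a, b)
a.add_(b)   # in-place: the trailing underscore means a itself is modified
e = c + 10  # scalar broadcast, same as torch.add(c, 10)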
2.3 Clamping
a = torch.randn(2, 3).cuda(0)
print(a)
b = torch.clamp(a, -0.1, 0.1) # the last two arguments are the lower and upper bounds; values outside the range are replaced by the nearest bound
print(b)
Output:
tensor([[ 5.3171e-01, -1.5754e+00,  1.8492e-03],
        [-7.7089e-01, -6.3851e-02,  1.2988e-03]], device='cuda:0')
tensor([[ 0.1000, -0.1000,  0.0018],
        [-0.1000, -0.0639,  0.0013]], device='cuda:0')
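clamp can also be given just one bound, via keyword arguments:
lo = torch.clamp(a, min=-0.1) # only clip from below
hi = torch.clamp(a, max=0.1)  # only clip from above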
2.4 Element-wise division
a = torch.randn(2, 3).cuda(0)
print(a)
b = torch.randn(2, 3).cuda(0)
print(b)
c = torch.div(a, b) # element-wise division
print(c)
Output:
tensor([[ 0.0519, -0.6156, -0.2791],
        [ 0.0302,  0.5509,  0.6499]], device='cuda:0')
tensor([[-0.4269,  1.1787, -0.4143],
        [-0.0453,  0.9825,  0.4282]], device='cuda:0')
tensor([[-0.1216, -0.5223,  0.6737],
        [-0.6654,  0.5608,  1.5177]], device='cuda:0')
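torch.div also accepts a scalar divisor, and newer PyTorch versions (1.8+) support a rounding_mode argument for floor division; a sketch:
d = torch.div(a, 2)                        # divide every element by 2
f = torch.div(a, b, rounding_mode='floor') # floor division, like the // operator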
2.5 Exponentiation
a = torch.randn(2, 2).cuda(0)
print(a)
b = torch.pow(a, 2) # square each element
print(b)
Output:
tensor([[ 0.1320, -0.8886],
        [-1.0319,  0.2598]], device='cuda:0')
tensor([[0.0174, 0.7896],
        [1.0648, 0.0675]], device='cuda:0')
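The ** operator is equivalent, and fractional exponents work as well (a sketch; abs is taken first because fractional powers of negative numbers produce nan):
b = a ** 2                  # operator form of torch.pow(a, 2)
r = torch.pow(a.abs(), 0.5) # element-wise square root of |a|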
2.6 Matrix multiplication
a = torch.randn(2, 2).cuda(0)
print(a)
b = torch.randn(2, 2).cuda(0)
print(b)
c = torch.mm(a, b.T) # multiply a by the transpose of b
print(c)
Output:
tensor([[ 0.9191,  0.7436],
        [-0.3451,  1.2992]], device='cuda:0')
tensor([[-0.2323,  2.1104],
        [-0.7523,  0.5111]], device='cuda:0')
tensor([[ 1.3559, -0.3113],
        [ 2.8219,  0.9236]], device='cuda:0')
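torch.mm is strictly for 2-D matrices. torch.matmul (or the @ operator) covers the same case and also generalizes to batches; a sketch:
c = a @ b.T            # same as torch.mm(a, b.T)
x = torch.randn(5, 2, 3)
y = torch.randn(5, 3, 4)
z = torch.matmul(x, y) # batched matrix multiply, shape (5, 2, 4)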
2.7 Matrix-vector multiplication
a = torch.randn(2, 3).cuda(0)
print(a)
b = torch.randn(3).cuda(0)
print(b)
c = torch.mv(a, b) # matrix-vector product: b is treated as a column vector
print(c)
Output:
tensor([[-1.7464,  0.0637,  0.4352],
        [ 0.7413, -1.0528, -0.7528]], device='cuda:0')
tensor([1.5810, 0.5887, 0.7501], device='cuda:0')
tensor([-2.3972, -0.0124], device='cuda:0')
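For a 2-D matrix and a 1-D vector, torch.matmul and the @ operator compute the same product:
c = a @ b              # identical to torch.mv(a, b)
c = torch.matmul(a, b) # also identical; the result has shape (2,)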
2.8 Concatenation
(1) torch.cat
a = torch.randn(2, 3, 4)
b = torch.randn(2, 1, 4)
print(a)
print(b)
c = torch.cat((a, b), dim=1) # concatenate along dim=1, i.e. inside the second level of brackets
print(c)
Output:
tensor([[[-0.0326, -0.7061,  1.5002, -0.3364],
         [ 0.4779,  0.8947,  0.4408,  0.7752],
         [-1.4860, -0.4548, -1.1087,  0.7338]],

        [[ 0.8744, -1.1853,  0.4264,  1.0947],
         [ 1.5023, -0.0208,  0.6415, -0.6923],
         [-0.0623,  0.4951,  0.1186,  0.6621]]])
tensor([[[ 0.2769,  1.4247,  0.3008,  1.9411]],

        [[-3.6871, -2.4743, -0.2735,  0.7840]]])
tensor([[[-0.0326, -0.7061,  1.5002, -0.3364],
         [ 0.4779,  0.8947,  0.4408,  0.7752],
         [-1.4860, -0.4548, -1.1087,  0.7338],
         [ 0.2769,  1.4247,  0.3008,  1.9411]],

        [[ 0.8744, -1.1853,  0.4264,  1.0947],
         [ 1.5023, -0.0208,  0.6415, -0.6923],
         [-0.0623,  0.4951,  0.1186,  0.6621],
         [-3.6871, -2.4743, -0.2735,  0.7840]]])
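torch.cat requires all inputs to match in every dimension except the one being concatenated, which is why the (2, 3, 4) and (2, 1, 4) tensors above can only be joined along dim=1; a sketch:
print(torch.cat((a, b), dim=1).shape) # torch.Size([2, 4, 4]); only dim 1 differs
# torch.cat((a, b), dim=0)            # would raise an error: sizes 3 and 1 differ in dim 1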
(2) torch.stack
a = torch.rand(3, 3).cuda(0)
b = torch.rand(3, 3).cuda(0)
print(torch.stack((a, b), dim=0).shape) # stacking adds a new dimension
torch.Size([2, 3, 3])
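The difference from cat is that stack requires identical input shapes and inserts a brand-new dimension, while cat joins along an existing one; compare:
print(torch.stack((a, b), dim=0).shape) # torch.Size([2, 3, 3]), one extra dimension
print(torch.cat((a, b), dim=0).shape)   # torch.Size([6, 3]), same number of dimensions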
2.9 Reshaping
a = torch.rand(2,4,2)
print(a)
print(a.view(2, 8)) # merge dimensions
print(a.view(2, 2, 2, 2)) # split into more dimensions
Output:
tensor([[[0.3526, 0.7707],
         [0.9576, 0.8122],
         [0.4288, 0.0359],
         [0.7087, 0.6782]],

        [[0.9383, 0.6101],
         [0.5572, 0.0409],
         [0.7279, 0.5749],
         [0.5316, 0.2492]]])
tensor([[0.3526, 0.7707, 0.9576, 0.8122, 0.4288, 0.0359, 0.7087, 0.6782],
        [0.9383, 0.6101, 0.5572, 0.0409, 0.7279, 0.5749, 0.5316, 0.2492]])
tensor([[[[0.3526, 0.7707],
          [0.9576, 0.8122]],

         [[0.4288, 0.0359],
          [0.7087, 0.6782]]],

        [[[0.9383, 0.6101],
          [0.5572, 0.0409]],

         [[0.7279, 0.5749],
          [0.5316, 0.2492]]]])
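view requires the element count to match and the tensor to be contiguous in memory; -1 lets PyTorch infer one dimension, and reshape is the more forgiving alternative (a sketch):
print(a.view(2, -1).shape)   # torch.Size([2, 8]); the -1 is inferred
print(a.reshape(4, 4).shape) # reshape falls back to a copy if the layout isn't contiguous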
2.10 Splitting
# split operation
g = torch.rand(4,2,2)
h = torch.split(g, 2, 0) # 2 is the length of each chunk, 0 is the dimension to split along
print(g.size())
for i in h:
    print(i.size())
Output:
torch.Size([4, 2, 2])
torch.Size([2, 2, 2])
torch.Size([2, 2, 2])
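split also accepts a list of chunk sizes, and torch.chunk divides a tensor into a given number of pieces; a sketch:
h = torch.split(g, [1, 3], 0) # pieces of length 1 and 3 along dim 0
k = torch.chunk(g, 2, 0)      # two equal pieces, each of shape (2, 2, 2)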
2.11 Swapping two dimensions
x.transpose(dim0, dim1)
x = torch.rand(2,2,1)
print(x)
x = x.transpose(1, 2)
print(x)
Output:
tensor([[[0.1303],
         [0.3482]],

        [[0.5394],
         [0.9294]]])
tensor([[[0.1303, 0.3482]],

        [[0.5394, 0.9294]]])
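transpose swaps exactly two dimensions; permute reorders all of them in one call (a sketch):
y = torch.rand(2, 3, 4)
print(y.permute(2, 0, 1).shape) # torch.Size([4, 2, 3])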
From: https://blog.csdn.net/SWZ156/article/details/139626550