
PyTorch Quick Reference Manual 02: Tensor Operations


import os
import tqdm
import torch
import random
import shutil
import numpy as np

2 Tensor Operations

2.1 Basic information of a tensor

tensor = torch.rand([2, 5])
print(tensor)
print("type ", tensor.type())   # Data type
print("size ", tensor.size())   # Shape of the tensor. It is a subclass of Python tuple
print("dim ", tensor.dim())    # Number of dimensions.

tensor([[0.7604, 0.6443, 0.2335, 0.0522, 0.5894],
        [0.9144, 0.5356, 0.1220, 0.9488, 0.4529]])
type torch.FloatTensor
size torch.Size([2, 5])
dim 2
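
The same information is also available as attributes, which is the more common spelling in recent code:

print(tensor.dtype)   # torch.float32
print(tensor.shape)   # torch.Size([2, 5]), alias of size()
print(tensor.device)  # cpu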

2.2 Data type conversion

# Set the default tensor type. Float is much faster than double in PyTorch.
# (Newer PyTorch versions prefer torch.set_default_dtype(torch.float32).)
torch.set_default_tensor_type(torch.FloatTensor)

# Type conversions.
tensor = tensor.cuda()  # Requires a CUDA-capable GPU.
print(tensor.type())

tensor = tensor.cpu()
print(tensor.type())

tensor = tensor.float()
print(tensor.type())

tensor = tensor.long()
print(tensor.type())

torch.cuda.FloatTensor
torch.FloatTensor
torch.FloatTensor
torch.LongTensor
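
As a side note, newer PyTorch code usually performs these conversions with the unified .to() method; a minimal sketch (the 'cuda' branch assumes a CUDA-capable GPU):

device = "cuda" if torch.cuda.is_available() else "cpu"
tensor = tensor.to(device)                    # Move between devices.
tensor = tensor.to(torch.float32)             # Change dtype.
tensor = tensor.to(device, dtype=torch.long)  # Both in one call.
print(tensor.type())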

2.3 Conversion between torch.Tensor and np.ndarray

tensor = torch.empty((2, 5)).normal_(mean=0., std=0.6)

np_val = tensor.cpu().numpy()  # torch.Tensor -> np.ndarray.
print(type(np_val))
print(np_val)
print()

tensor = torch.Tensor(np_val).float()  # np.ndarray -> torch.Tensor.
print(tensor)

# tensor = torch.from_numpy(np_val.copy()).float()  # If ndarray has negative stride

<class 'numpy.ndarray'>
[[ 0.47877645  0.4708376  -0.02325169 -0.18679902  0.30069816]
 [-0.98023766 -0.51492953  0.35119155 -1.1812955  -0.01345202]]

tensor([[ 0.4788,  0.4708, -0.0233, -0.1868,  0.3007],
        [-0.9802, -0.5149,  0.3512, -1.1813, -0.0135]])
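
Note that torch.from_numpy (commented out below) shares memory with the source array, while torch.Tensor(np_val) copies it; a small sketch to illustrate:

np_src = np.zeros((2, 2), dtype=np.float32)
shared = torch.from_numpy(np_src)  # Shares memory with np_src.
np_src[0, 0] = 7.0
print(shared[0, 0])                # tensor(7.) -- the change is visible in the tensor.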

2.4 Tensor Reshape

tensor = torch.rand((2,5))
print(tensor.size())
print(tensor)
print()

tensor = torch.reshape(tensor, (5,2))
print(tensor.size())
print(tensor)
print()

torch.Size([2, 5])
tensor([[0.6753, 0.8400, 0.5969, 0.0085, 0.6854],
        [0.5195, 0.9284, 0.1016, 0.5699, 0.7604]])

torch.Size([5, 2])
tensor([[0.6753, 0.8400],
        [0.5969, 0.0085],
        [0.6854, 0.5195],
        [0.9284, 0.1016],
        [0.5699, 0.7604]])
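
Tensor.view does the same job for contiguous tensors, and both functions accept -1 for one dimension to be inferred; a minimal sketch:

t = torch.rand((2, 5))
print(t.view(5, 2).size())   # torch.Size([5, 2]); view requires a contiguous tensor.
print(t.reshape(-1).size())  # torch.Size([10]); -1 is inferred from the remaining dims.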

2.5 Tensor permute

a = torch.randn(16, 16, 3) 
print(a.size())
print()

b = a.permute(2,0,1)
print(b.size())
print()

torch.Size([16, 16, 3])

torch.Size([3, 16, 16])
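
Keep in mind that permute returns a non-contiguous view, so a subsequent view fails unless contiguous() is called first (or reshape is used); a minimal sketch:

print(b.is_contiguous())        # False: permute only rearranges the strides.
c = b.contiguous().view(3, -1)  # contiguous() copies into a dense layout.
print(c.size())                 # torch.Size([3, 256])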

2.6 Tensor Copy

# Operation                 |  New/Shared memory | Still in computation graph |
tensor.clone()            # |        New         |          Yes               |
tensor.detach()           # |      Shared        |          No                |
tensor.contiguous()       # |      Shared        |          Yes               |
    
a = torch.rand(2,2)
print(id(a))
b = a.clone()
print(id(b))
print()

a = torch.rand(2,2)
print(id(a))
b = a.detach()
print(id(b))
print()

a = torch.rand(2,2)
print(id(a))
b = a.contiguous()
print(id(b))
print()

2691879448152
2691879420136

2691879370008
2691879448152

2691879420136
2691879420136
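
Note that id() compares the Python wrapper objects, not the storage: a.detach() returns a new object (different id) that still shares the underlying memory. Comparing data_ptr() shows the sharing directly; a small sketch:

a = torch.rand(2, 2)
print(a.data_ptr() == a.detach().data_ptr())  # True: detach shares the storage.
print(a.data_ptr() == a.clone().data_ptr())   # False: clone copies the data.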

2.7 Tensor concatenation (cat / stack)

lt_in = [torch.rand([1, 5]), torch.rand([2, 5]), torch.rand([3, 5])]
tensor = torch.cat(lt_in, dim=0)
for i, e in enumerate(lt_in):
    print(i, e.shape)
print("cat done(given dim)", tensor.shape)
print()

lt_in = [torch.rand([1, 5]), torch.rand([1, 5]), torch.rand([1, 5])]
tensor = torch.stack(lt_in, dim=0)
for i, e in enumerate(lt_in):
    print(i, e.shape)
print("stack done(new dim)", tensor.shape)

0 torch.Size([1, 5])
1 torch.Size([2, 5])
2 torch.Size([3, 5])
cat done(given dim) torch.Size([6, 5])

0 torch.Size([1, 5])
1 torch.Size([1, 5])
2 torch.Size([1, 5])
stack done(new dim) torch.Size([3, 1, 5])
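
The shape requirements differ: cat needs matching sizes on every dimension except the concatenation dim, while stack needs identical shapes because it inserts a new dim. Concatenation along another dim works the same way; a minimal sketch:

lt_in = [torch.rand([2, 1]), torch.rand([2, 3])]
print(torch.cat(lt_in, dim=1).shape)  # torch.Size([2, 4])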

2.8 Tensor squeeze / unsqueeze

A = torch.rand(1,2,2)
print(A.size())
print()

B = A.squeeze(0)
print(B.size())

C = A.unsqueeze(0)
print(C.size())

torch.Size([1, 2, 2])

torch.Size([2, 2])
torch.Size([1, 1, 2, 2])
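
Calling squeeze() without an argument removes all size-1 dimensions at once, which can be surprising for batch dims; a minimal sketch:

D = torch.rand(1, 2, 1, 2)
print(D.squeeze().size())   # torch.Size([2, 2]): every size-1 dim is dropped.
print(D.squeeze(2).size())  # torch.Size([1, 2, 2]): only dim 2 is dropped.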

2.9 Get non-zero/zero elements

tensor = torch.rand(3,3)
print(tensor)
print(torch.nonzero(tensor))               # Index of non-zero elements
print(torch.nonzero(tensor == 0))          # Index of zero elements
print(torch.nonzero(tensor).size(0))       # Number of non-zero elements
print(torch.nonzero(tensor == 0).size(0))  # Number of zero elements

tensor([[0.8941, 0.2039, 0.8508],
        [0.3449, 0.3553, 0.2724],
        [0.5092, 0.2380, 0.3142]])
tensor([[0, 0],
        [0, 1],
        [0, 2],
        [1, 0],
        [1, 1],
        [1, 2],
        [2, 0],
        [2, 1],
        [2, 2]])
tensor([], size=(0, 2), dtype=torch.int64)
9
0
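
torch.nonzero can also return one index tensor per dimension with as_tuple=True, which is convenient for indexing back into the tensor; a minimal sketch:

rows, cols = torch.nonzero(tensor, as_tuple=True)
print(tensor[rows, cols])  # The non-zero values themselves.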

2.10 Tensor A == Tensor B ?

t1 = torch.Tensor([1., 2.])
t2 = torch.Tensor([1., 2.])
# print(torch.allclose(t1, t2))  # Approximate comparison, for float tensors.
print(torch.equal(t1, t2))       # Exact comparison (same shape and elements).

True
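
For float tensors, exact equality is fragile after arithmetic, so torch.allclose with tolerances is the safer check; a minimal sketch:

t3 = t1 + 1e-6
print(torch.equal(t1, t3))                           # False: bitwise-exact comparison.
print(torch.allclose(t1, t3, rtol=1e-5, atol=1e-6))  # True: equal within tolerance.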

2.11 Tensor Expand

# Expand a tensor of shape 3*3 to shape 3*3*2*2 (e.g. 64*512 -> 64*512*7*7 works the same way).
t = torch.rand((3,3))
print(t.size())
print(t)
print()

out = torch.reshape(t, (3, 3, 1, 1)).expand(3, 3, 2, 2)
print(out.size())
print(out)

torch.Size([3, 3])
tensor([[0.2546, 0.7513, 0.1227],
        [0.8105, 0.8989, 0.4692],
        [0.9552, 0.5418, 0.2136]])

torch.Size([3, 3, 2, 2])
tensor([[[[0.2546, 0.2546],
          [0.2546, 0.2546]],

         [[0.7513, 0.7513],
          [0.7513, 0.7513]],

         [[0.1227, 0.1227],
          [0.1227, 0.1227]]],


        [[[0.8105, 0.8105],
          [0.8105, 0.8105]],

         [[0.8989, 0.8989],
          [0.8989, 0.8989]],

         [[0.4692, 0.4692],
          [0.4692, 0.4692]]],


        [[[0.9552, 0.9552],
          [0.9552, 0.9552]],

         [[0.5418, 0.5418],
          [0.5418, 0.5418]],

         [[0.2136, 0.2136],
          [0.2136, 0.2136]]]])
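
expand allocates no new memory (the expanded dims get stride 0), whereas repeat physically copies the data; a minimal sketch of the difference:

base = torch.rand(3, 1)
e = base.expand(3, 4)  # View: no copy, the expanded dim has stride 0.
r = base.repeat(1, 4)  # Copy: independent, fully materialized memory.
print(e.stride(), r.stride())  # (1, 0) vs. (4, 1)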

2.12 Matrix multiplication

# Matrix multiplication: (m*n) * (n*p) -> (m*p).
t1 = torch.rand((2,3))
t2 = torch.rand((3,2))
r = t1 @ t2
print(r.size())
print(r)
print()

# Batch matrix multiplication: (b*m*n) * (b*n*p) -> (b*m*p).
t1 = torch.rand((6, 2, 3))
t2 = torch.rand((6, 3, 2))
r = t1 @ t2
print(r.size())
print(r)
print()

# Element-wise multiplication.
t1 = torch.rand((3,3))
t2 = torch.rand((3,3))
r = t1 * t2
print(r.size())
print(r)
print()

torch.Size([2, 2])
tensor([[0.2805, 0.4128],
        [0.3273, 0.5588]])

torch.Size([6, 2, 2])
tensor([[[0.9034, 1.1809],
         [1.0052, 0.5672]],

        [[1.0503, 0.7826],
         [0.5958, 0.3320]],

        [[0.3317, 0.9927],
         [0.1044, 0.1776]],

        [[0.3701, 0.3560],
         [0.5593, 0.8541]],

        [[0.8256, 0.6950],
         [1.3882, 0.8211]],

        [[0.4006, 0.6250],
         [1.0060, 1.1441]]])

torch.Size([3, 3])
tensor([[0.0139, 0.4317, 0.0394],
        [0.0608, 0.4654, 0.3051],
        [0.1981, 0.2008, 0.2868]])
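
The @ operator dispatches to torch.matmul; the batched case can be written equivalently with torch.bmm or torch.einsum, which spells out the index contraction; a minimal sketch:

t1 = torch.rand((6, 2, 3))
t2 = torch.rand((6, 3, 2))
r1 = torch.bmm(t1, t2)                     # Batch matrix multiplication.
r2 = torch.einsum('bmn,bnp->bmp', t1, t2)  # The same contraction, written explicitly.
print(torch.allclose(r1, r2))              # True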

2.13 Get the value of a single-element tensor

tensor = torch.rand(1)
val = tensor.item()
print(type(val), val)

<class 'float'> 0.6443968415260315
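
.item() raises an error on tensors with more than one element; .tolist() converts a whole tensor into nested Python lists:

t = torch.rand(2, 2)
print(type(t.tolist()))  # <class 'list'> containing lists of floats.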

2.14 Convert integer labels to one-hot vectors

n_batch, n_class_num = 6, 3
t_label = torch.Tensor([random.randint(0, n_class_num-1) for i in range(n_batch)]).long().reshape((n_batch,1))
print("t_label ")
print(t_label.size())
print(t_label)
print()

one_hot = torch.zeros(t_label.shape[0], n_class_num).scatter_(1, t_label, 1).long()  # (dim, index, value)
print("one_hot ")
print(one_hot.size())
print(one_hot)

t_label
torch.Size([6, 1])
tensor([[0],
        [1],
        [1],
        [0],
        [1],
        [2]])

one_hot
torch.Size([6, 3])
tensor([[1, 0, 0],
        [0, 1, 0],
        [0, 1, 0],
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
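
Recent PyTorch versions also provide torch.nn.functional.one_hot, which expects a flat LongTensor of class indices; a minimal sketch:

import torch.nn.functional as F
flat_label = t_label.squeeze(1)  # one_hot expects shape (N,), not (N, 1).
one_hot2 = F.one_hot(flat_label, num_classes=n_class_num)
print(one_hot2.size())           # torch.Size([6, 3])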

2.15 Compute the Euclidean distance between two arrays

# X1 is of shape m*d, X2 is of shape n*d.
X1 = torch.rand((2, 3))
X2 = torch.rand((6, 3))
D = torch.sqrt(torch.sum((X1[:,None,:] - X2) ** 2, dim=2))
print(D.size())
print(D)

torch.Size([2, 6])
tensor([[1.1364, 1.1124, 0.5864, 0.8961, 0.6905, 0.5420],
        [0.6629, 0.8406, 0.5869, 0.3428, 0.5778, 0.6701]])
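
The same pairwise distance matrix is available from the built-in torch.cdist; a minimal sketch cross-checking the broadcasting version above:

D2 = torch.cdist(X1, X2, p=2)  # Pairwise Euclidean distances, shape m*n.
print(torch.allclose(D, D2))   # True, up to floating-point rounding.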

2.16 Tensor output with limits (torch.clamp)

Output elements are limited to the range [min, max].

A = torch.rand(3,3)
print(A)
B = torch.clamp(A, 0.2, 0.8)
print(B)

tensor([[0.4451, 0.8816, 0.3674],
        [0.7309, 0.2765, 0.0899],
        [0.7772, 0.4225, 0.6008]])
tensor([[0.4451, 0.8000, 0.3674],
        [0.7309, 0.2765, 0.2000],
        [0.7772, 0.4225, 0.6008]])
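
Either bound may be omitted by passing the other as a keyword argument; a minimal sketch:

print(torch.clamp(A, min=0.2))  # Lower bound only; values above 0.2 pass through unchanged.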

2.17 Gather data with index (torch.gather)

a = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 12, 9]])
print(a)
print()

b = torch.gather(input=a, dim=0, index=torch.tensor([[0,1,2], [1,2,0]]))
print(b)

tensor([[ 1,  2,  3],
        [ 4,  5,  6],
        [ 7, 12,  9]])

tensor([[ 1,  5,  9],
        [ 4, 12,  3]])

For dim=0, the rule is out[i][j] = a[index[i][j]][j]:

1  -> output row 0, col 0: index 0 picks a[0][0]
5  -> output row 0, col 1: index 1 picks a[1][1]
9  -> output row 0, col 2: index 2 picks a[2][2]

4  -> output row 1, col 0: index 1 picks a[1][0]
12 -> output row 1, col 1: index 2 picks a[2][1]
3  -> output row 1, col 2: index 0 picks a[0][2]
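
For dim=1 the rule flips to out[i][j] = a[i][index[i][j]]; a minimal sketch with the same a as above:

c = torch.gather(input=a, dim=1, index=torch.tensor([[2, 0], [1, 1], [0, 2]]))
print(c)  # tensor([[ 3,  1], [ 5,  5], [ 7,  9]])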
