PyTorch Tensor Basic Operations

1. Data Types

64-bit integer: torch.LongTensor
32-bit integer: torch.IntTensor
16-bit integer: torch.ShortTensor
32-bit float: torch.FloatTensor
64-bit float: torch.DoubleTensor
import torch
import numpy as np
from torch.autograd import Variable

x0=torch.tensor([1,2,3,4])
x1=torch.FloatTensor([1,2,3,4])
x2=torch.IntTensor([1,2,3,4])
x3=torch.LongTensor([1,2,3,4])
x4=torch.ShortTensor([1,2,3,4])
x5=torch.DoubleTensor([1,2,3,4])

print(x0)
print(x1)
print(x2)
print(x3)
print(x4)
print(x5)
print(x0.dtype) # torch.tensor infers the dtype; integer input defaults to torch.int64
print(x3.dtype)
print(x1.dtype) # FloatTensor is 32-bit float

Output:

tensor([1, 2, 3, 4])
tensor([1., 2., 3., 4.])
tensor([1, 2, 3, 4], dtype=torch.int32)
tensor([1, 2, 3, 4])
tensor([1, 2, 3, 4], dtype=torch.int16)
tensor([1., 2., 3., 4.], dtype=torch.float64)
torch.int64
torch.int64
torch.float32
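
The constructors above (FloatTensor, IntTensor, and so on) are the older-style API; the same dtypes can also be requested through the dtype argument of torch.tensor and changed later with the casting methods. A minimal sketch, not part of the original code:

a = torch.tensor([1,2,3,4], dtype=torch.float32)  # explicit 32-bit float
b = a.to(torch.int16)   # cast to 16-bit integer
c = a.long()            # cast to 64-bit integer
print(a.dtype)  # torch.float32
print(b.dtype)  # torch.int16
print(c.dtype)  # torch.int64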

2. Converting Between Tensor and NumPy

rt=torch.randn(2,3)
print(type(rt))
rt_to_np=rt.numpy()
print(type(rt_to_np))
np_to_rt=torch.from_numpy(rt_to_np)
print(type(np_to_rt))

Output:

<class 'torch.Tensor'>
<class 'numpy.ndarray'>
<class 'torch.Tensor'>
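
One detail worth knowing about this conversion: torch.from_numpy shares memory with the source ndarray, so an in-place change on either side is visible on the other. A small sketch (the variable names here are illustrative):

arr = np.ones(3)
t = torch.from_numpy(arr)
arr[0] = 100       # modify the NumPy array in place
print(t)           # tensor([100., 1., 1.], dtype=torch.float64)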

 

3. Basic Tensor Computations

x=torch.FloatTensor([1,2,3,4,5])
y=torch.FloatTensor([1,2,3,4,5])
print(x+y)
print(x-y)
print(x*y)
print(x/y)

# Aggregation functions

print(torch.max(x))  # maximum; for a multi-dimensional tensor, pass a dim argument (see the sketch after the output below)
print(torch.sum(x))  # sum
print(torch.median(x))  # median
print(torch.mean(x)) # mean

''' Output
tensor(5.)
tensor(15.)
tensor(3.)
tensor(3.)
'''
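
As the comment above mentions, max on a multi-dimensional tensor takes a dim argument and then returns both the values and their indices. A quick sketch with made-up numbers:

m = torch.FloatTensor([[1,5,3],
                       [4,2,6]])
values, indices = torch.max(m, dim=1)  # maximum of each row
print(values)   # tensor([5., 6.])
print(indices)  # tensor([1, 2])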

# Shape

print(x.shape)
print(x.size())  # size() is a method, so the parentheses are required
print(rt.shape)
print(rt.size())

''' Output
torch.Size([5])
torch.Size([5])
torch.Size([2, 3])
torch.Size([2, 3])
'''


# Gradients (autograd)

t=torch.FloatTensor([[1,2],[3,4]])
v=Variable(t,requires_grad=True)  # the requires_grad flag is required for gradients
v2=torch.sum(v*v)
v2.backward()
print(v.grad)

''' Output
tensor([[2., 4.],
        [6., 8.]])

Why does the gradient come out like this?

v2 is a scalar, and the derivative of a scalar with respect to a matrix is the matrix of derivatives with respect to each element.

v2 = x11^2 + x12^2 + x21^2 + x22^2, so the gradient matrix is
[
   d(v2)/d(x11), d(v2)/d(x12)
   d(v2)/d(x21), d(v2)/d(x22)
]

where d(v2)/d(x11) = 2*x11 = 2 (since x11 = 1), and the remaining entries follow in the same way.

'''
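
Variable has been merged into Tensor since PyTorch 0.4, so the same gradient can be computed without it by setting requires_grad directly on the tensor; a minimal sketch of that modern equivalent:

w = torch.tensor([[1.,2.],[3.,4.]], requires_grad=True)
loss = torch.sum(w*w)
loss.backward()
print(w.grad)   # tensor([[2., 4.], [6., 8.]])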

 

4. Creating Tensors

print(torch.rand(2,3))  # uniform random values in [0, 1)
# all-ones tensor
print(torch.ones(2,3))
# identity matrix
print(torch.eye(3,3))
# all-zeros tensor
print(torch.zeros(2,3))
# all-ones tensor with the same shape as another tensor
print(torch.ones_like(torch.rand(4,3)))
# uninitialized tensor with the same shape (the values are whatever happened to be in memory, not meaningful small numbers)
print(torch.empty_like(torch.rand(4,3)))
# tensor filled with a constant value (on recent PyTorch versions, pass 4. to get the float output shown below)
print(torch.full((2,5,3),fill_value=4))

Output:

tensor([[0.0218, 0.6911, 0.8507],
        [0.3779, 0.0360, 0.4767]])
tensor([[1., 1., 1.],
        [1., 1., 1.]])
tensor([[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]])
tensor([[0., 0., 0.],
        [0., 0., 0.]])
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
tensor([[ 3.4162e-18,  3.0917e-41,  3.4162e-18],
        [ 3.0917e-41,  3.4162e-18,  3.0917e-41],
        [-3.7711e-34,  4.5804e-41, -3.5738e-34],
        [ 4.5804e-41, -1.3618e-38,  4.5804e-41]])
tensor([[[4., 4., 4.],
         [4., 4., 4.],
         [4., 4., 4.],
         [4., 4., 4.],
         [4., 4., 4.]],

        [[4., 4., 4.],
         [4., 4., 4.],
         [4., 4., 4.],
         [4., 4., 4.],
         [4., 4., 4.]]])
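
These factory functions also accept dtype (and device) arguments; a short sketch, with the expected results noted as comments:

print(torch.zeros(2,3, dtype=torch.int64))    # integer zeros
print(torch.full((2,2), 3.5))                 # 2x2 tensor filled with 3.5
print(torch.full_like(torch.rand(2,2), 7.))   # same shape as the input, filled with 7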

 

5. Reshaping

# view & reshape
test = torch.rand((4,1,2,2)) # dimensions are listed from the outermost to the innermost
print(test.view(4,2*2))
print(test.reshape(4,2*2))  # same result as view; reshape also handles non-contiguous tensors by copying (see the sketch after the output below)

''' Output
tensor([[0.0976, 0.6314, 0.3482, 0.8822],
        [0.9157, 0.9412, 0.7420, 0.6484],
        [0.4440, 0.7826, 0.5866, 0.7577],
        [0.2833, 0.8003, 0.5582, 0.9833]])
tensor([[0.0976, 0.6314, 0.3482, 0.8822],
        [0.9157, 0.9412, 0.7420, 0.6484],
        [0.4440, 0.7826, 0.5866, 0.7577],
        [0.2833, 0.8003, 0.5582, 0.9833]])
'''
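
view and reshape can also infer one dimension when it is written as -1; a quick sketch reusing test from above:

print(test.view(4, -1).shape)    # torch.Size([4, 4])
print(test.reshape(-1).shape)    # torch.Size([16]), fully flattened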

# unsqueeze: insert a new dimension of size 1 at the given position

print(test.shape)
test1=test.unsqueeze(0) # insert a new dimension at position 0; -1 would insert it at the end
print(test1.shape)

''' Output
torch.Size([4, 1, 2, 2])
torch.Size([1, 4, 1, 2, 2])

'''

# squeeze: drop the given dimension if its size is 1 (see the sketch after the output below)

test1=test1.squeeze(2)
print(test1.shape)

''' Output

torch.Size([1, 4, 2, 2])

'''
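
Called without an argument, squeeze removes every dimension of size 1, and squeezing a dimension whose size is not 1 is simply a no-op; a small sketch on the original test tensor:

print(test.squeeze().shape)    # torch.Size([4, 2, 2])
print(test.squeeze(0).shape)   # torch.Size([4, 1, 2, 2]), dim 0 has size 4, so nothing changes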

# repeat: tile the tensor along each dimension
test2=test.repeat(1,1,2,3) # each original dimension size is multiplied by the corresponding factor
print(test2.shape)

''' Output
torch.Size([4, 1, 4, 6])

'''
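
The effect of repeat is easier to see on a tiny tensor; an illustrative sketch:

r = torch.tensor([1,2])
print(r.repeat(3))     # tensor([1, 2, 1, 2, 1, 2])
print(r.repeat(2,2))   # tensor([[1, 2, 1, 2],
                       #         [1, 2, 1, 2]])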


# t() transposes and only works on 2-D tensors

c=torch.rand(2,3)
print(c.t())


# transpose swaps two dimensions
print(test2.shape)
print(test2.transpose(1,3).shape)

''' Output

torch.Size([4, 1, 4, 6])
torch.Size([4, 6, 4, 1])

'''

# permute reorders several dimensions at once
print(test2.permute(3,2,1,0).shape)

''' Output
torch.Size([6, 4, 1, 4])

'''
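
A common use of permute is reordering an image batch from NCHW to NHWC; a hypothetical sketch (the shapes here are made up for illustration):

imgs = torch.rand(8, 3, 32, 32)          # batch, channels, height, width
print(imgs.permute(0, 2, 3, 1).shape)    # torch.Size([8, 32, 32, 3])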


# contiguous: after operations such as permute or transpose, call contiguous() before view() (see the sketch after the output below)
print(test2.permute(3,2,1,0).contiguous())

''' Output

tensor([[[[0.0976, 0.9157, 0.4440, 0.2833]],

         [[0.3482, 0.7420, 0.5866, 0.5582]],

         [[0.0976, 0.9157, 0.4440, 0.2833]],

         [[0.3482, 0.7420, 0.5866, 0.5582]]],


        [[[0.6314, 0.9412, 0.7826, 0.8003]],

         [[0.8822, 0.6484, 0.7577, 0.9833]],

         [[0.6314, 0.9412, 0.7826, 0.8003]],

         [[0.8822, 0.6484, 0.7577, 0.9833]]],


        [[[0.0976, 0.9157, 0.4440, 0.2833]],

         [[0.3482, 0.7420, 0.5866, 0.5582]],

         [[0.0976, 0.9157, 0.4440, 0.2833]],

         [[0.3482, 0.7420, 0.5866, 0.5582]]],


        [[[0.6314, 0.9412, 0.7826, 0.8003]],

         [[0.8822, 0.6484, 0.7577, 0.9833]],

         [[0.6314, 0.9412, 0.7826, 0.8003]],

         [[0.8822, 0.6484, 0.7577, 0.9833]]],


        [[[0.0976, 0.9157, 0.4440, 0.2833]],

         [[0.3482, 0.7420, 0.5866, 0.5582]],

         [[0.0976, 0.9157, 0.4440, 0.2833]],

         [[0.3482, 0.7420, 0.5866, 0.5582]]],


        [[[0.6314, 0.9412, 0.7826, 0.8003]],

         [[0.8822, 0.6484, 0.7577, 0.9833]],

         [[0.6314, 0.9412, 0.7826, 0.8003]],

         [[0.8822, 0.6484, 0.7577, 0.9833]]]])
'''
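
To see why contiguous matters: view requires a contiguous memory layout, while reshape copies when necessary. A minimal sketch of the failure and the fix, reusing test2 from above:

p = test2.permute(3,2,1,0)                 # shape [6, 4, 1, 4], no longer contiguous
# p.view(6, 16)                            # would raise a RuntimeError here
print(p.contiguous().view(6, 16).shape)    # torch.Size([6, 16])
print(p.reshape(6, 16).shape)              # reshape does the copy itself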

 
