np.random.rand(3, 5) returns samples uniformly distributed in [0, 1); np.random.randn(3, 5) returns samples from the standard normal distribution.
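A quick way to see the difference (a minimal sketch; the exact numbers vary from run to run):

import numpy as np

a = np.random.rand(3, 5)    # uniform samples on [0, 1)
b = np.random.randn(3, 5)   # standard normal samples
print(a.min() >= 0, a.max() < 1)   # True True
print(b.mean(), b.std())           # roughly 0 and 1 (only approximately, with 15 samples)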
Converting between NumPy data and tensor data: a tensor supports the same operations as a NumPy array, and can additionally be accelerated on the GPU, so it runs faster.

import numpy as np
import torch

numpy_tensor = np.random.randn(10, 2)
pytorch_tensor1 = torch.Tensor(numpy_tensor)      # copies the data and casts to float32
pytorch_tensor2 = torch.from_numpy(numpy_tensor)  # wraps the same memory, keeping float64
# type() gives the container type (list, ndarray, Tensor); dtype gives the element type (int, float, ...)
print(type(pytorch_tensor1))      # <class 'torch.Tensor'>
print(pytorch_tensor1.type())     # torch.FloatTensor
print(pytorch_tensor1.shape)      # torch.Size([10, 2])
print(pytorch_tensor1.size())     # torch.Size([10, 2])
print(pytorch_tensor1.dim())      # 2
print(pytorch_tensor1.numel())    # 20
print(type(pytorch_tensor2))      # <class 'torch.Tensor'>
numpy_array = pytorch_tensor1.numpy()   # convert back to NumPy
print(type(numpy_array))          # <class 'numpy.ndarray'>
print(numpy_array.dtype)          # float32
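A practical difference between the two constructors (a small sketch; the GPU branch assumes a CUDA device is available): torch.from_numpy shares memory with the source array, while torch.Tensor makes an independent float32 copy, and moving the tensor onto the GPU is what provides the speedup mentioned above.

import numpy as np
import torch

arr = np.random.randn(10, 2)
shared = torch.from_numpy(arr)   # shares memory with arr, stays float64
copied = torch.Tensor(arr)       # independent float32 copy

arr[0, 0] = 100.0
print(shared[0, 0])   # tensor(100., dtype=torch.float64): the change is visible
print(copied[0, 0])   # original value: the copy is unaffected

if torch.cuda.is_available():
    gpu_tensor = copied.cuda()   # move to the GPU for acceleration
    print(gpu_tensor.device)     # cuda:0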
x = torch.randn(3, 4)
print(x)
print(x.shape)    # torch.Size([3, 4])
x = x.unsqueeze(0)    # add a dimension at position 0
print(x.shape)        # torch.Size([1, 3, 4])
x = x.squeeze(0)      # remove the size-1 dimension at position 0
print(x.shape)        # torch.Size([3, 4])
# squeeze reduces the number of dimensions; unsqueeze increases it
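One detail worth keeping in mind (a minimal sketch): squeeze only removes dimensions of size 1, and called without arguments it removes all of them.

import torch

y = torch.randn(1, 3, 1, 4)
print(y.squeeze().shape)    # torch.Size([3, 4]): every size-1 dimension removed
print(y.squeeze(1).shape)   # torch.Size([1, 3, 1, 4]): dim 1 has size 3, so nothing changes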
x = torch.randn(3, 4, 5)
print(x.shape)          # torch.Size([3, 4, 5])
x = x.permute(1, 0, 2)
print(x.shape)          # torch.Size([4, 3, 5])
x = x.transpose(0, 2)
print(x.shape)          # torch.Size([5, 3, 4])
print(x)
# permute reorders any number of dimensions of the same tensor at once
# transpose swaps exactly two dimensions
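To make the relationship between the two concrete: a transpose is just a permute that swaps two axes, so the same reordering can be written either way (a minimal sketch):

import torch

x = torch.randn(3, 4, 5)
a = x.transpose(0, 2)      # swap dimensions 0 and 2
b = x.permute(2, 1, 0)     # the same reordering written as a full permutation
print(torch.equal(a, b))   # True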
Variable

import numpy as np
import torch
from torch.autograd import Variable
x = Variable(torch.Tensor([2]), requires_grad=True)
y = x + 2
z = y**2 + 3
print(z)
# simple gradient computation
z.backward()      # differentiate z with respect to the input variable
print(x.grad)     # read off the gradient dz/dx
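The printed gradient can be checked by hand: z = (x + 2)**2 + 3, so dz/dx = 2 * (x + 2) = 8 at x = 2. Note that Variable is deprecated in current PyTorch; a plain tensor created with requires_grad=True behaves the same way (a sketch of the modern equivalent):

import torch

x = torch.tensor([2.0], requires_grad=True)
z = (x + 2) ** 2 + 3
z.backward()
print(x.grad)   # tensor([8.])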
m = Variable(torch.FloatTensor([[2, 3]]), requires_grad=True)
n = Variable(torch.zeros(1, 2))
print(m)
print(n)
print(m.shape)
n[0, 0] = m[0, 0] ** 2
n[0, 1] = m[0, 1] ** 3
# differentiating a multi-element output
n.backward(torch.ones_like(n))   # equivalent to putting a coefficient of 1 in front of each formula
print(m.grad)                    # gradient with respect to m
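Checking by hand: n = [m0**2, m1**3], so the gradient is [2 * m0, 3 * m1**2] = [4, 27] at m = [2, 3]. torch.autograd.grad computes the same vector-Jacobian product functionally, without touching .grad (a minimal sketch using the modern tensor API):

import torch

m = torch.tensor([[2.0, 3.0]], requires_grad=True)
n = torch.stack([m[0, 0] ** 2, m[0, 1] ** 3])
(grad,) = torch.autograd.grad(n, m, grad_outputs=torch.ones_like(n))
print(grad)   # tensor([[ 4., 27.]])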
x = Variable(torch.FloatTensor([[3]]), requires_grad=True)
y = x * 2 + x ** 2 + 3
print(y)                        # 18
y.backward(retain_graph=True)   # backpropagate and keep the graph for another pass
print(x.grad)                   # 8
y.backward()                    # gradients from the two passes accumulate: 8 + 8 = 16
print(x.grad)                   # 16
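Because gradients accumulate across backward passes, training code resets them between steps (in a real training loop this is optimizer.zero_grad()); a sketch of the usual pattern:

import torch

x = torch.tensor([[3.0]], requires_grad=True)
y = x * 2 + x ** 2 + 3
y.backward(retain_graph=True)
print(x.grad)     # tensor([[8.]])
x.grad.zero_()    # reset, so the next pass does not add on top of 8
y.backward()
print(x.grad)     # tensor([[8.]]) again, not 16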
x = Variable(torch.FloatTensor([2, 3]), requires_grad=True)
k = Variable(torch.zeros(2))
k[0] = x[0] ** 2 + 3 * x[1]
k[1] = x[1] ** 2 + 2 * x[0]
# Matrix (Jacobian) differentiation: each of the two formulas is differentiated with
# respect to x[0] and x[1], giving four values in total. The argument passed to
# k.backward() weights the two formulas, i.e. it decides whether their gradients get
# summed together. The approach below recovers all four values row by row; passing
# torch.ones_like(k) instead would return only the two summed gradients.
print(k)
j = torch.zeros(2, 2)
k.backward(torch.FloatTensor([1, 0]), retain_graph=True)   # gradient of k[0] only
j[0] = x.grad.data
print(x.grad.data)
x.grad.data.zero_()     # clear the accumulated gradient before the second pass
print(x.grad.data)
k.backward(torch.FloatTensor([0, 1]))                      # gradient of k[1] only
j[1] = x.grad.data
print(j)
print(x.grad.data)
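Recent PyTorch versions also provide torch.autograd.functional.jacobian, which builds the same 2x2 matrix in a single call (a sketch, assuming PyTorch 1.5 or later):

import torch
from torch.autograd.functional import jacobian

def f(x):
    return torch.stack([x[0] ** 2 + 3 * x[1],
                        x[1] ** 2 + 2 * x[0]])

print(jacobian(f, torch.tensor([2.0, 3.0])))
# tensor([[4., 3.],
#         [2., 6.]])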