import torch
import numpy as np
# --- Creating a Tensor from a Python list ---
print('*' * 10 + ' 从列表创建 Tensor ' + '*' * 10)
data_list = [[1, 2], [3, 4]]
# torch.tensor() copies the data: the tensor does NOT share memory with the list.
data_tensor1 = torch.tensor(data_list)
print(f'从列表创建的 Tensor:\n{data_tensor1}, 类型:{type(data_tensor1)}')

# Modifying the tensor leaves the original list untouched (it was copied).
data_tensor1[0] = torch.tensor([5, 6])
print(f'修改后的 Tensor:\n{data_tensor1},\n原始列表不变:\n{data_list}')
# --- Creating a Tensor from a NumPy array ---
print('*' * 10 + ' 从 NumPy 数组创建 Tensor ' + '*' * 10)
data_ndarray = np.array([[3, 4], [5, 6]])
# torch.tensor() always copies; torch.from_numpy() shares memory with the array.
data_tensor2 = torch.tensor(data_ndarray)
data_tensor3 = torch.from_numpy(data_ndarray)
print(f'从 NumPy 数组创建的 Tensor:\n{data_tensor2}, 类型:{type(data_tensor2)}')
print(f'使用 from_numpy 创建的 Tensor:\n{data_tensor3}, 类型:{type(data_tensor3)}')

# BUG FIX: the original modified data_tensor2, which is a copy, so the NumPy
# array never actually changed despite the printed message. Modify the
# memory-sharing tensor (from_numpy) so the demonstrated aliasing is real.
data_tensor3[0][1] = 5
print(f'修改后的 Tensor:\n{data_tensor3},\n原始 NumPy 数组也变了:\n{data_ndarray}')
# --- Creating Tensors with PyTorch factory functions ---
print('*' * 10 + ' 使用 PyTorch 提供的函数创建 Tensor ' + '*' * 10)

# torch.Tensor(sizes) / torch.empty(sizes) allocate WITHOUT initializing values.
data_tensor4 = torch.Tensor(2, 3, 4)
print(f'未初始化的 Tensor:\n{data_tensor4}, 形状:{data_tensor4.shape}')
data_tensor5 = torch.empty(2, 3, 4)
print(f'未初始化的 Tensor:\n{data_tensor5}, 形状:{data_tensor5.size()}')

# Identity-like matrix: ones on the main diagonal, may be non-square.
data_tensor6 = torch.eye(3, 4)
print(f'单位矩阵:\n{data_tensor6}, 形状:{data_tensor6.size()}')

# 5 evenly spaced points covering [1, 10].
data_tensor7 = torch.linspace(1, 10, steps=5)
print(f'等差数列:\n{data_tensor7}, 形状:{data_tensor7.size()}')

# 5 logarithmically spaced points from 10^1 to 10^10.
data_tensor8 = torch.logspace(1, 10, steps=5)
print(f'等比数列:\n{data_tensor8}, 形状:{data_tensor8.size()}')

# Half-open integer range [1, 10) with step 2.
data_tensor9 = torch.arange(1, 10, step=2)
print(f'指定范围的数列:\n{data_tensor9}, 形状:{data_tensor9.size()}')

data_tensor10 = torch.zeros(2, 3)
print(f'全零 Tensor:\n{data_tensor10}, 形状:{data_tensor10.size()}')
data_tensor11 = torch.ones(2, 3)
print(f'全一 Tensor:\n{data_tensor11}, 形状:{data_tensor11.size()}')

# Uniform [0, 1) and standard-normal random tensors.
data_tensor12 = torch.rand(2, 3)
print(f'均匀分布随机数 Tensor:\n{data_tensor12}, 形状:{data_tensor12.size()}')
data_tensor13 = torch.randn(2, 3)
print(f'标准正态分布随机数 Tensor:\n{data_tensor13}, 形状:{data_tensor13.size()}')

# *_like factories copy the shape (and dtype) of an existing tensor.
data_tensor14 = torch.ones_like(data_tensor13)
print(f'与已有 Tensor 形状相同的全一 Tensor:\n{data_tensor14}, 形状:{data_tensor14.size()}')
# --- torch.tensor() vs torch.Tensor() ---
# torch.tensor(1) stores the VALUE 1 (dtype inferred as int64, 0-dim);
# torch.Tensor(1) treats 1 as a SIZE and allocates an uninitialized float tensor.
t1 = torch.tensor(1)
t2 = torch.Tensor(1)
print(f'torch.tensor(1): {t1}, dtype: {t1.dtype}, shape: {t1.shape}')
print(f'torch.Tensor(1): {t2}, dtype: {t2.dtype}, shape: {t2.shape}')

# Given a list, torch.tensor() infers the dtype; torch.Tensor() always yields float32.
t3 = torch.tensor([1, 2, 3])
t4 = torch.Tensor([1, 2, 3])
print(f'torch.tensor() 推断数据类型: {t3.dtype}, {t3}')
print(f'torch.Tensor() 默认数据类型为 Float: dtype: {t4.dtype}, {t4}')
# --- Static, instance, and in-place addition ---
x = torch.eye(3, 3)
y = torch.ones(3, 3)
print(f'张量 x:\n{x}\n张量 y:\n{y}')
print(f'静态方法 x + y:\n{torch.add(x, y)}')
print(f'实例方法 x + y:\n{x.add(y)}')

# add() returns a new tensor; add_() (trailing underscore) mutates x in place.
z = x.add(y)
print(f'不修改 x 的值:\n{x}\n返回的新 Tensor z:\n{z}')
x.add_(y)
print(f'修改 x 的值:\n{x}')
# --- Changing a Tensor's shape ---
print('*' * 10 + ' 修改 Tensor 形状 ' + '*' * 10)

x = torch.randn(2, 3, 2)
print(f'原始 Tensor x:\n{x}, 形状: {x.size()}')
print(f'张量 x 的元素个数: {x.numel()}')

# view() reinterprets the same storage; the element count (12) must match.
y = x.view(3, 4)
print(f'修改形状后的 Tensor y:\n{y}, 形状: {y.size()}')
z = x.view(-1)  # -1 lets PyTorch infer that dimension
print(f'展平成一维的 Tensor z:\n{z}, 形状: {z.size()}')

# Transposing makes a tensor non-contiguous, so view() would fail on it.
x = torch.arange(12).view(3, 4)
y = x.t()
print(f'y.t() 后张量连续吗? {y.is_contiguous()}')

# reshape() also works on non-contiguous tensors (copying when necessary).
z = y.reshape(-1)
print(f'使用 reshape 修改内存不连续张量的形状: {z}')
# --- squeeze / unsqueeze / transpose ---
x = torch.randn(1, 3, 1, 4)
print(f'原始 Tensor x:\n{x}, 形状: {x.size()}')
y = x.squeeze()   # drops every size-1 dimension: (1,3,1,4) -> (3,4)
print(f'squeeze 后的 Tensor y:\n{y}, 形状: {y.size()}')
z = x.squeeze(0)  # drops only dim 0 (size 1): -> (3,1,4)
print(f'squeeze(0) 后的 Tensor z:\n{z}, 形状: {z.size()}')
w = x.squeeze(2)  # drops only dim 2 (size 1): -> (1,3,4)
print(f'squeeze(2) 后的 Tensor w:\n{w}, 形状: {w.size()}')

x = torch.randn(3, 4)
print(f'原始 Tensor x:\n{x}, 形状: {x.size()}')
y = x.unsqueeze(0)  # inserts a size-1 dim at position 0: -> (1,3,4)
print(f'unsqueeze(0) 后的 Tensor y:\n{y}, 形状: {y.size()}')
z = x.unsqueeze(2)  # -> (3,4,1)
print(f'unsqueeze(2) 后的 Tensor z:\n{z}, 形状: {z.size()}')

x = torch.randn(2, 3, 4)
print(f'原始 Tensor x:\n{x}, 形状: {x.size()}')
y = x.transpose(0, 1)  # swaps dims 0 and 1: -> (3,2,4)
print(f'transpose(0, 1) 后的 Tensor y:\n{y}, 形状: {y.size()}')
# --- Indexing, masking, gather and scatter ---
torch.manual_seed(100)  # fixed seed so the printed random values are reproducible
x = torch.randn(2, 3)
print(f'原始 Tensor x:\n{x}')
print(f'获取第一行所有数据: {x[0, :]}')
print(f'获取最后一列的数据: {x[:, -1]}')

# Boolean mask of the elements strictly greater than zero.
mask = x > 0
print(f'获取 x > 0 的 mask :\n{mask}, 类型: {mask.dtype}')
print(f'利用 masked_select 获取大于 0 的值: {torch.masked_select(x, mask)}')
print(f'利用 nonzero 获取非 0 下标: {torch.nonzero(x)}')

# gather along dim 0: result[0][j] = x[index[0][j]][j]
index = torch.LongTensor([[0, 1, 1]])
result = torch.gather(x, 0, index=index)
# FIX: reuse the already-computed `result` instead of recomputing the gather.
print(f'利用 gather 在第 0 维根据 index 收集元素: {result}')

# gather along dim 1: a[i][j] = x[i][index[i][j]]
index = torch.LongTensor([[0, 1, 1], [1, 1, 1]])
a = torch.gather(x, 1, index=index)
print(f'利用 gather 在第 1 维根据 index 收集元素: {a}')

# scatter_ is the inverse of gather: z[i][index[i][j]] = a[i][j]
z = torch.zeros(2, 3)
z.scatter_(1, index, a)
print(f'利用 scatter_ 在第 1 维根据 index 补充数据: {z}')
# --- Broadcasting ---
A = torch.arange(0, 40, 10).reshape(4, 1)  # shape (4, 1)
B = torch.arange(0, 3)                     # shape (3,)
C = A + B                                  # broadcasts both operands to (4, 3)
print(f'C = A + B 的结果:\n{C}, 形状: {C.size()}')

# Manual, step-by-step equivalent of the automatic broadcast above.
B1 = B.unsqueeze(0)   # (3,)   -> (1, 3)
A1 = A.expand(4, 3)   # (4, 1) -> (4, 3) without copying data
B2 = B1.expand(4, 3)  # (1, 3) -> (4, 3)
C1 = A1 + B2
print(f'手动实现广播机制的结果:\n{C1}, 形状: {C1.size()}')
# --- Element-wise math operations ---
t = torch.randn(1, 3)
t1 = torch.randn(3, 1)
t2 = torch.randn(1, 3)

# addcdiv computes t + value * (t1 / t2); (3,1) / (1,3) broadcasts to (3,3).
ans = torch.addcdiv(t, value=0.1, tensor1=t1, tensor2=t2)
print(f'利用 addcdiv 计算 t + 0.1*(t1 / t2):\n{ans}, 形状: {ans.size()}')
print(f'计算 sigmoid(t):\n{torch.sigmoid(t)}, 形状: {torch.sigmoid(t).size()}')
print(f'将 t 限制在 [0, 1] 之间:\n{torch.clamp(t, min=0, max=1)}, 形状: {torch.clamp(t, min=0, max=1).size()}')
# add_ mutates t in place and returns it, so the print shows the updated value.
print(f'进行 t 原地加 1 操作:\n{t.add_(1)}, 形状: {t.size()}')
# --- Reductions along a dimension ---
a = torch.linspace(0, 10, 6)
a = a.view(2, 3)  # [[0, 2, 4], [6, 8, 10]]
print(f'原始张量 a:\n{a}, 形状: {a.size()}')
# dim=0 sums down the columns; keepdim=True preserves the reduced axis as size 1.
print(f'沿着 y 轴方向累加:{torch.sum(a, dim=0)}, 形状: {a.sum(dim=0).shape}')
print(f'沿着 y 轴方向累加,保留维度:{torch.sum(a, dim=0, keepdim=True)}, 形状: {a.sum(dim=0, keepdim=True).shape}')

x = torch.linspace(0, 10, 6).view(2, 3)
print(f'原始张量 x:\n{x}')
print(f'求所有元素的最大值:\n{torch.max(x)}')
# max along a dim returns both the values and their indices.
print(f'求沿着 y 方向的最大值:\n{torch.max(x, 0)}')
print(f'求 y 方向最大的 2 个值:\n{torch.topk(x, 2, dim=0)}')
# --- Dot product and matrix multiplication ---
a = torch.tensor([2, 3])
b = torch.tensor([3, 4])
# dot of 1-D tensors returns a 0-dim scalar tensor.
print(f'一维张量 a 和 b 的点积: {torch.dot(a, b)}, 形状: {a.dot(b).shape}')

# 2-D matrix multiply: (2, 3) @ (3, 4) -> (2, 4)
a = torch.randint(10, (2, 3))
b = torch.randint(6, (3, 4))
print(f'二维张量的矩阵乘积:\n{torch.mm(a, b)}, 形状: {a.mm(b).shape}')

# Batched matrix multiply: (2, 3, 4) @ (2, 4, 5) -> (2, 3, 5)
a = torch.randint(10, (2, 3, 4))
b = torch.randint(6, (2, 4, 5))
print(f'三维张量的批量矩阵乘积:\n{torch.bmm(a, b)}, 形状: {a.bmm(b).shape}')
# matmul (the @ operator) dispatches to dot / mm / bmm based on input ranks.
print(f'使用 matmul 计算:\n{a @ b}, 形状: {a.matmul(b).shape}')