import torch
import torch.nn as nn
import torch.nn.functional as F
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
torch.manual_seed(446)
np.random.seed(446)
x_numpy = np.array([0.1, 0.2, 0.3])
x_torch = torch.tensor([0.1, 0.2, 0.3])
print('x_numpy', 'x_torch')
print(x_numpy, x_torch)
x_numpy x_torch
[0.1 0.2 0.3] tensor([0.1000, 0.2000, 0.3000])
print(torch.from_numpy(x_numpy), x_torch.numpy())
tensor([0.1000, 0.2000, 0.3000], dtype=torch.float64) [0.1 0.2 0.3]
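# Added note (not in the original): torch.from_numpy shares memory with the
# source array, so an in-place edit on either side is visible on the other.
shared = torch.from_numpy(x_numpy)
x_numpy[0] = 10.0
print(shared)        # tensor([10.0000,  0.2000,  0.3000], dtype=torch.float64)
x_numpy[0] = 0.1     # restore the value so the cells below are unaffected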
y_numpy = np.array([3,4,5.])
y_torch = torch.tensor([3,4,5.])
print('x+y')
print(x_numpy + y_numpy, x_torch + y_torch)
x+y
[3.1 4.2 5.3] tensor([3.1000, 4.2000, 5.3000])
print("norm")
print(np.linalg.norm(x_numpy), torch.norm(x_torch))
norm
0.37416573867739417 tensor(0.3742)
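# Added sanity check: the norm above is just sqrt(0.1**2 + 0.2**2 + 0.3**2).
print(np.sqrt(0.1**2 + 0.2**2 + 0.3**2))   # 0.37416573867739417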
N, C, W, H = 10000, 3, 28, 28
X = torch.randn((N, C, W, H))
print(X.shape)
print(X.view(N, C, 784).shape)
print(X.view(-1, C, 784).shape)
torch.Size([10000, 3, 28, 28])
torch.Size([10000, 3, 784])
torch.Size([10000, 3, 784])
x = torch.empty(5, 1, 4, 1)
y = torch.empty(3, 1, 1)
print((x+y).size())
torch.Size([5, 3, 4, 1])
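# Added note: broadcasting aligns shapes from the trailing dimension, and a
# size-1 dimension (or a missing leading one) is stretched to match:
#   x: 5 x 1 x 4 x 1
#   y:     3 x 1 x 1
#   -> 5 x 3 x 4 x 1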
a = torch.tensor(2.0, requires_grad=True)
b = torch.tensor(1.0, requires_grad=True)
c = a + b
d = b + 1
e = c * d
print('c', c)
print('d', d)
print('e', e)
c tensor(3., grad_fn=<AddBackward0>)
d tensor(2., grad_fn=<AddBackward0>)
e tensor(6., grad_fn=<MulBackward0>)
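# Added sketch: backpropagating through the little graph above. Analytically,
# de/da = d = 2 and de/db = c + d = 5, since b feeds into both c and d.
e.backward()
print('a.grad', a.grad)   # tensor(2.)
print('b.grad', b.grad)   # tensor(5.)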
cpu = torch.device("cpu")
gpu = torch.device("cuda")
x = torch.rand(10)
print(x)
x = x.to(gpu)
print(x)
x = x.to(cpu)
print(x)
tensor([0.3959, 0.6177, 0.7256, 0.0971, 0.9186, 0.8277, 0.4409, 0.9344, 0.8967, 0.1897])
tensor([0.3959, 0.6177, 0.7256, 0.0971, 0.9186, 0.8277, 0.4409, 0.9344, 0.8967, 0.1897], device='cuda:0')
tensor([0.3959, 0.6177, 0.7256, 0.0971, 0.9186, 0.8277, 0.4409, 0.9344, 0.8967, 0.1897])
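# Added note: x.to(gpu) above raises an error on machines without CUDA. A
# common guard (not in the original) is to pick the device at runtime:
device = gpu if torch.cuda.is_available() else cpu
x = torch.rand(10).to(device)
print(x.device)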
def f(x):
    return (x-2)**2

def fp(x):
    return 2*(x-2)
x = torch.tensor([1.0], requires_grad=True)
y = f(x)
y.backward()
print(fp(x))
print('PyTorch\'s f\'(x):', x.grad)
tensor([-2.], grad_fn=<MulBackward0>)
PyTorch's f'(x): tensor([-2.])
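# Added sketch: the same autograd machinery driving a hand-written gradient
# descent loop on f. The start point and step size are illustrative choices.
x = torch.tensor([5.0], requires_grad=True)
step_size = 0.25
for i in range(15):
    y = f(x)
    y.backward()                  # fills x.grad with f'(x)
    with torch.no_grad():
        x -= step_size * x.grad   # gradient descent update
    x.grad.zero_()                # grads accumulate, so clear them each step
print(x)                          # should be close to the minimizer x = 2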
d = 2
n = 50
X = torch.randn(n, d)
true_w = torch.tensor([[-1.0], [2.0]])
y = X @ true_w + torch.randn(n, 1) * 0.1
print('X shape', X.shape)
print('y shape', y.shape)
print('w shape', true_w.shape)
X shape torch.Size([50, 2])
y shape torch.Size([50, 1])
w shape torch.Size([2, 1])
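# Added sanity check (needs PyTorch 1.9+): the least-squares solution of
# min_w ||Xw - y||^2 should land close to true_w on this synthetic data.
w_hat = torch.linalg.lstsq(X, y).solution
print('estimated w', w_hat.flatten())   # expected to be near (-1, 2)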
d_in = 3
d_out = 4
linear_module = nn.Linear(d_in, d_out)
example_tensor = torch.tensor([[1.,2,3], [4,5,6]])
transformed = linear_module(example_tensor)
print('example_tensor', example_tensor.shape)
print('transformed', transformed.shape)
print('w:', linear_module.weight)
print('b:', linear_module.bias)
example_tensor torch.Size([2, 3])
transformed torch.Size([2, 4])
w: Parameter containing:
tensor([[ 0.5260,  0.4925, -0.0887],
        [ 0.3944,  0.4080,  0.2182],
        [-0.1409,  0.0518,  0.3034],
        [ 0.0913,  0.2452, -0.2616]], requires_grad=True)
b: Parameter containing:
tensor([0.5021, 0.0118, 0.1383, 0.4757], requires_grad=True)
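# Added check: nn.Linear computes x @ W^T + b, which we can verify by hand
# against the module's output.
manual = example_tensor @ linear_module.weight.t() + linear_module.bias
print(torch.allclose(manual, transformed))   # True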
activation_fn = nn.ReLU()
example_tensor = torch.tensor([-1.0, 1.0, 0.0])
activated = activation_fn(example_tensor)
print('example_tensor', example_tensor)
print('activated', activated)
example_tensor tensor([-1., 1., 0.])
activated tensor([0., 1., 0.])
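# Added note: the functional form gives the same result without constructing a
# module, which is convenient inside a custom forward():
print(F.relu(example_tensor))   # tensor([0., 1., 0.])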
d_in = 3
d_hidden = 4
d_out = 1
model = torch.nn.Sequential(
    nn.Linear(d_in, d_hidden),
    nn.Tanh(),
    nn.Linear(d_hidden, d_out),
    nn.Sigmoid()
)
example_tensor = torch.tensor([[1.,2,3], [4,5,6]])
transformed = model(example_tensor)
print('transformed', transformed.shape)
transformed torch.Size([2, 1])
params = model.parameters()
for param in params:
    print(param)
Parameter containing:
tensor([[ 0.5431,  0.0524,  0.1126],
        [ 0.2683, -0.2361,  0.2769],
        [-0.1380,  0.5661, -0.1071],
        [-0.3357,  0.0848, -0.0454]], requires_grad=True)
Parameter containing:
tensor([-0.2858, -0.0707, -0.3168,  0.1860], requires_grad=True)
Parameter containing:
tensor([[-0.1412,  0.4412,  0.4086,  0.2844]], requires_grad=True)
Parameter containing:
tensor([-0.4175], requires_grad=True)
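# Added sketch: counting the trainable parameters listed above.
# First layer: 3*4 weights + 4 biases; second layer: 4*1 weights + 1 bias = 21.
n_params = sum(p.numel() for p in model.parameters())
print('total parameters:', n_params)   # 21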
mse_loss_fn = nn.MSELoss()
input = torch.tensor([[0., 0, 0]])
target = torch.tensor([[1., 0, -1]])
loss = mse_loss_fn(input, target)
print(loss)
tensor(0.6667)
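# Added check: MSELoss averages the squared error over all elements:
# ((0-1)**2 + (0-0)**2 + (0-(-1))**2) / 3 = 2/3.
print(((input - target)**2).mean())   # tensor(0.6667)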
model = nn.Linear(1, 1)
x_simple = torch.tensor([[1.]])
y_simple = torch.tensor([[2.]])
optim = torch.optim.SGD(model.parameters(), lr=1e-2)
y_hat = model(x_simple)
print('model params before:', model.weight)
loss = mse_loss_fn(y_hat, y_simple)
optim.zero_grad()
loss.backward()
optim.step()
print('model params after:', model.weight)
model params before: Parameter containing:
tensor([[-0.5237]], requires_grad=True)
model params after: Parameter containing:
tensor([[-0.4818]], requires_grad=True)
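# Added sketch: the single SGD step above, repeated as a small training loop on
# the synthetic regression data (X, y, true_w) defined earlier. The learning
# rate and epoch count here are illustrative choices, not from the original.
model = nn.Linear(d, 1)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
for epoch in range(20):
    y_hat = model(X)
    loss = mse_loss_fn(y_hat, y)
    optim.zero_grad()
    loss.backward()
    optim.step()
print('learned w', model.weight.view(-1))   # should be close to (-1, 2)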