import torch
import numpy as np
import matplotlib.pyplot as plt
class LinearRegression(torch.nn.Module):
    """Single-layer linear model (y = w*x + b) expressed as a neural network."""

    def __init__(self, inputSize, outputSize):
        super().__init__()
        # Wrapped in Sequential so the layer stays addressable as
        # self.layers[0] (the script prints its weight/bias after training).
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(inputSize, outputSize),
        )

    def forward(self, x):
        return self.layers(x)
# Dataset: the line y = 2x - 1 sampled at the integer points 0..10,
# built directly as float32 arrays for PyTorch.
x = np.arange(11, dtype=np.float32)
y = 2.0 * x - 1.0

# Reshape into (N, 1) column tensors, the layout nn.Linear expects.
X_train = torch.from_numpy(x).view(-1, 1)
y_train = torch.from_numpy(y).view(-1, 1)
# Linear regression posed as a one-layer neural network:
# MSE loss, plain SGD on the two parameters (weight and bias).
lr_model = LinearRegression(1, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(lr_model.parameters(), lr=0.01)

print(lr_model)

# Count only the parameters that will receive gradients (here: 2).
trainable_sizes = (p.numel() for p in lr_model.parameters() if p.requires_grad)
num_params = sum(trainable_sizes)
print(f"liczba trenowalnych parametrów: {num_params}")

epochs = 400
# Training loop: full-batch gradient descent on the MSE loss.
# Fix: the original called lr_model.eval() at the end of every iteration and
# lr_model.train() at the start of the next one — a redundant per-epoch mode
# toggle. Training mode is now set once before the loop; eval mode is
# restored after training, where inference actually happens.
lr_model.train()
for epoch in range(epochs):
    y_pred = lr_model(X_train)
    loss = criterion(y_pred, y_train)

    # Standard PyTorch step: clear stale gradients, backprop, update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Progress report every 50 epochs.
    if (epoch + 1) % 50 == 0:
        print(f'epoch: {epoch+1:03d}, loss = {loss.item():.2f}')
# After training, regenerate predictions with gradient tracking disabled.
lr_model.eval()
with torch.no_grad():
    y_hat = lr_model(X_train)

# The learned parameters should approach weight=2, bias=-1.
print(lr_model.layers[0].weight, lr_model.layers[0].bias)

# Plot the ground-truth points against the fitted line.
plt.clf()
plt.plot(X_train, y_train, 'go', label='True data', alpha=0.5)
plt.plot(X_train, y_hat, '--', label='Predictions', alpha=0.5)
plt.legend(loc='best')
plt.show()
# --- Sample program output (commented out: pasted console text is not
# --- valid Python and would break the script) ---
# LinearRegression(
#   (layers): Sequential(
#     (0): Linear(in_features=1, out_features=1, bias=True)
#   )
# )
# liczba trenowalnych parametrów: 2
# epoch: 050, loss = 0.85
# epoch: 100, loss = 0.48
# epoch: 150, loss = 0.28
# epoch: 200, loss = 0.16
# epoch: 250, loss = 0.09
# epoch: 300, loss = 0.05
# epoch: 350, loss = 0.03
# epoch: 400, loss = 0.02
# Parameter containing:
# tensor([[1.9655]], requires_grad=True) Parameter containing:
# tensor([-0.7603], requires_grad=True)