Replies: 1 comment
Hi @doge4ever,

It looks like the accuracy is being computed on the raw logits rather than on the rounded predictions. You can remove that and just use the following lines:

**Training**

```python
y_pred = torch.round(torch.sigmoid(y_logits)).to(device)
loss = Loss_fn(y_logits, y_train)
# Measure accuracy on y_pred (not y_logits)
acc = accuracy_fn(y_true=y_train, y_pred=y_pred)
```

**Testing**

```python
with torch.inference_mode():
    test_logits = model(X_test).squeeze()
    test_pred = torch.round(torch.sigmoid(test_logits)).to(device)
    test_loss = Loss_fn(test_logits, y_test)
    # Measure testing acc on test_pred (not test_logits)
    test_acc = accuracy_fn(y_true=y_test, y_pred=test_pred)
```

**Full code**

```python
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
import torch
from torch import nn
import matplotlib.pyplot as plt
import pandas as pd
from torchsummary import summary
import requests
from pathlib import Path
torch.manual_seed(42)
X, y = make_moons(n_samples=1000, noise=0.05)
device = 'cpu'
X = torch.from_numpy(X)
y = torch.from_numpy(y)
X = X.to(dtype=torch.float32)  # .to() returns a new tensor; assign it, otherwise this line is a no-op
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2)
print("Xtrain",X_train.shape)
class Model_V1(nn.Module):
    def __init__(self):
        super().__init__()
        self.main = nn.Sequential(nn.Linear(in_features=2, out_features=10),
                                  nn.ReLU(),
                                  nn.Linear(in_features=10, out_features=10),
                                  nn.ReLU(),
                                  nn.Linear(in_features=10, out_features=10),
                                  nn.ReLU(),
                                  nn.Linear(in_features=10, out_features=1))

    def forward(self, x):
        return self.main(x)
def accuracy_fn(y_true, y_pred):
    correct = torch.eq(y_true, y_pred).sum().item()  # torch.eq() calculates where two tensors are equal
    acc = (correct / len(y_pred)) * 100
    return acc
model = Model_V1()
Loss_fn = nn.L1Loss()
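# Note: nn.L1Loss on raw logits is an unusual choice for binary classification;
# nn.BCEWithLogitsLoss is the more common option for this kind of problem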
optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01)
epochs = 1000
X_train, y_train = X_train.to(device, dtype=torch.float32), y_train.to(device, dtype=torch.float32)
X_test, y_test = X_test.to(device, dtype = torch.float32), y_test.to(device, dtype = torch.float32)
for epoch in range(epochs):
    model.train()
    y_logits = model(X_train).squeeze().to(device)
    y_pred = torch.round(torch.sigmoid(y_logits)).to(device)
    loss = Loss_fn(y_logits, y_train)
    acc = accuracy_fn(y_true=y_train, y_pred=y_pred)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    model.eval()
    with torch.inference_mode():
        test_logits = model(X_test).squeeze()
        test_pred = torch.round(torch.sigmoid(test_logits)).to(device)
        test_loss = Loss_fn(test_logits, y_test)
        test_acc = accuracy_fn(y_true=y_test, y_pred=test_pred)

    if epoch % 10 == 0:
        print(f"epoch: {epoch} | loss: {loss:.5f} | accuracy: {acc:.2f} | test loss: {test_loss:.5f} | test acc: {test_acc:.5f}")
from helper_functions import plot_predictions, plot_decision_boundary
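# helper_functions.py provides the plotting utilities used below;
# the requests/Path imports above are presumably there for downloading it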
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_decision_boundary(model, X_train, y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_decision_boundary(model, X_test, y_test)
plt.show()
```

Running the above you should see the accuracy start to go up (after enough epochs). See demo notebook link: https://colab.research.google.com/drive/1WXhQhsMwvoj5ekGOhMiOKBOMtGGaqRLe?usp=sharing
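As a side note, here is a minimal sketch (not from the notebook above; the tensor values are made up for illustration) of why accuracy has to be measured on the rounded predictions: raw logits are arbitrary real numbers, so `torch.eq` against 0/1 labels essentially never matches, while `sigmoid` + `round` produces hard class labels that can be compared.

```python
import torch

labels = torch.tensor([0., 1., 1., 0.])
logits = torch.tensor([-2.3, 1.7, 0.4, -0.9])  # raw model outputs (illustrative values)

# Comparing logits directly to 0/1 labels: no exact matches -> accuracy stuck near 0%
print(torch.eq(labels, logits).float().mean().item())  # 0.0

# Sigmoid squashes logits into (0, 1); rounding gives hard 0/1 predictions
preds = torch.round(torch.sigmoid(logits))
print(torch.eq(labels, preds).float().mean().item())   # 1.0 -> 100% accuracy here
```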
Hi!

I'm currently working on an exercise with Sklearn.make_moons(). I created a model and trained it, but when I print the loss and the accuracy, the loss goes down to 0.0001 while the accuracy does strange things (sometimes it's at 0 and other times at 100).

Can you check?

Thanks a lot!!

Code:

A part of the output: