PyTorch getting RuntimeError: Found dtype Double but expected Float

I am trying to implement a neural net in PyTorch, but it doesn't seem to work. The problem seems to be in the training loop. I've spent several hours on this but can't get it right. Please help, thanks.

I haven't added the data preprocessing parts.

# importing libraries
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
# get x function (dataset related stuff)
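# (samples and Calculating_bottom come from the preprocessing code not shown)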
def Getx(idx):
    sample = samples[idx]
    vector = Calculating_bottom(sample)
    vector = torch.as_tensor(vector, dtype = torch.float64)
    
    return vector

# get y function (dataset related stuff)
def Gety(idx):
    y = np.array(train.iloc[idx, 4], dtype = np.float64)
    y = torch.as_tensor(y, dtype = torch.float64)
    
    return y
# dataset
class mydataset(Dataset):

    def __init__(self):
        super().__init__()

    def __getitem__(self, index):
        x = Getx(index)
        y = Gety(index)
        
        return x, y

    def __len__(self):
        return len(train)
    
dataset = mydataset()
# sample dataset value
print(dataset.__getitem__(0))

(tensor([ 5., 5., 8., 14.], dtype=torch.float64), tensor(-0.3403, dtype=torch.float64))

# data-loader
dataloader = DataLoader(dataset, batch_size = 1, shuffle = True)
# nn architecture
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 4)
        self.fc2 = nn.Linear(4, 2)
        self.fc3 = nn.Linear(2, 1)

    def forward(self, x):
        x = x.float()
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

model = Net()
# device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
# hyper-parameters
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
# training loop

for epoch in range(5):
    
    for batch in dataloader:
        
        # unpacking
        x, y = batch
        x.to(device)
        y.to(device)
        
        # reset gradients
        optimizer.zero_grad()
        
        # forward propagation through the network
        out = model(x)
        
        # calculate the loss
        loss = criterion(out, y)
        
        # backpropagation
        loss.backward()
        
        # update the parameters
        optimizer.step()

Error:

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/loss.py:446: UserWarning: Using a target size (torch.Size([1])) that is different to the input size (torch.Size([1, 1])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
  return F.mse_loss(input, target, reduction=self.reduction)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-18-3f68fcee9ff3> in <module>
     20 
     21         # backpropagation
---> 22         loss.backward()
     23 
     24         # update the parameters

/opt/conda/lib/python3.7/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
    219                 retain_graph=retain_graph,
    220                 create_graph=create_graph)
--> 221         torch.autograd.backward(self, gradient, retain_graph, create_graph)
    222 
    223     def register_hook(self, hook):

/opt/conda/lib/python3.7/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
    130     Variable._execution_engine.run_backward(
    131         tensors, grad_tensors_, retain_graph, create_graph,
--> 132         allow_unreachable=True)  # allow_unreachable flag
    133 
    134 

RuntimeError: Found dtype Double but expected Float
Barney answered 9/5, 2021 at 9:53

You need the data type of the data to match the data type of the model.
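
For context, here's a minimal sketch that reproduces the failure outside the training loop (the tensors below are hypothetical stand-ins for the model output and the dataset target; depending on the PyTorch version, the error may surface in the forward call rather than in backward()):

import torch
import torch.nn as nn

criterion = nn.MSELoss()

# stand-ins mirroring the question: a float32 model output, a float64 target
out = torch.zeros(1, 1, requires_grad=True)    # torch.float32 by default
y = torch.zeros(1, 1, dtype=torch.float64)

loss = criterion(out, y)
loss.backward()    # RuntimeError: Found dtype Double but expected Float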

Either convert the model to double (recommended for simple nets like yours, where float64 carries no serious performance cost):

# nn architecture
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 4)
        self.fc2 = nn.Linear(4, 2)
        self.fc3 = nn.Linear(2, 1)
        self.double()
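
Note that with this option the x = x.float() cast in the model's forward has to be removed as well; otherwise the input is converted back to float32 and the now-double fc1 layer will complain about receiving a Float tensor.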

or convert the data to float:

class mydataset(Dataset):

    def __init__(self):
        super().__init__()

    def __getitem__(self, index):
        x = Getx(index)
        y = Gety(index)
        
        return x.float(), y.float()
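
With this option the x = x.float() cast already in the model's forward becomes a harmless no-op, since the dataset now hands out float32 tensors; the cast that actually fixes the error is the one on y, whose float64 dtype was reaching the loss untouched.
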
Decisive answered 9/5, 2021 at 10:03

Check the data types of "out" and "y":

print(out.dtype)
print(y.dtype)

You may find a difference like:

torch.float32
torch.float64

Cast them to the same type.
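
For example, dropping a cast into the question's training loop right at the loss call (assuming out is the float32 model output and y is the float64 target, as in the question):

loss = criterion(out, y.to(out.dtype))    # cast the target to match the output's dtype
# equivalently: loss = criterion(out, y.float())

The UserWarning in the question's traceback is a separate, shape-related issue: the target has shape [1] while the output has shape [1, 1], and y.view(-1, 1) would align them.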

Codd answered 17/2, 2022 at 20:03
