Opps_0
Opps_0

Reputation: 438

AttributeError: 'tuple' object has no attribute 'train_dataloader'

I have 3 files. In the datamodule file, I have created the data and used the basic format of PyTorch Lightning. In the linear_model file I made a linear regression model based on this page. Finally, I have a train file, where I call the model and try to fit the data. But I am getting this error:

GPU available: False, used: False
TPU available: False, using: 0 TPU cores
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py", line 10, in <module>
    train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),
AttributeError: 'tuple' object has no attribute 'train_dataloader'

Sample datamodule file

# Asker's original datamodule. NOTE(review): as pasted, the methods below
# are dedented out of the class body — presumably a formatting loss, since
# the traceback shows them resolving as methods; verify against the real file.
class DataModuleClass(pl.LightningDataModule):
    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10
        self.prepare_data()
    
def prepare_data(self):
    # Synthetic data: x ~ U(0, 10), noise e ~ N(0, sigma), target y = x + e.
    x = np.random.uniform(0, 10, 10)
    e = np.random.normal(0, self.sigma, len(x))
    
    y = x + e

    # Features are the (x, e) pairs, shape (10, 2).
    X = np.transpose(np.array([x, e]))

    self.x_train_tensor = torch.from_numpy(X).float().to(device)
    self.y_train_tensor = torch.from_numpy(y).float().to(device)
    
    training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)
    self.training_dataset = training_dataset

def setup(self):
    data = self.training_dataset
    self.train_data, self.val_data = random_split(data, [8, 2])
    # Returning a tuple here is the source of the reported error:
    # `setup().train_dataloader()` calls .train_dataloader() on this tuple.
    return self.train_data, self.val_data
    
def train_dataloader(self):
    return DataLoader(self.train_data)

def val_dataloader(self):
    return DataLoader(self.val_data)

Sample training file

from . import datamodule, linear_model

model = linear_model.LinearRegression(input_dim=2, l1_strength=1, l2_strength=1)

trainer = pl.Trainer()
# setup() returns (train_data, val_data) — a plain tuple — so the chained
# .train_dataloader() call below raises the AttributeError in the question.
# (Also note: the datamodule defines val_dataloader, not val_dataloaders.)
trainer.fit(model, 
            train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),
            val_dataloaders=datamodule.DataModuleClass().setup().val_dataloaders())

Let me know if you need more code or explanation.

Update (Based on the comment)

Now, I am getting the following error after removing self.prepare_data() from the __init__() of the DataModuleClass(), removed return self.train_data, self.val_data from setup(), and changed the test file to

data_module = datamodule.DataModuleClass()

trainer = pl.Trainer()
trainer.fit(model,data_module)

Error:

GPU available: False, used: False
TPU available: False, using: 0 TPU cores
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py", line 10, in <module>
    train_dataloader=datamodule.DataModuleClass().train_dataloader(),
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/datamodule.py", line 54, in train_dataloader
    return DataLoader(self.train_data)
AttributeError: 'DataModuleClass' object has no attribute 'train_data'

Upvotes: 2

Views: 1872

Answers (1)

KnowledgeGainer
KnowledgeGainer

Reputation: 1097

Most of the things were correct, except for a few, such as:

def prepare_data(self):

This function was right except that it should not return anything.

Another thing was

def setup(self,stage=None):

It requires a `stage` argument, which can be given a default value of `None` when we don't need to switch between different train and test stages.

Putting everything together, here is the code:

from argparse import ArgumentParser
import numpy as np
import pytorch_lightning as pl
from torch.utils.data import random_split, DataLoader, TensorDataset
import torch
from torch.autograd import Variable
from torchvision import transforms
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from torch.optim import Adam
from torch.optim.optimizer import Optimizer


class LinearRegression(pl.LightningModule):
    """Linear regression trained with MSE plus optional L1/L2 penalties.

    Args:
        input_dim: number of input features.
        output_dim: number of outputs (1 for scalar regression).
        bias: whether the linear layer has a bias term.
        learning_rate: optimizer learning rate.
        optimizer: optimizer *class* (not instance), default Adam.
        l1_strength: coefficient of the L1 penalty (0 disables it).
        l2_strength: coefficient of the L2 penalty (0 disables it).
    """

    def __init__(
        self,
        input_dim: int = 2,
        output_dim: int = 1,
        bias: bool = True,
        learning_rate: float = 1e-4,
        optimizer: Optimizer = Adam,
        l1_strength: float = 0.0,
        l2_strength: float = 0.0
    ):
        super().__init__()
        self.save_hyperparameters()
        self.optimizer = optimizer

        self.linear = nn.Linear(in_features=self.hparams.input_dim, out_features=self.hparams.output_dim, bias=bias)

    def forward(self, x):
        """Return predictions of shape (batch, output_dim)."""
        y_hat = self.linear(x)
        return y_hat

    def training_step(self, batch, batch_idx):
        x, y = batch

        # flatten any input
        x = x.view(x.size(0), -1)

        y_hat = self(x)

        # BUG FIX: y coming from TensorDataset is 1-D (batch,), while y_hat
        # is (batch, 1).  F.mse_loss would broadcast them to (batch, batch)
        # and silently compute the wrong loss, so align the shapes first.
        if y.dim() == y_hat.dim() - 1:
            y = y.unsqueeze(-1)

        loss = F.mse_loss(y_hat, y, reduction='sum')

        # L1 regularizer
        if self.hparams.l1_strength > 0:
            l1_reg = sum(param.abs().sum() for param in self.parameters())
            loss += self.hparams.l1_strength * l1_reg

        # L2 regularizer
        if self.hparams.l2_strength > 0:
            l2_reg = sum(param.pow(2).sum() for param in self.parameters())
            loss += self.hparams.l2_strength * l2_reg

        # Normalize the summed loss by the batch size.
        loss /= x.size(0)

        tensorboard_logs = {'train_mse_loss': loss}
        progress_bar_metrics = tensorboard_logs
        return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        y_hat = self(x)
        # Same shape alignment as in training_step (see BUG FIX note there
        # does not apply — stated here in full): a 1-D target would be
        # broadcast against the (batch, 1) prediction and inflate the loss.
        if y.dim() == y_hat.dim() - 1:
            y = y.unsqueeze(-1)
        return {'val_loss': F.mse_loss(y_hat, y)}

    def validation_epoch_end(self, outputs):
        # Aggregate the per-batch validation losses into one scalar.
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_mse_loss': val_loss}
        progress_bar_metrics = tensorboard_logs
        return {'val_loss': val_loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    def configure_optimizers(self):
        # self.optimizer is a class; instantiate it over this module's params.
        return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)




# Fix the RNG seed so the synthetic dataset is reproducible across runs.
np.random.seed(42)

# Pick GPU when available; DataModuleClass moves its tensors to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

class DataModuleClass(pl.LightningDataModule):
    """Synthetic regression data: features (x, e), target y = x + e.

    Generates 10 samples in prepare_data(), splits them 8/2 into
    train/val in setup(), and serves them through DataLoaders.
    """

    def __init__(self):
        super().__init__()
        self.sigma = 5        # std-dev of the additive noise term e
        self.batch_size = 10  # used by both dataloaders below

    def prepare_data(self):
        # x ~ U(0, 10), e ~ N(0, sigma); the model must learn y = x + e
        # from the (x, e) feature pairs.
        # NOTE(review): Lightning docs advise against assigning state in
        # prepare_data (it runs once per node, not per process); fine for
        # single-process runs like this one — confirm before scaling out.
        x = np.random.uniform(0, 10, 10)
        e = np.random.normal(0, self.sigma, len(x))

        y = x + e

        # Feature matrix of shape (10, 2): columns are x and e.
        X = np.transpose(np.array([x, e]))

        self.x_train_tensor = torch.from_numpy(X).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)

        self.training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)

    def setup(self, stage=None):
        # `stage` ('fit'/'test'/None) is required by Lightning's API even
        # when unused.  Split the 10 samples into 8 train / 2 val.
        self.train_data, self.val_data = random_split(self.training_dataset, [8, 2])

    def train_dataloader(self):
        # BUG FIX: pass batch_size — it was stored in __init__ but never
        # used, so DataLoader silently fell back to its default batch_size=1.
        return DataLoader(self.train_data, batch_size=self.batch_size)

    def val_dataloader(self):
        return DataLoader(self.val_data, batch_size=self.batch_size)

# Pass the datamodule object itself to fit(); the Trainer calls
# prepare_data()/setup() at the right time — never call them manually
# (that was the root cause of the asker's AttributeError).
model = LinearRegression(input_dim=2, l1_strength=1, l2_strength=1)
trainer = pl.Trainer()
dummy = DataModuleClass()
trainer.fit(model,dummy)

Upvotes: 1

Related Questions