IndexError: list index out of range in deep learning model evaluation

I am evaluating a deep learning model on 7 classes. I have declared all the classes and listed their names. After constructing the confusion matrix, I get an IndexError: list index out of range.

import numpy as np
import torch
import torchvision
from torchvision import datasets, models, transforms
import torch.utils.data as data
import multiprocessing
from sklearn.metrics import confusion_matrix
import sys

# Paths for image directory and model
EVAL_DIR=sys.argv[1]
EVAL_MODEL='model.pth'

# Load the model for evaluation
model = torch.load(EVAL_MODEL)
model.eval()

# Configure batch size and number of CPUs
num_cpu = multiprocessing.cpu_count()
bs = 8

# Prepare the eval data loader
eval_transform=transforms.Compose([
        transforms.Resize(size=256),
        transforms.CenterCrop(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])])

eval_dataset=datasets.ImageFolder(root=EVAL_DIR, transform=eval_transform)
eval_loader=data.DataLoader(eval_dataset, batch_size=bs, shuffle=True,
                            num_workers=num_cpu, pin_memory=True)

# Enable gpu mode, if cuda available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Number of classes and dataset-size
num_classes=len(eval_dataset.classes)
dsize=len(eval_dataset)

# Class label names
class_names=['1','2','10','13','14','21','24']

# Initialize the prediction and label lists
predlist=torch.zeros(0,dtype=torch.long, device='cpu')
lbllist=torch.zeros(0,dtype=torch.long, device='cpu')

# Evaluate the model accuracy on the dataset
correct = 0
total = 0
with torch.no_grad():
    for images, labels in eval_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)

        total += labels.size(0)
        correct += (predicted == labels).sum().item()

        predlist=torch.cat([predlist,predicted.view(-1).cpu()])
        lbllist=torch.cat([lbllist,labels.view(-1).cpu()])

# Overall accuracy
overall_accuracy=100 * correct / total
print('Accuracy of the network on the {:d} test images: {:.2f}%'.format(dsize, 
    overall_accuracy))

# Confusion matrix
conf_mat=confusion_matrix(lbllist.numpy(), predlist.numpy())
print('Confusion Matrix')
print('-'*16)
print(conf_mat,'\n')

# Per-class accuracy
class_accuracy=100*conf_mat.diagonal()/conf_mat.sum(1)
print('Per class accuracy')
print('-'*18)
for label,accuracy in zip(eval_dataset.classes, class_accuracy):
     #print(class_names)
     print(class_names[int(label)])
     class_name=class_names[int(label)]
     print('Accuracy of class %8s : %0.2f %%'%(class_name, accuracy))

'''
Sample run: python eval.py eval_ds
'''

Error

Traceback (most recent call last):
  File "eval.py", line 80, in <module>
    print(class_names[int(label)])
IndexError: list index out of range

Answer

There are three ways to solve this problem and verify that each accuracy value is matched to the correct class.
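
The root cause is that eval_dataset.classes holds the class-folder names as strings, not positional indices. int(label) therefore turns a folder name such as '24' into the index 24, which is far past the end of a 7-element list. A minimal sketch of the failure, assuming the folders in EVAL_DIR are named exactly as in class_names:

class_names = ['1', '2', '10', '13', '14', '21', '24']

# eval_dataset.classes yields these same strings (ImageFolder sorts folder names)
for label in class_names:
    idx = int(label)                 # '24' -> 24, but len(class_names) == 7
    if idx >= len(class_names):
        print('label %s -> index %d is out of range' % (label, idx))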

Method #1

results = zip(eval_dataset.classes, class_accuracy)
i = 0
for label, accuracy in results:
    class_name = class_names[i]
    print('Accuracy of class %8s : %0.2f %%' % (class_name, accuracy))
    i += 1
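
Note that Method #1 pairs accuracies with class_names purely by position. ImageFolder sorts class folders as strings, so (assuming folders named '1', '2', '10', '13', '14', '21', '24') eval_dataset.classes comes back as ['1', '10', '13', '14', '2', '21', '24'], which is not the order in which class_names is declared; the printed names are only correct if the two orderings agree.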

Method #2

results = zip(eval_dataset.classes, class_accuracy)
for label, accuracy in results:
    if label in class_names:
        class_name = label
        print('Accuracy of class %8s : %0.2f %%' % (class_name, accuracy))
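
Method #2 prints the folder name itself, and only for labels that also appear in class_names; any class folder whose name is missing from class_names is silently skipped, so it is worth checking that both lists contain the same entries.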

Method #3

results = zip(eval_dataset.classes, class_accuracy)
for label, accuracy in results:
    if label in class_names:
        ind = class_names.index(label)
        class_name = class_names[ind]
        print('Accuracy of class %8s : %0.2f %%' % (class_name, accuracy))
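
If the class folders in EVAL_DIR are already named with the strings in class_names (as Methods #2 and #3 assume), the original loop in eval.py can also be fixed in place by printing the label itself instead of using it as a list index; a minimal sketch:

# Per-class accuracy, using the folder name reported by ImageFolder directly
for label, accuracy in zip(eval_dataset.classes, class_accuracy):
    # label is the class-folder name (e.g. '24'), not a position in class_names
    print('Accuracy of class %8s : %0.2f %%' % (label, accuracy))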
