import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
class Trainer:
    """Supervised-learning training driver.

    Runs alternating training/validation epochs over the given loaders,
    records per-epoch loss/accuracy in ``history``, and checkpoints the
    model whenever validation loss improves.
    """

    def __init__(self, model, train_loader, val_loader, criterion, optimizer, device, epochs):
        """Store training components and move the model to ``device``.

        Args:
            model: the network to train (moved to ``device`` here).
            train_loader: iterable of ``(data, target)`` training batches.
            val_loader: iterable of ``(data, target)`` validation batches.
            criterion: loss function ``criterion(output, target)``.
            optimizer: optimizer already bound to ``model``'s parameters.
            device: torch device (or device string) for data and model.
            epochs: total number of epochs ``train()`` will run.
        """
        self.model = model.to(device)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.criterion = criterion
        self.optimizer = optimizer
        self.device = device
        self.epochs = epochs
        self.best_val_loss = float('inf')  # best (lowest) validation loss seen so far
        self.history = {'train': [], 'val': []}  # per-epoch {'loss', 'acc'} dicts

    def train_epoch(self):
        """Run one full training pass over ``train_loader``.

        Returns:
            Tuple ``(mean_batch_loss, accuracy_pct)`` for the epoch.
            Accuracy assumes a classification output; predictions are
            ``argmax`` over dim 1.
        """
        self.model.train()
        total_loss = 0.0
        correct = 0
        total = 0
        pbar = tqdm(self.train_loader, desc='Training')
        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            pbar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'acc': f'{100.*correct/total:.2f}%'
            })
        # Guard against an empty loader so an epoch over zero batches
        # reports (0.0, 0.0) instead of raising ZeroDivisionError.
        n_batches = max(len(self.train_loader), 1)
        accuracy = 100. * correct / total if total else 0.0
        return total_loss / n_batches, accuracy

    def validate(self):
        """Run one evaluation pass over ``val_loader`` without gradients.

        Returns:
            Tuple ``(mean_batch_loss, accuracy_pct)`` over the validation set.
        """
        self.model.eval()
        total_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for data, target in self.val_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                total_loss += loss.item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()
        # Same empty-loader guard as train_epoch, for consistency.
        n_batches = max(len(self.val_loader), 1)
        accuracy = 100. * correct / total if total else 0.0
        return total_loss / n_batches, accuracy

    def train(self):
        """Run the full training loop for ``self.epochs`` epochs.

        After each epoch, appends metrics to ``self.history`` and saves a
        checkpoint when validation loss improves on the best seen so far.
        """
        for epoch in range(1, self.epochs + 1):
            print(f'\nEpoch {epoch}/{self.epochs}')
            train_loss, train_acc = self.train_epoch()
            val_loss, val_acc = self.validate()
            self.history['train'].append({'loss': train_loss, 'acc': train_acc})
            self.history['val'].append({'loss': val_loss, 'acc': val_acc})
            print(f'Train Loss: {train_loss:.4f}, Acc: {train_acc:.2f}%')
            print(f'Val Loss: {val_loss:.4f}, Acc: {val_acc:.2f}%')
            if val_loss < self.best_val_loss:
                self.best_val_loss = val_loss
                # NOTE(review): save_checkpoint is not defined in the visible
                # part of this class — presumably provided elsewhere in the
                # file or by a subclass; confirm before relying on it.
                self.save_checkpoint('best_model.pth')