diff --git a/cifar.py b/cifar.py
index fe7d07d..4916b4d 100644
--- a/cifar.py
+++ b/cifar.py
@@ -222,7 +222,7 @@ def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
         data_time.update(time.time() - end)
 
         if use_cuda:
-            inputs, targets = inputs.cuda(), targets.cuda(async=True)
+            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
         inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
 
         # compute output
@@ -274,41 +274,42 @@ def test(testloader, model, criterion, epoch, use_cuda):
     end = time.time()
     bar = Bar('Processing', max=len(testloader))
-    for batch_idx, (inputs, targets) in enumerate(testloader):
-        # measure data loading time
-        data_time.update(time.time() - end)
-
-        if use_cuda:
-            inputs, targets = inputs.cuda(), targets.cuda()
-        inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)
-
-        # compute output
-        outputs = model(inputs)
-        loss = criterion(outputs, targets)
-
-        # measure accuracy and record loss
-        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
-        losses.update(loss.item(), inputs.size(0))
-        top1.update(prec1.item(), inputs.size(0))
-        top5.update(prec5.item(), inputs.size(0))
-
-        # measure elapsed time
-        batch_time.update(time.time() - end)
-        end = time.time()
-
-        # plot progress
-        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
-            batch=batch_idx + 1,
-            size=len(testloader),
-            data=data_time.avg,
-            bt=batch_time.avg,
-            total=bar.elapsed_td,
-            eta=bar.eta_td,
-            loss=losses.avg,
-            top1=top1.avg,
-            top5=top5.avg,
-        )
-        bar.next()
+    with torch.no_grad():
+        for batch_idx, (inputs, targets) in enumerate(testloader):
+            # measure data loading time
+            data_time.update(time.time() - end)
+
+            if use_cuda:
+                inputs, targets = inputs.cuda(), targets.cuda()
+            inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
+
+            # compute output
+            outputs = model(inputs)
+            loss = criterion(outputs, targets)
+
+            # measure accuracy and record loss
+            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
+            losses.update(loss.item(), inputs.size(0))
+            top1.update(prec1.item(), inputs.size(0))
+            top5.update(prec5.item(), inputs.size(0))
+
+            # measure elapsed time
+            batch_time.update(time.time() - end)
+            end = time.time()
+
+            # plot progress
+            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
+                batch=batch_idx + 1,
+                size=len(testloader),
+                data=data_time.avg,
+                bt=batch_time.avg,
+                total=bar.elapsed_td,
+                eta=bar.eta_td,
+                loss=losses.avg,
+                top1=top1.avg,
+                top5=top5.avg,
+            )
+            bar.next()
     bar.finish()
     return (losses.avg, top1.avg)
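Both files receive the same two migrations, so they are worth stating once. First, `tensor.cuda(async=True)` stops parsing on Python 3.7+, where `async` became a reserved keyword; `non_blocking=True` is the PyTorch 0.4+ spelling of the same pinned-memory copy hint. Second, `Variable(..., volatile=True)` has had no effect since the Tensor/Variable merge in PyTorch 0.4, and wrapping the evaluation loop in `torch.no_grad()` is the supported way to disable gradient tracking. A minimal sketch of the target pattern, assuming `model`, `criterion`, `testloader`, and `use_cuda` are defined as in cifar.py:

    import torch

    model.eval()                     # evaluation mode: disables dropout, uses running BN stats
    with torch.no_grad():            # replaces Variable(..., volatile=True)
        for inputs, targets in testloader:
            if use_cuda:
                inputs = inputs.cuda(non_blocking=True)    # replaces the removed async=True
                targets = targets.cuda(non_blocking=True)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
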
diff --git a/fashionmnist.py b/fashionmnist.py
index 08f9fc7..369f7ca 100644
--- a/fashionmnist.py
+++ b/fashionmnist.py
@@ -221,7 +221,7 @@ def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
         data_time.update(time.time() - end)
 
         if use_cuda:
-            inputs, targets = inputs.cuda(), targets.cuda(async=True)
+            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
         inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
 
         # compute output
@@ -273,41 +273,42 @@ def test(testloader, model, criterion, epoch, use_cuda):
     end = time.time()
     bar = Bar('Processing', max=len(testloader))
-    for batch_idx, (inputs, targets) in enumerate(testloader):
-        # measure data loading time
-        data_time.update(time.time() - end)
-
-        if use_cuda:
-            inputs, targets = inputs.cuda(), targets.cuda()
-        inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)
-
-        # compute output
-        outputs = model(inputs)
-        loss = criterion(outputs, targets)
-
-        # measure accuracy and record loss
-        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
-        losses.update(loss.item(), inputs.size(0))
-        top1.update(prec1.item(), inputs.size(0))
-        top5.update(prec5.item(), inputs.size(0))
-
-        # measure elapsed time
-        batch_time.update(time.time() - end)
-        end = time.time()
-
-        # plot progress
-        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
-            batch=batch_idx + 1,
-            size=len(testloader),
-            data=data_time.avg,
-            bt=batch_time.avg,
-            total=bar.elapsed_td,
-            eta=bar.eta_td,
-            loss=losses.avg,
-            top1=top1.avg,
-            top5=top5.avg,
-        )
-        bar.next()
+    with torch.no_grad():
+        for batch_idx, (inputs, targets) in enumerate(testloader):
+            # measure data loading time
+            data_time.update(time.time() - end)
+
+            if use_cuda:
+                inputs, targets = inputs.cuda(), targets.cuda()
+            inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
+
+            # compute output
+            outputs = model(inputs)
+            loss = criterion(outputs, targets)
+
+            # measure accuracy and record loss
+            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
+            losses.update(loss.item(), inputs.size(0))
+            top1.update(prec1.item(), inputs.size(0))
+            top5.update(prec5.item(), inputs.size(0))
+
+            # measure elapsed time
+            batch_time.update(time.time() - end)
+            end = time.time()
+
+            # plot progress
+            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
+                batch=batch_idx + 1,
+                size=len(testloader),
+                data=data_time.avg,
+                bt=batch_time.avg,
+                total=bar.elapsed_td,
+                eta=bar.eta_td,
+                loss=losses.avg,
+                top1=top1.avg,
+                top5=top5.avg,
+            )
+            bar.next()
     bar.finish()
     return (losses.avg, top1.avg)
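One follow-up this diff leaves open: the `torch.autograd.Variable(...)` wrappers kept inside both `torch.no_grad()` blocks have been no-ops since PyTorch 0.4 (they return plain tensors), and with gradient tracking already disabled, the `.data` accesses in the `accuracy(...)` calls are redundant as well. A sketch, using the same names as the test loops above, of what each loop body could shrink to:

    # inside `with torch.no_grad():` -- no Variable() wrapping, no .data
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
    losses.update(loss.item(), inputs.size(0))
    top1.update(prec1.item(), inputs.size(0))
    top5.update(prec5.item(), inputs.size(0))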