# coding: utf-8
'''
File: eval.py
Project: MobilePose
File Created: Thursday, 8th March 2018 1:54:07 pm
Author: Yuliang Xiu ([email protected])
-----
Last Modified: Thursday, 8th March 2018 3:01:51 pm
Modified By: Yuliang Xiu ([email protected])
-----
Copyright 2018 - 2018 Shanghai Jiao Tong University, Machine Vision and Intelligence Group
'''
import warnings
warnings.filterwarnings('ignore')
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms, utils, models
from tqdm import tqdm
from skimage import io, transform
from math import ceil
import numpy as np
import torch
import csv
import json  # needed for json.dumps() in eval_coco()
import os
import argparse
import time
from dataloader import *
from coco_utils import *
from networks import *
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
gpus = [0, 1]
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # expose only the first GPU to this process
torch.backends.cudnn.enabled = True
print(torch.cuda.device_count())
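# Note: with CUDA_VISIBLE_DEVICES="0" only one device is visible, so only
# gpus[0] is usable below. A defensive check could go here (a sketch, not
# part of the original script):
#
# assert torch.cuda.is_available(), \
#     "eval.py runs in gpu mode by default; see the cpu-mode lines in eval_coco()"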
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MobilePose Demo')
    parser.add_argument('--model', type=str, default="resnet")
    args = parser.parse_args()
    modeltype = args.model

    # user-defined parameters
    filename = "final-aug.t7"
    num_threads = 10
    PATH_PREFIX = "./results/{}".format(modeltype)
    full_name = "./models/{}/{}".format(modeltype, filename)
    # full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/mobilenetv2_224x224-robust.t7"
    # full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/resnet18_227x227.t7"
    # full_name = "/home/yuliang/code/MobilePose-pytorch/models/demo/mobilenet-best.t7"
    # ROOT_DIR = "/home/yuliang/code/deeppose_tf/datasets/mpii"
    ROOT_DIR = "../deeppose_tf/datasets/mpii"

    # each backbone expects its own input resolution
    if modeltype == 'resnet':
        input_size = 227
    elif modeltype == 'mobilenet':
        input_size = 224
print("Loading testing dataset, wait...")
# load dataset
test_dataset = PoseDataset(csv_file=os.path.join(ROOT_DIR,'test_joints.csv'),
transform=transforms.Compose([
# Rescale((input_size, input_size)), # for resnet18 and mobilenet
Wrap((input_size,input_size)), # only for mobilenet-best
Expansion(),
ToTensor()
]))
test_dataset_size = len(test_dataset)
test_dataloader = DataLoader(test_dataset, batch_size=test_dataset_size,
shuffle=False, num_workers = num_threads)
    # get all test data (batch_size equals the dataset size, so this loop runs exactly once)
    all_test_data = {}
    for i_batch, sample_batched in enumerate(tqdm(test_dataloader)):
        all_test_data = sample_batched
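    # A lower-memory alternative (an added sketch, not the original behaviour):
    # the single giant batch above keeps every test image in RAM at once; the
    # same dict could be built from smaller batches instead.
    #
    # all_images, all_poses = [], []
    # for batch in DataLoader(test_dataset, batch_size=100,
    #                         shuffle=False, num_workers=num_threads):
    #     all_images.append(batch['image'])
    #     all_poses.append(batch['pose'])
    # all_test_data = {'image': torch.cat(all_images), 'pose': torch.cat(all_poses)}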
    def eval_coco(net_path, result_gt_json_path, result_pred_json_path):
        """
        Example:
        eval_coco('/home/yuliang/code/PoseFlow/checkpoint140.t7',
                  'result-gt-json.txt', 'result-pred-json.txt')
        """
        # gpu mode (the checkpoint stores the whole serialized model, so
        # torch.load() already returns a usable network; constructing an
        # extra Net() first, as the original did, was dead code)
        net = torch.load(net_path).cuda(gpus[0])
        # cpu mode
        # net = torch.load(net_path, map_location=lambda storage, loc: storage)
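        # Note: torch.load() unpickles the full model object, so the class
        # definitions from networks.py must be importable. If the checkpoint
        # instead held only weights (an assumption, not how these .t7 files
        # appear to be saved), loading would look like:
        #
        # net = Net()
        # net.load_state_dict(torch.load(net_path))
        # net = net.cuda(gpus[0])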
        # generate ground-truth json
        total_size = len(all_test_data['image'])
        all_coco_images_arr = []
        all_coco_annotations_arr = []
        transform_to_coco_gt(all_test_data['pose'], all_coco_images_arr, all_coco_annotations_arr)

        coco = CocoData(all_coco_images_arr, all_coco_annotations_arr)
        coco_str = coco.dumps()
        result_gt_json = float2int(coco_str)

        # save ground-truth json to file
        with open(result_gt_json_path, "w") as f:
            f.write(result_gt_json)
        # generate prediction json in chunks of 100 images
        all_coco_pred_annotations_arr = []
        for i in tqdm(range(1, int(ceil(total_size / 100.0)) + 1)):
            sample_data = {}
            # gpu mode
            sample_data['image'] = all_test_data['image'][100 * (i - 1):min(100 * i, total_size)].cuda(gpus[0])
            # cpu mode
            # sample_data['image'] = all_test_data['image'][100 * (i - 1):min(100 * i, total_size)]
            # print('test dataset contains: %d' % (len(sample_data['image'])))
            # t0 = time.time()
            output = net(Variable(sample_data['image'], volatile=True))  # inference only (pre-0.4 Variable API)
            # print('FPS is %f' % (1.0 / ((time.time() - t0) / len(sample_data['image']))))
            transform_to_coco_pred(output, all_coco_pred_annotations_arr, 100 * (i - 1))
        all_coco_pred_annotations_arr = [item._asdict() for item in all_coco_pred_annotations_arr]
        result_pred_json = json.dumps(all_coco_pred_annotations_arr, cls=MyEncoder)
        result_pred_json = float2int(result_pred_json)

        # save prediction json to file
        with open(result_pred_json_path, "w") as f:
            f.write(result_pred_json)
    eval_coco(full_name, os.path.join(PATH_PREFIX, 'result-gt-json.txt'),
              os.path.join(PATH_PREFIX, 'result-pred-json.txt'))
    # evaluation
    annType = ['segm', 'bbox', 'keypoints']
    annType = annType[2]
    prefix = 'person_keypoints' if annType == 'keypoints' else 'instances'
    print('Running demo for *%s* results.' % (annType))

    annFile = os.path.join(PATH_PREFIX, "result-gt-json.txt")
    cocoGt = COCO(annFile)
    resFile = os.path.join(PATH_PREFIX, "result-pred-json.txt")
    cocoDt = cocoGt.loadRes(resFile)

    imgIds = sorted(cocoGt.getImgIds())
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
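    # cocoEval.summarize() prints pycocotools' standard 10-line keypoint table
    # (OKS-based AP/AR at IoU=0.50:0.95, 0.50, 0.75 and by object area); the
    # same numbers are then available programmatically, e.g.:
    #
    # stats = cocoEval.stats  # numpy array of the 10 summary values
    # print('AP @ OKS=0.50:0.95: %.3f' % stats[0])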