Match HOTA with official result (#186)
* support computing hota metrics

* fix code style for flake8

* update docstring

* add unit test for hota metrics

* add overall computation for hota

* fix invalid denominator

* fix non-contiguous ids when computing assa

* update README for hota metrics

* update README for hota metrics without for loop

* fix typo

* add detailed instructions for hota
Justin900429 authored May 27, 2024
1 parent 7210fcc commit b687013
Showing 6 changed files with 365 additions and 38 deletions.
51 changes: 51 additions & 0 deletions Readme.md
@@ -71,6 +71,9 @@ print(mh.list_metrics_markdown())
| pred_frequencies | `pd.Series` Total number of occurrences of individual predictions over all frames. |
| track_ratios | `pd.Series` Ratio of assigned to total appearance count per unique object id. |
| id_global_assignment | `dict` ID measures: Global min-cost assignment for ID measures. |
| deta_alpha | HOTA: Detection Accuracy (DetA) for a given threshold. |
| assa_alpha | HOTA: Association Accuracy (AssA) for a given threshold. |
| hota_alpha | HOTA: Higher Order Tracking Accuracy (HOTA) for a given threshold. |

<a name="MOTChallengeCompatibility"></a>

@@ -362,6 +365,54 @@ OVERALL 80.0% 80.0% 80.0% 80.0% 80.0% 4 2 2 0 2 2 1 1 50.0% 0.275
"""
```

#### [Underdeveloped] Computing HOTA metrics

Computing HOTA metrics is also possible. However, they cannot be computed through the `Accumulator` class directly, as HOTA requires computing a reweighting matrix over all frames up front. Here is an example of how to use it:

```python
import os
import numpy as np
import motmetrics as mm


def compute_motchallenge(dir_name):
    # `gt.txt` and `test.txt` must be prepared in MOT15 format
    df_gt = mm.io.loadtxt(os.path.join(dir_name, "gt.txt"))
    df_test = mm.io.loadtxt(os.path.join(dir_name, "test.txt"))
    # HOTA requires matching over a range of IoU thresholds
    th_list = np.arange(0.05, 0.99, 0.05)
    res_list = mm.utils.compare_to_groundtruth_reweighting(df_gt, df_test, "iou", distth=th_list)
    return res_list

# `data_dir` is the directory containing the gt.txt and test.txt files
acc = compute_motchallenge("data_dir")
mh = mm.metrics.create()

summary = mh.compute_many(
    acc,
    metrics=[
        "deta_alpha",
        "assa_alpha",
        "hota_alpha",
    ],
    generate_overall=True,  # only the `Overall` row (the average over thresholds) is needed
)
strsummary = mm.io.render_summary(
    summary.iloc[[-1], :],  # pass a list to preserve the `DataFrame` type
    formatters=mh.formatters,
    namemap={"hota_alpha": "HOTA", "assa_alpha": "ASSA", "deta_alpha": "DETA"},
)
print(strsummary)
"""
# data_dir=motmetrics/data/TUD-Campus
DETA ASSA HOTA
OVERALL 41.8% 36.9% 39.1%
# data_dir=motmetrics/data/TUD-Stadtmitte
DETA ASSA HOTA
OVERALL 39.2% 40.9% 39.8%
"""
```
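
For reference, `generate_overall=True` simply averages the per-threshold rows (see `deta_alpha_m`, `assa_alpha_m`, and `hota_alpha_m` in `motmetrics/metrics.py`). A minimal sketch of that reduction (`overall_hota` below is illustrative, not a library function):

```python
import numpy as np

def overall_hota(deta_per_alpha, assa_per_alpha):
    """Average per-threshold DetA/AssA/HOTA into overall scores."""
    deta = np.asarray(deta_per_alpha)
    assa = np.asarray(assa_per_alpha)
    # HOTA_alpha = sqrt(DetA_alpha * AssA_alpha); the overall score is the
    # plain mean over the threshold list, e.g. np.arange(0.05, 0.99, 0.05).
    hota = np.sqrt(deta * assa)
    return deta.mean(), assa.mean(), hota.mean()
```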

### Computing distances

Up until this point we assumed the pairwise object/hypothesis distances to be known. Usually this is not the case. You are mostly given either rectangles or points (centroids) of related objects. To compute a distance matrix from them you can use `motmetrics.distance` module as shown below.
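
The module's own example is collapsed in this view; as a minimal sketch, assuming MOT-style `(x, y, width, height)` rectangles, the `iou_matrix` helper from the diff below can be used like this:

```python
import numpy as np
import motmetrics as mm

objs = np.array([[0.0, 0.0, 1.0, 2.0]])  # ground-truth boxes, one per row
hyps = np.array([[0.0, 0.0, 1.0, 2.0],
                 [0.5, 0.0, 1.0, 1.0]])  # hypothesis boxes

# Pairwise 1 - IoU distances; pairs farther apart than max_iou become np.nan.
print(mm.distances.iou_matrix(objs, hyps, max_iou=0.5))
```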
11 changes: 8 additions & 3 deletions motmetrics/distances.py
@@ -80,7 +80,7 @@ def boxiou(a, b):
math_util.quiet_divide(i_vol, u_vol))


def iou_matrix(objs, hyps, max_iou=1.):
def iou_matrix(objs, hyps, max_iou=1., return_dist=True):
"""Computes 'intersection over union (IoU)' distance matrix between object and hypothesis rectangles.
The IoU is computed as
@@ -104,11 +104,14 @@ def iou_matrix(objs, hyps, max_iou=1.):
Maximum tolerable overlap distance. Object / hypothesis pairs
with larger distance are set to np.nan, signalling do-not-pair. Defaults
to 1.
return_dist : bool
If True, return the distance matrix. If False, return the similarity (IoU) matrix.
Returns
-------
C : NxK array
Distance matrix containing pairwise distances or np.nan. If `return_dist`
is False, the matrix contains the pairwise IoU instead.
"""

if np.size(objs) == 0 or np.size(hyps) == 0:
@@ -119,5 +122,7 @@
assert objs.shape[1] == 4
assert hyps.shape[1] == 4
iou = boxiou(objs[:, None], hyps[None, :])
dist = 1 - iou
return np.where(dist > max_iou, np.nan, dist)
if return_dist:
dist = 1 - iou
return np.where(dist > max_iou, np.nan, dist)
return iou
86 changes: 82 additions & 4 deletions motmetrics/metrics.py
@@ -9,14 +9,12 @@

# pylint: disable=redefined-outer-name

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import, division, print_function

from collections import OrderedDict
import inspect
import logging
import time
from collections import OrderedDict

import numpy as np
import pandas as pd
@@ -495,6 +493,22 @@ def num_predictions(df, pred_frequencies):
simple_add_func.append(num_predictions)


def num_gt_ids(df):
"""Number of unique gt ids."""
return df.full["OId"].dropna().unique().shape[0]


simple_add_func.append(num_gt_ids)


def num_dt_ids(df):
"""Number of unique dt ids."""
return df.full["HId"].dropna().unique().shape[0]


simple_add_func.append(num_dt_ids)


def track_ratios(df, obj_frequencies):
"""Ratio of assigned to total appearance count per unique object id."""
tracked = df.noraw[df.noraw.Type != "MISS"]["OId"].value_counts()
@@ -597,6 +611,64 @@ def recall_m(partials, num_detections, num_objects):
return math_util.quiet_divide(num_detections, num_objects)


def deta_alpha(df, num_detections, num_objects, num_false_positives):
r"""DeTA under specific threshold $\alpha$
Source: https://jonathonluiten.medium.com/how-to-evaluate-tracking-with-the-hota-metrics-754036d183e1
"""
del df # unused
return math_util.quiet_divide(num_detections, max(1, num_objects + num_false_positives))


def deta_alpha_m(partials):
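"""Average DetA over the per-threshold partial results."""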
res = 0
for v in partials:
res += v["deta_alpha"]
return math_util.quiet_divide(res, len(partials))


def assa_alpha(df, num_detections, num_gt_ids, num_dt_ids):
r"""AssA under specific threshold $\alpha$
Source: https://github.com/JonathonLuiten/TrackEval/blob/12c8791b303e0a0b50f753af204249e622d0281a/trackeval/metrics/hota.py#L107-L108
"""
max_gt_ids = int(df.noraw.OId.max())
max_dt_ids = int(df.noraw.HId.max())

match_count_array = np.zeros((max_gt_ids, max_dt_ids))
gt_id_counts = np.zeros((max_gt_ids, 1))
tracker_id_counts = np.zeros((1, max_dt_ids))
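# Count matched frames per (gt id, tracker id) pair, and total appearances per gt/tracker id.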
for idx in range(len(df.noraw)):
oid, hid = df.noraw.iloc[idx, 1], df.noraw.iloc[idx, 2]
if df.noraw.iloc[idx, 0] in ["SWITCH", "MATCH"]:
match_count_array[int(oid) - 1, int(hid) - 1] += 1
if oid == oid: # check non nan
gt_id_counts[int(oid) - 1] += 1
if hid == hid:
tracker_id_counts[0, int(hid) - 1] += 1

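# Association Jaccard per id pair, then a match-count-weighted average over TPs (cf. TrackEval).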
ass_a = match_count_array / np.maximum(1, gt_id_counts + tracker_id_counts - match_count_array)
return math_util.quiet_divide((ass_a * match_count_array).sum(), max(1, num_detections))


def assa_alpha_m(partials):
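"""Average AssA over the per-threshold partial results."""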
res = 0
for v in partials:
res += v["assa_alpha"]
return math_util.quiet_divide(res, len(partials))


def hota_alpha(df, deta_alpha, assa_alpha):
r"""HOTA under specific threshold $\alpha$"""
del df
return (deta_alpha * assa_alpha) ** 0.5


def hota_alpha_m(partials):
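"""Average HOTA over the per-threshold partial results."""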
res = 0
for v in partials:
res += v["hota_alpha"]
return math_util.quiet_divide(res, len(partials))


class DataFrameMap: # pylint: disable=too-few-public-methods
def __init__(self, full, raw, noraw, extra):
self.full = full
@@ -783,6 +855,8 @@ def create():
m.register(num_detections, formatter="{:d}".format)
m.register(num_objects, formatter="{:d}".format)
m.register(num_predictions, formatter="{:d}".format)
m.register(num_gt_ids, formatter="{:d}".format)
m.register(num_dt_ids, formatter="{:d}".format)
m.register(num_unique_objects, formatter="{:d}".format)
m.register(track_ratios)
m.register(mostly_tracked, formatter="{:d}".format)
@@ -802,6 +876,10 @@ def create():
m.register(idr, formatter="{:.1%}".format)
m.register(idf1, formatter="{:.1%}".format)

m.register(deta_alpha, formatter="{:.1%}".format)
m.register(assa_alpha, formatter="{:.1%}".format)
m.register(hota_alpha, formatter="{:.1%}".format)

return m


64 changes: 36 additions & 28 deletions motmetrics/mot.py
@@ -134,7 +134,7 @@ def _append_to_events(self, typestr, oid, hid, distance):
self._events['HId'].append(hid)
self._events['D'].append(distance)

def update(self, oids, hids, dists, frameid=None, vf=''):
def update(self, oids, hids, dists, frameid=None, vf='', similartiy_matrix=None, th=None):
"""Updates the accumulator with frame specific objects/detections.
This method generates events based on the following algorithm [1]:
@@ -202,6 +202,12 @@ def update(self, oids, hids, dists, frameid=None, vf=''):
self._append_to_indices(frameid, next(eid))
self._append_to_events('RAW', np.nan, np.nan, np.nan)

# Post-compute the distance matrix if necessary (e.g., for HOTA).
cost_for_matching = dists.copy()
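# Keep the original distances for the linear_sum_assignment step below;
# the thresholded `dists` computed next decides which pairs may count as matches.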
if similartiy_matrix is not None and th is not None:
dists = 1 - similartiy_matrix
dists = np.where(similartiy_matrix < th - np.finfo("float").eps, np.nan, dists)

# There must be at least one RAW event per object and hypothesis.
# Record all finite distances as RAW events.
valid_i, valid_j = np.where(np.isfinite(dists))
@@ -224,34 +230,36 @@

if oids.size * hids.size > 0:
# 1. Try to re-establish tracks from correspondences in last update
for i in range(oids.shape[0]):
# No need to check oids_masked[i] here.
if not (oids[i] in self.m and self.last_match[oids[i]] == self.last_update_frameid):
continue

hprev = self.m[oids[i]]
j, = np.where(~hids_masked & (hids == hprev))
if j.shape[0] == 0:
continue
j = j[0]
# Skip re-establishing tracks when post-processing is performed (e.g., for HOTA)
if similartiy_matrix is None or th is None:
for i in range(oids.shape[0]):
# No need to check oids_masked[i] here.
if not (oids[i] in self.m and self.last_match[oids[i]] == self.last_update_frameid):
continue

hprev = self.m[oids[i]]
j, = np.where(~hids_masked & (hids == hprev))
if j.shape[0] == 0:
continue
j = j[0]

if np.isfinite(dists[i, j]):
o = oids[i]
h = hids[j]
oids_masked[i] = True
hids_masked[j] = True
self.m[oids[i]] = hids[j]

if np.isfinite(dists[i, j]):
o = oids[i]
h = hids[j]
oids_masked[i] = True
hids_masked[j] = True
self.m[oids[i]] = hids[j]

self._append_to_indices(frameid, next(eid))
self._append_to_events('MATCH', oids[i], hids[j], dists[i, j])
self.last_match[o] = frameid
self.hypHistory[h] = frameid
self._append_to_indices(frameid, next(eid))
self._append_to_events('MATCH', oids[i], hids[j], dists[i, j])
self.last_match[o] = frameid
self.hypHistory[h] = frameid

# 2. Try to match remaining objects/hypotheses
dists[oids_masked, :] = np.nan
dists[:, hids_masked] = np.nan

rids, cids = linear_sum_assignment(dists)
rids, cids = linear_sum_assignment(cost_for_matching)

for i, j in zip(rids, cids):
if not np.isfinite(dists[i, j]):
@@ -265,10 +273,10 @@
# self.m[o] != h and
# abs(frameid - self.last_occurrence[o]) <= self.max_switch_time)
switch_condition = (
o in self.m and
self.m[o] != h and
o in self.last_occurrence and # Ensure the object ID 'o' is initialized in last_occurrence
abs(frameid - self.last_occurrence[o]) <= self.max_switch_time
o in self.m and
self.m[o] != h and
o in self.last_occurrence and # Ensure the object ID 'o' is initialized in last_occurrence
abs(frameid - self.last_occurrence[o]) <= self.max_switch_time
)
is_switch = switch_condition
######################################################################
@@ -471,7 +479,7 @@ def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, upd
copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore')
infos['hid_map'] = hid_map

r = pd.concat([r,copy])
r = pd.concat([r, copy])
mapping_infos.append(infos)

if return_mappings:
46 changes: 46 additions & 0 deletions motmetrics/tests/test_metrics.py
@@ -537,3 +537,49 @@ def my_motp(df: mm.metrics.DataFrameMap):
)

print(summary)


def test_hota():
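"""Check DetA/AssA/HOTA against TrackEval reference values on the TUD sequences."""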
TUD_golden_ans = { # From TrackEval
"TUD-Campus": {"hota": 0.3913974378451139, "deta": 0.418047030142763, "assa": 0.36912068120832836},
"TUD-Stadtmitte": {"hota": 0.3978490169927877, "deta": 0.3922675723693166, "assa": 0.4088407518112996}
}

DATA_DIR = "motmetrics/data"

def compute_motchallenge(dname):
df_gt = mm.io.loadtxt(os.path.join(dname, "gt.txt"))
df_test = mm.io.loadtxt(os.path.join(dname, "test.txt"))
th_list = np.arange(0.05, 0.99, 0.05)
res_list = mm.utils.compare_to_groundtruth_reweighting(df_gt, df_test, "iou", distth=th_list)
return res_list

accs = [compute_motchallenge(os.path.join(DATA_DIR, d)) for d in TUD_golden_ans.keys()]
mh = mm.metrics.create()

for dataset_idx, dname in enumerate(TUD_golden_ans.keys()):
deta = []
assa = []
hota = []
for alpha_idx in range(len(accs[dataset_idx])):
summary = mh.compute_many(
[accs[dataset_idx][alpha_idx]],
metrics=[
"deta_alpha",
"assa_alpha",
"hota_alpha",
],
names=[dname],
generate_overall=False,
)
deta.append(float(summary["deta_alpha"].iloc[0]))
assa.append(float(summary["assa_alpha"].iloc[0]))
hota.append(float(summary["hota_alpha"].iloc[0]))

deta = sum(deta) / len(deta)
assa = sum(assa) / len(assa)
hota = sum(hota) / len(hota)

assert deta == approx(TUD_golden_ans[dname]["deta"])
assert assa == approx(TUD_golden_ans[dname]["assa"])
assert hota == approx(TUD_golden_ans[dname]["hota"])