From e9a08bfd06a754b6c4dc987684b473726e475557 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Fri, 26 Oct 2018 19:27:26 +0900 Subject: [PATCH 01/16] initial commit for tracking benchmark --- .../benchmark/evaluate_tracking.py | 799 ++++++++++++++++++ .../benchmark/parse_tracklet_xml.py | 366 ++++++++ .../benchmark/rotated_rect.py | 29 + .../include/imm_ukf_pda.h | 11 + .../launch/benchmark.launch | 133 +++ .../nodes/imm_ukf_pda/imm_ukf_pda.cpp | 81 +- 6 files changed, 1418 insertions(+), 1 deletion(-) create mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py create mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py create mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py create mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py new file mode 100644 index 00000000000..3cca68b4f98 --- /dev/null +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py @@ -0,0 +1,799 @@ +import sys,os,copy,math +from munkres import Munkres +from rotated_rect import RotatedRect +from parse_tracklet_xml import dump_frames_text_from_tracklets +from collections import defaultdict +from time import gmtime, strftime +try: + from ordereddict import OrderedDict # can be installed using pip +except: + from collections import OrderedDict # only included from python 2.7 on +from tqdm import tqdm + +class tData: + """ + Utility class to load data. + """ + + def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\ + obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\ + X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1): + """ + Constructor, initializes the object given the parameters. + """ + + # init object data + self.frame = frame + self.track_id = track_id + self.obj_type = obj_type + self.truncation = truncation + self.occlusion = occlusion + self.obs_angle = obs_angle + self.x1 = x1 + self.y1 = y1 + self.x2 = x2 + self.y2 = y2 + self.w = w + self.h = h + self.l = l + self.X = X + self.Y = Y + self.Z = Z + self.yaw = yaw + self.score = score + self.ignored = False + self.valid = False + self.tracker = -1 + + def __str__(self): + """ + Print read data. 
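+        One "attribute: value" line is produced per field via vars(self).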
+ """ + + attrs = vars(self) + return '\n'.join("%s: %s" % item for item in attrs.items()) + + +class trackingEvaluation(object): + """ tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall) + MOTA - Multi-object tracking accuracy in [0,100] + MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D) + MOTAL - Multi-object tracking accuracy in [0,100] with log10(id-switches) + + id-switches - number of id switches + fragments - number of fragmentations + + MT, PT, ML - number of mostly tracked, partially tracked and mostly lost trajectories + + recall - recall = percentage of detected targets + precision - precision = percentage of correctly detected targets + FAR - number of false alarms per frame + falsepositives - number of false positives (FP) + missed - number of missed targets (FN) + """ + + def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_truncation = 0, min_height = 25, max_occlusion = 2): + # data and parameter + self.n_frames = data_num + self.result_data = -1 + + # statistics and numbers for evaluation + self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives + self.n_igt = 0 # number of ignored ground truth detections + self.n_gts = [] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE + self.n_igts = [] # number of ground ignored truth detections PER SEQUENCE + self.n_gt_trajectories = 0 + self.n_gt_seq = [] + self.n_tr = 0 # number of tracker detections minus ignored tracker detections + self.n_trs = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE + self.n_itr = 0 # number of ignored tracker detections + self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE + # self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored + # self.n_tr_trajectories = 0 + self.n_tr_seq = [] + self.MOTA = 0 + self.MOTP = 0 + self.MOTAL = 0 + self.MODA = 0 + self.MODP = 0 + self.MODP_t = [] + self.recall = 0 + self.precision = 0 + self.F1 = 0 + self.FAR = 0 + self.total_cost = 0 + self.itp = 0 # number of ignored true positives + self.tp = 0 # number of true positives including ignored true positives! + self.fn = 0 # number of false negatives WITHOUT ignored false negatives + self.ifn = 0 # number of ignored false negatives + self.fp = 0 # number of false positives + # a bit tricky, the number of ignored false negatives and ignored true positives + # is subtracted, but if both tracker detection and ground truth detection + # are ignored this number is added again to avoid double counting + self.fps = [] # above PER SEQUENCE + self.mme = 0 + self.fragments = 0 + self.id_switches = 0 + self.MT = 0 + self.PT = 0 + self.ML = 0 + + self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics + self.max_truncation = max_truncation # maximum truncation of an object for evaluation + self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation + self.min_height = min_height # minimum height of an object for evaluation + self.n_sample_points = 500 + + # this should be enough to hold all groundtruth trajectories + # is expanded if necessary and reduced in any case + + def loadTrackedData(self, result_file_path): + """ + Helper function to load tracker data. 
+ """ + + try: + if(os.path.exists(result_file_path)): + if not self._loadData(result_file_path, loading_groundtruth=False): + return False + else: + print("Error: There is no result data file") + raise + except IOError: + return False + return True + + def loadGroundtruth(self, gt_file_path): + """ + Helper function to load ground truth. + """ + + try: + if(os.path.exists(gt_file_path)): + self._loadData(gt_file_path, loading_groundtruth=True) + else: + print("Error: There is no groundtruth file") + raise + except IOError: + return False + return True + + def _loadData(self, file_path, min_score=-1000, loading_groundtruth=False): + """ + Generic loader for ground truth and tracking data. + Use loadGroundtruth() or loadTracker() to load this data. + Loads detections in KITTI format from textfiles. + """ + # construct objectDetections object to hold detection data + t_data = tData() + data = [] + eval_3d = True + eval_2d = False + + n_trajectories = 0 + + i = 0 + f = open(file_path, "r") + + f_data = [[] for x in range(self.n_frames)] # current set has only 1059 entries, sufficient length is checked anyway + ids = [] + n_in_seq = 0 + id_frame_cache = [] + + for line in f: + # KITTI tracking benchmark data format: + # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry) + line = line.strip() + fields = line.split(" ") + # print(fields[0]) + + # get fields from table + t_data.frame = int(float(fields[0])) # frame + t_data.track_id = int(float(fields[1])) # id + t_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...] + t_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2] + t_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2] + t_data.obs_angle = float(fields[5]) # observation angle [rad] + t_data.x1 = float(fields[6]) # left [px] + t_data.y1 = float(fields[7]) # top [px] + t_data.x2 = float(fields[8]) # right [px] + t_data.y2 = float(fields[9]) # bottom [px] + t_data.h = float(fields[10]) # height [m] + t_data.w = float(fields[11]) # width [m] + t_data.l = float(fields[12]) # length [m] + t_data.X = float(fields[13]) # X [m] + t_data.Y = float(fields[14]) # Y [m] + t_data.Z = float(fields[15]) # Z [m] + t_data.yaw = float(fields[16]) # yaw angle [rad] + + # do not consider objects marked as invalid + # do not consider objects marked as invalid + if t_data.track_id is -1 and t_data.obj_type != "DontCare": + continue + idx = t_data.frame + + if t_data.frame >= self.n_frames: + continue + + try: + id_frame = (t_data.frame,t_data.track_id) + if id_frame in id_frame_cache and not loading_groundtruth: + print("track ids are not unique for frame %d" % (t_data.frame)) + print("track id %d occured at least twice for this frame" % t_data.track_id) + print("Exiting...") + #continue # this allows to evaluate non-unique result files + return False + id_frame_cache.append(id_frame) + f_data[t_data.frame].append(copy.copy(t_data)) + except: + raise + + if t_data.track_id not in ids and t_data.obj_type!="DontCare": + ids.append(t_data.track_id) + n_trajectories +=1 + f.close() + + + if not loading_groundtruth: + self.n_tr_trajectories=n_trajectories + self.result_data = f_data + else: + # split ground truth and DontCare areas + self.dcareas = [] + self.groundtruth = [] + s_g, s_dc = [],[] + for f in range(len(f_data)): + frame_gts = f_data[f] + g,dc = [],[] + for gg in frame_gts: + if gg.obj_type=="dontcare": + dc.append(gg) + else: + g.append(gg) + s_g.append(g) + s_dc.append(dc) + self.dcareas = s_dc + self.groundtruth = s_g + 
self.n_gt_trajectories=n_trajectories + return True + + def createEvalDir(self): + """ + Creates directory to store evaluation results and data for visualization. + """ + + self.eval_dir = "./results/" + if not os.path.exists(self.eval_dir): + print ("create directory:", self.eval_dir) + os.makedirs(self.eval_dir) + print ("done") + + def compute3rdPartyMetrics(self): + """ + Computes the metrics defined in + - Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics + MOTA, MOTAL, MOTP + - Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows + MT/PT/ML + """ + # construct Munkres object for Hungarian Method association + hm = Munkres() + max_cost = 1e9 + + # go through all frames and associate ground truth and tracker results + # groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections + seq_gt = self.groundtruth + seq_dc = self.dcareas # don't care areas + seq_result_data = self.result_data + seq_trajectories = defaultdict(list) + seq_ignored = defaultdict(list) + + # statistics over the current sequence, check the corresponding + # variable comments in __init__ to get their meaning + seqtp = 0 + seqitp = 0 + seqfn = 0 + seqifn = 0 + seqfp = 0 + seqigt = 0 + seqitr = 0 + + last_frame_ids = [[],[]] + + n_gts = 0 + n_trs = 0 + + + for i_frame in tqdm(range(len(seq_gt))): + frame_gts = seq_gt[i_frame] + frame_dcs = seq_dc[i_frame] + + frame_results = seq_result_data[i_frame] + # counting total number of ground truth and tracker objects + self.n_gt += len(frame_gts) + self.n_tr += len(frame_results) + + n_gts += len(frame_gts) + n_trs += len(frame_results) + + # use hungarian method to associate, using boxoverlap 0..1 as cost + # build cost matrix + cost_matrix = [] + frame_ids = [[],[]] + # loop over ground truth objects in one frame + for gt in frame_gts: + # print("location ", gt.X, gt.Y) + # save current ids + frame_ids[0].append(gt.track_id) + frame_ids[1].append(-1) + gt.tracker = -1 + gt.id_switch = 0 + gt.fragmentation = 0 + cost_row = [] + # loop over tracked objects in one frame + # print("gt", gt.X, gt.Y, gt.l, gt.w, gt.yaw) + for result in frame_results: + # overlap == 1 is cost ==0 + # RotatedRect(cx, cy, l, w, angle) + # todo might be wrong: euclidean cluster messed up calculating w, l + # Better implementation if switching w and l based on box's quartenion + # and self.min_overlap might be too big + r1 = RotatedRect(gt.X, gt.Y, gt.l, gt.w, gt.yaw) + r2 = RotatedRect(result.X, result.Y, result.l, result.w, result.yaw) + iou = r1.intersection_over_union(r2) + # print("iou ",iou) + cost = 1- iou + # gating for boxoverlap + if cost<=self.min_overlap: + cost_row.append(cost) + else: + cost_row.append(max_cost) # = 1e9 + # return + cost_matrix.append(cost_row) + # all ground truth trajectories are initially not associated + # extend groundtruth trajectories lists (merge lists) + seq_trajectories[gt.track_id].append(-1) + seq_ignored[gt.track_id].append(False) + + if len(frame_gts) is 0: + cost_matrix=[[]] + # associate + association_matrix = hm.compute(cost_matrix) + + # tmp variables for sanity checks and MODP computation + tmptp = 0 + tmpfp = 0 + tmpfn = 0 + tmpc = 0 # this will sum up the overlaps for all true positives + tmpcs = [0]*len(frame_gts) # this will save the overlaps for all true positives + # the reason is that some true positives might be ignored + # later such that the corrsponding overlaps can + # be subtracted from tmpc for MODP computation + + # mapping 
for tracker ids and ground truth ids + for row,col in association_matrix: + # apply gating on boxoverlap + c = cost_matrix[row][col] + if c < max_cost: + frame_gts[row].tracker = frame_results[col].track_id + frame_ids[1][row] = frame_results[col].track_id + frame_results[col].valid = True + frame_gts[row].distance = c + self.total_cost += 1-c + tmpc += 1-c + tmpcs[row] = 1-c + seq_trajectories[frame_gts[row].track_id][-1] = frame_results[col].track_id + + # true positives are only valid associations + self.tp += 1 + tmptp += 1 + else: + frame_gts[row].tracker = -1 + self.fn += 1 + tmpfn += 1 + + # associate tracker and DontCare areas + # ignore tracker in neighboring classes + nignoredtracker = 0 # number of ignored tracker detections + ignoredtrackers = dict() # will associate the track_id with -1 + # if it is not ignored and 1 if it is + # ignored; + # this is used to avoid double counting ignored + # cases, see the next loop + + # check for ignored FN/TP (truncation or neighboring object class) + nignoredfn = 0 # the number of ignored false negatives + nignoredtp = 0 # the number of ignored true positives + # nignoredpairs = 0 # the number of ignored pairs, i.e. a true positive + # which is ignored but where the associated tracker + # detection has already been ignored + + gi = 0 + for gt in frame_gts: + if gt.tracker < 0: + if gt.occlusion>self.max_occlusion or gt.truncation>self.max_truncation: + seq_ignored[gt.track_id][-1] = True + gt.ignored = True + nignoredfn += 1 + + elif gt.tracker>=0: + if gt.occlusion>self.max_occlusion or gt.truncation>self.max_truncation: + seq_ignored[gt.track_id][-1] = True + gt.ignored = True + nignoredtp += 1 + + # for computing MODP, the overlaps from ignored detections + # are subtracted + tmpc -= tmpcs[gi] + gi += 1 + + # the below might be confusion, check the comments in __init__ + # to see what the individual statistics represent + + # correct TP by number of ignored TP due to truncation + # ignored TP are shown as tracked in visualization + tmptp -= nignoredtp + + # count the number of ignored true positives + self.itp += nignoredtp + + # adjust the number of ground truth objects considered + self.n_gt -= (nignoredfn + nignoredtp) + + # count the number of ignored ground truth objects + self.n_igt += nignoredfn + nignoredtp + + # count the number of ignored tracker objects + self.n_itr += nignoredtracker + + # false negatives = associated gt bboxes exceding association threshold + non-associated gt bboxes + tmpfn += len(frame_gts)-len(association_matrix)-nignoredfn + self.fn += len(frame_gts)-len(association_matrix)-nignoredfn + self.ifn += nignoredfn + + # false positives = tracker bboxes - associated tracker bboxes + # mismatches (mme_t) + tmpfp += len(frame_results) - tmptp - nignoredtracker - nignoredtp + self.fp += len(frame_results) - tmptp - nignoredtracker - nignoredtp + + # update sequence data + seqtp += tmptp + seqitp += nignoredtp + seqfp += tmpfp + seqfn += tmpfn + seqifn += nignoredfn + seqigt += nignoredfn + nignoredtp + seqitr += nignoredtracker + + # sanity checks + # - the number of true positives minues ignored true positives + # should be greater or equal to 0 + # - the number of false negatives should be greater or equal to 0 + # - the number of false positives needs to be greater or equal to 0 + # otherwise ignored detections might be counted double + # - the number of counted true positives (plus ignored ones) + # and the number of counted false negatives (plus ignored ones) + # should match the total number of ground 
truth objects + # - the number of counted true positives (plus ignored ones) + # and the number of counted false positives + # plus the number of ignored tracker detections should + # match the total number of tracker detections; note that + # nignoredpairs is subtracted here to avoid double counting + # of ignored detection sin nignoredtp and nignoredtracker + if tmptp<0: + print (tmptp, nignoredtp) + raise NameError("Something went wrong! TP is negative") + if tmpfn<0: + print (tmpfn, len(frame_gts), len(association_matrix), nignoredfn, nignoredpairs) + raise NameError("Something went wrong! FN is negative") + if tmpfp<0: + print (tmpfp, len(frame_results), tmptp, nignoredtracker, nignoredtp, nignoredpairs) + raise NameError("Something went wrong! FP is negative") + if tmptp + tmpfn is not len(frame_gts)-nignoredfn-nignoredtp: + print ("seqidx", seq_idx) + print ("frame ", f) + print ("TP ", tmptp) + print ("FN ", tmpfn) + print ("FP ", tmpfp) + print ("nGT ", len(frame_gts)) + print ("nAss ", len(association_matrix)) + print ("ign GT", nignoredfn) + print ("ign TP", nignoredtp) + raise NameError("Something went wrong! nGroundtruth is not TP+FN") + if tmptp+tmpfp+nignoredtp+nignoredtracker is not len(frame_results): + print (seq_idx, f, len(frame_results), tmptp, tmpfp) + print (len(association_matrix), association_matrix) + raise NameError("Something went wrong! nTracker is not TP+FP") + + # loop over ground truth track_id + # check for id switches or fragmentations + for i,gt_id in enumerate(frame_ids[0]): + if gt_id in last_frame_ids[0]: + idx = last_frame_ids[0].index(gt_id) + tid = frame_ids[1][i] + lid = last_frame_ids[1][idx] + if tid != lid and lid != -1 and tid != -1: + if frame_gts[i].truncation=0 else 0 + lgt = 0 if ign_g[0] else 1 + for f in range(1,len(g)): + if ign_g[f]: + last_id = -1 + continue + lgt+=1 + if last_id != g[f] and last_id != -1 and g[f] != -1 and g[f-1] != -1: + tmpId_switches += 1 + self.id_switches += 1 + if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1: + tmpFragments += 1 + self.fragments += 1 + if g[f] != -1: + tracked += 1 + last_id = g[f] + # handle last frame; tracked state is handled in for loop (g[f]!=-1) + if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]: + tmpFragments += 1 + self.fragments += 1 + + # compute MT/PT/ML + tracking_ratio = tracked / float(len(g) - sum(ign_g)) + if tracking_ratio > 0.8: + tmpMT += 1 + self.MT += 1 + elif tracking_ratio < 0.2: + tmpML += 1 + self.ML += 1 + else: # 0.2 <= tracking_ratio <= 0.8 + tmpPT += 1 + self.PT += 1 + + if (self.n_gt_trajectories-n_ignored_tr_total)==0: + self.MT = 0. + self.PT = 0. + self.ML = 0. + else: + self.MT /= float(self.n_gt_trajectories-n_ignored_tr_total) + self.PT /= float(self.n_gt_trajectories-n_ignored_tr_total) + self.ML /= float(self.n_gt_trajectories-n_ignored_tr_total) + + # precision/recall etc. + if (self.fp+self.tp)==0 or (self.tp+self.fn)==0: + self.recall = 0. + self.precision = 0. + else: + self.recall = self.tp/float(self.tp+self.fn) + self.precision = self.tp/float(self.fp+self.tp) + if (self.recall+self.precision)==0: + self.F1 = 0. 
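+        # F1 is the harmonic mean of precision and recall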
+ else: + self.F1 = 2.*(self.precision*self.recall)/(self.precision+self.recall) + if self.n_frames==0: + self.FAR = "n/a" + else: + self.FAR = self.fp/float(self.n_frames) + + # compute CLEARMOT + if self.n_gt==0: + self.MOTA = -float("inf") + self.MODA = -float("inf") + else: + self.MOTA = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt) + self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt) + if self.tp==0: + self.MOTP = float("inf") + else: + self.MOTP = self.total_cost / float(self.tp) + if self.n_gt!=0: + if self.id_switches==0: + self.MOTAL = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt) + else: + self.MOTAL = 1 - (self.fn + self.fp + math.log10(self.id_switches))/float(self.n_gt) + else: + self.MOTAL = -float("inf") + if self.n_frames==0: + self.MODP = "n/a" + else: + self.MODP = sum(self.MODP_t)/float(self.n_frames) + return True + + def printEntry(self, key, val,width=(70,10)): + """ + Pretty print an entry in a table fashion. + """ + + s_out = key.ljust(width[0]) + if type(val)==int: + s = "%%%dd" % width[1] + s_out += s % val + elif type(val)==float: + s = "%%%df" % (width[1]) + s_out += s % val + else: + s_out += ("%s"%val).rjust(width[1]) + return s_out + + def createSummary(self): + """ + Generate and mail a summary of the results. + If mailpy.py is present, the summary is instead printed. + """ + + summary = "" + + summary += "tracking evaluation summary".center(80,"=") + "\n" + summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n" + summary += self.printEntry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n" + summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n" + summary += self.printEntry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n" + summary += self.printEntry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n" + summary += "\n" + summary += self.printEntry("Recall", self.recall) + "\n" + summary += self.printEntry("Precision", self.precision) + "\n" + summary += self.printEntry("F1", self.F1) + "\n" + summary += self.printEntry("False Alarm Rate", self.FAR) + "\n" + summary += "\n" + summary += self.printEntry("Mostly Tracked", self.MT) + "\n" + summary += self.printEntry("Partly Tracked", self.PT) + "\n" + summary += self.printEntry("Mostly Lost", self.ML) + "\n" + summary += "\n" + summary += self.printEntry("True Positives", self.tp) + "\n" + summary += self.printEntry("Ignored True Positives", self.itp) + "\n" + summary += self.printEntry("False Positives", self.fp) + "\n" + summary += self.printEntry("False Negatives", self.fn) + "\n" + summary += self.printEntry("Ignored False Negatives", self.ifn) + "\n" + summary += self.printEntry("Missed Targets", self.fn) + "\n" + summary += self.printEntry("ID-switches", self.id_switches) + "\n" + summary += self.printEntry("Fragmentations", self.fragments) + "\n" + summary += "\n" + summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n" + summary += self.printEntry("Ignored Ground Truth Objects", self.n_igt) + "\n" + summary += self.printEntry("Ground Truth Trajectories", self.n_gt_trajectories) + "\n" + summary += "\n" + summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n" + summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n" + summary += self.printEntry("Tracker Trajectories", self.n_tr_trajectories) + "\n" + summary += "="*80 + + return summary + + def saveToStats(self): + """ + Save the statistics in a 
whitespace separate file. + """ + + # create pretty summary + summary = self.createSummary() + + # mail or print the summary. + print(summary) + # mail.msg(summary) + + # write summary to file summary_cls.txt + filename = os.path.join("./results", "summary.txt" ) + dump = open(filename, "w+") + # print>>dump, summary + print(summary, end="", file=dump) + dump.close() + + +def evaluate(velo_data_num, result_file_path, gt_file_path): + """ + Entry point for evaluation, will load the data and start evaluation for + CAR and PEDESTRIAN if available. + """ + + # start evaluation and instanciated eval object + e = trackingEvaluation(velo_data_num) + # load tracker data and check provided classes + try: + if not e.loadTrackedData(result_file_path): + "failed to load tracked data" + return + print("Loading Results - Success") + print("Size of result data ", len(e.result_data)) + except: + print("Caught exception while loading result data.") + return + # load groundtruth data for this class + if not e.loadGroundtruth(gt_file_path): + raise ValueError("Ground truth not found.") + print("Loading Groundtruth - Success") + print("Size of ground truth data ", len(e.groundtruth)) + # sanity checks + if len(e.groundtruth) is not len(e.result_data): + print("The uploaded data does not provide results for every sequence.") + return False + print("Loaded %d Sequences." % len(e.groundtruth)) + print("Start Evaluation...") + # create needed directories, evaluate and save stats + try: + e.createEvalDir() + except: + print("Caught exception while creating results.") + if e.compute3rdPartyMetrics(): + print("Finished evaluation") + e.saveToStats() + else: + print("There seem to be no true positives or false positives at all in the submitted data.") + + # finish + print("Thank you for participating in our benchmark!") + return True + +# def convertRawDataGTtoTrackingGT(): +# + +import datetime +######################################################################### +# entry point of evaluation script +if __name__ == "__main__": + # evaluate results and send notification email to user + # base_dir = '/home/kosuke/hdd/kitti' + # date = '2011_09_26' + # drive = '0005' + # partial_tracklet_path = '{}/{}_drive_{}_sync/tracklet_labels.xml'.format(date, date, drive) + # partial_velo_path = '{}/{}_drive_{}_sync/velodyne_points/data'.format(date, date, drive) + # tracklet_path = os.path.join(base_dir, partial_tracklet_path) + # velo_dir = os.path.join(base_dir, partial_velo_path) + # gt_file_path = './results/gt_frame.txt' + # result_file_path = './results/result.txt' + # velo_data_num = len(os.listdir(velo_dir)) + # dump_frames_text_from_tracklets(velo_data_num, tracklet_path, gt_file_path) + # success = evaluate(velo_data_num, result_file_path, gt_file_path) + + + # print (datetime.datetime.now()) + if datetime.datetime.now().minute < 10: + minute_str = str(0) + str(datetime.datetime.now().minute) + else: + minute_str = str(datetime.datetime.now().minute) + if datetime.datetime.now().second < 10: + second_str = str(0) + str(datetime.datetime.now().second) + else: + second_str = str(datetime.datetime.now().second) + file_name = str(datetime.datetime.now().year) + "_" + \ + str(datetime.datetime.now().month) + \ + str(datetime.datetime.now().day) + "_" + \ + str(datetime.datetime.now().hour) + \ + minute_str + second_str + print (file_name) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py 
b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py new file mode 100644 index 00000000000..7f2d5a93d85 --- /dev/null +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py @@ -0,0 +1,366 @@ +#!/usr/bin/env python +""" +parse XML files containing tracklet info for kitti data base (raw data section) +(http://cvlibs.net/datasets/kitti/raw_data.php) + +No guarantees that this code is correct, usage is at your own risk! + +created by Christian Herdtweck, Max Planck Institute for Biological Cybernetics + (christian.herdtweck@tuebingen.mpg.de) +updated by Alex Staravoitau + +requires numpy! + +example usage: + import parseTrackletXML as xmlParser + kittiDir = '/path/to/kitti/data' + drive = '2011_09_26_drive_0001' + xmlParser.example(kittiDir, drive) +or simply on command line: + python parseTrackletXML.py +""" + +# Version History: +# 20/3/17 Alex Staravoitau: updated for Python 3, cleaned up docstring comments, fixed tabulation; +# 4/7/12 Christian Herdtweck: seems to work with a few random test xml tracklet files; +# converts file contents to ElementTree and then to list of Tracklet objects; +# Tracklet objects have str and iter functions +# 5/7/12 ch: added constants for state, occlusion, truncation and added consistency checks +# 30/1/14 ch: create example function from example code + +from sys import argv as cmdLineArgs +from xml.etree.ElementTree import ElementTree +import numpy as np +from warnings import warn +import os + +STATE_UNSET = 0 +STATE_INTERP = 1 +STATE_LABELED = 2 +stateFromText = {'0':STATE_UNSET, '1':STATE_INTERP, '2':STATE_LABELED} + +OCC_UNSET = 255 # -1 as uint8 +OCC_VISIBLE = 0 +OCC_PARTLY = 1 +OCC_FULLY = 2 +occFromText = {'-1':OCC_UNSET, '0':OCC_VISIBLE, '1':OCC_PARTLY, '2':OCC_FULLY} + +TRUNC_UNSET = 255 # -1 as uint8, but in xml files the value '99' is used! +TRUNC_IN_IMAGE = 0 +TRUNC_TRUNCATED = 1 +TRUNC_OUT_IMAGE = 2 +TRUNC_BEHIND_IMAGE = 3 +truncFromText = {'99':TRUNC_UNSET, '0':TRUNC_IN_IMAGE, '1':TRUNC_TRUNCATED, '2':TRUNC_OUT_IMAGE, '3': TRUNC_BEHIND_IMAGE} + + +class Tracklet(object): + """ + Representation an annotated object track + + Tracklets are created in function parseXML and can most conveniently used as follows: + + for trackletObj in parseXML(trackletFile): + for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj: + ... your code here ... + #end: for all frames + #end: for all tracklets + + absoluteFrameNumber is in range [firstFrame, firstFrame+nFrames[ + amtOcclusion and amtBorders could be None + + You can of course also directly access the fields objType (string), size (len-3 ndarray), firstFrame/nFrames (int), + trans/rots (nFrames x 3 float ndarrays), states/truncs (len-nFrames uint8 ndarrays), occs (nFrames x 2 uint8 ndarray), + and for some tracklets amtOccs (nFrames x 2 float ndarray) and amtBorders (nFrames x 3 float ndarray). 
The last two + can be None if the xml file did not include these fields in poses + """ + + objectType = None + size = None # len-3 float array: (height, width, length) + firstFrame = None + trans = None # n x 3 float array (x,y,z) + rots = None # n x 3 float array (x,y,z) + states = None # len-n uint8 array of states + occs = None # n x 2 uint8 array (occlusion, occlusion_kf) + truncs = None # len-n uint8 array of truncation + amtOccs = None # None or (n x 2) float array (amt_occlusion, amt_occlusion_kf) + amtBorders = None # None (n x 3) float array (amt_border_l / _r / _kf) + nFrames = None + + def __init__(self): + """ + Creates Tracklet with no info set + """ + self.size = np.nan*np.ones(3, dtype=float) + + def __str__(self): + """ + Returns human-readable string representation of tracklet object + + called implicitly in + print trackletObj + or in + text = str(trackletObj) + """ + return '[Tracklet over {0} frames for {1}]'.format(self.nFrames, self.objectType) + + def __iter__(self): + """ + Returns an iterator that yields tuple of all the available data for each frame + + called whenever code iterates over a tracklet object, e.g. in + for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj: + ...do something ... + or + trackDataIter = iter(trackletObj) + """ + if self.amtOccs is None: + return zip(self.trans, self.rots, self.states, self.occs, self.truncs, + itertools.repeat(None), itertools.repeat(None), range(self.firstFrame, self.firstFrame+self.nFrames)) + else: + return zip(self.trans, self.rots, self.states, self.occs, self.truncs, + self.amtOccs, self.amtBorders, range(self.firstFrame, self.firstFrame+self.nFrames)) +#end: class Tracklet + + +def parseXML(trackletFile): + """ + Parses tracklet xml file and convert results to list of Tracklet objects + + :param trackletFile: name of a tracklet xml file + :returns: list of Tracklet objects read from xml file + """ + + # convert tracklet XML data to a tree structure + eTree = ElementTree() + print('Parsing tracklet file', trackletFile) + with open(trackletFile) as f: + eTree.parse(f) + + # now convert output to list of Tracklet objects + trackletsElem = eTree.find('tracklets') + tracklets = [] + trackletIdx = 0 + nTracklets = None + for trackletElem in trackletsElem: + #print 'track:', trackletElem.tag + if trackletElem.tag == 'count': + nTracklets = int(trackletElem.text) + print('File contains', nTracklets, 'tracklets') + elif trackletElem.tag == 'item_version': + pass + elif trackletElem.tag == 'item': + #print 'tracklet {0} of {1}'.format(trackletIdx, nTracklets) + # a tracklet + newTrack = Tracklet() + isFinished = False + hasAmt = False + frameIdx = None + for info in trackletElem: + #print 'trackInfo:', info.tag + if isFinished: + raise ValueError('more info on element after finished!') + if info.tag == 'objectType': + newTrack.objectType = info.text + elif info.tag == 'h': + newTrack.size[0] = float(info.text) + elif info.tag == 'w': + newTrack.size[1] = float(info.text) + elif info.tag == 'l': + newTrack.size[2] = float(info.text) + elif info.tag == 'first_frame': + newTrack.firstFrame = int(info.text) + elif info.tag == 'poses': + # this info is the possibly long list of poses + for pose in info: + #print 'trackInfoPose:', pose.tag + if pose.tag == 'count': # this should come before the others + if newTrack.nFrames is not None: + raise ValueError('there are several pose lists for a single track!') + elif frameIdx is not None: + raise ValueError('?!') + 
newTrack.nFrames = int(pose.text) + newTrack.trans = np.nan * np.ones((newTrack.nFrames, 3), dtype=float) + newTrack.rots = np.nan * np.ones((newTrack.nFrames, 3), dtype=float) + newTrack.states = np.nan * np.ones(newTrack.nFrames, dtype='uint8') + newTrack.occs = np.nan * np.ones((newTrack.nFrames, 2), dtype='uint8') + newTrack.truncs = np.nan * np.ones(newTrack.nFrames, dtype='uint8') + newTrack.amtOccs = np.nan * np.ones((newTrack.nFrames, 2), dtype=float) + newTrack.amtBorders = np.nan * np.ones((newTrack.nFrames, 3), dtype=float) + frameIdx = 0 + elif pose.tag == 'item_version': + pass + elif pose.tag == 'item': + # pose in one frame + if frameIdx is None: + raise ValueError('pose item came before number of poses!') + for poseInfo in pose: + #print 'trackInfoPoseInfo:', poseInfo.tag + if poseInfo.tag == 'tx': + newTrack.trans[frameIdx, 0] = float(poseInfo.text) + elif poseInfo.tag == 'ty': + newTrack.trans[frameIdx, 1] = float(poseInfo.text) + elif poseInfo.tag == 'tz': + newTrack.trans[frameIdx, 2] = float(poseInfo.text) + elif poseInfo.tag == 'rx': + newTrack.rots[frameIdx, 0] = float(poseInfo.text) + elif poseInfo.tag == 'ry': + newTrack.rots[frameIdx, 1] = float(poseInfo.text) + elif poseInfo.tag == 'rz': + newTrack.rots[frameIdx, 2] = float(poseInfo.text) + elif poseInfo.tag == 'state': + newTrack.states[frameIdx] = stateFromText[poseInfo.text] + elif poseInfo.tag == 'occlusion': + newTrack.occs[frameIdx, 0] = occFromText[poseInfo.text] + elif poseInfo.tag == 'occlusion_kf': + newTrack.occs[frameIdx, 1] = occFromText[poseInfo.text] + elif poseInfo.tag == 'truncation': + newTrack.truncs[frameIdx] = truncFromText[poseInfo.text] + elif poseInfo.tag == 'amt_occlusion': + newTrack.amtOccs[frameIdx,0] = float(poseInfo.text) + hasAmt = True + elif poseInfo.tag == 'amt_occlusion_kf': + newTrack.amtOccs[frameIdx,1] = float(poseInfo.text) + hasAmt = True + elif poseInfo.tag == 'amt_border_l': + newTrack.amtBorders[frameIdx,0] = float(poseInfo.text) + hasAmt = True + elif poseInfo.tag == 'amt_border_r': + newTrack.amtBorders[frameIdx,1] = float(poseInfo.text) + hasAmt = True + elif poseInfo.tag == 'amt_border_kf': + newTrack.amtBorders[frameIdx,2] = float(poseInfo.text) + hasAmt = True + else: + raise ValueError('unexpected tag in poses item: {0}!'.format(poseInfo.tag)) + frameIdx += 1 + else: + raise ValueError('unexpected pose info: {0}!'.format(pose.tag)) + elif info.tag == 'finished': + isFinished = True + else: + raise ValueError('unexpected tag in tracklets: {0}!'.format(info.tag)) + #end: for all fields in current tracklet + + # some final consistency checks on new tracklet + if not isFinished: + warn('tracklet {0} was not finished!'.format(trackletIdx)) + if newTrack.nFrames is None: + warn('tracklet {0} contains no information!'.format(trackletIdx)) + elif frameIdx != newTrack.nFrames: + warn('tracklet {0} is supposed to have {1} frames, but perser found {1}!'.format(trackletIdx, newTrack.nFrames, frameIdx)) + if np.abs(newTrack.rots[:,:2]).sum() > 1e-16: + warn('track contains rotation other than yaw!') + + # if amtOccs / amtBorders are not set, set them to None + if not hasAmt: + newTrack.amtOccs = None + newTrack.amtBorders = None + + # add new tracklet to list + tracklets.append(newTrack) + trackletIdx += 1 + + else: + raise ValueError('unexpected tracklet info') + #end: for tracklet list items + + print('Loaded', trackletIdx, 'tracklets.') + + # final consistency check + if trackletIdx != nTracklets: + warn('according to xml information the file has {0} tracklets, but parser 
found {1}!'.format(nTracklets, trackletIdx))
+
+    return tracklets
+#end: function parseXML
+
+
+def dump_frames_text_from_tracklets(n_frames, xml_path, gt_file_path):
+    """
+    Loads dataset labels, also referred to as tracklets, and dumps them to a
+    KITTI-format ground truth text file, one line per object per frame.
+
+    Parameters
+    ----------
+    n_frames     : Number of frames in the dataset.
+    xml_path     : Path to the tracklets XML file.
+    gt_file_path : Path of the ground truth text file to write.
+
+    Returns
+    -------
+    Nothing; the output is written to gt_file_path.
+    """
+    tracklets = parseXML(xml_path)
+
+    # the trailing numbers are the column counts of each field in a KITTI label line
+    frame_num = {}           # 1
+    frame_track_id = {}      # 1
+    frame_obj_type = {}      # 1
+    frame_truncated = {}     # 1
+    frame_occluded = {}      # 1
+    # frame_alpha = {}       # 1
+    # frame_bbox = {}        # 4
+    frame_dimensions = {}    # 3
+    frame_location = {}      # 3
+    frame_rotation_yaw = {}  # 1
+    for i in range(n_frames):
+        frame_num[i] = []
+        frame_track_id[i] = []
+        frame_obj_type[i] = []
+        frame_truncated[i] = []
+        frame_occluded[i] = []
+        frame_dimensions[i] = []
+        frame_location[i] = []
+        frame_rotation_yaw[i] = []
+
+    # loop over tracklets
+    for i, tracklet in enumerate(tracklets):
+        # this part is inspired by the KITTI object development kit matlab code: computeBox3D
+        h, w, l = tracklet.size
+        # loop over all data in tracklet
+        for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in tracklet:
+            # determine if object is in the image; otherwise continue
+            # if truncation not in (TRUNC_IN_IMAGE, TRUNC_TRUNCATED):
+            #     continue
+            yaw = rotation[2]  # other rotations are supposedly 0
+            assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
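+            # note: frame_dimensions below is filled as (l, w, h), while evaluate_tracking.py
+            # parses columns 10-12 of each dumped line as (h, w, l); keep the two in sync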
+ + # concate data + frame_num[absoluteFrameNumber] = frame_num[absoluteFrameNumber] + [absoluteFrameNumber] + frame_track_id[absoluteFrameNumber] = frame_track_id[absoluteFrameNumber] + [i] + frame_obj_type[absoluteFrameNumber] = frame_obj_type[absoluteFrameNumber] + [tracklet.objectType] + frame_truncated[absoluteFrameNumber] = frame_truncated[absoluteFrameNumber] + [truncation] + frame_occluded[absoluteFrameNumber] = frame_occluded[absoluteFrameNumber] + [occlusion[0]] + frame_dimensions[absoluteFrameNumber] = frame_dimensions[absoluteFrameNumber] + [np.array([l, w, h])] + frame_location[absoluteFrameNumber] = frame_location[absoluteFrameNumber] + [np.array(translation)] + frame_rotation_yaw[absoluteFrameNumber] = frame_rotation_yaw[absoluteFrameNumber] + [yaw] + + gt_text_file = os.path.join(gt_file_path) + file = open(gt_text_file, 'w') + for i_frame in range(n_frames): + for i_object in range(len(frame_num[i_frame])): + file.write(str(frame_num[i_frame][i_object])) + file.write(' ') + file.write(str(frame_track_id[i_frame][i_object])) + file.write(' ') + file.write(frame_obj_type[i_frame][i_object]) + file.write(' ') + file.write(str(frame_truncated[i_frame][i_object])) + file.write(' ') + file.write(str(frame_occluded[i_frame][i_object])) + file.write(' ') + file.write('-10') + file.write(' ') + file.write('-1 -1 -1 -1') + file.write(' ') + file.write(str(frame_dimensions[i_frame][i_object][0])) + file.write(' ') + file.write(str(frame_dimensions[i_frame][i_object][1])) + file.write(' ') + file.write(str(frame_dimensions[i_frame][i_object][2])) + file.write(' ') + file.write(str(frame_location[i_frame][i_object][0])) + file.write(' ') + file.write(str(frame_location[i_frame][i_object][1])) + file.write(' ') + file.write(str(frame_location[i_frame][i_object][2])) + file.write(' ') + file.write(str(frame_rotation_yaw[i_frame][i_object])) + file.write('\n') diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py new file mode 100644 index 00000000000..92b5d30db2e --- /dev/null +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py @@ -0,0 +1,29 @@ +import shapely.geometry +import shapely.affinity + +class RotatedRect: + def __init__(self, cx, cy, l, w, angle): + self.cx = cx + self.cy = cy + self.l = l + self.w = w + self.angle = angle + + def get_contour(self): + l = self.l + w = self.w + #shapely.geometry.geo.box(minx, miny, maxx, maxy, ccw=True) + c = shapely.geometry.box(-l/2.0, -w/2.0, l/2.0, w/2.0) + rc = shapely.affinity.rotate(c, self.angle) + return shapely.affinity.translate(rc, self.cx, self.cy) + + def intersection(self, other): + return self.get_contour().intersection(other.get_contour()) + + def union(self, other): + return self.get_contour().union(other.get_contour()) + + def intersection_over_union(self, other): + intersection_area = self.intersection(other).area + union_area = self.union(other).area + return intersection_area/union_area diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h index 344e818abfd..95b1f3de400 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h +++ 
b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h
@@ -33,6 +33,8 @@
 #include
 
+#include <fstream>
+
 #include
 #include
 #include
@@ -83,6 +85,13 @@ class ImmUkfPda
   // whether if publish debug ros markers
   bool is_debug_;
 
+  // whether to benchmark the tracking result
+  bool is_benchmark_;
+  int frame_count_;
+
+  // for benchmark
+  std::string result_file_path_;
+
   // prevent explode param for ukf
   double prevent_explosion_thres_;
 
@@ -150,6 +159,8 @@ class ImmUkfPda
 
   void pubDebugRosMarker(const autoware_msgs::DetectedObjectArray& input);
 
+  void dumpResultText(autoware_msgs::DetectedObjectArray& detected_objects);
+
   void tracker(const autoware_msgs::DetectedObjectArray& transformed_input,
                jsk_recognition_msgs::BoundingBoxArray& jskbboxes_output,
                autoware_msgs::DetectedObjectArray& detected_objects_output);
diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch
new file mode 100644
index 00000000000..31b52e27c71
--- /dev/null
+++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch
@@ -0,0 +1,133 @@
+<!-- benchmark.launch: 133 lines of roslaunch XML (element markup not preserved here) -->
diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp
index a5c7aaae14f..3fa75bae850 100644
--- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp
+++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp
@@ -37,7 +37,8 @@
 ImmUkfPda::ImmUkfPda()
   : target_id_(0)
   ,  // assign unique ukf_id_ to each tracking targets
-  init_(false)
+  init_(false),
+  frame_count_(0)
 {
   ros::NodeHandle private_nh_("~");
   private_nh_.param("pointcloud_frame", pointcloud_frame_, "velodyne");
@@ -51,6 +52,52 @@ ImmUkfPda::ImmUkfPda()
   private_nh_.param("prevent_explosion_thres", prevent_explosion_thres_, 1000);
   private_nh_.param("use_sukf", use_sukf_, false);
   private_nh_.param("is_debug", is_debug_, false);
+  private_nh_.param("is_benchmark", is_benchmark_, false);
+
+  // std::string kitti_data_dir;
+  // private_nh_.getParam("kitti_data_dir", kitti_data_dir);
+
+  if(is_benchmark_)
+  {
+    // TODO: use rosparam instead of a hardcoded path
+    std::string kitti_data_dir = "/home/kosuke/hdd/kitti/2011_09_26/2011_09_26_drive_0005_sync/";
+
+    // TODO: factor the current-time file path into a helper function and decide when to use it
+    // benchmarked time
+    time_t t = time(nullptr);
+    const tm* lt = localtime(&t);
+
+    std::stringstream yyyy_mmdd_hhmmss;
+    yyyy_mmdd_hhmmss << "20";
+    yyyy_mmdd_hhmmss << lt->tm_year - 100;
+    yyyy_mmdd_hhmmss << "_";
+    yyyy_mmdd_hhmmss << lt->tm_mon + 1;
+    yyyy_mmdd_hhmmss << lt->tm_mday;
+    yyyy_mmdd_hhmmss << "_";
+    yyyy_mmdd_hhmmss << lt->tm_hour;
+    if(lt->tm_min < 10)
+    {
+      yyyy_mmdd_hhmmss << "0";
+      yyyy_mmdd_hhmmss << lt->tm_min;
+    }
+    else
+    {
+      yyyy_mmdd_hhmmss << lt->tm_min;
+    }
+    if(lt->tm_sec < 10)
+    {
+      yyyy_mmdd_hhmmss << "0";
+      yyyy_mmdd_hhmmss << lt->tm_sec;
+    }
+    else
+    {
+      yyyy_mmdd_hhmmss << lt->tm_sec;
+    }
+
+    result_file_path_ = kitti_data_dir + yyyy_mmdd_hhmmss.str() + ".txt";
+    result_file_path_ = kitti_data_dir + "benchmark_results.txt";
+    remove(result_file_path_.c_str());
+  }
 }
 
 void ImmUkfPda::run()
@@ -80,6 +127,11 @@ void ImmUkfPda::callback(const autoware_msgs::DetectedObjectArray& input)
   transformPoseToLocal(jskbboxes_output, detected_objects_output);
   pub_jskbbox_array_.publish(jskbboxes_output);
   pub_object_array_.publish(detected_objects_output);
+
+  if(is_benchmark_)
+  {
+    dumpResultText(detected_objects_output);
+  }
 }
 
 void ImmUkfPda::relayJskbbox(const autoware_msgs::DetectedObjectArray& input,
@@ -752,6 +804,33 @@ void ImmUkfPda::pubDebugRosMarker(const autoware_msgs::DetectedObjectArray& inpu
   pub_texts_array_.publish(texts_markers);
 }
 
+void ImmUkfPda::dumpResultText(autoware_msgs::DetectedObjectArray& detected_objects)
+{
+  std::ofstream outputfile(result_file_path_, std::ofstream::out | std::ofstream::app);
+  for(size_t i = 0; i < detected_objects.objects.size(); i++)
+  {
+    tf::Quaternion q(detected_objects.objects[i].pose.orientation.x, detected_objects.objects[i].pose.orientation.y,
+                     detected_objects.objects[i].pose.orientation.z, detected_objects.objects[i].pose.orientation.w);
+    double roll, pitch, yaw;
+    tf::Matrix3x3(q).getRPY(roll, pitch, yaw);
+    outputfile << std::to_string(frame_count_) << " "
+               << std::to_string(detected_objects.objects[i].id) << " "
+               << "Unknown" << " "
+               << "-1" << " "
+               << "-1" << " "
+               << "-10" << " "
+               << "-1 -1 -1 -1" << " "
+               << std::to_string(detected_objects.objects[i].dimensions.x) << " "
+               << std::to_string(detected_objects.objects[i].dimensions.y) << " "
+               << "-1" << " "
+               << std::to_string(detected_objects.objects[i].pose.position.x) << " "
+               << std::to_string(detected_objects.objects[i].pose.position.y) << " "
+               << "-1" << " "
+               << std::to_string(yaw) << "\n";
+  }
+  frame_count_++;
+}
+
 void ImmUkfPda::tracker(const autoware_msgs::DetectedObjectArray& input,
                         jsk_recognition_msgs::BoundingBoxArray& jskbboxes_output,
                         autoware_msgs::DetectedObjectArray& detected_objects_output)

From ce0b2e50e5fd89dfc9a9adb4dc4bfaf5708d635b Mon Sep 17 00:00:00 2001
From: Kosuke Murakami
Date: Sun, 28 Oct 2018 18:25:05 +0900
Subject: [PATCH 02/16] Make benchmark script work by loading kitti dir

---
 .../benchmark/evaluate_tracking.py | 69 +++++++++++--------
 1 file changed, 39 insertions(+), 30 deletions(-)

diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py
index 3cca68b4f98..75d7a0c6a46 100644
--- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py
+++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py
@@ -9,6 +9,8 @@
 except:
     from collections import OrderedDict # only included from python 2.7 on
 from tqdm import tqdm
+from shutil import copyfile
+import datetime
 
 class tData:
     """
@@ -257,12 +259,12 @@ def _loadData(self, file_path, min_score=-1000, loading_groundtruth=False):
         self.n_gt_trajectories=n_trajectories
         return True
 
-    def createEvalDir(self):
+    def createEvalDir(self, benchmark_dir):
         """
         Creates directory to store evaluation results and data for visualization.
""" - self.eval_dir = "./results/" + self.eval_dir = benchmark_dir if not os.path.exists(self.eval_dir): print ("create directory:", self.eval_dir) os.makedirs(self.eval_dir) @@ -709,14 +711,14 @@ def saveToStats(self): # mail.msg(summary) # write summary to file summary_cls.txt - filename = os.path.join("./results", "summary.txt" ) + filename = os.path.join(self.eval_dir, "summary.txt" ) dump = open(filename, "w+") # print>>dump, summary print(summary, end="", file=dump) dump.close() -def evaluate(velo_data_num, result_file_path, gt_file_path): +def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): """ Entry point for evaluation, will load the data and start evaluation for CAR and PEDESTRIAN if available. @@ -747,7 +749,7 @@ def evaluate(velo_data_num, result_file_path, gt_file_path): print("Start Evaluation...") # create needed directories, evaluate and save stats try: - e.createEvalDir() + e.createEvalDir(benchmark_dir) except: print("Caught exception while creating results.") if e.compute3rdPartyMetrics(): @@ -760,29 +762,13 @@ def evaluate(velo_data_num, result_file_path, gt_file_path): print("Thank you for participating in our benchmark!") return True -# def convertRawDataGTtoTrackingGT(): -# +# def makeBench(): + -import datetime ######################################################################### # entry point of evaluation script if __name__ == "__main__": - # evaluate results and send notification email to user - # base_dir = '/home/kosuke/hdd/kitti' - # date = '2011_09_26' - # drive = '0005' - # partial_tracklet_path = '{}/{}_drive_{}_sync/tracklet_labels.xml'.format(date, date, drive) - # partial_velo_path = '{}/{}_drive_{}_sync/velodyne_points/data'.format(date, date, drive) - # tracklet_path = os.path.join(base_dir, partial_tracklet_path) - # velo_dir = os.path.join(base_dir, partial_velo_path) - # gt_file_path = './results/gt_frame.txt' - # result_file_path = './results/result.txt' - # velo_data_num = len(os.listdir(velo_dir)) - # dump_frames_text_from_tracklets(velo_data_num, tracklet_path, gt_file_path) - # success = evaluate(velo_data_num, result_file_path, gt_file_path) - - - # print (datetime.datetime.now()) + if datetime.datetime.now().minute < 10: minute_str = str(0) + str(datetime.datetime.now().minute) else: @@ -791,9 +777,32 @@ def evaluate(velo_data_num, result_file_path, gt_file_path): second_str = str(0) + str(datetime.datetime.now().second) else: second_str = str(datetime.datetime.now().second) - file_name = str(datetime.datetime.now().year) + "_" + \ - str(datetime.datetime.now().month) + \ - str(datetime.datetime.now().day) + "_" + \ - str(datetime.datetime.now().hour) + \ - minute_str + second_str - print (file_name) + time_file_name = str(datetime.datetime.now().year) + "_" + \ + str(datetime.datetime.now().month) + \ + str(datetime.datetime.now().day) + "_" + \ + str(datetime.datetime.now().hour) + \ + minute_str + second_str + benchmark_dir_name = "benchmark_" + time_file_name + print (benchmark_dir_name) + + # TODO:python argument + base_dir = "/home/kosuke/hdd/kitti/2011_09_26/2011_09_26_drive_0005_sync" + # copy benchmark_results.txt to `benchmark_dir_name`/benchmark_results.txt + benchmark_dir = os.path.join(base_dir, benchmark_dir_name) + os.makedirs(benchmark_dir) + result_file_name = "benchmark_results.txt" + result_file_path = os.path.join(base_dir, result_file_name) + result_file_in_benchmark_dir = os.path.join(benchmark_dir, result_file_name) + copyfile(result_file_path, result_file_in_benchmark_dir) + + print 
(result_file_path) + print (result_file_in_benchmark_dir) + + tracklet_file_name = "tracklet_labels.xml" + partial_velo_path = "velodyne_points/data" + tracklet_path = os.path.join(base_dir, tracklet_file_name) + gt_file_path = os.path.join(base_dir, "gt_frame.txt") + velo_dir = os.path.join(base_dir, partial_velo_path) + velo_data_num = len(os.listdir(velo_dir)) + dump_frames_text_from_tracklets(velo_data_num, tracklet_path, gt_file_path) + success = evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir) From 1099b8caa4371e87a0274f32cc5c02db0f12dd42 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Mon, 29 Oct 2018 15:47:20 +0900 Subject: [PATCH 03/16] Refactor codes --- .../benchmark/evaluate_tracking.py | 165 +++++++++--------- .../benchmark/parse_tracklet_xml.py | 37 ---- .../benchmark/{rotated_rect.py => rect.py} | 2 +- 3 files changed, 82 insertions(+), 122 deletions(-) rename ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/{rotated_rect.py => rect.py} (97%) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py index 75d7a0c6a46..3cb768ed623 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py @@ -1,7 +1,5 @@ import sys,os,copy,math from munkres import Munkres -from rotated_rect import RotatedRect -from parse_tracklet_xml import dump_frames_text_from_tracklets from collections import defaultdict from time import gmtime, strftime try: @@ -12,6 +10,9 @@ from shutil import copyfile import datetime +from rect import Rect +from parse_tracklet_xml import dump_frames_text_from_tracklets + class tData: """ Utility class to load data. @@ -56,7 +57,7 @@ def __str__(self): return '\n'.join("%s: %s" % item for item in attrs.items()) -class trackingEvaluation(object): +class TrackingEvaluation(object): """ tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall) MOTA - Multi-object tracking accuracy in [0,100] MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D) @@ -129,14 +130,14 @@ def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_trunca # this should be enough to hold all groundtruth trajectories # is expanded if necessary and reduced in any case - def loadTrackedData(self, result_file_path): + def load_tracked_data(self, result_file_path): """ Helper function to load tracker data. """ try: if(os.path.exists(result_file_path)): - if not self._loadData(result_file_path, loading_groundtruth=False): + if not self._load_data(result_file_path, loading_groundtruth=False): return False else: print("Error: There is no result data file") @@ -145,14 +146,14 @@ def loadTrackedData(self, result_file_path): return False return True - def loadGroundtruth(self, gt_file_path): + def load_ground_truth(self, gt_file_path): """ Helper function to load ground truth. 
""" try: if(os.path.exists(gt_file_path)): - self._loadData(gt_file_path, loading_groundtruth=True) + self._load_data(gt_file_path, loading_groundtruth=True) else: print("Error: There is no groundtruth file") raise @@ -160,10 +161,10 @@ def loadGroundtruth(self, gt_file_path): return False return True - def _loadData(self, file_path, min_score=-1000, loading_groundtruth=False): + def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): """ Generic loader for ground truth and tracking data. - Use loadGroundtruth() or loadTracker() to load this data. + Use load_ground_truth() or load_tracked_data() to load this data. Loads detections in KITTI format from textfiles. """ # construct objectDetections object to hold detection data @@ -259,7 +260,7 @@ def _loadData(self, file_path, min_score=-1000, loading_groundtruth=False): self.n_gt_trajectories=n_trajectories return True - def createEvalDir(self, benchmark_dir): + def create_eval_dir(self, benchmark_dir): """ Creates directory to store evaluation results and data for visualization. """ @@ -270,7 +271,7 @@ def createEvalDir(self, benchmark_dir): os.makedirs(self.eval_dir) print ("done") - def compute3rdPartyMetrics(self): + def compute_3rd_party_metrics(self): """ Computes the metrics defined in - Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics @@ -324,7 +325,6 @@ def compute3rdPartyMetrics(self): frame_ids = [[],[]] # loop over ground truth objects in one frame for gt in frame_gts: - # print("location ", gt.X, gt.Y) # save current ids frame_ids[0].append(gt.track_id) frame_ids[1].append(-1) @@ -333,17 +333,15 @@ def compute3rdPartyMetrics(self): gt.fragmentation = 0 cost_row = [] # loop over tracked objects in one frame - # print("gt", gt.X, gt.Y, gt.l, gt.w, gt.yaw) for result in frame_results: # overlap == 1 is cost ==0 - # RotatedRect(cx, cy, l, w, angle) + # Rect(cx, cy, l, w, angle) # todo might be wrong: euclidean cluster messed up calculating w, l # Better implementation if switching w and l based on box's quartenion # and self.min_overlap might be too big - r1 = RotatedRect(gt.X, gt.Y, gt.l, gt.w, gt.yaw) - r2 = RotatedRect(result.X, result.Y, result.l, result.w, result.yaw) + r1 = Rect(gt.X, gt.Y, gt.l, gt.w, gt.yaw) + r2 = Rect(result.X, result.Y, result.l, result.w, result.yaw) iou = r1.intersection_over_union(r2) - # print("iou ",iou) cost = 1- iou # gating for boxoverlap if cost<=self.min_overlap: @@ -638,7 +636,7 @@ def compute3rdPartyMetrics(self): self.MODP = sum(self.MODP_t)/float(self.n_frames) return True - def printEntry(self, key, val,width=(70,10)): + def _print_entry(self, key, val,width=(70,10)): """ Pretty print an entry in a table fashion. """ @@ -654,7 +652,7 @@ def printEntry(self, key, val,width=(70,10)): s_out += ("%s"%val).rjust(width[1]) return s_out - def createSummary(self): + def create_summary(self): """ Generate and mail a summary of the results. If mailpy.py is present, the summary is instead printed. 
@@ -663,52 +661,51 @@ def createSummary(self): summary = "" summary += "tracking evaluation summary".center(80,"=") + "\n" - summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n" - summary += self.printEntry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n" - summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n" - summary += self.printEntry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n" - summary += self.printEntry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n" + summary += self._print_entry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n" + summary += self._print_entry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n" + summary += self._print_entry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n" + summary += self._print_entry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n" + summary += self._print_entry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n" summary += "\n" - summary += self.printEntry("Recall", self.recall) + "\n" - summary += self.printEntry("Precision", self.precision) + "\n" - summary += self.printEntry("F1", self.F1) + "\n" - summary += self.printEntry("False Alarm Rate", self.FAR) + "\n" + summary += self._print_entry("Recall", self.recall) + "\n" + summary += self._print_entry("Precision", self.precision) + "\n" + summary += self._print_entry("F1", self.F1) + "\n" + summary += self._print_entry("False Alarm Rate", self.FAR) + "\n" summary += "\n" - summary += self.printEntry("Mostly Tracked", self.MT) + "\n" - summary += self.printEntry("Partly Tracked", self.PT) + "\n" - summary += self.printEntry("Mostly Lost", self.ML) + "\n" + summary += self._print_entry("Mostly Tracked", self.MT) + "\n" + summary += self._print_entry("Partly Tracked", self.PT) + "\n" + summary += self._print_entry("Mostly Lost", self.ML) + "\n" summary += "\n" - summary += self.printEntry("True Positives", self.tp) + "\n" - summary += self.printEntry("Ignored True Positives", self.itp) + "\n" - summary += self.printEntry("False Positives", self.fp) + "\n" - summary += self.printEntry("False Negatives", self.fn) + "\n" - summary += self.printEntry("Ignored False Negatives", self.ifn) + "\n" - summary += self.printEntry("Missed Targets", self.fn) + "\n" - summary += self.printEntry("ID-switches", self.id_switches) + "\n" - summary += self.printEntry("Fragmentations", self.fragments) + "\n" + summary += self._print_entry("True Positives", self.tp) + "\n" + summary += self._print_entry("Ignored True Positives", self.itp) + "\n" + summary += self._print_entry("False Positives", self.fp) + "\n" + summary += self._print_entry("False Negatives", self.fn) + "\n" + summary += self._print_entry("Ignored False Negatives", self.ifn) + "\n" + summary += self._print_entry("Missed Targets", self.fn) + "\n" + summary += self._print_entry("ID-switches", self.id_switches) + "\n" + summary += self._print_entry("Fragmentations", self.fragments) + "\n" summary += "\n" - summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n" - summary += self.printEntry("Ignored Ground Truth Objects", self.n_igt) + "\n" - summary += self.printEntry("Ground Truth Trajectories", self.n_gt_trajectories) + "\n" + summary += self._print_entry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n" + summary += self._print_entry("Ignored Ground Truth Objects", self.n_igt) + "\n" + summary += self._print_entry("Ground 
Truth Trajectories", self.n_gt_trajectories) + "\n" summary += "\n" - summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n" - summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n" - summary += self.printEntry("Tracker Trajectories", self.n_tr_trajectories) + "\n" + summary += self._print_entry("Tracker Objects (Total)", self.n_tr) + "\n" + summary += self._print_entry("Ignored Tracker Objects", self.n_itr) + "\n" + summary += self._print_entry("Tracker Trajectories", self.n_tr_trajectories) + "\n" summary += "="*80 return summary - def saveToStats(self): + def save_to_stats(self): """ Save the statistics in a whitespace separate file. """ # create pretty summary - summary = self.createSummary() + summary = self.create_summary() - # mail or print the summary. + # print the summary. print(summary) - # mail.msg(summary) # write summary to file summary_cls.txt filename = os.path.join(self.eval_dir, "summary.txt" ) @@ -717,18 +714,40 @@ def saveToStats(self): print(summary, end="", file=dump) dump.close() +def get_benchmark_dir_name(): + if datetime.datetime.now().minute < 10: + minute_str = str(0) + str(datetime.datetime.now().minute) + else: + minute_str = str(datetime.datetime.now().minute) + if datetime.datetime.now().second < 10: + second_str = str(0) + str(datetime.datetime.now().second) + else: + second_str = str(datetime.datetime.now().second) + time_file_name = str(datetime.datetime.now().year) + "_" + \ + str(datetime.datetime.now().month) + \ + str(datetime.datetime.now().day) + "_" + \ + str(datetime.datetime.now().hour) + \ + minute_str + second_str + benchmark_dir_name = "benchmark_" + time_file_name + return benchmark_dir_name + +def copy_result_to_current_time_dir(base_dir, benchmark_dir_name, result_file_path): + benchmark_dir = os.path.join(base_dir, benchmark_dir_name) + os.makedirs(benchmark_dir) + result_file_name = "benchmark_results.txt" + result_file_in_benchmark_dir = os.path.join(benchmark_dir, result_file_name) + copyfile(result_file_path, result_file_in_benchmark_dir) def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): """ - Entry point for evaluation, will load the data and start evaluation for - CAR and PEDESTRIAN if available. 
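
As an aside, the zero-padding logic in get_benchmark_dir_name() above could arguably be collapsed into a single strftime call; a possible simplification (note that %m, %d and %H are always zero-padded, whereas the hand-built string pads only minutes and seconds):

    import datetime

    def get_benchmark_dir_name():
        # benchmark_YYYY_MMDD_HHMMSS, zero-padded throughout
        return "benchmark_" + datetime.datetime.now().strftime("%Y_%m%d_%H%M%S")
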
+ Entry point for evaluation, will load the data and start evaluation """ # start evaluation and instanciated eval object - e = trackingEvaluation(velo_data_num) + e = TrackingEvaluation(velo_data_num) # load tracker data and check provided classes try: - if not e.loadTrackedData(result_file_path): + if not e.load_tracked_data(result_file_path): "failed to load tracked data" return print("Loading Results - Success") @@ -737,7 +756,7 @@ def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): print("Caught exception while loading result data.") return # load groundtruth data for this class - if not e.loadGroundtruth(gt_file_path): + if not e.load_ground_truth(gt_file_path): raise ValueError("Ground truth not found.") print("Loading Groundtruth - Success") print("Size of ground truth data ", len(e.groundtruth)) @@ -749,12 +768,12 @@ def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): print("Start Evaluation...") # create needed directories, evaluate and save stats try: - e.createEvalDir(benchmark_dir) + e.create_eval_dir(benchmark_dir) except: print("Caught exception while creating results.") - if e.compute3rdPartyMetrics(): + if e.compute_3rd_party_metrics(): print("Finished evaluation") - e.saveToStats() + e.save_to_stats() else: print("There seem to be no true positives or false positives at all in the submitted data.") @@ -762,41 +781,18 @@ def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): print("Thank you for participating in our benchmark!") return True -# def makeBench(): - - ######################################################################### # entry point of evaluation script if __name__ == "__main__": - - if datetime.datetime.now().minute < 10: - minute_str = str(0) + str(datetime.datetime.now().minute) - else: - minute_str = str(datetime.datetime.now().minute) - if datetime.datetime.now().second < 10: - second_str = str(0) + str(datetime.datetime.now().second) - else: - second_str = str(datetime.datetime.now().second) - time_file_name = str(datetime.datetime.now().year) + "_" + \ - str(datetime.datetime.now().month) + \ - str(datetime.datetime.now().day) + "_" + \ - str(datetime.datetime.now().hour) + \ - minute_str + second_str - benchmark_dir_name = "benchmark_" + time_file_name - print (benchmark_dir_name) - # TODO:python argument base_dir = "/home/kosuke/hdd/kitti/2011_09_26/2011_09_26_drive_0005_sync" - # copy benchmark_results.txt to `benchmark_dir_name`/benchmark_results.txt + benchmark_dir_name = get_benchmark_dir_name() benchmark_dir = os.path.join(base_dir, benchmark_dir_name) - os.makedirs(benchmark_dir) + result_file_name = "benchmark_results.txt" result_file_path = os.path.join(base_dir, result_file_name) - result_file_in_benchmark_dir = os.path.join(benchmark_dir, result_file_name) - copyfile(result_file_path, result_file_in_benchmark_dir) - - print (result_file_path) - print (result_file_in_benchmark_dir) + # copy benchmark_results.txt to `benchmark_dir_name`/benchmark_results.txt + copy_result_to_current_time_dir(base_dir, benchmark_dir_name, result_file_path) tracklet_file_name = "tracklet_labels.xml" partial_velo_path = "velodyne_points/data" @@ -804,5 +800,6 @@ def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): gt_file_path = os.path.join(base_dir, "gt_frame.txt") velo_dir = os.path.join(base_dir, partial_velo_path) velo_data_num = len(os.listdir(velo_dir)) + dump_frames_text_from_tracklets(velo_data_num, tracklet_path, gt_file_path) success = 
evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py index 7f2d5a93d85..57602ff7277 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py @@ -1,32 +1,4 @@ #!/usr/bin/env python -""" -parse XML files containing tracklet info for kitti data base (raw data section) -(http://cvlibs.net/datasets/kitti/raw_data.php) - -No guarantees that this code is correct, usage is at your own risk! - -created by Christian Herdtweck, Max Planck Institute for Biological Cybernetics - (christian.herdtweck@tuebingen.mpg.de) -updated by Alex Staravoitau - -requires numpy! - -example usage: - import parseTrackletXML as xmlParser - kittiDir = '/path/to/kitti/data' - drive = '2011_09_26_drive_0001' - xmlParser.example(kittiDir, drive) -or simply on command line: - python parseTrackletXML.py -""" - -# Version History: -# 20/3/17 Alex Staravoitau: updated for Python 3, cleaned up docstring comments, fixed tabulation; -# 4/7/12 Christian Herdtweck: seems to work with a few random test xml tracklet files; -# converts file contents to ElementTree and then to list of Tracklet objects; -# Tracklet objects have str and iter functions -# 5/7/12 ch: added constants for state, occlusion, truncation and added consistency checks -# 30/1/14 ch: create example function from example code from sys import argv as cmdLineArgs from xml.etree.ElementTree import ElementTree @@ -156,7 +128,6 @@ def parseXML(trackletFile): hasAmt = False frameIdx = None for info in trackletElem: - #print 'trackInfo:', info.tag if isFinished: raise ValueError('more info on element after finished!') if info.tag == 'objectType': @@ -172,7 +143,6 @@ def parseXML(trackletFile): elif info.tag == 'poses': # this info is the possibly long list of poses for pose in info: - #print 'trackInfoPose:', pose.tag if pose.tag == 'count': # this should come before the others if newTrack.nFrames is not None: raise ValueError('there are several pose lists for a single track!') @@ -190,11 +160,9 @@ def parseXML(trackletFile): elif pose.tag == 'item_version': pass elif pose.tag == 'item': - # pose in one frame if frameIdx is None: raise ValueError('pose item came before number of poses!') for poseInfo in pose: - #print 'trackInfoPoseInfo:', poseInfo.tag if poseInfo.tag == 'tx': newTrack.trans[frameIdx, 0] = float(poseInfo.text) elif poseInfo.tag == 'ty': @@ -295,8 +263,6 @@ def dump_frames_text_from_tracklets(n_frames, xml_path, gt_file_path): frame_obj_type = {} #1 frame_truncated = {} #1 frame_occluded = {} #1 - # frame_alpha = {} // #1 - # frame_bbox = {} #4 frame_dimensions = {} #3 frame_location = {} #3 frame_rotation_yaw = {} #1 @@ -316,9 +282,6 @@ def dump_frames_text_from_tracklets(n_frames, xml_path, gt_file_path): h, w, l = tracklet.size # loop over all data in tracklet for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in tracklet: - # determine if object is in the image; otherwise continue - # if truncation not in (TRUNC_IN_IMAGE, TRUNC_TRUNCATED): - # continue yaw = rotation[2] # other rotations are supposedly 0 assert np.abs(rotation[:2]).sum() == 0, 
'object rotations other than yaw given!' diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py similarity index 97% rename from ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py rename to ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py index 92b5d30db2e..762baccf890 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rotated_rect.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py @@ -1,7 +1,7 @@ import shapely.geometry import shapely.affinity -class RotatedRect: +class Rect: def __init__(self, cx, cy, l, w, angle): self.cx = cx self.cy = cy From ea8e671f431111566a0964c57b112a1671878b09 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Mon, 29 Oct 2018 16:16:47 +0900 Subject: [PATCH 04/16] finished refactoring naming for parse_tracklet_xml --- .../benchmark/parse_tracklet_xml.py | 249 +++++++++--------- 1 file changed, 123 insertions(+), 126 deletions(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py index 57602ff7277..85251471817 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py @@ -9,20 +9,20 @@ STATE_UNSET = 0 STATE_INTERP = 1 STATE_LABELED = 2 -stateFromText = {'0':STATE_UNSET, '1':STATE_INTERP, '2':STATE_LABELED} +STATEFROMTEXT = {'0':STATE_UNSET, '1':STATE_INTERP, '2':STATE_LABELED} OCC_UNSET = 255 # -1 as uint8 OCC_VISIBLE = 0 OCC_PARTLY = 1 OCC_FULLY = 2 -occFromText = {'-1':OCC_UNSET, '0':OCC_VISIBLE, '1':OCC_PARTLY, '2':OCC_FULLY} +OCCFROMTEXT = {'-1':OCC_UNSET, '0':OCC_VISIBLE, '1':OCC_PARTLY, '2':OCC_FULLY} TRUNC_UNSET = 255 # -1 as uint8, but in xml files the value '99' is used! TRUNC_IN_IMAGE = 0 TRUNC_TRUNCATED = 1 TRUNC_OUT_IMAGE = 2 TRUNC_BEHIND_IMAGE = 3 -truncFromText = {'99':TRUNC_UNSET, '0':TRUNC_IN_IMAGE, '1':TRUNC_TRUNCATED, '2':TRUNC_OUT_IMAGE, '3': TRUNC_BEHIND_IMAGE} +TRUNCFROMTEXT = {'99':TRUNC_UNSET, '0':TRUNC_IN_IMAGE, '1':TRUNC_TRUNCATED, '2':TRUNC_OUT_IMAGE, '3': TRUNC_BEHIND_IMAGE} class Tracklet(object): @@ -31,32 +31,32 @@ class Tracklet(object): Tracklets are created in function parseXML and can most conveniently used as follows: - for trackletObj in parseXML(trackletFile): - for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj: + for trackletObj in parseXML(tracklet_file): + for translation, rotation, state, occlusion, truncation, amt_occlusion, amt_borders, absolute_frame_number in trackletObj: ... your code here ... 
#end: for all frames #end: for all tracklets - absoluteFrameNumber is in range [firstFrame, firstFrame+nFrames[ - amtOcclusion and amtBorders could be None + absolute_frame_number is in range [first_frame, first_frame+n_frames[ + amt_occlusion and amt_borders could be None - You can of course also directly access the fields objType (string), size (len-3 ndarray), firstFrame/nFrames (int), - trans/rots (nFrames x 3 float ndarrays), states/truncs (len-nFrames uint8 ndarrays), occs (nFrames x 2 uint8 ndarray), - and for some tracklets amtOccs (nFrames x 2 float ndarray) and amtBorders (nFrames x 3 float ndarray). The last two + You can of course also directly access the fields objType (string), size (len-3 ndarray), first_frame/n_frames (int), + trans/rots (n_frames x 3 float ndarrays), states/truncs (len-n_frames uint8 ndarrays), occs (n_frames x 2 uint8 ndarray), + and for some tracklets amt_occs (n_frames x 2 float ndarray) and amt_borders (n_frames x 3 float ndarray). The last two can be None if the xml file did not include these fields in poses """ - objectType = None + object_type = None size = None # len-3 float array: (height, width, length) - firstFrame = None + first_frame = None trans = None # n x 3 float array (x,y,z) rots = None # n x 3 float array (x,y,z) states = None # len-n uint8 array of states occs = None # n x 2 uint8 array (occlusion, occlusion_kf) truncs = None # len-n uint8 array of truncation - amtOccs = None # None or (n x 2) float array (amt_occlusion, amt_occlusion_kf) - amtBorders = None # None (n x 3) float array (amt_border_l / _r / _kf) - nFrames = None + amt_occs = None # None or (n x 2) float array (amt_occlusion, amt_occlusion_kf) + amt_borders = None # None (n x 3) float array (amt_border_l / _r / _kf) + n_frames = None def __init__(self): """ @@ -73,170 +73,167 @@ def __str__(self): or in text = str(trackletObj) """ - return '[Tracklet over {0} frames for {1}]'.format(self.nFrames, self.objectType) + return '[Tracklet over {0} frames for {1}]'.format(self.n_frames, self.object_type) def __iter__(self): """ Returns an iterator that yields tuple of all the available data for each frame called whenever code iterates over a tracklet object, e.g. in - for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj: + for translation, rotation, state, occlusion, truncation, amt_occlusion, amt_borders, absolute_frame_number in trackletObj: ...do something ... 
or trackDataIter = iter(trackletObj) """ - if self.amtOccs is None: + if self.amt_occs is None: return zip(self.trans, self.rots, self.states, self.occs, self.truncs, - itertools.repeat(None), itertools.repeat(None), range(self.firstFrame, self.firstFrame+self.nFrames)) + itertools.repeat(None), itertools.repeat(None), range(self.first_frame, self.first_frame+self.n_frames)) else: return zip(self.trans, self.rots, self.states, self.occs, self.truncs, - self.amtOccs, self.amtBorders, range(self.firstFrame, self.firstFrame+self.nFrames)) + self.amt_occs, self.amt_borders, range(self.first_frame, self.first_frame+self.n_frames)) #end: class Tracklet -def parseXML(trackletFile): +def parse_xml(tracklet_file): """ Parses tracklet xml file and convert results to list of Tracklet objects - :param trackletFile: name of a tracklet xml file + :param tracklet_file: name of a tracklet xml file :returns: list of Tracklet objects read from xml file """ # convert tracklet XML data to a tree structure - eTree = ElementTree() - print('Parsing tracklet file', trackletFile) - with open(trackletFile) as f: - eTree.parse(f) + e_tree = ElementTree() + print('Parsing tracklet file', tracklet_file) + with open(tracklet_file) as f: + e_tree.parse(f) # now convert output to list of Tracklet objects - trackletsElem = eTree.find('tracklets') + tracklets_elem = e_tree.find('tracklets') tracklets = [] - trackletIdx = 0 - nTracklets = None - for trackletElem in trackletsElem: - #print 'track:', trackletElem.tag - if trackletElem.tag == 'count': - nTracklets = int(trackletElem.text) - print('File contains', nTracklets, 'tracklets') - elif trackletElem.tag == 'item_version': + tracklet_idx = 0 + n_tracklets = None + for tracklet_elem in tracklets_elem: + if tracklet_elem.tag == 'count': + n_tracklets = int(tracklet_elem.text) + print('File contains', n_tracklets, 'tracklets') + elif tracklet_elem.tag == 'item_version': pass - elif trackletElem.tag == 'item': - #print 'tracklet {0} of {1}'.format(trackletIdx, nTracklets) - # a tracklet - newTrack = Tracklet() - isFinished = False - hasAmt = False - frameIdx = None - for info in trackletElem: - if isFinished: + elif tracklet_elem.tag == 'item': + new_track = Tracklet() + is_finished = False + has_amt = False + frame_idx = None + for info in tracklet_elem: + if is_finished: raise ValueError('more info on element after finished!') if info.tag == 'objectType': - newTrack.objectType = info.text + new_track.object_type = info.text elif info.tag == 'h': - newTrack.size[0] = float(info.text) + new_track.size[0] = float(info.text) elif info.tag == 'w': - newTrack.size[1] = float(info.text) + new_track.size[1] = float(info.text) elif info.tag == 'l': - newTrack.size[2] = float(info.text) + new_track.size[2] = float(info.text) elif info.tag == 'first_frame': - newTrack.firstFrame = int(info.text) + new_track.first_frame = int(info.text) elif info.tag == 'poses': # this info is the possibly long list of poses for pose in info: if pose.tag == 'count': # this should come before the others - if newTrack.nFrames is not None: + if new_track.n_frames is not None: raise ValueError('there are several pose lists for a single track!') - elif frameIdx is not None: + elif frame_idx is not None: raise ValueError('?!') - newTrack.nFrames = int(pose.text) - newTrack.trans = np.nan * np.ones((newTrack.nFrames, 3), dtype=float) - newTrack.rots = np.nan * np.ones((newTrack.nFrames, 3), dtype=float) - newTrack.states = np.nan * np.ones(newTrack.nFrames, dtype='uint8') - newTrack.occs = np.nan * 
np.ones((newTrack.nFrames, 2), dtype='uint8') - newTrack.truncs = np.nan * np.ones(newTrack.nFrames, dtype='uint8') - newTrack.amtOccs = np.nan * np.ones((newTrack.nFrames, 2), dtype=float) - newTrack.amtBorders = np.nan * np.ones((newTrack.nFrames, 3), dtype=float) - frameIdx = 0 + new_track.n_frames = int(pose.text) + new_track.trans = np.nan * np.ones((new_track.n_frames, 3), dtype=float) + new_track.rots = np.nan * np.ones((new_track.n_frames, 3), dtype=float) + new_track.states = np.nan * np.ones(new_track.n_frames, dtype='uint8') + new_track.occs = np.nan * np.ones((new_track.n_frames, 2), dtype='uint8') + new_track.truncs = np.nan * np.ones(new_track.n_frames, dtype='uint8') + new_track.amt_occs = np.nan * np.ones((new_track.n_frames, 2), dtype=float) + new_track.amt_borders = np.nan * np.ones((new_track.n_frames, 3), dtype=float) + frame_idx = 0 elif pose.tag == 'item_version': pass elif pose.tag == 'item': - if frameIdx is None: + if frame_idx is None: raise ValueError('pose item came before number of poses!') - for poseInfo in pose: - if poseInfo.tag == 'tx': - newTrack.trans[frameIdx, 0] = float(poseInfo.text) - elif poseInfo.tag == 'ty': - newTrack.trans[frameIdx, 1] = float(poseInfo.text) - elif poseInfo.tag == 'tz': - newTrack.trans[frameIdx, 2] = float(poseInfo.text) - elif poseInfo.tag == 'rx': - newTrack.rots[frameIdx, 0] = float(poseInfo.text) - elif poseInfo.tag == 'ry': - newTrack.rots[frameIdx, 1] = float(poseInfo.text) - elif poseInfo.tag == 'rz': - newTrack.rots[frameIdx, 2] = float(poseInfo.text) - elif poseInfo.tag == 'state': - newTrack.states[frameIdx] = stateFromText[poseInfo.text] - elif poseInfo.tag == 'occlusion': - newTrack.occs[frameIdx, 0] = occFromText[poseInfo.text] - elif poseInfo.tag == 'occlusion_kf': - newTrack.occs[frameIdx, 1] = occFromText[poseInfo.text] - elif poseInfo.tag == 'truncation': - newTrack.truncs[frameIdx] = truncFromText[poseInfo.text] - elif poseInfo.tag == 'amt_occlusion': - newTrack.amtOccs[frameIdx,0] = float(poseInfo.text) - hasAmt = True - elif poseInfo.tag == 'amt_occlusion_kf': - newTrack.amtOccs[frameIdx,1] = float(poseInfo.text) - hasAmt = True - elif poseInfo.tag == 'amt_border_l': - newTrack.amtBorders[frameIdx,0] = float(poseInfo.text) - hasAmt = True - elif poseInfo.tag == 'amt_border_r': - newTrack.amtBorders[frameIdx,1] = float(poseInfo.text) - hasAmt = True - elif poseInfo.tag == 'amt_border_kf': - newTrack.amtBorders[frameIdx,2] = float(poseInfo.text) - hasAmt = True + for pose_info in pose: + if pose_info.tag == 'tx': + new_track.trans[frame_idx, 0] = float(pose_info.text) + elif pose_info.tag == 'ty': + new_track.trans[frame_idx, 1] = float(pose_info.text) + elif pose_info.tag == 'tz': + new_track.trans[frame_idx, 2] = float(pose_info.text) + elif pose_info.tag == 'rx': + new_track.rots[frame_idx, 0] = float(pose_info.text) + elif pose_info.tag == 'ry': + new_track.rots[frame_idx, 1] = float(pose_info.text) + elif pose_info.tag == 'rz': + new_track.rots[frame_idx, 2] = float(pose_info.text) + elif pose_info.tag == 'state': + new_track.states[frame_idx] = STATEFROMTEXT[pose_info.text] + elif pose_info.tag == 'occlusion': + new_track.occs[frame_idx, 0] = OCCFROMTEXT[pose_info.text] + elif pose_info.tag == 'occlusion_kf': + new_track.occs[frame_idx, 1] = OCCFROMTEXT[pose_info.text] + elif pose_info.tag == 'truncation': + new_track.truncs[frame_idx] = TRUNCFROMTEXT[pose_info.text] + elif pose_info.tag == 'amt_occlusion': + new_track.amt_occs[frame_idx,0] = float(pose_info.text) + has_amt = True + elif pose_info.tag == 
'amt_occlusion_kf':
+                                new_track.amt_occs[frame_idx,1] = float(pose_info.text)
+                                has_amt = True
+                            elif pose_info.tag == 'amt_border_l':
+                                new_track.amt_borders[frame_idx,0] = float(pose_info.text)
+                                has_amt = True
+                            elif pose_info.tag == 'amt_border_r':
+                                new_track.amt_borders[frame_idx,1] = float(pose_info.text)
+                                has_amt = True
+                            elif pose_info.tag == 'amt_border_kf':
+                                new_track.amt_borders[frame_idx,2] = float(pose_info.text)
+                                has_amt = True
                             else:
-                                raise ValueError('unexpected tag in poses item: {0}!'.format(poseInfo.tag))
-                            frameIdx += 1
+                                raise ValueError('unexpected tag in poses item: {0}!'.format(pose_info.tag))
+                            frame_idx += 1
                     else:
                         raise ValueError('unexpected pose info: {0}!'.format(pose.tag))
             elif info.tag == 'finished':
-                isFinished = True
+                is_finished = True
             else:
                 raise ValueError('unexpected tag in tracklets: {0}!'.format(info.tag))
         #end: for all fields in current tracklet
         # some final consistency checks on new tracklet
-        if not isFinished:
-            warn('tracklet {0} was not finished!'.format(trackletIdx))
-        if newTrack.nFrames is None:
-            warn('tracklet {0} contains no information!'.format(trackletIdx))
-        elif frameIdx != newTrack.nFrames:
-            warn('tracklet {0} is supposed to have {1} frames, but perser found {1}!'.format(trackletIdx, newTrack.nFrames, frameIdx))
-        if np.abs(newTrack.rots[:,:2]).sum() > 1e-16:
+        if not is_finished:
+            warn('tracklet {0} was not finished!'.format(tracklet_idx))
+        if new_track.n_frames is None:
+            warn('tracklet {0} contains no information!'.format(tracklet_idx))
+        elif frame_idx != new_track.n_frames:
+            warn('tracklet {0} is supposed to have {1} frames, but parser found {2}!'.format(tracklet_idx, new_track.n_frames, frame_idx))
+        if np.abs(new_track.rots[:,:2]).sum() > 1e-16:
             warn('track contains rotation other than yaw!')
-        # if amtOccs / amtBorders are not set, set them to None
-        if not hasAmt:
-            newTrack.amtOccs = None
-            newTrack.amtBorders = None
+        # if amt_occs / amt_borders are not set, set them to None
+        if not has_amt:
+            new_track.amt_occs = None
+            new_track.amt_borders = None
         # add new tracklet to list
-        tracklets.append(newTrack)
-        trackletIdx += 1
+        tracklets.append(new_track)
+        tracklet_idx += 1
     else:
         raise ValueError('unexpected tracklet info')
 #end: for tracklet list items
-    print('Loaded', trackletIdx, 'tracklets.')
+    print('Loaded', tracklet_idx, 'tracklets.')
     # final consistency check
-    if trackletIdx != nTracklets:
-        warn('according to xml information the file has {0} tracklets, but parser found {1}!'.format(nTracklets, trackletIdx))
+    if tracklet_idx != n_tracklets:
+        warn('according to xml information the file has {0} tracklets, but parser found {1}!'.format(n_tracklets, tracklet_idx))
     return tracklets
 #end: function parseXML
@@ -256,7 +253,7 @@ def dump_frames_text_from_tracklets(n_frames, xml_path, gt_file_path):
     contains coordinates of bounding box vertices for each object in the frame, and the second array contains object types as strings.
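
A short usage sketch for the renamed parser, following the iteration contract spelled out in the Tracklet docstring (the path is illustrative):

    from parse_tracklet_xml import parse_xml

    tracklets = parse_xml("/path/to/2011_09_26_drive_0005_sync/tracklet_labels.xml")
    for tracklet in tracklets:
        h, w, l = tracklet.size  # KITTI stores height, width, length
        for (translation, rotation, state, occlusion, truncation,
             amt_occlusion, amt_borders, absolute_frame_number) in tracklet:
            yaw = rotation[2]  # rx and ry are expected to be zero for KITTI tracklets
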
""" - tracklets = parseXML(xml_path) + tracklets = parse_xml(xml_path) frame_num = {} #1 frame_track_id = {} #1 @@ -281,19 +278,19 @@ def dump_frames_text_from_tracklets(n_frames, xml_path, gt_file_path): # this part is inspired by kitti object development kit matlab code: computeBox3D h, w, l = tracklet.size # loop over all data in tracklet - for translation, rotation, state, occlusion, truncation, amtOcclusion, amtBorders, absoluteFrameNumber in tracklet: + for translation, rotation, state, occlusion, truncation, amt_occlusion, amt_borders, absolute_frame_number in tracklet: yaw = rotation[2] # other rotations are supposedly 0 assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!' # concate data - frame_num[absoluteFrameNumber] = frame_num[absoluteFrameNumber] + [absoluteFrameNumber] - frame_track_id[absoluteFrameNumber] = frame_track_id[absoluteFrameNumber] + [i] - frame_obj_type[absoluteFrameNumber] = frame_obj_type[absoluteFrameNumber] + [tracklet.objectType] - frame_truncated[absoluteFrameNumber] = frame_truncated[absoluteFrameNumber] + [truncation] - frame_occluded[absoluteFrameNumber] = frame_occluded[absoluteFrameNumber] + [occlusion[0]] - frame_dimensions[absoluteFrameNumber] = frame_dimensions[absoluteFrameNumber] + [np.array([l, w, h])] - frame_location[absoluteFrameNumber] = frame_location[absoluteFrameNumber] + [np.array(translation)] - frame_rotation_yaw[absoluteFrameNumber] = frame_rotation_yaw[absoluteFrameNumber] + [yaw] + frame_num[absolute_frame_number] = frame_num[absolute_frame_number] + [absolute_frame_number] + frame_track_id[absolute_frame_number] = frame_track_id[absolute_frame_number] + [i] + frame_obj_type[absolute_frame_number] = frame_obj_type[absolute_frame_number] + [tracklet.object_type] + frame_truncated[absolute_frame_number] = frame_truncated[absolute_frame_number] + [truncation] + frame_occluded[absolute_frame_number] = frame_occluded[absolute_frame_number] + [occlusion[0]] + frame_dimensions[absolute_frame_number] = frame_dimensions[absolute_frame_number] + [np.array([l, w, h])] + frame_location[absolute_frame_number] = frame_location[absolute_frame_number] + [np.array(translation)] + frame_rotation_yaw[absolute_frame_number] = frame_rotation_yaw[absolute_frame_number] + [yaw] gt_text_file = os.path.join(gt_file_path) file = open(gt_text_file, 'w') From ff4acdebf416c4f279c130087c55ab29d7ab71ee Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Mon, 29 Oct 2018 16:26:16 +0900 Subject: [PATCH 05/16] Refactor codes --- .../benchmark/evaluate_tracking.py | 60 ++++++++++--------- .../benchmark/parse_tracklet_xml.py | 2 - 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py index 3cb768ed623..1563a05ea53 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import sys,os,copy,math from munkres import Munkres from collections import defaultdict @@ -13,9 +15,9 @@ from rect import Rect from parse_tracklet_xml import dump_frames_text_from_tracklets -class tData: +class ObjectData: """ - Utility class to load data. + Utility class to load object data. 
""" def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\ @@ -168,7 +170,7 @@ def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): Loads detections in KITTI format from textfiles. """ # construct objectDetections object to hold detection data - t_data = tData() + object_data = ObjectData() data = [] eval_3d = True eval_2d = False @@ -191,48 +193,48 @@ def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): # print(fields[0]) # get fields from table - t_data.frame = int(float(fields[0])) # frame - t_data.track_id = int(float(fields[1])) # id - t_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...] - t_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2] - t_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2] - t_data.obs_angle = float(fields[5]) # observation angle [rad] - t_data.x1 = float(fields[6]) # left [px] - t_data.y1 = float(fields[7]) # top [px] - t_data.x2 = float(fields[8]) # right [px] - t_data.y2 = float(fields[9]) # bottom [px] - t_data.h = float(fields[10]) # height [m] - t_data.w = float(fields[11]) # width [m] - t_data.l = float(fields[12]) # length [m] - t_data.X = float(fields[13]) # X [m] - t_data.Y = float(fields[14]) # Y [m] - t_data.Z = float(fields[15]) # Z [m] - t_data.yaw = float(fields[16]) # yaw angle [rad] + object_data.frame = int(float(fields[0])) # frame + object_data.track_id = int(float(fields[1])) # id + object_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...] + object_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2] + object_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2] + object_data.obs_angle = float(fields[5]) # observation angle [rad] + object_data.x1 = float(fields[6]) # left [px] + object_data.y1 = float(fields[7]) # top [px] + object_data.x2 = float(fields[8]) # right [px] + object_data.y2 = float(fields[9]) # bottom [px] + object_data.h = float(fields[10]) # height [m] + object_data.w = float(fields[11]) # width [m] + object_data.l = float(fields[12]) # length [m] + object_data.X = float(fields[13]) # X [m] + object_data.Y = float(fields[14]) # Y [m] + object_data.Z = float(fields[15]) # Z [m] + object_data.yaw = float(fields[16]) # yaw angle [rad] # do not consider objects marked as invalid # do not consider objects marked as invalid - if t_data.track_id is -1 and t_data.obj_type != "DontCare": + if object_data.track_id is -1 and object_data.obj_type != "DontCare": continue - idx = t_data.frame + idx = object_data.frame - if t_data.frame >= self.n_frames: + if object_data.frame >= self.n_frames: continue try: - id_frame = (t_data.frame,t_data.track_id) + id_frame = (object_data.frame,object_data.track_id) if id_frame in id_frame_cache and not loading_groundtruth: - print("track ids are not unique for frame %d" % (t_data.frame)) - print("track id %d occured at least twice for this frame" % t_data.track_id) + print("track ids are not unique for frame %d" % (object_data.frame)) + print("track id %d occured at least twice for this frame" % object_data.track_id) print("Exiting...") #continue # this allows to evaluate non-unique result files return False id_frame_cache.append(id_frame) - f_data[t_data.frame].append(copy.copy(t_data)) + f_data[object_data.frame].append(copy.copy(object_data)) except: raise - if t_data.track_id not in ids and t_data.obj_type!="DontCare": - ids.append(t_data.track_id) + if object_data.track_id not in ids and object_data.obj_type!="DontCare": + 
ids.append(object_data.track_id) n_trajectories +=1 f.close() diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py index 85251471817..f0a2e4dee67 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from sys import argv as cmdLineArgs from xml.etree.ElementTree import ElementTree import numpy as np From 6b5bfe1c73d4a340db2a7f415db119f9d9c5262b Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Mon, 29 Oct 2018 18:33:10 +0900 Subject: [PATCH 06/16] Refactor codes --- .../benchmark/evaluate_tracking.py | 142 ++++-------------- 1 file changed, 29 insertions(+), 113 deletions(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py index 1563a05ea53..16587d4848c 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import sys,os,copy,math from munkres import Munkres @@ -22,7 +22,7 @@ class ObjectData: def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\ obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\ - X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1): + cx=-1000,cy=-1000,cz=-1000,yaw=-10,score=-1000,track_id=-1): """ Constructor, initializes the object given the parameters. 
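
For reference, _load_data expects one object per line with 17 space-separated fields in the order given by the comment above; an illustrative record (all values invented):

    # frame id type trunc occ alpha x1 y1 x2 y2 h    w    l    cx   cy   cz    yaw
    line = "12 3 car 0 0 -10 -1 -1 -1 -1 1.50 1.60 3.80 5.20 1.10 -0.80 1.57"
    fields = line.split(" ")
    assert len(fields) == 17  # anything else misaligns the float() conversions in _load_data
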
""" @@ -41,9 +41,9 @@ def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\ self.w = w self.h = h self.l = l - self.X = X - self.Y = Y - self.Z = Z + self.cx = cx + self.cy = cy + self.cz = cz self.yaw = yaw self.score = score self.ignored = False @@ -93,8 +93,6 @@ def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_trunca self.n_trs = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE self.n_itr = 0 # number of ignored tracker detections self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE - # self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored - # self.n_tr_trajectories = 0 self.n_tr_seq = [] self.MOTA = 0 self.MOTP = 0 @@ -127,10 +125,6 @@ def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_trunca self.max_truncation = max_truncation # maximum truncation of an object for evaluation self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation self.min_height = min_height # minimum height of an object for evaluation - self.n_sample_points = 500 - - # this should be enough to hold all groundtruth trajectories - # is expanded if necessary and reduced in any case def load_tracked_data(self, result_file_path): """ @@ -171,26 +165,17 @@ def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): """ # construct objectDetections object to hold detection data object_data = ObjectData() - data = [] - eval_3d = True - eval_2d = False - n_trajectories = 0 - - i = 0 - f = open(file_path, "r") - + file = open(file_path, "r") f_data = [[] for x in range(self.n_frames)] # current set has only 1059 entries, sufficient length is checked anyway ids = [] - n_in_seq = 0 id_frame_cache = [] - for line in f: + for line in file: # KITTI tracking benchmark data format: - # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry) + # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,cx,cy,cz,yaw) line = line.strip() fields = line.split(" ") - # print(fields[0]) # get fields from table object_data.frame = int(float(fields[0])) # frame @@ -206,9 +191,9 @@ def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): object_data.h = float(fields[10]) # height [m] object_data.w = float(fields[11]) # width [m] object_data.l = float(fields[12]) # length [m] - object_data.X = float(fields[13]) # X [m] - object_data.Y = float(fields[14]) # Y [m] - object_data.Z = float(fields[15]) # Z [m] + object_data.cx = float(fields[13]) # cx [m] + object_data.cy = float(fields[14]) # cy [m] + object_data.cz = float(fields[15]) # cz [m] object_data.yaw = float(fields[16]) # yaw angle [rad] # do not consider objects marked as invalid @@ -236,7 +221,7 @@ def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): if object_data.track_id not in ids and object_data.obj_type!="DontCare": ids.append(object_data.track_id) n_trajectories +=1 - f.close() + file.close() if not loading_groundtruth: @@ -293,22 +278,8 @@ def compute_3rd_party_metrics(self): seq_trajectories = defaultdict(list) seq_ignored = defaultdict(list) - # statistics over the current sequence, check the corresponding - # variable comments in __init__ to get their meaning - seqtp = 0 - seqitp = 0 - seqfn = 0 - seqifn = 0 - seqfp = 0 - seqigt = 0 - seqitr = 0 - last_frame_ids = [[],[]] - n_gts = 0 - n_trs = 0 - - for i_frame in tqdm(range(len(seq_gt))): frame_gts = 
seq_gt[i_frame] frame_dcs = seq_dc[i_frame] @@ -318,9 +289,6 @@ def compute_3rd_party_metrics(self): self.n_gt += len(frame_gts) self.n_tr += len(frame_results) - n_gts += len(frame_gts) - n_trs += len(frame_results) - # use hungarian method to associate, using boxoverlap 0..1 as cost # build cost matrix cost_matrix = [] @@ -336,15 +304,12 @@ def compute_3rd_party_metrics(self): cost_row = [] # loop over tracked objects in one frame for result in frame_results: - # overlap == 1 is cost ==0 + # overlap == 1 means cost == 0 # Rect(cx, cy, l, w, angle) - # todo might be wrong: euclidean cluster messed up calculating w, l - # Better implementation if switching w and l based on box's quartenion - # and self.min_overlap might be too big - r1 = Rect(gt.X, gt.Y, gt.l, gt.w, gt.yaw) - r2 = Rect(result.X, result.Y, result.l, result.w, result.yaw) + r1 = Rect(gt.cx, gt.cy, gt.l, gt.w, gt.yaw) + r2 = Rect(result.cx, result.cy, result.l, result.w, result.yaw) iou = r1.intersection_over_union(r2) - cost = 1- iou + cost = 1 - iou # gating for boxoverlap if cost<=self.min_overlap: cost_row.append(cost) @@ -373,7 +338,7 @@ def compute_3rd_party_metrics(self): # be subtracted from tmpc for MODP computation # mapping for tracker ids and ground truth ids - for row,col in association_matrix: + for row, col in association_matrix: # apply gating on boxoverlap c = cost_matrix[row][col] if c < max_cost: @@ -390,6 +355,7 @@ def compute_3rd_party_metrics(self): self.tp += 1 tmptp += 1 else: + # wrong data association frame_gts[row].tracker = -1 self.fn += 1 tmpfn += 1 @@ -458,15 +424,6 @@ def compute_3rd_party_metrics(self): tmpfp += len(frame_results) - tmptp - nignoredtracker - nignoredtp self.fp += len(frame_results) - tmptp - nignoredtracker - nignoredtp - # update sequence data - seqtp += tmptp - seqitp += nignoredtp - seqfp += tmpfp - seqfn += tmpfn - seqifn += nignoredfn - seqigt += nignoredfn + nignoredtp - seqitr += nignoredtracker - # sanity checks # - the number of true positives minues ignored true positives # should be greater or equal to 0 @@ -526,21 +483,13 @@ def compute_3rd_party_metrics(self): # save current index last_frame_ids = frame_ids - # todo modify here motp - # compute MOTP_t - MODP_t = 1 - if tmptp!=0: - MODP_t = tmpc/float(tmptp) - self.MODP_t.append(MODP_t) - # compute MT/PT/ML, fragments, idswitches for all groundtruth trajectories n_ignored_tr_total = 0 - # for seq_idx, (seq_trajectories,seq_ignored) in enumerate(zip(self.gt_trajectories, self.ign_trajectories)): if len(seq_trajectories)==0: print("Error: There is no trajectories data") return - tmpMT, tmpML, tmpPT, tmpId_switches, tmpFragments = [0]*5 + tmpMT, tmpML, tmpPT, tmpId_switches = [0]*4 n_ignored_tr = 0 for g, ign_g in zip(seq_trajectories.values(), seq_ignored.values()): # all frames of this gt trajectory are ignored @@ -567,14 +516,12 @@ def compute_3rd_party_metrics(self): tmpId_switches += 1 self.id_switches += 1 if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1: - tmpFragments += 1 self.fragments += 1 if g[f] != -1: tracked += 1 last_id = g[f] # handle last frame; tracked state is handled in for loop (g[f]!=-1) if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]: - tmpFragments += 1 self.fragments += 1 # compute MT/PT/ML @@ -598,44 +545,13 @@ def compute_3rd_party_metrics(self): self.PT /= float(self.n_gt_trajectories-n_ignored_tr_total) self.ML /= float(self.n_gt_trajectories-n_ignored_tr_total) - # precision/recall etc. 
+ # precision/recall if (self.fp+self.tp)==0 or (self.tp+self.fn)==0: self.recall = 0. self.precision = 0. else: self.recall = self.tp/float(self.tp+self.fn) self.precision = self.tp/float(self.fp+self.tp) - if (self.recall+self.precision)==0: - self.F1 = 0. - else: - self.F1 = 2.*(self.precision*self.recall)/(self.precision+self.recall) - if self.n_frames==0: - self.FAR = "n/a" - else: - self.FAR = self.fp/float(self.n_frames) - - # compute CLEARMOT - if self.n_gt==0: - self.MOTA = -float("inf") - self.MODA = -float("inf") - else: - self.MOTA = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt) - self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt) - if self.tp==0: - self.MOTP = float("inf") - else: - self.MOTP = self.total_cost / float(self.tp) - if self.n_gt!=0: - if self.id_switches==0: - self.MOTAL = 1 - (self.fn + self.fp + self.id_switches)/float(self.n_gt) - else: - self.MOTAL = 1 - (self.fn + self.fp + math.log10(self.id_switches))/float(self.n_gt) - else: - self.MOTAL = -float("inf") - if self.n_frames==0: - self.MODP = "n/a" - else: - self.MODP = sum(self.MODP_t)/float(self.n_frames) return True def _print_entry(self, key, val,width=(70,10)): @@ -663,16 +579,16 @@ def create_summary(self): summary = "" summary += "tracking evaluation summary".center(80,"=") + "\n" - summary += self._print_entry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n" - summary += self._print_entry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n" - summary += self._print_entry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n" - summary += self._print_entry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n" - summary += self._print_entry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n" - summary += "\n" + # summary += self._print_entry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n" + # summary += self._print_entry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n" + # summary += self._print_entry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n" + # summary += self._print_entry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n" + # summary += self._print_entry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n" + # summary += "\n" summary += self._print_entry("Recall", self.recall) + "\n" - summary += self._print_entry("Precision", self.precision) + "\n" - summary += self._print_entry("F1", self.F1) + "\n" - summary += self._print_entry("False Alarm Rate", self.FAR) + "\n" + # summary += self._print_entry("Precision", self.precision) + "\n" + # summary += self._print_entry("F1", self.F1) + "\n" + # summary += self._print_entry("False Alarm Rate", self.FAR) + "\n" summary += "\n" summary += self._print_entry("Mostly Tracked", self.MT) + "\n" summary += self._print_entry("Partly Tracked", self.PT) + "\n" From f09c5e761b6c2f4a3c7b7df0348a87f9787b98a7 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Mon, 29 Oct 2018 18:52:50 +0900 Subject: [PATCH 07/16] Refactor codes --- .../benchmark/evaluate_tracking.py | 96 +++++-------------- 1 file changed, 22 insertions(+), 74 deletions(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py index 16587d4848c..97e2220ae12 100644 --- 
a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py @@ -60,15 +60,11 @@ def __str__(self): class TrackingEvaluation(object): - """ tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall) - MOTA - Multi-object tracking accuracy in [0,100] - MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D) - MOTAL - Multi-object tracking accuracy in [0,100] with log10(id-switches) - + """ tracking statistics (CLEAR MOT, id-switches, fragments, mostly_lost/partialy_tracked/mostly_tracked, precision/recall) id-switches - number of id switches fragments - number of fragmentations - MT, PT, ML - number of mostly tracked, partially tracked and mostly lost trajectories + mostly_tracked, partialy_tracked, mostly_lost - number of mostly tracked, partially tracked and mostly lost trajectories recall - recall = percentage of detected targets precision - precision = percentage of correctly detected targets @@ -85,26 +81,11 @@ def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_trunca # statistics and numbers for evaluation self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives self.n_igt = 0 # number of ignored ground truth detections - self.n_gts = [] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE - self.n_igts = [] # number of ground ignored truth detections PER SEQUENCE self.n_gt_trajectories = 0 - self.n_gt_seq = [] self.n_tr = 0 # number of tracker detections minus ignored tracker detections - self.n_trs = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE self.n_itr = 0 # number of ignored tracker detections - self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE - self.n_tr_seq = [] - self.MOTA = 0 - self.MOTP = 0 - self.MOTAL = 0 - self.MODA = 0 - self.MODP = 0 - self.MODP_t = [] self.recall = 0 self.precision = 0 - self.F1 = 0 - self.FAR = 0 - self.total_cost = 0 self.itp = 0 # number of ignored true positives self.tp = 0 # number of true positives including ignored true positives! 
self.fn = 0 # number of false negatives WITHOUT ignored false negatives @@ -113,13 +94,12 @@ def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_trunca # a bit tricky, the number of ignored false negatives and ignored true positives # is subtracted, but if both tracker detection and ground truth detection # are ignored this number is added again to avoid double counting - self.fps = [] # above PER SEQUENCE self.mme = 0 self.fragments = 0 self.id_switches = 0 - self.MT = 0 - self.PT = 0 - self.ML = 0 + self.mostly_tracked = 0 + self.partialy_tracked = 0 + self.mostly_lost = 0 self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics self.max_truncation = max_truncation # maximum truncation of an object for evaluation @@ -264,7 +244,7 @@ def compute_3rd_party_metrics(self): - Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics MOTA, MOTAL, MOTP - Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows - MT/PT/ML + mostly_tracked/partialy_tracked/mostly_lost """ # construct Munkres object for Hungarian Method association hm = Munkres() @@ -327,15 +307,10 @@ def compute_3rd_party_metrics(self): # associate association_matrix = hm.compute(cost_matrix) - # tmp variables for sanity checks and MODP computation + # tmp variables for sanity checks tmptp = 0 tmpfp = 0 tmpfn = 0 - tmpc = 0 # this will sum up the overlaps for all true positives - tmpcs = [0]*len(frame_gts) # this will save the overlaps for all true positives - # the reason is that some true positives might be ignored - # later such that the corrsponding overlaps can - # be subtracted from tmpc for MODP computation # mapping for tracker ids and ground truth ids for row, col in association_matrix: @@ -346,9 +321,6 @@ def compute_3rd_party_metrics(self): frame_ids[1][row] = frame_results[col].track_id frame_results[col].valid = True frame_gts[row].distance = c - self.total_cost += 1-c - tmpc += 1-c - tmpcs[row] = 1-c seq_trajectories[frame_gts[row].track_id][-1] = frame_results[col].track_id # true positives are only valid associations @@ -372,9 +344,6 @@ def compute_3rd_party_metrics(self): # check for ignored FN/TP (truncation or neighboring object class) nignoredfn = 0 # the number of ignored false negatives nignoredtp = 0 # the number of ignored true positives - # nignoredpairs = 0 # the number of ignored pairs, i.e. 
a true positive
-        # which is ignored but where the associated tracker
-        # detection has already been ignored
         gi = 0
         for gt in frame_gts:
@@ -390,9 +359,6 @@ def compute_3rd_party_metrics(self):
                         gt.ignored = True
                         nignoredtp += 1
-                        # for computing MODP, the overlaps from ignored detections
-                        # are subtracted
-                        tmpc -= tmpcs[gi]
                 gi += 1
             # the below might be confusion, check the comments in __init__
@@ -420,7 +386,6 @@ def compute_3rd_party_metrics(self):
             self.ifn += nignoredfn
             # false positives = tracker bboxes - associated tracker bboxes
-            # mismatches (mme_t)
             tmpfp += len(frame_results) - tmptp - nignoredtracker - nignoredtp
             self.fp += len(frame_results) - tmptp - nignoredtracker - nignoredtp
@@ -474,22 +439,19 @@ def compute_3rd_party_metrics(self):
                     if tid != lid and lid != -1 and tid != -1:
                         if frame_gts[i].truncation<self.max_truncation:
                             tmpId_switches += 1
                             self.id_switches += 1
                     if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1:
                         self.fragments += 1
                     if g[f] != -1:
                         tracked += 1
                         last_id = g[f]
                 # handle last frame; tracked state is handled in for loop (g[f]!=-1)
                 if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]:
                     self.fragments += 1
-                # compute MT/PT/ML
+                # compute mostly_tracked/partialy_tracked/mostly_lost
                 tracking_ratio = tracked / float(len(g) - sum(ign_g))
                 if tracking_ratio > 0.8:
-                    tmpMT += 1
-                    self.MT += 1
+                    self.mostly_tracked += 1
                 elif tracking_ratio < 0.2:
-                    tmpML += 1
-                    self.ML += 1
+                    self.mostly_lost += 1
                 else: # 0.2 <= tracking_ratio <= 0.8
-                    tmpPT += 1
-                    self.PT += 1
+                    self.partialy_tracked += 1
             if (self.n_gt_trajectories-n_ignored_tr_total)==0:
-                self.MT = 0.
-                self.PT = 0.
-                self.ML = 0.
+                self.mostly_tracked = 0.
+                self.partialy_tracked = 0.
+                self.mostly_lost = 0.
             else:
-                self.MT /= float(self.n_gt_trajectories-n_ignored_tr_total)
-                self.PT /= float(self.n_gt_trajectories-n_ignored_tr_total)
-                self.ML /= float(self.n_gt_trajectories-n_ignored_tr_total)
+                self.mostly_tracked /= float(self.n_gt_trajectories-n_ignored_tr_total)
+                self.partialy_tracked /= float(self.n_gt_trajectories-n_ignored_tr_total)
+                self.mostly_lost /= float(self.n_gt_trajectories-n_ignored_tr_total)
             # precision/recall
             if (self.fp+self.tp)==0 or (self.tp+self.fn)==0:
                 self.recall = 0.
                 self.precision = 0.
             else:
                 self.recall = self.tp/float(self.tp+self.fn)
                 self.precision = self.tp/float(self.fp+self.tp)
@@ -579,20 +536,11 @@ def create_summary(self):
         summary = ""
         summary += "tracking evaluation summary".center(80,"=") + "\n"
-        # summary += self._print_entry("Multiple Object Tracking Accuracy (MOTA)", self.MOTA) + "\n"
-        # summary += self._print_entry("Multiple Object Tracking Precision (MOTP)", self.MOTP) + "\n"
-        # summary += self._print_entry("Multiple Object Tracking Accuracy (MOTAL)", self.MOTAL) + "\n"
-        # summary += self._print_entry("Multiple Object Detection Accuracy (MODA)", self.MODA) + "\n"
-        # summary += self._print_entry("Multiple Object Detection Precision (MODP)", self.MODP) + "\n"
-        # summary += "\n"
         summary += self._print_entry("Recall", self.recall) + "\n"
-        # summary += self._print_entry("Precision", self.precision) + "\n"
-        # summary += self._print_entry("F1", self.F1) + "\n"
-        # summary += self._print_entry("False Alarm Rate", self.FAR) + "\n"
         summary += "\n"
-        summary += self._print_entry("Mostly Tracked", self.MT) + "\n"
-        summary += self._print_entry("Partly Tracked", self.PT) + "\n"
-        summary += self._print_entry("Mostly Lost", self.ML) + "\n"
+        summary += self._print_entry("Mostly Tracked", self.mostly_tracked) + "\n"
+        summary += self._print_entry("Partly Tracked", self.partialy_tracked) + "\n"
+        summary += self._print_entry("Mostly Lost", self.mostly_lost) + "\n"
         summary += "\n"
         summary += self._print_entry("True Positives", self.tp) + "\n"
         summary += self._print_entry("Ignored True Positives", self.itp) + "\n"
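
A worked instance of the trajectory classification above (numbers invented): a ground-truth trajectory spanning 10 frames, 2 of them ignored, matched by some tracker id in 7 of the 8 counted frames:

    tracked, len_g, n_ignored = 7, 10, 2
    tracking_ratio = tracked / float(len_g - n_ignored)  # 0.875
    # ratio > 0.8         -> mostly_tracked
    # ratio < 0.2         -> mostly_lost
    # 0.2 <= ratio <= 0.8 -> partialy_tracked (spelling as in the code)
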
From 55d16f4e1633cfe6d5b3e1d6a7bbe9ccafdaa28c Mon Sep 17 00:00:00 2001
From: Kosuke Murakami
Date: Mon, 29 Oct 2018 19:16:36 +0900
Subject: [PATCH 08/16] Refactor codes

---
 .../benchmark/evaluate_tracking.py | 31 +++++++++++++------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py
index 97e2220ae12..2a711461afc 100644
--- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py
+++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py
@@ -1,6 +1,9 @@
 #!/usr/bin/env python3

-import sys,os,copy,math
+import sys
+import os
+import math
+import copy
 from munkres import Munkres
 from collections import defaultdict
 from time import gmtime, strftime
@@ -97,9 +100,9 @@ def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_trunca
         self.mme = 0
         self.fragments = 0
         self.id_switches = 0
-        self.mostly_tracked = 0 
-        self.partialy_tracked = 0 
-        self.mostly_lost = 0 
+        self.mostly_tracked = 0
+        self.partialy_tracked = 0
+        self.mostly_lost = 0

         self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics
         self.max_truncation = max_truncation # maximum truncation of an object for evaluation
@@ -647,13 +650,20 @@ def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir):
     print("Thank you for participating in our benchmark!")
     return True

-#########################################################################
-# entry point of evaluation script
-if __name__ == "__main__":
-    # TODO:python argument
-    base_dir = "/home/kosuke/hdd/kitti/2011_09_26/2011_09_26_drive_0005_sync"
+def main(argv):
+    if len(argv) == 0:
+        print("You have to set a path to the kitti data directory")
+        print("Usage: python3 evaluate_tracking.py /home/hoge/2011_09_26/2011_09_26_drive_0005_sync")
+        return
+
+    base_dir = argv[0]
+    if not os.path.exists(base_dir):
+        print ("Error: you need to set a valid path")
+        return
+
     benchmark_dir_name = get_benchmark_dir_name()
     benchmark_dir = os.path.join(base_dir, benchmark_dir_name)
+    print (benchmark_dir)

     result_file_name = "benchmark_results.txt"
     result_file_path = os.path.join(base_dir, result_file_name)
@@ -669,3 +679,6 @@ def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir):

     dump_frames_text_from_tracklets(velo_data_num, tracklet_path, gt_file_path)
     success = evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir)
+
+if __name__ == "__main__":
+    main(sys.argv[1:])

From 355a62ac64c3e474cd8d551d5327d7272ad72cc0 Mon Sep 17 00:00:00 2001
From: Kosuke Murakami
Date: Tue, 30 Oct 2018 11:23:35 +0900
Subject: [PATCH 09/16] Refactor codes

---
 .../include/imm_ukf_pda.h             |  1 +
 .../launch/benchmark.launch           |  2 +
 .../nodes/imm_ukf_pda/imm_ukf_pda.cpp | 40 +------------------
 3 files changed, 4 insertions(+), 39 deletions(-)

diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h
index 95b1f3de400..ab95ebb4afd 100644
--- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h
+++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h
@@ -88,6 +88,7 @@ class ImmUkfPda
   // whether if benchmarking tracking result
tracking result bool is_benchmark_; int frame_count_; + std::string kitti_data_dir_; // for benchmark std::string result_file_path_; diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch index 31b52e27c71..0e7f12f3b9b 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch @@ -101,6 +101,7 @@ + @@ -119,6 +120,7 @@ + diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp index 3fa75bae850..f1475445695 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp @@ -53,48 +53,10 @@ ImmUkfPda::ImmUkfPda() private_nh_.param("use_sukf", use_sukf_, false); private_nh_.param("is_debug", is_debug_, false); private_nh_.param("is_benchmark", is_benchmark_, false); - - // std::string kitti_data_dir; - // private_nh_.getParam("kitti_data_dir", kitti_data_dir); + private_nh_.param("kitti_data_dir", kitti_data_dir_, "/home/hoge/kitti/2011_09_26/2011_09_26_drive_0005_sync/"); if(is_benchmark_) { - //TODO use rosparam insteasd of harcoded path - std::string kitti_data_dir = "/home/kosuke/hdd/kitti/2011_09_26/2011_09_26_drive_0005_sync/"; - - //TODO: make function fot get current time file path and think about when to use that funciton - // benchmarked time - time_t t = time(nullptr); - const tm* lt = localtime(&t); - - std::stringstream yyyy_mmdd_hhmmss; - yyyy_mmdd_hhmmss<<"20"; - yyyy_mmdd_hhmmss<tm_year-100; - yyyy_mmdd_hhmmss<<"_"; - yyyy_mmdd_hhmmss<tm_mon+1; - yyyy_mmdd_hhmmss<tm_mday; - yyyy_mmdd_hhmmss<<"_"; - yyyy_mmdd_hhmmss<tm_hour; - if(lt->tm_min < 10) - { - yyyy_mmdd_hhmmss<<"0"; - yyyy_mmdd_hhmmss<tm_min; - } - else - { - yyyy_mmdd_hhmmss<tm_min; - } - if(lt->tm_sec < 10) - { - yyyy_mmdd_hhmmss<<"0"; - yyyy_mmdd_hhmmss<tm_sec; - } - else - { - yyyy_mmdd_hhmmss<tm_sec; - } - - result_file_path_ = kitti_data_dir +yyyy_mmdd_hhmmss.str() + ".txt"; result_file_path_ = kitti_data_dir + "benchmark_results.txt"; remove(result_file_path_.c_str()); } From c47f8c1495a9c8be23179ee55bea6faef4eba4c2 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Tue, 30 Oct 2018 15:03:59 +0900 Subject: [PATCH 10/16] Add comment --- .../lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp index f1475445695..d923c369e1c 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp @@ -52,9 +52,10 @@ ImmUkfPda::ImmUkfPda() private_nh_.param("prevent_explosion_thres", 
prevent_explosion_thres_, 1000); private_nh_.param("use_sukf", use_sukf_, false); private_nh_.param("is_debug", is_debug_, false); + + // rosparam for benchmark private_nh_.param("is_benchmark", is_benchmark_, false); private_nh_.param("kitti_data_dir", kitti_data_dir_, "/home/hoge/kitti/2011_09_26/2011_09_26_drive_0005_sync/"); - if(is_benchmark_) { result_file_path_ = kitti_data_dir + "benchmark_results.txt"; From af2f35aa16980d20880840d52887de55b34b72c0 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Tue, 30 Oct 2018 15:07:25 +0900 Subject: [PATCH 11/16] Fix bug --- .../lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp index d923c369e1c..7fe0b07b45b 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp @@ -58,7 +58,7 @@ ImmUkfPda::ImmUkfPda() private_nh_.param("kitti_data_dir", kitti_data_dir_, "/home/hoge/kitti/2011_09_26/2011_09_26_drive_0005_sync/"); if(is_benchmark_) { - result_file_path_ = kitti_data_dir + "benchmark_results.txt"; + result_file_path_ = kitti_data_dir_ + "benchmark_results.txt"; remove(result_file_path_.c_str()); } } From 7810a3d07cb0c58a16c3c495ea1578b25b9e63bc Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Tue, 30 Oct 2018 15:21:41 +0900 Subject: [PATCH 12/16] Separate benchmark scripts to another repo --- .../benchmark/evaluate_tracking.py | 684 ------------------ .../benchmark/parse_tracklet_xml.py | 324 --------- .../lidar_imm_ukf_pda_track/benchmark/rect.py | 29 - .../launch/benchmark.launch | 135 ---- 4 files changed, 1172 deletions(-) delete mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py delete mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py delete mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py delete mode 100644 ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py deleted file mode 100644 index 2a711461afc..00000000000 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/evaluate_tracking.py +++ /dev/null @@ -1,684 +0,0 @@ -#!/usr/bin/env python3 - -import sys -import os -import math -import copy -from munkres import Munkres -from collections import defaultdict -from time import gmtime, strftime -try: - from ordereddict import OrderedDict # can be installed using pip -except: - from collections import OrderedDict # only included from python 2.7 on -from tqdm import tqdm -from shutil import copyfile -import datetime - -from rect import Rect -from parse_tracklet_xml import dump_frames_text_from_tracklets - -class ObjectData: - """ - Utility class to load object
data. - """ - - def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\ - obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\ - cx=-1000,cy=-1000,cz=-1000,yaw=-10,score=-1000,track_id=-1): - """ - Constructor, initializes the object given the parameters. - """ - - # init object data - self.frame = frame - self.track_id = track_id - self.obj_type = obj_type - self.truncation = truncation - self.occlusion = occlusion - self.obs_angle = obs_angle - self.x1 = x1 - self.y1 = y1 - self.x2 = x2 - self.y2 = y2 - self.w = w - self.h = h - self.l = l - self.cx = cx - self.cy = cy - self.cz = cz - self.yaw = yaw - self.score = score - self.ignored = False - self.valid = False - self.tracker = -1 - - def __str__(self): - """ - Print read data. - """ - - attrs = vars(self) - return '\n'.join("%s: %s" % item for item in attrs.items()) - - -class TrackingEvaluation(object): - """ tracking statistics (CLEAR MOT, id-switches, fragments, mostly_lost/partialy_tracked/mostly_tracked, precision/recall) - id-switches - number of id switches - fragments - number of fragmentations - - mostly_tracked, partialy_tracked, mostly_lost - number of mostly tracked, partially tracked and mostly lost trajectories - - recall - recall = percentage of detected targets - precision - precision = percentage of correctly detected targets - FAR - number of false alarms per frame - falsepositives - number of false positives (FP) - missed - number of missed targets (FN) - """ - - def __init__(self, data_num, gt_path="../training", min_overlap=0.99, max_truncation = 0, min_height = 25, max_occlusion = 2): - # data and parameter - self.n_frames = data_num - self.result_data = -1 - - # statistics and numbers for evaluation - self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives - self.n_igt = 0 # number of ignored ground truth detections - self.n_gt_trajectories = 0 - self.n_tr = 0 # number of tracker detections minus ignored tracker detections - self.n_itr = 0 # number of ignored tracker detections - self.recall = 0 - self.precision = 0 - self.itp = 0 # number of ignored true positives - self.tp = 0 # number of true positives including ignored true positives! - self.fn = 0 # number of false negatives WITHOUT ignored false negatives - self.ifn = 0 # number of ignored false negatives - self.fp = 0 # number of false positives - # a bit tricky, the number of ignored false negatives and ignored true positives - # is subtracted, but if both tracker detection and ground truth detection - # are ignored this number is added again to avoid double counting - self.mme = 0 - self.fragments = 0 - self.id_switches = 0 - self.mostly_tracked = 0 - self.partialy_tracked = 0 - self.mostly_lost = 0 - - self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics - self.max_truncation = max_truncation # maximum truncation of an object for evaluation - self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation - self.min_height = min_height # minimum height of an object for evaluation - - def load_tracked_data(self, result_file_path): - """ - Helper function to load tracker data. - """ - - try: - if(os.path.exists(result_file_path)): - if not self._load_data(result_file_path, loading_groundtruth=False): - return False - else: - print("Error: There is no result data file") - raise - except IOError: - return False - return True - - def load_ground_truth(self, gt_file_path): - """ - Helper function to load ground truth. 
- """ - - try: - if(os.path.exists(gt_file_path)): - self._load_data(gt_file_path, loading_groundtruth=True) - else: - print("Error: There is no groundtruth file") - raise - except IOError: - return False - return True - - def _load_data(self, file_path, min_score=-1000, loading_groundtruth=False): - """ - Generic loader for ground truth and tracking data. - Use load_ground_truth() or load_tracked_data() to load this data. - Loads detections in KITTI format from textfiles. - """ - # construct objectDetections object to hold detection data - object_data = ObjectData() - n_trajectories = 0 - file = open(file_path, "r") - f_data = [[] for x in range(self.n_frames)] # current set has only 1059 entries, sufficient length is checked anyway - ids = [] - id_frame_cache = [] - - for line in file: - # KITTI tracking benchmark data format: - # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,cx,cy,cz,yaw) - line = line.strip() - fields = line.split(" ") - - # get fields from table - object_data.frame = int(float(fields[0])) # frame - object_data.track_id = int(float(fields[1])) # id - object_data.obj_type = fields[2].lower() # object type [car, pedestrian, cyclist, ...] - object_data.truncation = int(float(fields[3])) # truncation [-1,0,1,2] - object_data.occlusion = int(float(fields[4])) # occlusion [-1,0,1,2] - object_data.obs_angle = float(fields[5]) # observation angle [rad] - object_data.x1 = float(fields[6]) # left [px] - object_data.y1 = float(fields[7]) # top [px] - object_data.x2 = float(fields[8]) # right [px] - object_data.y2 = float(fields[9]) # bottom [px] - object_data.h = float(fields[10]) # height [m] - object_data.w = float(fields[11]) # width [m] - object_data.l = float(fields[12]) # length [m] - object_data.cx = float(fields[13]) # cx [m] - object_data.cy = float(fields[14]) # cy [m] - object_data.cz = float(fields[15]) # cz [m] - object_data.yaw = float(fields[16]) # yaw angle [rad] - - # do not consider objects marked as invalid - # do not consider objects marked as invalid - if object_data.track_id is -1 and object_data.obj_type != "DontCare": - continue - idx = object_data.frame - - if object_data.frame >= self.n_frames: - continue - - try: - id_frame = (object_data.frame,object_data.track_id) - if id_frame in id_frame_cache and not loading_groundtruth: - print("track ids are not unique for frame %d" % (object_data.frame)) - print("track id %d occured at least twice for this frame" % object_data.track_id) - print("Exiting...") - #continue # this allows to evaluate non-unique result files - return False - id_frame_cache.append(id_frame) - f_data[object_data.frame].append(copy.copy(object_data)) - except: - raise - - if object_data.track_id not in ids and object_data.obj_type!="DontCare": - ids.append(object_data.track_id) - n_trajectories +=1 - file.close() - - - if not loading_groundtruth: - self.n_tr_trajectories=n_trajectories - self.result_data = f_data - else: - # split ground truth and DontCare areas - self.dcareas = [] - self.groundtruth = [] - s_g, s_dc = [],[] - for f in range(len(f_data)): - frame_gts = f_data[f] - g,dc = [],[] - for gg in frame_gts: - if gg.obj_type=="dontcare": - dc.append(gg) - else: - g.append(gg) - s_g.append(g) - s_dc.append(dc) - self.dcareas = s_dc - self.groundtruth = s_g - self.n_gt_trajectories=n_trajectories - return True - - def create_eval_dir(self, benchmark_dir): - """ - Creates directory to store evaluation results and data for visualization. 
- """ - - self.eval_dir = benchmark_dir - if not os.path.exists(self.eval_dir): - print ("create directory:", self.eval_dir) - os.makedirs(self.eval_dir) - print ("done") - - def compute_3rd_party_metrics(self): - """ - Computes the metrics defined in - - Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics - MOTA, MOTAL, MOTP - - Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows - mostly_tracked/partialy_tracked/mostly_lost - """ - # construct Munkres object for Hungarian Method association - hm = Munkres() - max_cost = 1e9 - - # go through all frames and associate ground truth and tracker results - # groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections - seq_gt = self.groundtruth - seq_dc = self.dcareas # don't care areas - seq_result_data = self.result_data - seq_trajectories = defaultdict(list) - seq_ignored = defaultdict(list) - - last_frame_ids = [[],[]] - - for i_frame in tqdm(range(len(seq_gt))): - frame_gts = seq_gt[i_frame] - frame_dcs = seq_dc[i_frame] - - frame_results = seq_result_data[i_frame] - # counting total number of ground truth and tracker objects - self.n_gt += len(frame_gts) - self.n_tr += len(frame_results) - - # use hungarian method to associate, using boxoverlap 0..1 as cost - # build cost matrix - cost_matrix = [] - frame_ids = [[],[]] - # loop over ground truth objects in one frame - for gt in frame_gts: - # save current ids - frame_ids[0].append(gt.track_id) - frame_ids[1].append(-1) - gt.tracker = -1 - gt.id_switch = 0 - gt.fragmentation = 0 - cost_row = [] - # loop over tracked objects in one frame - for result in frame_results: - # overlap == 1 means cost == 0 - # Rect(cx, cy, l, w, angle) - r1 = Rect(gt.cx, gt.cy, gt.l, gt.w, gt.yaw) - r2 = Rect(result.cx, result.cy, result.l, result.w, result.yaw) - iou = r1.intersection_over_union(r2) - cost = 1 - iou - # gating for boxoverlap - if cost<=self.min_overlap: - cost_row.append(cost) - else: - cost_row.append(max_cost) # = 1e9 - # return - cost_matrix.append(cost_row) - # all ground truth trajectories are initially not associated - # extend groundtruth trajectories lists (merge lists) - seq_trajectories[gt.track_id].append(-1) - seq_ignored[gt.track_id].append(False) - - if len(frame_gts) is 0: - cost_matrix=[[]] - # associate - association_matrix = hm.compute(cost_matrix) - - # tmp variables for sanity checks - tmptp = 0 - tmpfp = 0 - tmpfn = 0 - - # mapping for tracker ids and ground truth ids - for row, col in association_matrix: - # apply gating on boxoverlap - c = cost_matrix[row][col] - if c < max_cost: - frame_gts[row].tracker = frame_results[col].track_id - frame_ids[1][row] = frame_results[col].track_id - frame_results[col].valid = True - frame_gts[row].distance = c - seq_trajectories[frame_gts[row].track_id][-1] = frame_results[col].track_id - - # true positives are only valid associations - self.tp += 1 - tmptp += 1 - else: - # wrong data association - frame_gts[row].tracker = -1 - self.fn += 1 - tmpfn += 1 - - # associate tracker and DontCare areas - # ignore tracker in neighboring classes - nignoredtracker = 0 # number of ignored tracker detections - ignoredtrackers = dict() # will associate the track_id with -1 - # if it is not ignored and 1 if it is - # ignored; - # this is used to avoid double counting ignored - # cases, see the next loop - - # check for ignored FN/TP (truncation or neighboring object class) - nignoredfn = 0 # the number of ignored false negatives - 
nignoredtp = 0 # the number of ignored true positives - - gi = 0 - for gt in frame_gts: - if gt.tracker < 0: - if gt.occlusion>self.max_occlusion or gt.truncation>self.max_truncation: - seq_ignored[gt.track_id][-1] = True - gt.ignored = True - nignoredfn += 1 - - elif gt.tracker>=0: - if gt.occlusion>self.max_occlusion or gt.truncation>self.max_truncation: - seq_ignored[gt.track_id][-1] = True - gt.ignored = True - nignoredtp += 1 - - gi += 1 - - # the below might be confusion, check the comments in __init__ - # to see what the individual statistics represent - - # correct TP by number of ignored TP due to truncation - # ignored TP are shown as tracked in visualization - tmptp -= nignoredtp - - # count the number of ignored true positives - self.itp += nignoredtp - - # adjust the number of ground truth objects considered - self.n_gt -= (nignoredfn + nignoredtp) - - # count the number of ignored ground truth objects - self.n_igt += nignoredfn + nignoredtp - - # count the number of ignored tracker objects - self.n_itr += nignoredtracker - - # false negatives = associated gt bboxes exceding association threshold + non-associated gt bboxes - tmpfn += len(frame_gts)-len(association_matrix)-nignoredfn - self.fn += len(frame_gts)-len(association_matrix)-nignoredfn - self.ifn += nignoredfn - - # false positives = tracker bboxes - associated tracker bboxes - tmpfp += len(frame_results) - tmptp - nignoredtracker - nignoredtp - self.fp += len(frame_results) - tmptp - nignoredtracker - nignoredtp - - # sanity checks - # - the number of true positives minues ignored true positives - # should be greater or equal to 0 - # - the number of false negatives should be greater or equal to 0 - # - the number of false positives needs to be greater or equal to 0 - # otherwise ignored detections might be counted double - # - the number of counted true positives (plus ignored ones) - # and the number of counted false negatives (plus ignored ones) - # should match the total number of ground truth objects - # - the number of counted true positives (plus ignored ones) - # and the number of counted false positives - # plus the number of ignored tracker detections should - # match the total number of tracker detections; note that - # nignoredpairs is subtracted here to avoid double counting - # of ignored detection sin nignoredtp and nignoredtracker - if tmptp<0: - print (tmptp, nignoredtp) - raise NameError("Something went wrong! TP is negative") - if tmpfn<0: - print (tmpfn, len(frame_gts), len(association_matrix), nignoredfn, nignoredpairs) - raise NameError("Something went wrong! FN is negative") - if tmpfp<0: - print (tmpfp, len(frame_results), tmptp, nignoredtracker, nignoredtp, nignoredpairs) - raise NameError("Something went wrong! FP is negative") - if tmptp + tmpfn is not len(frame_gts)-nignoredfn-nignoredtp: - print ("seqidx", seq_idx) - print ("frame ", f) - print ("TP ", tmptp) - print ("FN ", tmpfn) - print ("FP ", tmpfp) - print ("nGT ", len(frame_gts)) - print ("nAss ", len(association_matrix)) - print ("ign GT", nignoredfn) - print ("ign TP", nignoredtp) - raise NameError("Something went wrong! nGroundtruth is not TP+FN") - if tmptp+tmpfp+nignoredtp+nignoredtracker is not len(frame_results): - print (seq_idx, f, len(frame_results), tmptp, tmpfp) - print (len(association_matrix), association_matrix) - raise NameError("Something went wrong! 
nTracker is not TP+FP") - - # loop over ground truth track_id - # check for id switches or fragmentations - for i,gt_id in enumerate(frame_ids[0]): - if gt_id in last_frame_ids[0]: - idx = last_frame_ids[0].index(gt_id) - tid = frame_ids[1][i] - lid = last_frame_ids[1][idx] - if tid != lid and lid != -1 and tid != -1: - if frame_gts[i].truncation=0 else 0 - lgt = 0 if ign_g[0] else 1 - for f in range(1,len(g)): - if ign_g[f]: - last_id = -1 - continue - lgt+=1 - if last_id != g[f] and last_id != -1 and g[f] != -1 and g[f-1] != -1: - self.id_switches += 1 - if f < len(g)-1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and g[f+1] != -1: - self.fragments += 1 - if g[f] != -1: - tracked += 1 - last_id = g[f] - # handle last frame; tracked state is handled in for loop (g[f]!=-1) - if len(g)>1 and g[f-1] != g[f] and last_id != -1 and g[f] != -1 and not ign_g[f]: - self.fragments += 1 - - # compute mostly_tracked/partialy_tracked/mostly_lost - tracking_ratio = tracked / float(len(g) - sum(ign_g)) - if tracking_ratio > 0.8: - self.mostly_tracked += 1 - elif tracking_ratio < 0.2: - self.mostly_lost += 1 - else: # 0.2 <= tracking_ratio <= 0.8 - self.partialy_tracked += 1 - - if (self.n_gt_trajectories-n_ignored_tr_total)==0: - self.mostly_tracked = 0. - self.partialy_tracked = 0. - self.mostly_lost = 0. - else: - self.mostly_tracked /= float(self.n_gt_trajectories-n_ignored_tr_total) - self.partialy_tracked /= float(self.n_gt_trajectories-n_ignored_tr_total) - self.mostly_lost /= float(self.n_gt_trajectories-n_ignored_tr_total) - - # precision/recall - if (self.fp+self.tp)==0 or (self.tp+self.fn)==0: - self.recall = 0. - self.precision = 0. - else: - self.recall = self.tp/float(self.tp+self.fn) - self.precision = self.tp/float(self.fp+self.tp) - return True - - def _print_entry(self, key, val,width=(70,10)): - """ - Pretty print an entry in a table fashion. - """ - - s_out = key.ljust(width[0]) - if type(val)==int: - s = "%%%dd" % width[1] - s_out += s % val - elif type(val)==float: - s = "%%%df" % (width[1]) - s_out += s % val - else: - s_out += ("%s"%val).rjust(width[1]) - return s_out - - def create_summary(self): - """ - Generate and mail a summary of the results. - If mailpy.py is present, the summary is instead printed. 
- """ - - summary = "" - - summary += "tracking evaluation summary".center(80,"=") + "\n" - summary += self._print_entry("Recall", self.recall) + "\n" - summary += "\n" - summary += self._print_entry("Mostly Tracked", self.mostly_tracked) + "\n" - summary += self._print_entry("Partly Tracked", self.partialy_tracked) + "\n" - summary += self._print_entry("Mostly Lost", self.mostly_lost) + "\n" - summary += "\n" - summary += self._print_entry("True Positives", self.tp) + "\n" - summary += self._print_entry("Ignored True Positives", self.itp) + "\n" - summary += self._print_entry("False Positives", self.fp) + "\n" - summary += self._print_entry("False Negatives", self.fn) + "\n" - summary += self._print_entry("Ignored False Negatives", self.ifn) + "\n" - summary += self._print_entry("Missed Targets", self.fn) + "\n" - summary += self._print_entry("ID-switches", self.id_switches) + "\n" - summary += self._print_entry("Fragmentations", self.fragments) + "\n" - summary += "\n" - summary += self._print_entry("Ground Truth Objects (Total)", self.n_gt + self.n_igt) + "\n" - summary += self._print_entry("Ignored Ground Truth Objects", self.n_igt) + "\n" - summary += self._print_entry("Ground Truth Trajectories", self.n_gt_trajectories) + "\n" - summary += "\n" - summary += self._print_entry("Tracker Objects (Total)", self.n_tr) + "\n" - summary += self._print_entry("Ignored Tracker Objects", self.n_itr) + "\n" - summary += self._print_entry("Tracker Trajectories", self.n_tr_trajectories) + "\n" - summary += "="*80 - - return summary - - def save_to_stats(self): - """ - Save the statistics in a whitespace separate file. - """ - - # create pretty summary - summary = self.create_summary() - - # print the summary. - print(summary) - - # write summary to file summary_cls.txt - filename = os.path.join(self.eval_dir, "summary.txt" ) - dump = open(filename, "w+") - # print>>dump, summary - print(summary, end="", file=dump) - dump.close() - -def get_benchmark_dir_name(): - if datetime.datetime.now().minute < 10: - minute_str = str(0) + str(datetime.datetime.now().minute) - else: - minute_str = str(datetime.datetime.now().minute) - if datetime.datetime.now().second < 10: - second_str = str(0) + str(datetime.datetime.now().second) - else: - second_str = str(datetime.datetime.now().second) - time_file_name = str(datetime.datetime.now().year) + "_" + \ - str(datetime.datetime.now().month) + \ - str(datetime.datetime.now().day) + "_" + \ - str(datetime.datetime.now().hour) + \ - minute_str + second_str - benchmark_dir_name = "benchmark_" + time_file_name - return benchmark_dir_name - -def copy_result_to_current_time_dir(base_dir, benchmark_dir_name, result_file_path): - benchmark_dir = os.path.join(base_dir, benchmark_dir_name) - os.makedirs(benchmark_dir) - result_file_name = "benchmark_results.txt" - result_file_in_benchmark_dir = os.path.join(benchmark_dir, result_file_name) - copyfile(result_file_path, result_file_in_benchmark_dir) - -def evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir): - """ - Entry point for evaluation, will load the data and start evaluation - """ - - # start evaluation and instanciated eval object - e = TrackingEvaluation(velo_data_num) - # load tracker data and check provided classes - try: - if not e.load_tracked_data(result_file_path): - "failed to load tracked data" - return - print("Loading Results - Success") - print("Size of result data ", len(e.result_data)) - except: - print("Caught exception while loading result data.") - return - # load groundtruth data 
for this class - if not e.load_ground_truth(gt_file_path): - raise ValueError("Ground truth not found.") - print("Loading Groundtruth - Success") - print("Size of ground truth data ", len(e.groundtruth)) - # sanity checks - if len(e.groundtruth) is not len(e.result_data): - print("The uploaded data does not provide results for every sequence.") - return False - print("Loaded %d Sequences." % len(e.groundtruth)) - print("Start Evaluation...") - # create needed directories, evaluate and save stats - try: - e.create_eval_dir(benchmark_dir) - except: - print("Caught exception while creating results.") - if e.compute_3rd_party_metrics(): - print("Finished evaluation") - e.save_to_stats() - else: - print("There seem to be no true positives or false positives at all in the submitted data.") - - # finish - print("Thank you for participating in our benchmark!") - return True - -def main(argv): - if len(argv) == 0: - print("You have to set a path to the KITTI data directory") - print("Usage: python3 evaluate_tracking.py /home/hoge/2011_09_26/2011_09_26_drive_0005_sync") - return - - base_dir = argv[0] - if not os.path.exists(base_dir): - print ("Error: you need to set a valid path") - return - - benchmark_dir_name = get_benchmark_dir_name() - benchmark_dir = os.path.join(base_dir, benchmark_dir_name) - print (benchmark_dir) - - result_file_name = "benchmark_results.txt" - result_file_path = os.path.join(base_dir, result_file_name) - # copy benchmark_results.txt to `benchmark_dir_name`/benchmark_results.txt - copy_result_to_current_time_dir(base_dir, benchmark_dir_name, result_file_path) - - tracklet_file_name = "tracklet_labels.xml" - partial_velo_path = "velodyne_points/data" - tracklet_path = os.path.join(base_dir, tracklet_file_name) - gt_file_path = os.path.join(base_dir, "gt_frame.txt") - velo_dir = os.path.join(base_dir, partial_velo_path) - velo_data_num = len(os.listdir(velo_dir)) - - dump_frames_text_from_tracklets(velo_data_num, tracklet_path, gt_file_path) - success = evaluate(velo_data_num, result_file_path, gt_file_path, benchmark_dir) - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py deleted file mode 100644 index f0a2e4dee67..00000000000 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/parse_tracklet_xml.py +++ /dev/null @@ -1,324 +0,0 @@ -from sys import argv as cmdLineArgs -from xml.etree.ElementTree import ElementTree -import numpy as np -from warnings import warn -import os - -STATE_UNSET = 0 -STATE_INTERP = 1 -STATE_LABELED = 2 -STATEFROMTEXT = {'0':STATE_UNSET, '1':STATE_INTERP, '2':STATE_LABELED} - -OCC_UNSET = 255 # -1 as uint8 -OCC_VISIBLE = 0 -OCC_PARTLY = 1 -OCC_FULLY = 2 -OCCFROMTEXT = {'-1':OCC_UNSET, '0':OCC_VISIBLE, '1':OCC_PARTLY, '2':OCC_FULLY} - -TRUNC_UNSET = 255 # -1 as uint8, but in xml files the value '99' is used!
-TRUNC_IN_IMAGE = 0 -TRUNC_TRUNCATED = 1 -TRUNC_OUT_IMAGE = 2 -TRUNC_BEHIND_IMAGE = 3 -TRUNCFROMTEXT = {'99':TRUNC_UNSET, '0':TRUNC_IN_IMAGE, '1':TRUNC_TRUNCATED, '2':TRUNC_OUT_IMAGE, '3': TRUNC_BEHIND_IMAGE} - - -class Tracklet(object): - """ - Representation an annotated object track - - Tracklets are created in function parseXML and can most conveniently used as follows: - - for trackletObj in parseXML(tracklet_file): - for translation, rotation, state, occlusion, truncation, amt_occlusion, amt_borders, absolute_frame_number in trackletObj: - ... your code here ... - #end: for all frames - #end: for all tracklets - - absolute_frame_number is in range [first_frame, first_frame+n_frames[ - amt_occlusion and amt_borders could be None - - You can of course also directly access the fields objType (string), size (len-3 ndarray), first_frame/n_frames (int), - trans/rots (n_frames x 3 float ndarrays), states/truncs (len-n_frames uint8 ndarrays), occs (n_frames x 2 uint8 ndarray), - and for some tracklets amt_occs (n_frames x 2 float ndarray) and amt_borders (n_frames x 3 float ndarray). The last two - can be None if the xml file did not include these fields in poses - """ - - object_type = None - size = None # len-3 float array: (height, width, length) - first_frame = None - trans = None # n x 3 float array (x,y,z) - rots = None # n x 3 float array (x,y,z) - states = None # len-n uint8 array of states - occs = None # n x 2 uint8 array (occlusion, occlusion_kf) - truncs = None # len-n uint8 array of truncation - amt_occs = None # None or (n x 2) float array (amt_occlusion, amt_occlusion_kf) - amt_borders = None # None (n x 3) float array (amt_border_l / _r / _kf) - n_frames = None - - def __init__(self): - """ - Creates Tracklet with no info set - """ - self.size = np.nan*np.ones(3, dtype=float) - - def __str__(self): - """ - Returns human-readable string representation of tracklet object - - called implicitly in - print trackletObj - or in - text = str(trackletObj) - """ - return '[Tracklet over {0} frames for {1}]'.format(self.n_frames, self.object_type) - - def __iter__(self): - """ - Returns an iterator that yields tuple of all the available data for each frame - - called whenever code iterates over a tracklet object, e.g. in - for translation, rotation, state, occlusion, truncation, amt_occlusion, amt_borders, absolute_frame_number in trackletObj: - ...do something ... 
- or - trackDataIter = iter(trackletObj) - """ - if self.amt_occs is None: - return zip(self.trans, self.rots, self.states, self.occs, self.truncs, - itertools.repeat(None), itertools.repeat(None), range(self.first_frame, self.first_frame+self.n_frames)) - else: - return zip(self.trans, self.rots, self.states, self.occs, self.truncs, - self.amt_occs, self.amt_borders, range(self.first_frame, self.first_frame+self.n_frames)) -#end: class Tracklet - - -def parse_xml(tracklet_file): - """ - Parses tracklet xml file and convert results to list of Tracklet objects - - :param tracklet_file: name of a tracklet xml file - :returns: list of Tracklet objects read from xml file - """ - - # convert tracklet XML data to a tree structure - e_tree = ElementTree() - print('Parsing tracklet file', tracklet_file) - with open(tracklet_file) as f: - e_tree.parse(f) - - # now convert output to list of Tracklet objects - tracklets_elem = e_tree.find('tracklets') - tracklets = [] - tracklet_idx = 0 - n_tracklets = None - for tracklet_elem in tracklets_elem: - if tracklet_elem.tag == 'count': - n_tracklets = int(tracklet_elem.text) - print('File contains', n_tracklets, 'tracklets') - elif tracklet_elem.tag == 'item_version': - pass - elif tracklet_elem.tag == 'item': - new_track = Tracklet() - is_finished = False - has_amt = False - frame_idx = None - for info in tracklet_elem: - if is_finished: - raise ValueError('more info on element after finished!') - if info.tag == 'objectType': - new_track.object_type = info.text - elif info.tag == 'h': - new_track.size[0] = float(info.text) - elif info.tag == 'w': - new_track.size[1] = float(info.text) - elif info.tag == 'l': - new_track.size[2] = float(info.text) - elif info.tag == 'first_frame': - new_track.first_frame = int(info.text) - elif info.tag == 'poses': - # this info is the possibly long list of poses - for pose in info: - if pose.tag == 'count': # this should come before the others - if new_track.n_frames is not None: - raise ValueError('there are several pose lists for a single track!') - elif frame_idx is not None: - raise ValueError('?!') - new_track.n_frames = int(pose.text) - new_track.trans = np.nan * np.ones((new_track.n_frames, 3), dtype=float) - new_track.rots = np.nan * np.ones((new_track.n_frames, 3), dtype=float) - new_track.states = np.nan * np.ones(new_track.n_frames, dtype='uint8') - new_track.occs = np.nan * np.ones((new_track.n_frames, 2), dtype='uint8') - new_track.truncs = np.nan * np.ones(new_track.n_frames, dtype='uint8') - new_track.amt_occs = np.nan * np.ones((new_track.n_frames, 2), dtype=float) - new_track.amt_borders = np.nan * np.ones((new_track.n_frames, 3), dtype=float) - frame_idx = 0 - elif pose.tag == 'item_version': - pass - elif pose.tag == 'item': - if frame_idx is None: - raise ValueError('pose item came before number of poses!') - for pose_info in pose: - if pose_info.tag == 'tx': - new_track.trans[frame_idx, 0] = float(pose_info.text) - elif pose_info.tag == 'ty': - new_track.trans[frame_idx, 1] = float(pose_info.text) - elif pose_info.tag == 'tz': - new_track.trans[frame_idx, 2] = float(pose_info.text) - elif pose_info.tag == 'rx': - new_track.rots[frame_idx, 0] = float(pose_info.text) - elif pose_info.tag == 'ry': - new_track.rots[frame_idx, 1] = float(pose_info.text) - elif pose_info.tag == 'rz': - new_track.rots[frame_idx, 2] = float(pose_info.text) - elif pose_info.tag == 'state': - new_track.states[frame_idx] = STATEFROMTEXT[pose_info.text] - elif pose_info.tag == 'occlusion': - new_track.occs[frame_idx, 0] = 
OCCFROMTEXT[pose_info.text] - elif pose_info.tag == 'occlusion_kf': - new_track.occs[frame_idx, 1] = OCCFROMTEXT[pose_info.text] - elif pose_info.tag == 'truncation': - new_track.truncs[frame_idx] = TRUNCFROMTEXT[pose_info.text] - elif pose_info.tag == 'amt_occlusion': - new_track.amt_occs[frame_idx,0] = float(pose_info.text) - has_amt = True - elif pose_info.tag == 'amt_occlusion_kf': - new_track.amt_occs[frame_idx,1] = float(pose_info.text) - has_amt = True - elif pose_info.tag == 'amt_border_l': - new_track.amt_borders[frame_idx,0] = float(pose_info.text) - has_amt = True - elif pose_info.tag == 'amt_border_r': - new_track.amt_borders[frame_idx,1] = float(pose_info.text) - has_amt = True - elif pose_info.tag == 'amt_border_kf': - new_track.amt_borders[frame_idx,2] = float(pose_info.text) - has_amt = True - else: - raise ValueError('unexpected tag in poses item: {0}!'.format(pose_info.tag)) - frame_idx += 1 - else: - raise ValueError('unexpected pose info: {0}!'.format(pose.tag)) - elif info.tag == 'finished': - is_finished = True - else: - raise ValueError('unexpected tag in tracklets: {0}!'.format(info.tag)) - #end: for all fields in current tracklet - - # some final consistency checks on new tracklet - if not is_finished: - warn('tracklet {0} was not finished!'.format(tracklet_idx)) - if new_track.n_frames is None: - warn('tracklet {0} contains no information!'.format(tracklet_idx)) - elif frame_idx != new_track.n_frames: - warn('tracklet {0} is supposed to have {1} frames, but perser found {1}!'.format(tracklet_idx, new_track.n_frames, frame_idx)) - if np.abs(new_track.rots[:,:2]).sum() > 1e-16: - warn('track contains rotation other than yaw!') - - # if amt_occs / amt_borders are not set, set them to None - if not has_amt: - new_track.amt_occs = None - new_track.amt_borders = None - - # add new tracklet to list - tracklets.append(new_track) - tracklet_idx += 1 - - else: - raise ValueError('unexpected tracklet info') - #end: for tracklet list items - - print('Loaded', tracklet_idx, 'tracklets.') - - # final consistency check - if tracklet_idx != n_tracklets: - warn('according to xml information the file has {0} tracklets, but parser found {1}!'.format(n_tracklets, tracklet_idx)) - - return tracklets -#end: function parseXML - -def dump_frames_text_from_tracklets(n_frames, xml_path, gt_file_path): - """ - Loads dataset labels also referred to as tracklets, saving them individually for each frame. - - Parameters - ---------- - n_frames : Number of frames in the dataset. - xml_path : Path to the tracklets XML. - - Returns - ------- - Tuple of dictionaries with integer keys corresponding to absolute frame numbers and arrays as values. First array - contains coordinates of bounding box vertices for each object in the frame, and the second array contains objects - types as strings. 
- """ - tracklets = parse_xml(xml_path) - - frame_num = {} #1 - frame_track_id = {} #1 - frame_obj_type = {} #1 - frame_truncated = {} #1 - frame_occluded = {} #1 - frame_dimensions = {} #3 - frame_location = {} #3 - frame_rotation_yaw = {} #1 - for i in range(n_frames): - frame_num[i] = [] - frame_track_id[i] = [] - frame_obj_type[i] = [] - frame_truncated[i] = [] - frame_occluded[i] = [] - frame_dimensions[i] = [] - frame_location[i] = [] - frame_rotation_yaw[i] = [] - - # loop over tracklets - for i, tracklet in enumerate(tracklets): - # this part is inspired by kitti object development kit matlab code: computeBox3D - h, w, l = tracklet.size - # loop over all data in tracklet - for translation, rotation, state, occlusion, truncation, amt_occlusion, amt_borders, absolute_frame_number in tracklet: - yaw = rotation[2] # other rotations are supposedly 0 - assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!' - - # concate data - frame_num[absolute_frame_number] = frame_num[absolute_frame_number] + [absolute_frame_number] - frame_track_id[absolute_frame_number] = frame_track_id[absolute_frame_number] + [i] - frame_obj_type[absolute_frame_number] = frame_obj_type[absolute_frame_number] + [tracklet.object_type] - frame_truncated[absolute_frame_number] = frame_truncated[absolute_frame_number] + [truncation] - frame_occluded[absolute_frame_number] = frame_occluded[absolute_frame_number] + [occlusion[0]] - frame_dimensions[absolute_frame_number] = frame_dimensions[absolute_frame_number] + [np.array([l, w, h])] - frame_location[absolute_frame_number] = frame_location[absolute_frame_number] + [np.array(translation)] - frame_rotation_yaw[absolute_frame_number] = frame_rotation_yaw[absolute_frame_number] + [yaw] - - gt_text_file = os.path.join(gt_file_path) - file = open(gt_text_file, 'w') - for i_frame in range(n_frames): - for i_object in range(len(frame_num[i_frame])): - file.write(str(frame_num[i_frame][i_object])) - file.write(' ') - file.write(str(frame_track_id[i_frame][i_object])) - file.write(' ') - file.write(frame_obj_type[i_frame][i_object]) - file.write(' ') - file.write(str(frame_truncated[i_frame][i_object])) - file.write(' ') - file.write(str(frame_occluded[i_frame][i_object])) - file.write(' ') - file.write('-10') - file.write(' ') - file.write('-1 -1 -1 -1') - file.write(' ') - file.write(str(frame_dimensions[i_frame][i_object][0])) - file.write(' ') - file.write(str(frame_dimensions[i_frame][i_object][1])) - file.write(' ') - file.write(str(frame_dimensions[i_frame][i_object][2])) - file.write(' ') - file.write(str(frame_location[i_frame][i_object][0])) - file.write(' ') - file.write(str(frame_location[i_frame][i_object][1])) - file.write(' ') - file.write(str(frame_location[i_frame][i_object][2])) - file.write(' ') - file.write(str(frame_rotation_yaw[i_frame][i_object])) - file.write('\n') diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py deleted file mode 100644 index 762baccf890..00000000000 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/benchmark/rect.py +++ /dev/null @@ -1,29 +0,0 @@ -import shapely.geometry -import shapely.affinity - -class Rect: - def __init__(self, cx, cy, l, w, angle): - self.cx = cx - self.cy = cy - self.l = l - self.w = w - self.angle = angle - - def get_contour(self): - l = self.l - w = self.w - 
#shapely.geometry.geo.box(minx, miny, maxx, maxy, ccw=True) - c = shapely.geometry.box(-l/2.0, -w/2.0, l/2.0, w/2.0) - rc = shapely.affinity.rotate(c, self.angle) - return shapely.affinity.translate(rc, self.cx, self.cy) - - def intersection(self, other): - return self.get_contour().intersection(other.get_contour()) - - def union(self, other): - return self.get_contour().union(other.get_contour()) - - def intersection_over_union(self, other): - intersection_area = self.intersection(other).area - union_area = self.union(other).area - return intersection_area/union_area diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch deleted file mode 100644 index 0e7f12f3b9b..00000000000 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/launch/benchmark.launch +++ /dev/null @@ -1,135 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From 83c7a744448a2a87b646bff46ea44b49de17eab9 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Tue, 30 Oct 2018 17:15:36 +0900 Subject: [PATCH 13/16] Remove unnecessary dependency --- .../packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h index ab95ebb4afd..3e3ea393f35 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/include/imm_ukf_pda.h @@ -33,8 +33,6 @@ #include -#include - #include #include #include From 9eafb9f159e01022d42ed08b4f46fa4581dd54d7 Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Thu, 1 Nov 2018 11:09:28 +0900 Subject: [PATCH 14/16] Add comment on dumpresulttext --- .../nodes/imm_ukf_pda/imm_ukf_pda.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp index 7fe0b07b45b..818d13b9833 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp @@ -776,12 +776,21 @@ void ImmUkfPda::dumpResultText(autoware_msgs::DetectedObjectArray& detected_obje detected_objects.objects[i].pose.orientation.z, detected_objects.objects[i].pose.orientation.w); double roll, pitch, yaw; tf::Matrix3x3(q).getRPY(roll, pitch, yaw); + + // KITTI tracking benchmark data format: + // (frame_number,tracked_id, object type, truncation, occlusion, observation angle, x1,y1,x2,y2, h, w, l, cx, cy, cz, yaw) + // x1, y1, x2, y2 are for 2D bounding box. 
+ + // h, w, l are for height, width, length respectively + // cx, cy, cz are for object centroid + + // Tracking benchmark is based on frame_number, tracked_id, + // bounding box dimensions and object pose (centroid and orientation) from bird's-eye view outputfile << std::to_string(frame_count_) <<" " << std::to_string(detected_objects.objects[i].id) <<" " << "Unknown" <<" " << "-1" <<" " << "-1" <<" " - << "-10" <<" " + << "-1" <<" " << "-1 -1 -1 -1" <<" " << std::to_string(detected_objects.objects[i].dimensions.x) <<" " << std::to_string(detected_objects.objects[i].dimensions.y) <<" " From 7b9786f60b600633b6f2f225e0729677598de5ad Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Thu, 1 Nov 2018 18:41:37 +0900 Subject: [PATCH 15/16] Modify commented code --- .../packages/lidar_imm_ukf_pda_track/README.md | 6 ++++++ .../nodes/imm_ukf_pda/imm_ukf_pda.cpp | 9 +++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md index d12da7d9e57..864fb5c7fd8 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md @@ -87,6 +87,12 @@ Node: visualize_detected_objects |`/detected_objects/target_id`|`visualization_msgs::Marker`|Visualize targets' id.| + ### Video [![IMM UKF PDA lidar_tracker Autoware](https://img.youtube.com/vi/tKgDVsIfH-s/0.jpg)](https://youtu.be/tKgDVsIfH-s) + + +### Benchmark +You can tune parameters using the benchmark based on the KITTI dataset. +The repository is [here](https://github.com/cirpue49/kitti_tracking_benchmark). diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp index 818d13b9833..8720162d2f0 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/nodes/imm_ukf_pda/imm_ukf_pda.cpp @@ -59,7 +59,7 @@ ImmUkfPda::ImmUkfPda() if(is_benchmark_) { result_file_path_ = kitti_data_dir_ + "benchmark_results.txt"; - remove(result_file_path_.c_str()); + std::remove(result_file_path_.c_str()); } } @@ -772,11 +772,8 @@ void ImmUkfPda::dumpResultText(autoware_msgs::DetectedObjectArray& detected_obje std::ofstream outputfile(result_file_path_, std::ofstream::out | std::ofstream::app); for(size_t i = 0; i < detected_objects.objects.size(); i++) { - tf::Quaternion q(detected_objects.objects[i].pose.orientation.x, detected_objects.objects[i].pose.orientation.y, - detected_objects.objects[i].pose.orientation.z, detected_objects.objects[i].pose.orientation.w); - double roll, pitch, yaw; - tf::Matrix3x3(q).getRPY(roll, pitch, yaw); + double yaw = tf::getYaw(detected_objects.objects[i].pose.orientation); + // KITTI tracking benchmark data format: // (frame_number,tracked_id, object type, truncation, occlusion, observation angle, x1,y1,x2,y2, h, w, l, cx, cy, cz, yaw) // x1, y1, x2, y2 are for 2D bounding box.
From 138fba263e963b0220688c5ead4c82bdff83f86b Mon Sep 17 00:00:00 2001 From: Kosuke Murakami Date: Thu, 1 Nov 2018 18:47:20 +0900 Subject: [PATCH 16/16] Add readme --- .../lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md index 864fb5c7fd8..e4f1d3b5b0f 100644 --- a/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md +++ b/ros/src/computing/perception/detection/lidar_tracker/packages/lidar_imm_ukf_pda_track/README.md @@ -94,5 +94,6 @@ Node: visualize_detected_objects ### Benchmark +Please note that the benchmark scripts are in a separate repository. You can tune parameters using the benchmark based on the KITTI dataset. The repository is [here](https://github.com/cirpue49/kitti_tracking_benchmark).
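
For reference when inspecting tracker output, one whitespace-separated line of benchmark_results.txt can be read back as below, following the field layout documented in the dumpResultText() comments of PATCH 14 and mirrored by _load_data() in evaluate_tracking.py. This is a minimal sketch; the helper name parse_benchmark_result_line is illustrative and not part of the patch series.

def parse_benchmark_result_line(line):
    # benchmark_results.txt layout (one tracked object per line):
    # frame track_id type truncation occlusion obs_angle x1 y1 x2 y2 h w l cx cy cz yaw
    fields = line.strip().split(" ")
    return {
        "frame": int(float(fields[0])),
        "track_id": int(float(fields[1])),
        "obj_type": fields[2],                        # always "Unknown" for this tracker
        "truncation": int(float(fields[3])),          # written as -1 (unused)
        "occlusion": int(float(fields[4])),           # written as -1 (unused)
        "obs_angle": float(fields[5]),                # written as -1 (unused)
        "bbox_2d": [float(v) for v in fields[6:10]],  # written as -1 -1 -1 -1 (unused)
        "h": float(fields[10]),                       # height [m]
        "w": float(fields[11]),                       # width [m]
        "l": float(fields[12]),                       # length [m]
        "cx": float(fields[13]),                      # centroid x [m]
        "cy": float(fields[14]),                      # centroid y [m]
        "cz": float(fields[15]),                      # centroid z [m]
        "yaw": float(fields[16]),                     # yaw angle [rad]
    }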