predict_gui.py
#! /usr/bin/env python
import os
import argparse
import json
import cv2
import tensorflow as tf
from utils.utils import get_yolo_boxes, makedirs
from utils.bbox import draw_boxes
from tensorflow.keras.models import load_model
from tqdm import tqdm
import numpy as np
from object_tracking.application_util import preprocessing
from object_tracking.deep_sort import nn_matching
from object_tracking.deep_sort.detection import Detection
from object_tracking.deep_sort.tracker import Tracker
from object_tracking.application_util import generate_detections as gdet
from utils.bbox import draw_box_with_id
import warnings
warnings.filterwarnings("ignore")
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from multiprocessing import Queue
from gui_2cam import App
# TF2 removed tf.ConfigProto/tf.Session; use the tf.compat.v1 names imported
# above so the GPU allocates memory on demand instead of all at start-up.
tf_config = ConfigProto()
tf_config.gpu_options.allow_growth = True
session = InteractiveSession(config=tf_config)


def _main_(args):
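    """Run YOLO detection and Deep SORT tracking on `count` webcams.

    Each pass grabs one frame per camera, runs the YOLO model on the batch,
    updates that camera's Deep SORT tracker, counts tracks labelled
    with/without helmet, and pushes the annotated frames, the counts and any
    track messages to the gui_2cam App window.
    """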
    app = App()

    config_path = args.conf
    num_cam = int(args.count)

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)
    # makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Set up the tracker
    ###############################
    # Deep SORT parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort appearance encoder, plus one tracker per camera
    model_filename = 'mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    trackers = []
    for i in range(num_cam):
        metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)
        trackers.append(tracker)

    ###############################
    #   Predict bounding boxes
    ###############################
    # one capture device per camera index (0, 1, ...)
    video_readers = []
    for i in range(num_cam):
        video_reader = cv2.VideoCapture(i)
        video_readers.append(video_reader)

    # the main loop
    batch_size = num_cam
    images = []
    values = []
    messages = []
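    # Frames are batched so the YOLO forward pass runs once per sweep of all
    # cameras; the resulting detections feed each camera's own tracker.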
    while True:
        # grab one frame from every camera
        for i in range(num_cam):
            ret_val, image = video_readers[i].read()
            if ret_val == True: images += [image]

        if (len(images) == batch_size) or (ret_val == False and len(images) > 0):
            batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'],
                                         obj_thresh, nms_thresh)

            for i in range(len(images)):
                # convert boxes to (x, y, w, h) for the Deep SORT encoder
                boxs = [[box1.xmin, box1.ymin, box1.xmax - box1.xmin, box1.ymax - box1.ymin] for box1 in batch_boxes[i]]
                features = encoder(images[i], boxs)
                message = ""

                detections = []
                for j in range(len(boxs)):
                    label = batch_boxes[i][j].label
                    detections.append(Detection(boxs[j], batch_boxes[i][j].c, features[j], label))

                # Call the tracker
                trackers[i].predict()
                trackers[i].update(detections)

                n_without_helmet = 0
                n_with_helmet = 0
                for track in trackers[i].tracks:
                    message += track.message
                    track.message = ""
                    if not track.is_confirmed() or track.time_since_update > 1:
                        continue
                    if track.label == 2:
                        n_without_helmet += 1
                    if track.label == 1:
                        n_with_helmet += 1
                    bbox = track.to_tlbr()
                    draw_box_with_id(images[i], bbox, track.track_id, track.label, config['model']['labels'])

                messages.append(message)
                values.append(n_with_helmet)
                values.append(n_without_helmet)

            app.update(images, values, messages)
            values = []
            images = []
            messages = []

        if cv2.waitKey(1) == 27:
            break  # esc to quit

    cv2.destroyAllWindows()
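    # Release the capture devices so the webcams are freed once the loop exits.
    for video_reader in video_readers:
        video_reader.release()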
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Predict with a trained yolo model')
    argparser.add_argument('-c', '--conf', help='path to configuration file')
    argparser.add_argument('-n', '--count', help='number of cameras')

    args = argparser.parse_args()
    _main_(args)
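# Example invocation (paths are illustrative):
#   python predict_gui.py -c config.json -n 2
# The config is the training JSON read above (model.anchors, model.labels,
# train.gpus, train.saved_weights_name), and mars-small128.pb is expected in
# the working directory.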