-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.cpp
215 lines (192 loc) · 8.82 KB
/
main.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
//
// main.cpp
// Project 4
// Calibration and Augmented Reality
// This program demonstrates the system's behavior on different key presses. It enables the
// user to calibrate the camera and to project 3D objects onto a detected target.
// Created by Shivani Naik and Pulkit Saharan on 04/11/22.
//
#include <iostream>
#include <string>
#include <regex>
#include<opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/features2d.hpp>
#include "calibrate.hpp"
#include "OBJParser.hpp"
using namespace std;
using namespace cv;
// Main function detects targets and puts the required objects in the frame on different key presses
// Command line argument: path to 3d obj file
int main(int argc, char* argv[]) {
    // The path to the 3D .obj model is mandatory; print usage instead of
    // dereferencing a missing argv[1] (previously crashed with no arguments).
    if (argc < 2) {
        printf("Usage: %s <path to 3d obj file>\n", argv[0]);
        return -1;
    }

    // Declarations
    cv::Mat frame, distortion_coeff, img, rotational_vec, trans_vec;
    cv::Mat output, aruco_output;
    bool patternfound;
    // NOTE(review): absolute, machine-specific paths — consider making these
    // configurable (command-line arguments or a config file).
    string calibration_image_path = "/Users/shivaninaik/Documents/MSDAE/Computer Vision/Projects/Project 4/AugmentedReality/AugmentedReality/calibration_images";
    cv::Mat actual = cv::imread("/Users/shivaninaik/Documents/MSDAE/Computer Vision/Projects/Project 4/AugmentedReality/AugmentedReality/aruco.png");
    cv::Mat img_1 = cv::imread("/Users/shivaninaik/Documents/MSDAE/Computer Vision/Projects/Project 4/AugmentedReality/AugmentedReality/eiffel1.png");
    cv::Mat img_2 = cv::imread("/Users/shivaninaik/Documents/MSDAE/Computer Vision/Projects/Project 4/AugmentedReality/AugmentedReality/eiffel2.png");

    // command line argument for obj file
    string obj_path(argv[1]);

    FileStorage fs_write;
    FileStorage fs_read;
    std::vector<std::vector<cv::Vec3f> > point_list;        // list of world co-ordinates, one set per calibration view
    std::vector<std::vector<cv::Point2f> > corner_list;     // list of image points, one set per calibration view
    std::vector<std::vector<cv::Vec3f> > temp_point_list;   // scratch lists used by the 'e' (extract corners) mode
    std::vector<std::vector<cv::Point2f> > temp_corner_list;
    char pressed_key = 'o';                                 // 'o' = idle/default mode (just show video)
    const std::size_t min_calibrate_images = 5;             // minimum calibration views required
    string save_path;

    // Geometry parsed out of the .obj file, used when projecting the model.
    std::vector<cv::Point3f> vertices;
    std::vector<cv::Point3f> normals;
    std::vector<std::vector<int>> face_vertices;
    std::vector<int> face_normals;
    parse_file(obj_path, vertices, normals, face_vertices, face_normals);

    // RAII VideoCapture on the stack: the original heap allocation via `new`
    // was never deleted (resource leak); the destructor now releases the device
    // on every exit path.
    cv::VideoCapture capdev(0);
    capdev.set(cv::CAP_PROP_FRAME_WIDTH, 1280);   // Setting the width of the video 1280
    capdev.set(cv::CAP_PROP_FRAME_HEIGHT, 720);   // Setting the height of the video 720
    if (!capdev.isOpened()) {
        printf("Unable to open video device\n");
        return (-1);
    }
    cv::namedWindow("Video", 1); // identifies a window

    // get some properties of the image
    cv::Size refS((int) capdev.get(cv::CAP_PROP_FRAME_WIDTH),
                  (int) capdev.get(cv::CAP_PROP_FRAME_HEIGHT));
    printf("Expected size: %d %d\n", refS.width, refS.height);

    // Initial intrinsic guess: unit focal length, principal point at the image
    // centre. This is only a fallback — the saved intrinsics below overwrite it.
    float cols = refS.width / 2;
    float rows = refS.height / 2;
    double mat_init[3][3] = {{1, 0, cols}, {0, 1, rows}, {0, 0, 1}};
    // clone() copies the data out of the stack array `mat_init` so that
    // camera_matrix owns its storage instead of aliasing a local.
    cv::Mat camera_matrix = cv::Mat(3, 3, CV_64FC1, &mat_init).clone();
    cout << "Initialized camera matrix" << endl;

    // Load previously saved camera properties, if any.
    fs_read = FileStorage("/Users/shivaninaik/Documents/MSDAE/Computer Vision/Projects/Project 4/AugmentedReality/AugmentedReality/intrinsic.yml", FileStorage::READ);
    fs_read["camera_matrix"] >> camera_matrix;
    fs_read["distortion_coeff"] >> distortion_coeff;
    fs_read.release();
    cout << "Read camera matrix" << endl;

    for (;;) {
        capdev >> frame; // get a new frame from the camera, treat as a stream
        if (frame.empty()) {
            printf("frame is empty\n");
            break;
        }

        // See if there is a waiting keystroke. cv::waitKey returns an int and
        // -1 when no key is pressed; the original stored it in a char, making
        // the -1 comparison implementation-defined (char may be unsigned).
        int key = cv::waitKey(10);
        if (key != -1)
            pressed_key = static_cast<char>(key);

        bool aruco_marker_found = false;
        switch (pressed_key) {
        // show extracted corners
        case 'e':
            img = imread(calibration_image_path + "/3.png");
            extract_draw_corners(img, temp_point_list, temp_corner_list);
            imshow("corner_image", img);
            cv::imshow("Video", frame);
            break;
        // calibrate camera and save current image for calibration
        case 's':
            save_path = calibration_image_path + "/10.png";
            imwrite(save_path, frame);
            point_list.clear();
            corner_list.clear();
            create_image_set(calibration_image_path, point_list, corner_list);
            // ">=" (not ">"): five views are the documented minimum, and
            // comparing against a size_t avoids the signed/unsigned mismatch.
            if (point_list.size() >= min_calibrate_images) {
                calibrate_camera(point_list, corner_list, camera_matrix, distortion_coeff);
                fs_write = FileStorage("/Users/shivaninaik/Documents/MSDAE/Computer Vision/Projects/Project 4/AugmentedReality/AugmentedReality/intrinsic.yml", FileStorage::WRITE);
                fs_write << "camera_matrix" << camera_matrix;
                fs_write << "distortion_coeff" << distortion_coeff;
                fs_write.release();
            }
            else
                cout << "Too few images for calibration, please capture more and then calibrate";
            pressed_key = 'o';  // fall back to idle mode after one calibration pass
            cv::imshow("Video", frame);
            break;
        // detect target and draw 3D axes; print pose when the grid is found
        case 'd':
            patternfound = camera_position(frame, camera_matrix, distortion_coeff, rotational_vec, trans_vec, "Axes");
            if (patternfound) {
                // Pose vectors from solvePnP-style estimation are CV_64F, so
                // read elements as double — the original at<cv::Vec2f> access
                // reinterpreted the underlying bytes and printed garbage.
                std::cout << "Rotation Matrix: " << std::endl;
                for (int i = 0; i < rotational_vec.rows; i++) {
                    for (int j = 0; j < rotational_vec.cols; j++) {
                        std::cout << rotational_vec.at<double>(i, j) << std::endl;
                    }
                }
                std::cout << "Translation Matrix: " << std::endl;
                for (int i = 0; i < trans_vec.rows; i++) {
                    for (int j = 0; j < trans_vec.cols; j++) {
                        std::cout << trans_vec.at<double>(i, j) << std::endl;
                    }
                }
            }
            cv::imshow("Video", frame);
            break;
        // Press 'h' to project chair on target
        case 'h':
            camera_position(frame, camera_matrix, distortion_coeff, rotational_vec, trans_vec, "Chair");
            cv::imshow("Video", frame);
            break;
        // Press '3' to project the parsed .obj model on target
        case '3':
            camera_position(frame, camera_matrix, distortion_coeff, rotational_vec, trans_vec, "Obj", vertices, face_vertices);
            cv::imshow("Video", frame);
            break;
        // Press 'b' to project Cube on target
        case 'b':
            camera_position(frame, camera_matrix, distortion_coeff, rotational_vec, trans_vec, "Cube");
            cv::imshow("Video", frame);
            break;
        // Press 't' to project table on target
        case 't':
            camera_position(frame, camera_matrix, distortion_coeff, rotational_vec, trans_vec, "Table");
            cv::imshow("Video", frame);
            break;
        // Press 'a' to detect aruco markers
        case 'a':
            aruco_marker_detection(frame, output);
            imshow("corner_image", output);
            cv::imshow("Video", frame);
            break;
        // Press 'p' to overlay an image on the aruco markers
        case 'p':
            aruco_marker_found = aruco_marker_detection(frame, output);
            if (aruco_marker_found) {
                aruco_out(frame, actual, aruco_output);
                imshow("aruco_overlay", aruco_output);
            }
            cv::imshow("Video", frame);
            break;
        // Press 'r' to see the matching between two images using ORB
        case 'r':
            ORB_matching(img_1, img_2);
            cv::imshow("Video", frame);
            break;
        // Press '7' to detect Harris corners
        case '7':
            harris_corner(frame);
            cv::imshow("Video", frame);
            break;
        default:
            cv::imshow("Video", frame);
            break;
        }

        // quit if key pressed is 'q'
        if (key == 'q')
            break;
    }
    return (0);
}