Commit f379a9e6 authored by Nadir Dalla Pozza's avatar Nadir Dalla Pozza
Browse files

Capstan detection.

parent ff2e2913
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="210mm"
height="297mm"
viewBox="0 0 210 297"
version="1.1"
id="svg5"
inkscape:export-filename="/Users/nadir/Documents/MPAI-CAE/AIMs/VideoAnalyser/input/tapeShape.png"
inkscape:export-xdpi="96"
inkscape:export-ydpi="96"
inkscape:version="1.1.2 (b8e25be8, 2022-02-05)"
sodipodi:docname="tapeShape.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview7"
pagecolor="#000000"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0"
inkscape:pagecheckerboard="0"
inkscape:document-units="px"
showgrid="false"
inkscape:zoom="1"
inkscape:cx="370.5"
inkscape:cy="233.5"
inkscape:window-width="1920"
inkscape:window-height="1027"
inkscape:window-x="0"
inkscape:window-y="25"
inkscape:window-maximized="0"
inkscape:current-layer="layer1" />
<defs
id="defs2" />
<g
inkscape:label="Livello 1"
inkscape:groupmode="layer"
id="layer1">
<rect
style="font-variation-settings:'wght' 700;fill:#000000;stroke:#ffffff;stroke-width:0.264583;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none"
id="rect123"
width="163.77708"
height="10.847917"
x="0.13229164"
y="0.1322915"
inkscape:export-filename="/Users/nadir/Documents/MPAI-CAE/AIMs/VideoAnalyser/input/tapeShape.png"
inkscape:export-xdpi="96"
inkscape:export-ydpi="96" />
</g>
</svg>
#include "vector"
#include "string.h"
#include "fstream"
#include <vector>
#include <string.h>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
......
......@@ -5,9 +5,9 @@
Tutte le informazioni necessarie all'algoritmo si possono individuare nei file XML
all'interno della cartella config.
@author Maniero Mirco, Ylenia Grava
@version 2.0
@date 01-04-2019
@author Nadir Dalla Pozza
@version 3.0
@date 29-06-2022
*/
#include <filesystem>
#include <fstream>
......@@ -54,22 +54,11 @@ VARIABLES
------------------------------------------------------------------------------
*/
const char* window = "Select ROI";
bool callback, rotated = false;
bool drag = false, pinchRollerSelection = false, savingPinchRoller = false, pinchRollerRect = false;
bool savingPinchRoller = false, pinchRollerRect = false;
bool savingBrand = false;
cv::Point point1, point2, point1b, point2b,point1c, point2c, point3, point4, point5, M;
cv::Mat myFrame;
int x_l, x_r, y_u, y_d, xM, yM, y_m1, y_m2, BD; // Processing rectangle coordinates
int xc_l, xc_r, yc_u, yc_d; // PinchRoller rectangle coordinates
int processingSelection = true;
Point2f vertices[4];
double mA, qA, mB, qB, mC, qC, mD, qD; // Rotated rectangle variables
int rotationEnabled = false;
int x_r_orig, x_l_orig, y_u_orig, y_d_orig;
float mediaPrevFrame = 0;
bool firstBrand = true; // The first frame containing brands on tape must be saved
int firstTime = 1; // First analysis
float firstBrandInstant = 0;
// config.json parameters
......@@ -87,461 +76,10 @@ json irregularityFileOutput1;
json irregularityFileOutput2;
// RotatedRect identifying the processing area
RotatedRect rect;
RotatedRect capstanRect;
/*
------------------------------------------------------------------------------
PREPROCESSING
------------------------------------------------------------------------------
*/
// void mouseHandler(int event, int x, int y, int flags, void* param) {
// // Instructions
// if (processingSelection && event == EVENT_LBUTTONUP && drag)
// std::cout << "\033[32mProcessing area identified.\033[0m\nYou can modify the rectangle by clicking within its area and dragging the mouse.\nAfterwards, you can press CTRL and then use the mouse to rotate the rectangle.\n\033[36mPress 'y' to confirm\033[0m or any other key to exit the program.\n" << std::endl;
// if (pinchRollerSelection && event == EVENT_LBUTTONUP && drag)
// std::cout << "\033[32mPinchRoller area identified.\n\033[36mPress 'y' to confirm\033[0m or any other key to exit the program.\n" << std::endl;
// // Initial drawing of a rectangle
// if (processingSelection || pinchRollerSelection) {
// if (event == EVENT_LBUTTONDOWN && !drag) {
// point1 = cv::Point(x, y);
// drag = true;
// if (pinchRollerSelection) {
// xc_l = x;
// yc_u = y;
// } else {
// x_l = x;
// y_u = y;
// }
// }
// if (event == EVENT_MOUSEMOVE && drag) {
// // Mouse dragged, ROI being selected
// cv::Mat img1 = myFrame.clone();
// if (img1.empty()) {
// std::cout << "Empty frame." << std::endl;
// }
// point2 = cv::Point(x, y);
// cv::rectangle(img1, point1, point2, cv::Scalar(255, 255, 0), 2);
// cv::imshow(window, img1);
// }
// if (event == EVENT_LBUTTONUP && drag) {
// cv::Mat img2 = myFrame.clone();
// point2 = cv::Point(x, y);
// drag = false;
// cv::rectangle(img2, point1, point2, cv::Scalar(255, 255, 0), 2);
// cv::imshow(window, img2);
// callback = true;
// processingSelection = false;
// if (pinchRollerSelection) {
// xc_r = x;
// yc_d = y;
// pinchRollerSelection = false;
// pinchRollerRect = true;
// } else {
// x_r = x;
// y_d = y;
// }
// x_l_orig = x_l;
// x_r_orig = x_r;
// y_u_orig = y_u;
// y_d_orig = y_d;
// }
// } else if (!pinchRollerRect) { // PinchRoller rectangle cannot be rotated or modified and after its definition it is not possible to modify again the processing rectangle
// // If CTRL key is pressed, the user wants to rotate the rectangle
// if (flags == cv::EVENT_FLAG_CTRLKEY && !rotationEnabled) {
// rotationEnabled = true;
// std::cout << "Rotation enabled." << std::endl;
// // Finding original rectangle center M
// xM = (x_l_orig + x_r_orig)/2;
// yM = (y_u_orig + y_d_orig)/2;
// M = cv::Point(xM,yM);
// }
// if (rotationEnabled) {
// if (event == EVENT_LBUTTONDOWN && !drag) {
// drag = true;
// point1 = cv::Point(x,y);
// }
// if (event == EVENT_MOUSEMOVE && drag) {
// cv::Mat img1 = myFrame.clone();
// point2 = cv::Point(x,y);
// RotatedRect rRect = RotatedRect(Point2f(xM, yM), Size2f(x_r_orig - x_l_orig, y_d_orig - y_u_orig), point2.x - point1.x);
// rRect.points(vertices);
// for (int i = 0; i < 4; i++)
// cv::line(img1, vertices[i], vertices[(i + 1) % 4], Scalar(255, 255, 0));
// cv::imshow(window, img1);
// }
// if (event == EVENT_LBUTTONUP && drag) {
// rotated = true;
// drag = false;
// cv::Mat img2 = myFrame.clone();
// point2 = cv::Point(x, y);
// RotatedRect rRect = RotatedRect(Point2f(xM,yM), Size2f(x_r_orig - x_l_orig, y_d_orig - y_u_orig), point2.x - point1.x);
// Point2f vertices[4];
// rRect.points(vertices);
// for (int i = 0; i < 4; i++)
// cv::line(img2, vertices[i], vertices[(i + 1) % 4], Scalar(255, 255, 0), 2);
// cv::imshow(window, img2);
// double m1, q1, m2, q2, m3, q3, m4, q4;
// findRectBound(vertices[0], vertices[1], vertices[2], vertices[3], m1, m2, m3, m4, q1, q2, q3, q4);
// // Finding vertices order
// int ordinate[4] = {(int)vertices[0].y, (int)vertices[1].y, (int)vertices[2].y, (int)vertices[3].y};
// if (*std::min_element(ordinate, ordinate + 4) == (int)vertices[0].y) {
// y_u = vertices[0].y;
// y_d = vertices[2].y;
// mA = m1; qA = q1; mD = m4; qD = q4; mB = m2; qB = q2; mC = m3; qC = q3;
// if (*std::min_element(ordinate + 1, ordinate + 3) == (int)vertices[1].y) {
// y_m1 = vertices[1].y;
// y_m2 = vertices[3].y;
// BD = true;
// } else {
// y_m1 = vertices[3].y;
// y_m2 = vertices[1].y;
// BD = false;
// }
// } else if (*std::min_element(ordinate, ordinate + 4) == (int)vertices[1].y) {
// y_u = vertices[1].y;
// y_d = vertices[3].y;
// mA = m2; qA = q2; mD = m1; qD = q1; mB = m3; qB = q3; mC = m4; qC = q4;
// if (*std::min_element(ordinate + 1, ordinate + 3) == (int)vertices[2].y) {
// y_m1 = vertices[2].y;
// y_m2 = vertices[0].y;
// BD = true;
// } else {
// y_m1 = vertices[0].y;
// y_m2 = vertices[2].y;
// BD = false;
// }
// } else if (*std::min_element(ordinate, ordinate + 4) == (int)vertices[2].y) {
// y_u = vertices[2].y;
// y_d = vertices[0].y;
// mA = m3; qA = q3; mD = m2; qD = q2; mB = m4; qB = q4; mC = m1; qC = q1;
// if (*std::min_element(ordinate + 1, ordinate + 3) == (int)vertices[3].y) {
// y_m1 = vertices[3].y;
// y_m2 = vertices[1].y;
// BD = true;
// } else {
// y_m1 = vertices[1].y;
// y_m2 = vertices[3].y;
// BD = false;
// }
// } else {
// y_u = vertices[3].y;
// y_d = vertices[1].y;
// mA = m4; qA = q4; mD = m3; qD = q3; mB = m1; qB = q1; mC = m2; qC = q2;
// if (*std::min_element(ordinate + 1, ordinate + 3) == (int)vertices[0].y) {
// y_m1 = vertices[0].y;
// y_m2 = vertices[2].y;
// BD = true;
// } else {
// y_m1 = vertices[2].y;
// y_m2 = vertices[0].y;
// BD = false;
// }
// }
// float ascisse [4] = {vertices[0].x, vertices[1].x, vertices[2].x, vertices[3].x};
// float min_ascissa = ascisse[3];
// float max_ascissa = ascisse[0];
// for (int j = 0; j < 4; j++) {
// if (ascisse[j] > max_ascissa) {
// max_ascissa = ascisse[j];
// }
// if (ascisse[j] < min_ascissa) {
// min_ascissa = ascisse[j];
// }
// }
// x_r = max_ascissa;
// x_l = min_ascissa;
// rotationEnabled = false;
// std::cout << "End rotation.\n" << std::endl;
// }
// } else if (!rotated) { // The rectangle can be modified only before its rotation
// // Here the user wants to modify the rectangle
// if (event == EVENT_LBUTTONDOWN && !drag) {
// point3 = cv::Point(x,y);
// drag = true;
// }
// if (event == EVENT_MOUSEMOVE && drag && !processingSelection) {
// // Mouse dragged, ROI being selected
// cv::Mat img4 = myFrame.clone();
// point4 = cv::Point(x, y);
// point2b = cv::Point(point4.x, y_d); //x mouse, y_d
// point1b = cv::Point(point4.x, y_u); //x mouse, y_u
// point1c = cv::Point(x_l, point4.y); //x_l, y mouse
// point2c = cv::Point(x_r, point4.y); //x_r, y mouse
// if ((point3.y >= y_u) && (point3.y <= y_d) && (point3.x >= x_l) && (point3.x <= x_r)) { // Within the rectangle
// if (point3.x >= (x_l + x_r)/2) { // Right rectangle side
// if (std::abs(point3.y - y_u) >= std::abs(point3.y - y_d)) {
// if (std::abs(point3.y - y_d) <= std::abs(point3.x - x_r)) {
// cv::rectangle(img4, point1, point2c, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point2 = point2c;
// } else {
// cv::rectangle(img4, point1, point2b, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point2 = point2b;
// }
// } else {
// if (std::abs(point3.y - y_u) <= std::abs(point3.x - x_r)) {
// cv::rectangle(img4, point1c, point2, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point1 = point1c;
// } else {
// cv::rectangle(img4, point1, point2b, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point2 = point2b;
// }
// }
// } else { // Left side of the rectangle
// if (std::abs(point3.y - y_u) >= std::abs(point3.y - y_d)) {
// if (std::abs(point3.y - y_d) <= std::abs(point3.x - x_l)) {
// cv::rectangle(img4, point1, point2c, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point2 = point2c;
// } else {
// cv::rectangle(img4, point1b, point2, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point1 = point1b;
// }
// } else {
// if (std::abs(point3.y - y_u) <= std::abs(point3.x - x_l)) {
// cv::rectangle(img4, point1c, point2, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point1 = point1c;
// } else {
// cv::rectangle(img4, point1b, point2, cv::Scalar(255,0,0), 2);
// cv::imshow(window, img4);
// point1 = point1b;
// }
// }
// }
// }
// }
// if (event == EVENT_LBUTTONUP && drag) {
// cv::Mat img5 = myFrame.clone();
// point5 = cv::Point(x, y);
// drag = false;
// point2b = cv::Point(point5.x, y_d);
// point1b = cv::Point(point5.x, y_u);
// point1c = cv::Point(x_l, point5.y);
// point2c = cv::Point(x_r, point5.y);
// if ((point3.y >= y_u) && (point3.y <= y_d) && (point3.x >= x_l) && (point3.x <= x_r)) { // Within the rectangle.
// if (point3.x >= (x_l + x_r)/2) { // Right side of the rectangle
// if (std::abs(point3.y - y_u) >= std::abs(point3.y - y_d)) {
// if (std::abs(point3.y - y_d) <= std::abs(point3.x - x_r)) {
// cv::rectangle(img5, point1, point2c, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point2 = point2c;
// y_d = point2.y;
// } else {
// cv::rectangle(img5, point1, point2b, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point2 = point2b;
// x_r = point2.x;
// }
// } else {
// if (std::abs(point3.y - y_u) <= std::abs(point3.x - x_r)) {
// cv::rectangle(img5, point1c, point2, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point1 = point1c;
// y_u = point1.y;
// } else {
// cv::rectangle(img5, point1, point2b, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point2 = point2b;
// x_r = point2.x;
// }
// }
// } else { // Left side of the rectangle
// if (std::abs(point3.y - y_u) >= std::abs(point3.y - y_d)) {
// if (std::abs(point3.y - y_d) <= std::abs(point3.x - x_l)) {
// cv::rectangle(img5, point1, point2c, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point2 = point2c;
// y_d = point2.y;
// } else {
// cv::rectangle(img5, point1b, point2, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point1 = point1b;
// x_l = point1.x;
// }
// } else {
// if (std::abs(point3.y - y_u) <= std::abs(point3.x - x_l)) {
// cv::rectangle(img5, point1c, point2, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point1 = point1c;
// y_u = point1.y;
// } else {
// cv::rectangle(img5, point1b, point2, cv::Scalar(255, 0, 0), 2);
// cv::imshow(window, img5);
// point1 = point1b;
// x_l = point1.x;
// }
// }
// }
// }
// x_l_orig = x_l;
// x_r_orig = x_r;
// y_u_orig = y_u;
// y_d_orig = y_d;
// }
// }
// }
// }
// int selectRect() {
// cv::VideoCapture videoCapture(videoPath);
// if (!videoCapture.isOpened()) {
// std::cerr << "\033[31m" << "Video unreadable." << std::endl;
// return -1;
// }
// // Get total number of frames
// int totalFrames = videoCapture.get(CAP_PROP_FRAME_COUNT);
// // Get a frame where the tape will surely be in its reading position (20 seconds since the beginning)
// if (500 <= totalFrames)
// // Set frame position
// videoCapture.set(CAP_PROP_POS_FRAMES, 500);
// else {
// std::cout << "Video too short! Exiting..." << std::endl;
// return -1;
// }
// videoCapture >> myFrame;
// cv::namedWindow(window, WINDOW_AUTOSIZE);
// cv::imshow(window, myFrame);
// cv::setMouseCallback(window, mouseHandler, 0);
// std::cout << "\nDraw with the mouse the rectangle that contains the desired processing area.\n";
// if (speed == 15)
// std::cout << "At 15 ips, it should cover \033[1mthe entire\033[0m reading head.\n";
// else
// std::cout << "At 7.5 ips, it should cover \033[1mhalf\033[0m the reading head.\n";
// std::cout << "\033[33mATTENTION: please, start drawing from the top-left corner of the rectangle, and end on the bottom-right corner.\033[0m" << std::endl;
// char c = waitKey(0);
// if (c != 'y') {
// return -1;
// }
// if (callback) {
// if (myFrame.empty()) {
// std::cout << "Empty frame." << std::endl;
// return -1;
// }
// if (rotated) {
// point1.x = x_l;
// point1.y = y_u;
// point2.x = x_r;
// point2.y = y_d;
// for (int i = 0; i < 4; i++)
// cv::line(myFrame, vertices[i], vertices[(i + 1) % 4], Scalar(0, 0, 255), 2);
// } else {
// cv::rectangle(myFrame, point1, point2, cv::Scalar(0, 0, 255), 2);
// }
// cv::imshow(window, myFrame);
// // Writing rectangle coordinates on a file
// ofstream coordFile;
// coordFile.open("../config/coordinate.txt");
// coordFile << x_r << "\n";
// coordFile << x_l << "\n";
// coordFile << y_u << "\n";
// coordFile << y_d << "\n";
// coordFile.close();
// } else {
// std::cout << "Rectangle not found." << std::endl;
// return -1;
// }
// return 0;
// }
// int selectPinchRoller() {
// pinchRollerSelection = true;
// cv::imshow(window, myFrame);
// cv::setMouseCallback(window, mouseHandler, 0);
// std::cout << "Draw the rectangle that contains the pinchRoller area.\n" << std::endl;
// char c = waitKey(0);
// if (c != 'y') {
// return -1;
// }
// if (callback) {
// if (myFrame.empty()) {
// std::cout << "Empty frame." << std::endl;
// return -1;
// }
// cv::rectangle(myFrame, point1, point2, cv::Scalar(0, 255, 0), 2);
// cv::imshow(window, myFrame);
// } else {
// std::cout << "Rectangle not found." << std::endl;
// return -1;
// }
// return 0;
// }
// int defineProcessingArea() {
// if (selectRect() != 0) {
// return -1;
// }
// std::cout << "Proceeding with the second rectangle." << std::endl;
// callback = false;
// if (selectPinchRoller() != 0) {
// return -1;
// }
// std::cout << "\033[36mPress 'y' to confirm everything and proceed with the analysis, \033[35m'm' to modify the ROIs\033[0m or any other key to exit." << std::endl;
// char c_bis = waitKey(0);
// destroyWindow(window);
// if (c_bis == 'y') {
// return 0;
// } else if (c_bis == 'm') {
// cv::namedWindow(window, WINDOW_AUTOSIZE);
// cv::imshow(window, myFrame);
// cv::setMouseCallback(window, mouseHandler, 0);
// return 1;
// } else {
// return -1;
// }
// }
bool frameDifference(cv::Mat prevFrame, cv::Mat currentFrame, int msToEnd) {
// Processing area
......@@ -622,70 +160,6 @@ bool frameDifference(cv::Mat prevFrame, cv::Mat currentFrame, int msToEnd) {
}
}
}
// if (!rotated) {
// for (int i = y_u; i < y_d; i++) {
// for (int j = x_l; j < x_r; j++) {
// totColoreCF = totColoreCF + currentFrame.at<cv::Vec3b>(i, j)[0] + currentFrame.at<cv::Vec3b>(i, j)[1] + currentFrame.at<cv::Vec3b>(i, j)[2];
// areaPixels++;
// if (differenceFrame.at<cv::Vec3b>(i, j)[0] == 0) {
// blackPixels++;
// }
// }
// }
// } else {
// int x_r_max = (y_u - qA)/mA;
// int x_l_max = (y_u - qD)/mD;
// for (int j = y_u; j < y_m1; j++) {
// double mD_pos = -mD;
// x_r_max = (j - qA)/mA;
// x_l_max = (qD - j)/mD_pos;
// for (int i = x_l_max; i < x_r_max; i++) {
// totColoreCF = totColoreCF + currentFrame.at<cv::Vec3b>(i, j)[0] + currentFrame.at<cv::Vec3b>(i, j)[1] + currentFrame.at<cv::Vec3b>(i, j)[2];
// areaPixels++;
// if (differenceFrame.at<cv::Vec3b>(j, i)[0] == 0) {
// blackPixels++;
// }
// }
// }
// // Use B and D straight lines
// for (int j = y_m1; j < y_m2; j++) {
// if (BD) { // If the right point is higher than the left one (eg: y0 y1 y3 y2)
// x_r_max = (j - qB)/mB;
// x_l_max = (j - qD)/mD;
// for (int i = x_l_max; i < x_r_max; i++) {
// totColoreCF = totColoreCF + currentFrame.at<cv::Vec3b>(i, j)[0] + currentFrame.at<cv::Vec3b>(i, j)[1] + currentFrame.at<cv::Vec3b>(i, j)[2];
// areaPixels++;
// if (differenceFrame.at<cv::Vec3b>(j, i)[0] == 0) {
// blackPixels++;
// }
// }
// } else { // AC case for eg: y0 y3 y1 y2
// x_r_max = (j - qA)/mA;
// x_l_max = (j - qC)/mC;
// for (int i = x_l_max; i < x_r_max; i++) {
// totColoreCF = totColoreCF + currentFrame.at<cv::Vec3b>(i, j)[0] + currentFrame.at<cv::Vec3b>(i, j)[1] + currentFrame.at<cv::Vec3b>(i, j)[2];
// areaPixels++;
// if (differenceFrame.at<cv::Vec3b>(j, i)[0] == 0) {
// blackPixels++;
// }
// }
// }
// }
// // Using B and C straight lines
// for (int j = y_m2; j < y_d; j++) {
// x_r_max = (j - qB)/mB;
// x_l_max = (j - qC)/mC;
// for (int i = x_l_max; i < x_r_max; i++) {
// totColoreCF = totColoreCF + currentFrame.at<cv::Vec3b>(i, j)[0] + currentFrame.at<cv::Vec3b>(i, j)[1] + currentFrame.at<cv::Vec3b>(i, j)[2];
// areaPixels++;
// if (differenceFrame.at<cv::Vec3b>(j, i)[0] == 0) {
// blackPixels++;
// }
// }
// }
// }
mediaCurrFrame = totColoreCF/areaPixels;
float tsh = areaPixels * thresholdPercentual / 100;
......@@ -836,13 +310,15 @@ int processing(cv::VideoCapture videoCapture, std::string fileName) {
}
bool findProcessingArea(json configurationFile) {
bool findProcessingAreas(json configurationFile) {
/******************************************* JSON PARAMETERS *******************************************/
// Returned variable
bool found = false;
// Read parameters from JSON
int minDist, angleThresh, scaleThresh, posThresh, minDistTape, angleThreshTape, scaleThreshTape, posThreshTape;
int minDist, angleThresh, scaleThresh, posThresh, minDistTape, angleThreshTape, scaleThreshTape, posThreshTape, minDistCapstan, angleThreshCapstan, scaleThreshCapstan, posThreshCapstan;
try {
minDist = configurationFile["MinDist"];
angleThresh = configurationFile["AngleThresh"];
......@@ -852,14 +328,27 @@ bool findProcessingArea(json configurationFile) {
angleThreshTape = configurationFile["AngleThreshTape"];
scaleThreshTape = configurationFile["ScaleThreshTape"];
posThreshTape = configurationFile["PosThreshTape"];
minDistCapstan = configurationFile["MinDistCapstan"];
angleThreshCapstan = configurationFile["AngleThreshCapstan"];
scaleThreshCapstan = configurationFile["ScaleThreshCapstan"];
posThreshCapstan = configurationFile["PosThreshCapstan"];
} catch (nlohmann::detail::type_error e) {
std::cerr << "\033[1;31mconfig.json error!\033[0;31m\n" << e.what() << std::endl;
return -1;
}
/******************************************* READING HEAD DETECTION *******************************************/
// Obtain grayscale version of myFrame
Mat myFrameGrayscale;
cvtColor(myFrame, myFrameGrayscale, COLOR_BGR2GRAY);
// Rect capstanAreaRect(590, 270, 130, 250);
// // Obtain grayscale version of tapeProcessingArea
// Mat capstan = myFrameGrayscale(capstanAreaRect);
// imshow("myFrame", myFrameGrayscale);
// imshow("tapeProcessingAreaGrayscale", capstan);
// imwrite("/users/nadir/desktop/capstanBERIO058prova.png", capstan);
// waitKey();
// Downsample myFrameGrayscale to half resolution (half width, half height)
Mat myFrameGrayscaleHalf;
pyrDown(myFrameGrayscale, myFrameGrayscaleHalf, Size(myFrame.cols/2, myFrame.rows/2));
......@@ -886,8 +375,8 @@ bool findProcessingArea(json configurationFile) {
alg -> setAngleStep(1);
alg -> setAngleThresh(angleThresh);
alg -> setMinScale(0.8);
alg -> setMaxScale(1.2);
alg -> setMinScale(0.9);
alg -> setMaxScale(1.1);
alg -> setScaleStep(0.1);
alg -> setScaleThresh(scaleThresh);
......@@ -907,7 +396,6 @@ bool findProcessingArea(json configurationFile) {
// By inspecting different angles (only between +10 and -10 degrees of maximum inclination) or increasing the position threshold,
// the algorithm should eventually identify only one region.
while (positions.size() != 1) {
std::cout << "Proceeding with positive angles." << endl;
alg -> setMinAngle(0);
alg -> setMaxAngle(10);
alg -> detect(processingImage, positions);
......@@ -915,7 +403,6 @@ bool findProcessingArea(json configurationFile) {
if (positions.size() == 1)
break;
std::cout << "Proceeding with negative angles." << endl;
alg -> setMinAngle(350);
alg -> setMaxAngle(360);
alg -> detect(processingImage, positions);
......@@ -923,7 +410,6 @@ bool findProcessingArea(json configurationFile) {
if (positions.size() == 1)
break;
std::cout << "Increasing position threshold." << endl;
oldPosThresh += 10;
alg -> setPosThresh(oldPosThresh);
}
......@@ -948,6 +434,8 @@ bool findProcessingArea(json configurationFile) {
line(myFrame, pts[1], pts[2], Scalar(0, 0, 255), 2);
line(myFrame, pts[2], pts[3], Scalar(0, 0, 255), 2);
line(myFrame, pts[3], pts[0], Scalar(0, 0, 255), 2);
/******************************************* TAPE AREA DETECTION *******************************************/
// Defining the processing area for identifying the tape under the reading head.
//
......@@ -959,12 +447,10 @@ bool findProcessingArea(json configurationFile) {
int tapeProcessingAreaHeight = max(pts[3].y-pts[1].y, pts[0].y-pts[2].y);
Rect tapeProcessingAreaRect(tapeProcessingAreaX, tapeProcessingAreaY, tapeProcessingAreaWidth, tapeProcessingAreaHeight);
// Obtain the new image
Mat tapeProcessingArea = myFrame(tapeProcessingAreaRect);
// Obtain grayscale version of tapeProcessingArea
Mat tapeProcessingAreaGrayscale = myFrameGrayscale(tapeProcessingAreaRect);
// Read template image - it is smaller than before, therefore there is no need to downsample
templateImage = imread("../input/tapeArea.png", IMREAD_GRAYSCALE);
templateShape = imread("../input/tapeArea.png", IMREAD_GRAYSCALE);
// Reset algorithm and set parameters
alg = createGeneralizedHoughGuil();
......@@ -987,13 +473,13 @@ bool findProcessingArea(json configurationFile) {
alg -> setCannyLowThresh(100);
alg -> setCannyHighThresh(300);
alg -> setTemplate(templateImage);
alg -> setTemplate(templateShape);
oldPosThresh = posThreshTape;
tm.reset();
tm.start();
for (int i = 0; i < 10; i++) {
std::cout << "Proceeding with positive angles." << endl;
alg -> setMinAngle(0);
alg -> setMaxAngle(10);
alg -> detect(tapeProcessingAreaGrayscale, positions2);
......@@ -1003,7 +489,6 @@ bool findProcessingArea(json configurationFile) {
break;
}
std::cout << "Proceeding with negative angles." << endl;
alg -> setMinAngle(350);
alg -> setMaxAngle(360);
alg -> detect(tapeProcessingAreaGrayscale, positions2);
......@@ -1013,7 +498,6 @@ bool findProcessingArea(json configurationFile) {
break;
}
std::cout << "Increasing position value." << endl;
oldPosThresh += 10;
alg -> setPosThresh(oldPosThresh);
}
......@@ -1047,6 +531,158 @@ bool findProcessingArea(json configurationFile) {
line(myFrame, pts[3], pts[0], Scalar(0, 255, 0), 2);
}
/******************************************* CAPSTAN DETECTION *******************************************/
int capstanProcessingAreaRectX = myFrame.cols*3/4;
int capstanProcessingAreaRectY = myFrame.rows/2;
int capstanProcessingAreaRectWidth = myFrame.cols/4;
int capstanProcessingAreaRectHeight = myFrame.rows/2;
Rect capstanProcessingAreaRect(capstanProcessingAreaRectX, capstanProcessingAreaRectY, capstanProcessingAreaRectWidth, capstanProcessingAreaRectHeight);
// Examine myFrameGrayscale only in its bottom-right quarter (rightmost fourth of columns, lower half of rows)
Mat capstanProcessingAreaGrayscale = myFrameGrayscale(capstanProcessingAreaRect);
// imshow("capstanProcessingAreaGrayscale", capstanProcessingAreaGrayscale);
// Read template image - it is smaller than before, therefore there is no need to downsample
templateShape = imread("../input/capstanBERIO058prova.png", IMREAD_GRAYSCALE);
// imshow("capstanProcessingAreaGrayscale", capstanProcessingAreaGrayscale);
// imshow("capstan", templateShape);
// Obtain Canny versions of templateShape and capstanProcessingArea
Mat templateImageCanny, capstanProcessingAreaCanny, templateImageCannyCanny;
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 1, 50);
// imshow("1 50", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 20, 100);
// imshow("20 100", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 25, 150);
// imshow("25 150", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 50, 150);
// imshow("50 150", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 50, 250);
// imshow("50 250", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 100, 250);
// imshow("100 250", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 150, 300);
// imshow("50 200", capstanProcessingAreaCanny);
// Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 200, 300);
// imshow("200 300", capstanProcessingAreaCanny);
// Canny(templateShape, templateImageCanny, 50, 250);
// imshow("capstanCanny", templateImageCanny);
// Reset algorithm and set parameters
alg = createGeneralizedHoughGuil();
alg -> setMinDist(minDistCapstan);
alg -> setLevels(360);
alg -> setDp(2);
alg -> setMaxBufferSize(1000);
alg -> setAngleStep(1);
alg -> setAngleThresh(angleThreshCapstan);
alg -> setMinScale(0.9);
alg -> setMaxScale(1.1);
alg -> setScaleStep(0.05);
alg -> setScaleThresh(scaleThreshCapstan);
alg -> setPosThresh(posThreshCapstan);
alg -> setCannyLowThresh(100);
alg -> setCannyHighThresh(250);
alg -> setTemplate(templateShape);
oldPosThresh = posThreshCapstan;
vector<Vec4f> positionsC1pos, positionsC1neg, positionsC2, positionsC3, positionsC4, positionsC5, positionsC6, positionsC7, positionsC8;
Mat votesC1pos, votesC1neg, votesC2, votesC3, votesC4, votesC5, votesC6, votesC7, votesC8;
tm.reset();
tm.start();
cout << "capstanBERIO058 blue" << endl;
detectShape(alg, templateShape, posThreshCapstan, positionsC1pos, votesC1pos, positionsC1neg, votesC1neg, capstanProcessingAreaGrayscale);
drawShapes(myFrame, positionsC1pos, Scalar(255, 0, 0), templateShape, capstanProcessingAreaRectX, capstanProcessingAreaRectY);
drawShapes(myFrame, positionsC1neg, Scalar(255, 255, 0), templateShape, capstanProcessingAreaRectX, capstanProcessingAreaRectY);
// cout << "capstanBERIO060 light blue" << endl;
// templateShape = imread("../input/capstanBERIO060.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC2, votesC2, capstanProcessingAreaGrayscale);
// cout << "capstanBERIO236 yellow" << endl;
// templateShape = imread("../input/capstanBERIO236.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC3, votesC3, capstanProcessingAreaGrayscale);
// cout << "capstanBERIO297 violet" << endl;
// templateShape = imread("../input/capstanBERIO297.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC4, votesC4, capstanProcessingAreaGrayscale);
// cout << "capstanBERIO319 gray" << endl;
// templateShape = imread("../input/capstanBERIO319.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC5, votesC5, capstanProcessingAreaGrayscale);
// cout << "capstanBERIO333 pink" << endl;
// templateShape = imread("../input/capstanBERIO333.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC6, votesC6, capstanProcessingAreaGrayscale);
// cout << "capstanBERIO415 indigo" << endl;
// templateShape = imread("../input/capstanBERIO415.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC7, votesC7, capstanProcessingAreaGrayscale);
// cout << "capstanLNONO044 dark violet" << endl;
// templateShape = imread("../input/capstanLNONO044.png", IMREAD_GRAYSCALE);
// detectShape(alg, templateShape, posThreshCapstan, positionsC8, votesC8, capstanProcessingAreaGrayscale);
tm.stop();
std::cout << "Capstan detection time : " << tm.getTimeMilli() << " ms" << endl;
// Draw in white the one with highest position value
// int positionVote = 0;
// Vec4f finalPosition;
// for (int i = 0; i < positionsC1.size(); i++) {
// if (votesC1.at<int>(3*i) > positionVote)
// finalPosition = positionsC1[i];
// }
// for (int i = 0; i < positionsC2.size(); i++) {
// if (votes2.at<int>(3*i) > positionVote)
// finalPosition = positionsC2[i];
// }
// for (int i = 0; i < positionsC3.size(); i++) {
// if (votes3.at<int>(3*i) > positionVote)
// finalPosition = positionsC3[i];
// }
// for (int i = 0; i < positionsC4.size(); i++) {
// if (votes4.at<int>(3*i) > positionVote)
// finalPosition = positionsC4[i];
// }
// for (int i = 0; i < positionsC5.size(); i++) {
// if (votes5.at<int>(3*i) > positionVote)
// finalPosition = positionsC5[i];
// }
// for (int i = 0; i < positionsC6.size(); i++) {
// if (votes6.at<int>(3*i) > positionVote)
// finalPosition = positionsC6[i];
// }
// for (int i = 0; i < positionsC7.size(); i++) {
// if (votes7.at<int>(3*i) > positionVote)
// finalPosition = positionsC7[i];
// }
// for (int i = 0; i < positionsC8.size(); i++) {
// if (votes8.at<int>(3*i) > positionVote)
// finalPosition = positionsC8[i];
// }
// Point2f posFinal(finalPosition[0]+capstanProcessingAreaRectX, finalPosition[1]+capstanProcessingAreaRectY);
// scale = finalPosition[2];
// angle = finalPosition[3];
// RotatedRect finalRect;
// finalRect.center = posFinal;
// finalRect.size = Size2f(templateShape.cols * scale, templateShape.rows * scale);
// finalRect.angle = angle;
// finalRect.points(pts);
// Update points with capstan processing area coordinates
// pts[0] = Point2f(pts[0].x, pts[0].y);
// pts[1] = Point2f(pts[1].x+capstanProcessingAreaRectX, pts[1].y+capstanProcessingAreaRectY);
// pts[2] = Point2f(pts[2].x+capstanProcessingAreaRectX, pts[2].y+capstanProcessingAreaRectY);
// pts[3] = Point2f(pts[3].x+capstanProcessingAreaRectX, pts[3].y+capstanProcessingAreaRectY);
// // Update capstanRect
// capstanRect = RotatedRect(pts[0], pts[1], pts[2]);
// line(myFrame, pts[0], pts[1], Scalar(255, 255, 255), 2);
// line(myFrame, pts[1], pts[2], Scalar(255, 255, 255), 2);
// line(myFrame, pts[2], pts[3], Scalar(255, 255, 255), 2);
// line(myFrame, pts[3], pts[0], Scalar(255, 255, 255), 2);
imshow("Tape area(s)", myFrame);
waitKey();
......@@ -1110,26 +746,6 @@ int main(int argc, char** argv) {
// Read input JSON
iJSON >> irregularityFileInput;
/**************************************** DEFINE THE PROCESSING AREA ****************************************/
// bool processingAreaDefined = false;
// while (!processingAreaDefined) {
// // Reset variables
// savingPinchRoller = false;
// pinchRollerRect = false;
// processingSelection = true;
// rotated = false;
// int defineProcArea = defineProcessingArea();
// if (defineProcArea == -1) {
// std::cout << "Exit." << std::endl;
// return -1;
// } else if (defineProcArea == 1) {
// std::cout << "Modifying the area again." << std::endl;
// } else {
// processingAreaDefined = true;
// }
// }
/******************************************* TAPE AREA DETECTION *******************************************/
cv::VideoCapture videoCapture(videoPath);
......@@ -1146,7 +762,7 @@ int main(int argc, char** argv) {
videoCapture >> myFrame;
// Find the processing area corresponding to the tape area over the reading head
bool found = findProcessingArea(configurationFile);
bool found = findProcessingAreas(configurationFile);
// Reset frame position
videoCapture.set(CAP_PROP_POS_FRAMES, 0);
......
#include "rapidxml-1.13/rapidxml.hpp"
#include "vector"
#include "string.h"
#include "fstream"
#include <vector>
#include <string.h>
#include <fstream>
namespace fs = std::__fs::filesystem;
using namespace cv;
using namespace rapidxml;
using namespace std;
......@@ -184,6 +184,123 @@ int findFileName(std::string videoPath, std::string &fileName, std::string &exte
COMPUTER VISION FUNCTIONS
------------------------------------------------------------------------------ */
// Function to detect shape in frame
void detectShape(Ptr<GeneralizedHoughGuil> alg, Mat templateShape, int posThresh, vector<Vec4f> &positivePositions, Mat &positiveVotes, vector<Vec4f> &negativePositions, Mat &negativeVotes, Mat processingArea) {
alg -> setPosThresh(posThresh);
alg -> setTemplate(templateShape);
int oldSizePositive = 0;
int i = 0;
int maxVote = 0;
// Process shapes with positive angles
alg -> setMinAngle(0);
alg -> setMaxAngle(5);
while (true) {
alg -> detect(processingArea, positivePositions, positiveVotes);
int currentSize = positivePositions.size();
if (currentSize == 1) {
// We detected the most interesting shape
break;
} else if (currentSize == 0 && oldSizePositive > 0) {
// It is not possible to detect only one shape with the current parameters
alg -> setPosThresh(posThresh+i-1); // Decrease position value
alg -> detect(processingArea, positivePositions, positiveVotes); // Detect all available shapes
break;
} else if (currentSize == 0 && oldSizePositive == 0) {
// Impossible to found with these parameters
cout << "Not found." << endl;
break;
}
oldSizePositive = currentSize;
// Find maximum vote
for (int j = 0; j < positiveVotes.cols / 3; j++) {
if (positiveVotes.at<int>(3*j) > maxVote)
maxVote = positiveVotes.at<int>(3*j);
}
if (currentSize > 10) {
i += 5; // To speed up computation when there are too many matches
} else if (maxVote - (posThresh + i) > 100) {
i += 100; // To speed up computation when there are few super high matches
} else {
i++;
}
alg -> setPosThresh(posThresh+i);
cout << "\rPositive CurrentSize: " << currentSize << flush;
}
cout << endl;
int oldSizeNegative = 0;
// Reset incremental position value
i = 0;
maxVote = 0;
// Process shapes with negative angles
alg -> setMinAngle(355);
alg -> setMaxAngle(360);
while (true) {
alg -> detect(processingArea, negativePositions, negativeVotes);
int currentSize = negativePositions.size();
if (currentSize == 1) {
// We detected the most interesting shape
break;
} else if (currentSize == 0 && oldSizeNegative > 0) {
// It is not possible to detect only one shape with the current parameters
alg -> setPosThresh(posThresh+i-1); // Decrease position value
alg -> detect(processingArea, negativePositions, negativeVotes); // Detect all available shapes
break;
} else if (currentSize == 0 && oldSizeNegative == 0) {
// Impossible to found with these parameters
cout << "Not found." << endl;
break;
}
oldSizeNegative = currentSize;
// Find maximum vote
for (int j = 0; j < positiveVotes.cols / 3; j++) {
if (positiveVotes.at<int>(3*j) > maxVote)
maxVote = positiveVotes.at<int>(3*j);
}
if (currentSize > 10) {
i += 5; // To speed up computation when there are too many matches
} else if (maxVote - (posThresh + i) > 100) {
i += 100; // To speed up computation when there are few super high matches
} else {
i++;
}
alg -> setPosThresh(posThresh+i);
cout << "\rNegative CurrentSize: " << currentSize << flush;
}
cout << endl;
}
// Function to draw detected shapes in a frame
void drawShapes(Mat frame, vector<Vec4f> &positions, Scalar color, Mat templateShape, int offsetX, int offsetY) {
RotatedRect rr;
Point2f rrpts[4];
for (int i = 0; i < positions.size(); i++) {
Point2f pos(positions[i][0]+offsetX+11, positions[i][1]+offsetY+46);
float scale = positions[i][2];
float angle = positions[i][3];
rr.center = pos;
rr.size = Size2f((templateShape.cols-22) * scale, (templateShape.rows-92) * scale);
rr.angle = angle;
rr.points(rrpts);
line(frame, rrpts[0], rrpts[1], color, 2);
line(frame, rrpts[1], rrpts[2], color, 2);
line(frame, rrpts[2], rrpts[3], color, 2);
line(frame, rrpts[3], rrpts[0], color, 2);
}
}
// Function to separate even and odd frame half planes
void separateFrame(cv::Mat frame, cv::Mat &frame_dispari, cv::Mat &frame_pari) {
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment