/*
 * This script analyses a supplied video to detect the discontinuities
 * (irregularities) found on the tape.  All the information needed by the
 * algorithm can be found in the XML files inside the config folder.
 * (Header translated from the original Italian.)
 *
 * @author Nadir Dalla Pozza
 * @version 3.0
 * @date 29-06-2022
 *
 * NOTE(review): the contents of every angle-bracket pair appear to have been
 * stripped from this file by a text-extraction step: all #include targets
 * below are empty, and template arguments are missing throughout
 * (e.g. `Ptr alg`, `vector positions`, `.at(i, j)`, `static_cast(...)`,
 * `boost::lexical_cast(uuid)`).  The missing `<...>` tokens must be restored
 * from version control before this translation unit can compile — the code is
 * documented here exactly as found.
 */
// Header names lost — see NOTE above.  The trailing comments suggest the
// boost::uuid trio; the rest presumably includes iostream, fstream, chrono,
// filesystem, opencv2 and nlohmann/json — TODO confirm.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include // uuid class
#include // generators
#include // streaming operators etc.
#include
#include
#include
#include
#include
#include
#include "rapidxml-1.13/rapidxml.hpp"
#include "utility.h"
#include "forAudioAnalyser.h"

// NOTE(review): std::__fs::filesystem is a libc++ implementation detail;
// the portable spelling is std::filesystem — confirm against the toolchain.
namespace fs = std::__fs::filesystem;
using namespace cv;
using namespace rapidxml;
using namespace std;
using json = nlohmann::json;

/* ------------------------------------------------------------------------------
 VARIABLES
------------------------------------------------------------------------------ */
// Flags set by the per-frame analysis (pinch-roller branch currently disabled).
bool savingPinchRoller = false, pinchRollerRect = false;
bool savingBrand = false;
// Reference frame (taken from the middle of the video) used for area detection.
cv::Mat myFrame;
// Average colour of the previous frame ("media" = Italian for average).
float mediaPrevFrame = 0;
bool firstBrand = true; // The first frame containing brands on tape must be saved
// ms-to-end timestamp recorded when a brand candidate was last seen.
float firstBrandInstant = 0;

// config.json parameters
bool brands;
std::string irregularityFileInputPath;
std::string outputPath;
std::string videoPath;
float speed;
float thresholdPercentual;
float thresholdPercentualPinchRoller;

// JSON files
json configurationFile;
json irregularityFileInput;
json irregularityFileOutput1;
json irregularityFileOutput2;

// RotatedRect identifying the processing area
RotatedRect rect;
RotatedRect capstanRect;

/*
 * Compares two consecutive frames inside the global processing area `rect`
 * and decides whether the current frame contains an irregularity.
 *
 * @param prevFrame    previous video frame (full size, BGR)
 * @param currentFrame current video frame (full size, BGR)
 * @param msToEnd      milliseconds remaining until the end of the video
 * @return true when the number of differing pixels inside `rect` exceeds the
 *         configured percentage threshold (with extra colour-average logic
 *         when `brands` is enabled), false otherwise.
 *
 * Side effects: updates the globals mediaPrevFrame, firstBrandInstant,
 * firstBrand and savingBrand.
 */
bool frameDifference(cv::Mat prevFrame, cv::Mat currentFrame, int msToEnd) {
    // Processing area
    int areaPixels = rect.size.width * rect.size.height;
    // NOTE(review): unused — the identical value is recomputed as `tsh` below.
    float differentPixelsThreshold = areaPixels * thresholdPercentual / 100;
    // // PinchRoller area
    // int pixelsNumberPinchRoller = (xc_r - xc_l) * (yc_d - yc_u);
    // float differentPixelsThresholdPinchRoller = (thresholdPercentualPinchRoller * pixelsNumberPinchRoller)/100;

    /***************** Extract matrices corresponding to the processing area *********************/
    // CODE FROM https://answers.opencv.org/question/497/extract-a-rotatedrect-area/
    // matrices we'll use
    Mat M, rotatedPrevFrame, croppedPrevFrame, rotatedCurrentFrame, croppedCurrentFrame;
    // get angle and size from the bounding box
    float angle = rect.angle;
    Size rect_size = rect.size;
    // thanks to http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
    if (rect.angle < -45.) {
        angle += 90.0;
        swap(rect_size.width, rect_size.height);
    }
    // get the rotation matrix
    M = getRotationMatrix2D(rect.center, angle, 1.0);
    // perform the affine transformation
    warpAffine(prevFrame, rotatedPrevFrame, M, prevFrame.size(), INTER_CUBIC);
    warpAffine(currentFrame, rotatedCurrentFrame, M, currentFrame.size(), INTER_CUBIC);
    // crop the resulting image
    getRectSubPix(rotatedPrevFrame, rect_size, rect.center, croppedPrevFrame);
    getRectSubPix(rotatedCurrentFrame, rect_size, rect.center, croppedCurrentFrame);
    // imshow("Current frame", currentFrame);
    // imshow("Cropped Current Frame", croppedCurrentFrame);
    // waitKey();
    // END CODE FROM https://answers.opencv.org/question/497/extract-a-rotatedrect-area/

    // `difference` is declared elsewhere (utility.h, presumably) — it appears
    // to return an image where equal pixels are black; TODO confirm contract.
    cv::Mat differenceFrame = difference(croppedPrevFrame, croppedCurrentFrame);

    int blackPixelsPinchRoller = 0;
    // NOTE(review): decEnd and minEnd are computed but never used (leftovers
    // of the disabled pinch-roller analysis below).
    int decEnd = (msToEnd % 1000) / 100;
    int secEnd = (msToEnd - (msToEnd % 1000)) / 1000;
    int minEnd = secEnd / 60;
    secEnd = secEnd % 60;

    /******************************* PinchRoller analysis ****************************************/
    // // In the last minute of the video, check for pinchRoller position for endTape event
    // if ((msToEnd < 60000) && pinchRollerRect) {
    //     for (int i = yc_u; i < yc_d; i++) {
    //         for (int j = xc_l; j < xc_r; j++) {
    //             if (differenceFrame.at(i, j)[0] == 0) {
    //                 // There is a black pixel, then there is a difference between previous and current frames
    //                 blackPixelsPinchRoller++;
    //             }
    //         }
    //     }
    //     if (blackPixelsPinchRoller > differentPixelsThresholdPinchRoller) {
    //         savingPinchRoller = true;
    //         return true;
    //     } else {
    //         savingPinchRoller = false;
    //     }
    // }

    /****************************** Segment analysis ****************************************/
    int blackPixels = 0;
    float mediaCurrFrame;
    // Sum of all three channel values over the cropped frame ("totColore" =
    // total colour); used to compute the frame's average colour.
    int totColoreCF = 0;
    for (int i = 0; i < croppedCurrentFrame.rows; i++) {
        for (int j = 0; j < croppedCurrentFrame.cols; j++) {
            totColoreCF += croppedCurrentFrame.at(i, j)[0] + croppedCurrentFrame.at(i, j)[1] + croppedCurrentFrame.at(i, j)[2];
            if (differenceFrame.at(i, j)[0] == 0) {
                // Black pixel in the difference image => the two frames differ here.
                blackPixels++;
            }
        }
    }
    // NOTE(review): int/int division truncates before the float assignment,
    // so the average loses its fractional part — likely unintended.
    mediaCurrFrame = totColoreCF/areaPixels;
    // Threshold in pixels derived from the configured percentage.
    float tsh = areaPixels * thresholdPercentual / 100;
    if (blackPixels > tsh) {
        if (brands) {
            // Brand mode: only report when the average colour jumped by more
            // than +-10, i.e. the frames are not similar in colour.
            if (mediaPrevFrame > (mediaCurrFrame + 10) || mediaPrevFrame < (mediaCurrFrame - 10)) {
                // They are not similar for color average
                // Update mediaPrevFrame
                mediaPrevFrame = mediaCurrFrame;
                firstBrandInstant = msToEnd;
                return true;
            }
            // If the above condition is not verified, update anyway mediaPrevFrame
            mediaPrevFrame = mediaCurrFrame;
            // At the beginning of the video, wait at least 1 second before the next Irregularity to consider it as a brand.
            // It is not guaranteed that it will be the first brand, but it is generally a safe approach to have a correct image
            if (firstBrand && (firstBrandInstant - msToEnd > 1000)) {
                firstBrand = false;
                savingBrand = true;
                return true;
            }
        } else {
            // Non-brand mode: any above-threshold difference is an irregularity.
            return true;
        }
    }
    return false;
}

/*
 * Scans the whole video frame by frame, records every detected irregularity
 * (image on disk + entry in the two JSON outputs) and logs a summary.
 *
 * @param videoCapture opened capture positioned at the start of the video
 * @param fileName     base name of the video file, used for output naming
 * @return always 0.
 * (Function body continues on the next chunk of this file.)
 */
int processing(cv::VideoCapture videoCapture, std::string fileName) {
    // Video duration
    int frameNumbers_v = videoCapture.get(CAP_PROP_FRAME_COUNT);
    float fps_v = videoCapture.get(CAP_PROP_FPS); // FPS can be non-integers!!!
    // --- continuation of processing() ---
    float videoLength = (float) frameNumbers_v / fps_v; // [s]
    int videoLength_ms = videoLength * 1000;
    // NOTE(review): unsavedFrames is counted but never reported anywhere.
    int savedFrames = 0, unsavedFrames = 0;
    // Initialised to -160 so the very first frame can never be rejected by the
    // (ms - lastSaved > savingRate) guard below.
    float lastSaved = -160;
    int savingRate = 0; // [ms]
    // Whenever we find an Irregularity, we want to skip a lenght equal to the reading head (3 cm = 1.18 inches)
    if (speed == 7.5)
        savingRate = 157; // Time taken to cross 3 cm at 7.5 ips
    else if (speed == 15)
        savingRate = 79; // Time taken to cross 3 cm at 15 ips
    // The first frame of the video won't be processed
    cv::Mat prevFrame;
    videoCapture >> prevFrame;
    firstBrandInstant = videoLength_ms - videoCapture.get(CAP_PROP_POS_MSEC);
    while (videoCapture.isOpened()) {
        cv::Mat frame;
        videoCapture >> frame;
        if (!frame.empty()) {
            int ms = videoCapture.get(CAP_PROP_POS_MSEC);
            int msToEnd = videoLength_ms - ms;
            if (ms == 0) // With OpenCV library, this happens at the last few frames of the video before realising that "frame" is empty.
                break;
            // Format the remaining time as zero-padded mm:ss for the progress line.
            int secToEnd = msToEnd / 1000;
            int minToEnd = (secToEnd / 60) % 60;
            secToEnd = secToEnd % 60;
            std::string secStrToEnd = std::to_string(secToEnd), minStrToEnd = std::to_string(minToEnd);
            if (minToEnd < 10)
                minStrToEnd = "0" + minStrToEnd;
            if (secToEnd < 10)
                secStrToEnd = "0" + secStrToEnd;
            // \r rewrites the same console line on every frame.
            std::cout << "\rIrregularities: " << savedFrames << ". ";
            std::cout << "Remaining video time [mm:ss]: " << minStrToEnd << ":" << secStrToEnd << std::flush;
            // Only analyse frames that are at least `savingRate` ms after the
            // last saved irregularity (skip the reading-head crossing time).
            if ((ms - lastSaved > savingRate) && frameDifference(prevFrame, frame, msToEnd)) {
                // An Irregularity is found!
                // De-interlacing frame
                cv::Mat oddFrame(frame.rows/2, frame.cols, CV_8UC3);
                cv::Mat evenFrame(frame.rows/2, frame.cols, CV_8UC3);
                // separateFrame / getTimeLabel / saveIrregularityImage come
                // from the project headers — contracts assumed, not visible here.
                separateFrame(frame, oddFrame, evenFrame);
                // Finding an image containing the whole tape ("nastro" = tape)
                Point2f pts[4];
                rect.points(pts);
                // NOTE(review): static_cast lost its target type in this copy
                // of the file (stripped angle brackets) — see file header.
                cv::Mat subImageNastro(frame, cv::Rect(100, min(pts[1].y, pts[2].y), frame.cols - 100, static_cast(rect.size.height)));
                // De-interlacing the image with the whole tape
                cv::Mat oddSubImage(subImageNastro.rows/2, subImageNastro.cols, CV_8UC3);
                int evenSubImageRows = subImageNastro.rows/2;
                if (subImageNastro.rows % 2 != 0) // If the found rectangle is of odd height, we must increase evenSubImage height by 1, otherwise we have segmentation_fault!!!
                    evenSubImageRows += 1;
                cv::Mat evenSubImage(evenSubImageRows, subImageNastro.cols, CV_8UC3);
                separateFrame(subImageNastro, oddSubImage, evenSubImage);
                std::string timeLabel = getTimeLabel(ms);
                std::string safeTimeLabel = getSafeTimeLabel(ms);
                saveIrregularityImage(safeTimeLabel, fileName, oddFrame, oddSubImage);
                // Append Irregularity information to JSON — both outputs share
                // the same random UUID; output2 additionally carries the image URI.
                boost::uuids::uuid uuid = boost::uuids::random_generator()();
                irregularityFileOutput1["Irregularities"] += {{ "IrregularityID", boost::lexical_cast(uuid) }, { "Source", "v" }, { "TimeLabel", timeLabel } };
                irregularityFileOutput2["Irregularities"] += {{ "IrregularityID", boost::lexical_cast(uuid) }, { "Source", "v" }, { "TimeLabel", timeLabel }, { "ImageURI", pathTape224 + "/"+ fileName + "_" + safeTimeLabel + ".jpg" } };
                lastSaved = ms;
                savedFrames++;
            } else {
                unsavedFrames++;
            }
            prevFrame = frame;
        } else {
            // End of stream: release the capture and leave the loop.
            std::cout << "\nEmpty frame!" << std::endl;
            videoCapture.release();
            break;
        }
    }
    // Append the run summary to the shared log file.
    ofstream myFile;
    myFile.open("log.txt", ios::app);
    myFile << "Saved frames are: " << savedFrames << std::endl;
    myFile.close();
    return 0;
}

/*
 * Locates the processing areas (reading head, tape area and — partially
 * implemented — capstan) in the global reference frame `myFrame` using
 * Generalized Hough (Guil) template matching, draws them on `myFrame`, and
 * stores the tape area in the global `rect`.
 *
 * @param configurationFile parsed config.json with the detector thresholds
 * @return true when the tape area was found.
 *         NOTE(review): the catch branch returns -1 from a bool function,
 *         which converts to true — an error would be reported as "found".
 * (Function body continues on the next chunk of this file.)
 */
bool findProcessingAreas(json configurationFile) {
    /******************************************* JSON PARAMETERS *******************************************/
    // Returned variable
    bool found = false;
    // Read parameters from JSON
    int minDist, angleThresh, scaleThresh, posThresh, minDistTape, angleThreshTape, scaleThreshTape, posThreshTape, minDistCapstan, angleThreshCapstan, scaleThreshCapstan, posThreshCapstan;
    try {
        minDist = configurationFile["MinDist"];
        angleThresh = configurationFile["AngleThresh"];
        scaleThresh = configurationFile["ScaleThresh"];
        posThresh = configurationFile["PosThresh"];
        minDistTape = configurationFile["MinDistTape"];
        angleThreshTape = configurationFile["AngleThreshTape"];
        scaleThreshTape = configurationFile["ScaleThreshTape"];
        posThreshTape = configurationFile["PosThreshTape"];
        minDistCapstan = configurationFile["MinDistCapstan"];
        angleThreshCapstan = configurationFile["AngleThreshCapstan"];
        scaleThreshCapstan = configurationFile["ScaleThreshCapstan"];
        posThreshCapstan = configurationFile["PosThreshCapstan"];
    } catch (nlohmann::detail::type_error e) { // NOTE(review): catch by value; prefer const&
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\n" << e.what() << std::endl;
        return -1; // NOTE(review): -1 converts to true in a bool function — see doc comment
    }

    /******************************************* READING HEAD DETECTION *******************************************/
    // Obtain grayscale version of myFrame
    Mat myFrameGrayscale;
    cvtColor(myFrame, myFrameGrayscale, COLOR_BGR2GRAY);
    // Rect capstanAreaRect(590, 270, 130, 250);
    // // Obtain grayscale version of tapeProcessingArea
    // Mat capstan = myFrameGrayscale(capstanAreaRect);
    // imshow("myFrame", myFrameGrayscale);
    // imshow("tapeProcessingAreaGrayscale", capstan);
    // imwrite("/users/nadir/desktop/capstanBERIO058prova.png", capstan);
    // waitKey();
    // Downsample myFrameGrayscale in half pixels
    Mat myFrameGrayscaleHalf;
    // --- continuation of findProcessingAreas() ---
    pyrDown(myFrameGrayscale, myFrameGrayscaleHalf, Size(myFrame.cols/2, myFrame.rows/2));
    // Get input shape in grayscale
    Mat templateImage = imread("../input/readingHead.png", IMREAD_GRAYSCALE);
    // Downsample tapeShape in half pixels
    Mat templateImageHalf;
    pyrDown(templateImage, templateImageHalf, Size(templateImage.cols/2, templateImage.rows/2));
    // Select the image to process
    Mat processingImage = myFrameGrayscaleHalf;
    // Select the template to be detected
    Mat templateShape = templateImageHalf;
    // Algorithm and parameters.
    // NOTE(review): `Ptr` lost its template argument in this copy of the file
    // (presumably Ptr<GeneralizedHoughGuil>) — see file header.
    Ptr alg = createGeneralizedHoughGuil();
    alg -> setMinDist(minDist);
    alg -> setLevels(360);
    alg -> setDp(2);
    alg -> setMaxBufferSize(1000);
    alg -> setAngleStep(1);
    alg -> setAngleThresh(angleThresh);
    alg -> setMinScale(0.9);
    alg -> setMaxScale(1.1);
    alg -> setScaleStep(0.1);
    alg -> setScaleThresh(scaleThresh);
    alg -> setPosThresh(posThresh);
    alg -> setCannyLowThresh(100);
    alg -> setCannyHighThresh(300);
    alg -> setTemplate(templateShape);
    // NOTE(review): element type stripped (presumably vector<Vec4f>).
    vector positions, positions2;
    TickMeter tm;
    int oldPosThresh = posThresh;
    tm.start();
    // Parameters are quite slack, therefore more than one match should be expected.
    // By inspecting different angles (only between +10 and -10 degrees of maximum inclination) or increasing the position threshold,
    // the algorithm should eventually identify only one region.
    // NOTE(review): this loop has no iteration cap — if no single match is
    // ever produced it spins forever (compare the capstan/tape loops below,
    // which are bounded to 10 iterations).
    while (positions.size() != 1) {
        alg -> setMinAngle(0);
        alg -> setMaxAngle(10);
        alg -> detect(processingImage, positions);
        if (positions.size() == 1)
            break;
        alg -> setMinAngle(350);
        alg -> setMaxAngle(360);
        alg -> detect(processingImage, positions);
        if (positions.size() == 1)
            break;
        // Tighten the position threshold and retry.
        oldPosThresh += 10;
        alg -> setPosThresh(oldPosThresh);
    }
    tm.stop();
    std::cout << "Reading head detection time : " << tm.getTimeMilli() << " ms" << endl;
    // Single match: position (x, y), scale and rotation angle.
    Point2f pos(positions[0][0], positions[0][1]);
    float scale = positions[0][2];
    float angle = positions[0][3];
    rect.center = pos * 2; // * 2 since the processed image is half the original one
    rect.size = Size2f(templateShape.cols * scale * 2, templateShape.rows * scale * 2); // * 2 for the same reason
    rect.angle = angle;
    Point2f pts[4];
    rect.points(pts);
    // Red for the reading head
    line(myFrame, pts[0], pts[1], Scalar(0, 0, 255), 2);
    line(myFrame, pts[1], pts[2], Scalar(0, 0, 255), 2);
    line(myFrame, pts[2], pts[3], Scalar(0, 0, 255), 2);
    line(myFrame, pts[3], pts[0], Scalar(0, 0, 255), 2);

    /******************************************* TAPE AREA DETECTION *******************************************/
    // Defining the processing area for identifying the tape under the reading head.
    //
    // Parameters for extracting a rectangle containing the found rectangle completely (also if it is slightly rotated)
    // and with twice its height (since the tape is immediatley below the found rectangle).
    int tapeProcessingAreaX = min(pts[0].x, pts[1].x);
    int tapeProcessingAreaY = min(pts[1].y, pts[2].y) + (max(pts[0].y, pts[3].y) - min(pts[1].y, pts[2].y)) * 2/3; // Shift down the area
    int tapeProcessingAreaWidth = max(pts[3].x-pts[1].x, pts[2].x-pts[0].x);
    int tapeProcessingAreaHeight = max(pts[3].y-pts[1].y, pts[0].y-pts[2].y);
    Rect tapeProcessingAreaRect(tapeProcessingAreaX, tapeProcessingAreaY, tapeProcessingAreaWidth, tapeProcessingAreaHeight);
    // Obtain grayscale version of tapeProcessingArea
    Mat tapeProcessingAreaGrayscale = myFrameGrayscale(tapeProcessingAreaRect);
    // Read template image - it is smaller than before, therefore there is no need to downsample
    templateShape = imread("../input/tapeArea.png", IMREAD_GRAYSCALE);
    // Reset algorithm and set parameters
    alg = createGeneralizedHoughGuil();
    alg -> setMinDist(minDistTape);
    alg -> setLevels(360);
    alg -> setDp(2);
    alg -> setMaxBufferSize(1000);
    alg -> setAngleStep(1);
    alg -> setAngleThresh(angleThreshTape);
    alg -> setMinScale(0.9);
    alg -> setMaxScale(1.1);
    alg -> setScaleStep(0.05);
    alg -> setScaleThresh(scaleThreshTape);
    alg -> setPosThresh(posThreshTape);
    alg -> setCannyLowThresh(100);
    alg -> setCannyHighThresh(300);
    alg -> setTemplate(templateShape);
    oldPosThresh = posThreshTape;
    tm.reset();
    tm.start();
    // Same retry scheme as above, but bounded to 10 attempts; `found` is only
    // set when exactly one tape match is produced.
    for (int i = 0; i < 10; i++) {
        alg -> setMinAngle(0);
        alg -> setMaxAngle(10);
        alg -> detect(tapeProcessingAreaGrayscale, positions2);
        if (positions2.size() == 1) {
            found = true;
            break;
        }
        alg -> setMinAngle(350);
        alg -> setMaxAngle(360);
        alg -> detect(tapeProcessingAreaGrayscale, positions2);
        if (positions2.size() == 1) {
            found = true;
            break;
        }
        oldPosThresh += 10;
        alg -> setPosThresh(oldPosThresh);
    }
    tm.stop();
    std::cout << "Tape detection time : " << tm.getTimeMilli() << " ms" << endl;
    // Translate each tape match back into full-frame coordinates, update the
    // global `rect` and draw it in green.  (With the loop above, at most one
    // match reaches this point when `found` is true.)
    for (int i = 0; i < positions2.size(); i++) {
        Point2f pos2(positions2[i][0], positions2[i][1]);
        scale = positions2[i][2];
        angle = positions2[i][3];
        rect.center = pos2;
        rect.size = Size2f(templateShape.cols * scale, templateShape.rows * scale);
        rect.angle = angle;
        rect.points(pts);
        // Update points with tape processing area coordinates
        pts[0] = Point2f(pts[0].x+tapeProcessingAreaX, pts[0].y+tapeProcessingAreaY);
        pts[1] = Point2f(pts[1].x+tapeProcessingAreaX, pts[1].y+tapeProcessingAreaY);
        pts[2] = Point2f(pts[2].x+tapeProcessingAreaX, pts[2].y+tapeProcessingAreaY);
        pts[3] = Point2f(pts[3].x+tapeProcessingAreaX, pts[3].y+tapeProcessingAreaY);
        // Update rect
        rect = RotatedRect(pts[0], pts[1], pts[2]);
        line(myFrame, pts[0], pts[1], Scalar(0, 255, 0), 2);
        line(myFrame, pts[1], pts[2], Scalar(0, 255, 0), 2);
        line(myFrame, pts[2], pts[3], Scalar(0, 255, 0), 2);
        line(myFrame, pts[3], pts[0], Scalar(0, 255, 0), 2);
    }

    /******************************************* CAPSTAN DETECTION *******************************************/
    // Search area: bottom-right quarter of the frame.
    // NOTE(review): the comment below says "left half", which contradicts
    // the cols*3/4 offset — the code examines the right side.
    int capstanProcessingAreaRectX = myFrame.cols*3/4;
    int capstanProcessingAreaRectY = myFrame.rows/2;
    int capstanProcessingAreaRectWidth = myFrame.cols/4;
    int capstanProcessingAreaRectHeight = myFrame.rows/2;
    Rect capstanProcessingAreaRect(capstanProcessingAreaRectX, capstanProcessingAreaRectY, capstanProcessingAreaRectWidth, capstanProcessingAreaRectHeight);
    // Examine myFrameGrayscale only in its left half
    Mat capstanProcessingAreaGrayscale = myFrameGrayscale(capstanProcessingAreaRect);
    // imshow("capstanProcessingAreaGrayscale", capstanProcessingAreaGrayscale);
    // Read template image - it is smaller than before, therefore there is no need to downsample
    templateShape = imread("../input/capstanBERIO058prova.png", IMREAD_GRAYSCALE);
    // imshow("capstanProcessingAreaGrayscale", capstanProcessingAreaGrayscale);
    // imshow("capstan", templateShape);
    // Obtain Canny versions of templateImage and capstanProcessingArea
    Mat templateImageCanny, capstanProcessingAreaCanny, templateImageCannyCanny;
    // Disabled Canny threshold experiments:
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 1, 50);
    // imshow("1 50", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 20, 100);
    // imshow("20 100", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 25, 150);
    // imshow("25 150", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 50, 150);
    // imshow("50 150", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 50, 250);
    // imshow("50 250", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 100, 250);
    // imshow("100 250", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 150, 300);
    // imshow("50 200", capstanProcessingAreaCanny);
    // Canny(capstanProcessingAreaGrayscale, capstanProcessingAreaCanny, 200, 300);
    // imshow("200 300", capstanProcessingAreaCanny);
    // Canny(templateShape, templateImageCanny, 50, 250);
    // imshow("capstanCanny", templateImageCanny);
    // Reset algorithm and set parameters
    alg = createGeneralizedHoughGuil();
    alg -> setMinDist(minDistCapstan);
    alg -> setLevels(360);
    alg -> setDp(2);
    alg -> setMaxBufferSize(1000);
    alg -> setAngleStep(1);
    alg -> setAngleThresh(angleThreshCapstan);
    alg -> setMinScale(0.9);
    alg -> setMaxScale(1.1);
    alg -> setScaleStep(0.05);
    alg -> setScaleThresh(scaleThreshCapstan);
    alg -> setPosThresh(posThreshCapstan);
    alg -> setCannyLowThresh(100);
    alg -> setCannyHighThresh(250);
    alg -> setTemplate(templateShape);
    oldPosThresh = posThreshCapstan;
    // One position/vote pair per capstan template; only C1 is active below.
    vector positionsC1pos, positionsC1neg, positionsC2, positionsC3, positionsC4, positionsC5, positionsC6, positionsC7, positionsC8;
    Mat votesC1pos, votesC1neg, votesC2, votesC3, votesC4, votesC5, votesC6, votesC7, votesC8;
    tm.reset();
    tm.start();
    // The colours named here are the Scalars passed to drawShapes.
    cout << "capstanBERIO058 blue" << endl;
    // detectShape / drawShapes are project helpers (forAudioAnalyser.h,
    // presumably) — contracts assumed, not visible in this file.
    detectShape(alg, templateShape, posThreshCapstan, positionsC1pos, votesC1pos, positionsC1neg, votesC1neg, capstanProcessingAreaGrayscale);
    drawShapes(myFrame, positionsC1pos, Scalar(255, 0, 0), templateShape, capstanProcessingAreaRectX, capstanProcessingAreaRectY);
    drawShapes(myFrame, positionsC1neg, Scalar(255, 255, 0), templateShape, capstanProcessingAreaRectX, capstanProcessingAreaRectY);
    // Disabled alternative capstan templates:
    // cout << "capstanBERIO060 light blue" << endl;
    // templateShape = imread("../input/capstanBERIO060.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC2, votesC2, capstanProcessingAreaGrayscale);
    // cout << "capstanBERIO236 yellow" << endl;
    // templateShape = imread("../input/capstanBERIO236.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC3, votesC3, capstanProcessingAreaGrayscale);
    // cout << "capstanBERIO297 violet" << endl;
    // templateShape = imread("../input/capstanBERIO297.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC4, votesC4, capstanProcessingAreaGrayscale);
    // cout << "capstanBERIO319 gray" << endl;
    // templateShape = imread("../input/capstanBERIO319.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC5, votesC5, capstanProcessingAreaGrayscale);
    // cout << "capstanBERIO333 pink" << endl;
    // templateShape = imread("../input/capstanBERIO333.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC6, votesC6, capstanProcessingAreaGrayscale);
    // cout << "capstanBERIO415 indigo" << endl;
    // templateShape = imread("../input/capstanBERIO415.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC7, votesC7, capstanProcessingAreaGrayscale);
    // cout << "capstanLNONO044 dark violet" << endl;
    // templateShape = imread("../input/capstanLNONO044.png", IMREAD_GRAYSCALE);
    // detectShape(alg, templateShape, posThreshCapstan, positionsC8, votesC8, capstanProcessingAreaGrayscale);
    tm.stop();
    std::cout << "Capstan detection time : " << tm.getTimeMilli() << " ms" << endl;
    // Disabled selection of the best-voted capstan match:
    // Draw in white the one with highest position value
    // int positionVote = 0;
    // Vec4f finalPosition;
    // for (int i = 0; i < positionsC1.size(); i++) {
    //     if (votesC1.at(3*i) > positionVote)
    //         finalPosition = positionsC1[i];
    // }
    // for (int i = 0; i < positionsC2.size(); i++) {
    //     if (votes2.at(3*i) > positionVote)
    //         finalPosition = positionsC2[i];
    // }
    // for (int i = 0; i < positionsC3.size(); i++) {
    //     if (votes3.at(3*i) > positionVote)
    //         finalPosition = positionsC3[i];
    // }
    // for (int i = 0; i < positionsC4.size(); i++) {
    //     if (votes4.at(3*i) > positionVote)
    //         finalPosition = positionsC4[i];
    // }
    // for (int i = 0; i < positionsC5.size(); i++) {
    //     if (votes5.at(3*i) > positionVote)
    //         finalPosition = positionsC5[i];
    // }
    // for (int i = 0; i < positionsC6.size(); i++) {
    //     if (votes6.at(3*i) > positionVote)
    //         finalPosition = positionsC6[i];
    // }
    // for (int i = 0; i < positionsC7.size(); i++) {
    //     if (votes7.at(3*i) > positionVote)
    //         finalPosition = positionsC7[i];
    // }
    // for (int i = 0; i < positionsC8.size(); i++) {
    //     if (votes8.at(3*i) > positionVote)
    //         finalPosition = positionsC8[i];
    // }
    // Point2f posFinal(finalPosition[0]+capstanProcessingAreaRectX, finalPosition[1]+capstanProcessingAreaRectY);
    // scale = finalPosition[2];
    // angle = finalPosition[3];
    // RotatedRect finalRect;
    // finalRect.center = posFinal;
    // finalRect.size = Size2f(templateShape.cols * scale, templateShape.rows * scale);
    // finalRect.angle = angle;
    // finalRect.points(pts);
    // Update points with tape processing area coordinates
    // pts[0] = Point2f(pts[0].x, pts[0].y);
    // pts[1] = Point2f(pts[1].xRectX, pts[1].y+capstanProcessingAreaRectY);
    // pts[2] = Point2f(pts[2].x+capstanProcessingAreaRectX, pts[2].y+capstanProcessingAreaRectY);
    // pts[3] = Point2f(pts[3].x+capstanProcessingAreaRectX, pts[3].y+capstanProcessingAreaRectY);
    // // Update capstanRect
    // capstanRect = RotatedRect(pts[0], pts[1], pts[2]);
    // line(myFrame, pts[0], pts[1], Scalar(255, 255, 255), 2);
    // line(myFrame, pts[1], pts[2], Scalar(255, 255, 255), 2);
    // line(myFrame, pts[2], pts[3], Scalar(255, 255,
    // --- tail of findProcessingAreas() (disabled drawing continues) ---
    // 255), 2);
    // line(myFrame, pts[3], pts[0], Scalar(255, 255, 255), 2);
    // NOTE(review): this blocks until a key is pressed, so the program cannot
    // run unattended — consider making the preview optional.
    imshow("Tape area(s)", myFrame);
    waitKey();
    return found;
}

/*
 * Entry point: reads and validates config.json, detects the processing areas
 * on a mid-video frame, runs the frame-by-frame analysis, and writes the two
 * irregularity JSON files plus a log.txt summary.
 *
 * @return 0 on success, -1 on configuration/input errors, 1 when the
 *         processing area could not be found.
 */
int main(int argc, char** argv) {
    /**************************************** CONFIGURATION FILE ****************************************/
    // Read configuration file
    std::ifstream iConfig("../config/config.json");
    iConfig >> configurationFile;
    // Initialise parameters
    try {
        brands = configurationFile["Brands"];
        irregularityFileInputPath = configurationFile["IrregularityFileInput"];
        outputPath = configurationFile["OutputPath"];
        videoPath = configurationFile["PreservationAudioVisualFile"];
        speed = configurationFile["Speed"];
        thresholdPercentual = configurationFile["ThresholdPercentual"];
        thresholdPercentualPinchRoller = configurationFile["ThresholdPercentualPinchRoller"];
    } catch (nlohmann::detail::type_error e) { // NOTE(review): catch by value; prefer const&
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\n" << e.what() << std::endl;
        return -1;
    }
    // Input JSON check
    std::ifstream iJSON(irregularityFileInputPath);
    if (iJSON.fail()) {
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nIrregularityFileInput.json cannot be found or opened." << std::endl;
        return -1;
    }
    // findFileName (project helper) splits videoPath into name + extension.
    std::string fileName, extension;
    if (findFileName(videoPath, fileName, extension) == -1) {
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nThe PreservationAudioVisualFile cannot be found or opened." << std::endl;
        return -1;
    }
    if (speed != 7.5 && speed != 15) {
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nSpeed parameter must be 7.5 or 15 ips." << std::endl;
        return -1;
    }
    if (thresholdPercentual < 0 || thresholdPercentual > 100) {
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nThresholdPercentual parameter must be a percentage value." << std::endl;
        return -1;
    }
    if (thresholdPercentualPinchRoller < 0 || thresholdPercentualPinchRoller > 100) {
        // NOTE(review): message copy-pasted from the previous check — it names
        // ThresholdPercentual instead of ThresholdPercentualPinchRoller.
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nThresholdPercentual parameter must be a percentage value." << std::endl;
        return -1;
    }
    // Empirical adjustment for the faster tape speed.
    if (speed == 15)
        thresholdPercentual += 4;
    std::cout << "\nParameters from config.json file:" << std::endl;
    std::cout << " Brands: " << brands << std::endl;
    std::cout << " Speed: " << speed << std::endl;
    std::cout << " ThresholdPercentual: " << thresholdPercentual << std::endl;
    std::cout << " ThresholdPercentualPinchRoller: " << thresholdPercentualPinchRoller << std::endl;
    // Read input JSON
    iJSON >> irregularityFileInput;

    /******************************************* TAPE AREA DETECTION *******************************************/
    cv::VideoCapture videoCapture(videoPath);
    if (!videoCapture.isOpened()) {
        std::cerr << "\033[31m" << "Video unreadable." << std::endl;
        return -1;
    }
    // Get total number of frames
    int totalFrames = videoCapture.get(CAP_PROP_FRAME_COUNT);
    // Set frame position to half video length
    videoCapture.set(CAP_PROP_POS_FRAMES, totalFrames/2);
    // Get frame and show it
    videoCapture >> myFrame;
    // Find the processing area corresponding to the tape area over the reading head
    bool found = findProcessingAreas(configurationFile);
    // Reset frame position
    videoCapture.set(CAP_PROP_POS_FRAMES, 0);

    /**************************** WRITE USEFUL INFORMATION TO LOG FILE ***************************/
    // Get now time
    std::time_t t = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
    std::string ts = std::ctime(&t);
    ofstream myFile;
    myFile.open("log.txt", ios::app);
    myFile << endl << fileName << endl;
    myFile << "tsh: " << thresholdPercentual << " tshp: " << thresholdPercentualPinchRoller << std::endl;
    myFile << ts; // No endline character for avoiding middle blank line.
    if (found) {
        cout << "Processing area found!" << endl;
        myFile << "Processing area found!" << endl;
        myFile.close();
    } else {
        cout << "Processing area not found. Try changing JSON parameters." << endl;
        myFile << "Processing area not found." << endl;
        myFile.close();
        return 1; // Program terminated early
    }

    /********************************* MAKE REQUIRED DIRECTORIES *********************************/
    makeDirectories(fileName, outputPath, brands);

    /**************************************** PROCESSING *****************************************/
    std::cout << "\n\033[32mStarting processing...\033[0m\n" << std::endl;
    // Processing timer
    time_t startTimer, endTimer;
    startTimer = time(NULL);
    processing(videoCapture, fileName);
    endTimer = time(NULL);
    // NOTE(review): integer division — the fractional minutes are truncated
    // before the float assignment; harmless for mm:ss display but worth noting.
    float min = (endTimer - startTimer) / 60;
    float sec = (endTimer - startTimer) % 60;
    std::string result("Processing elapsed time: " + std::to_string((int)min) + ":" + std::to_string((int)sec));
    cout << endl << result << endl;
    myFile.open("log.txt", ios::app);
    myFile << result << std::endl << std::endl;
    myFile.close();

    /**************************************** IRREGULARITY FILES ****************************************/
    // First output: irregularities with time labels only.
    std::ofstream outputFile1;
    std::string outputFile1Name = outputPath + "IrregularityFileOutput1.json";
    outputFile1.open(outputFile1Name);
    outputFile1 << irregularityFileOutput1 << std::endl;
    // Irregularities to extract for the AudioAnalyser and to the TapeIrregularityClassifier
    extractIrregularityImagesForAudio(outputPath, videoPath, irregularityFileInput, irregularityFileOutput2);
    // Second output: irregularities enriched with image URIs.
    std::ofstream outputFile2;
    std::string outputFile2Name = outputPath + "IrregularityFileOutput2.json";
    outputFile2.open(outputFile2Name);
    outputFile2 << irregularityFileOutput2 << std::endl;
    return 0;
}