script.cpp
/*
    This script analyses a given video to detect the discontinuities
    (irregularities) that it contains.

    All the information required by the algorithm can be found in the JSON
    configuration files inside the config folder.

    @author Nadir Dalla Pozza
    @version 3.0
    @date 29-06-2022
*/
#include <filesystem>
#include <iostream>
#include <sys/timeb.h>

#include <boost/uuid/uuid.hpp>            // uuid class
#include <boost/uuid/uuid_generators.hpp> // generators
#include <boost/uuid/uuid_io.hpp>         // streaming operators etc.
#include <boost/lexical_cast.hpp>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

#include <nlohmann/json.hpp>

#include "utility.h"
#include "forAudioAnalyser.h"

namespace fs = std::filesystem;
using namespace cv;
using namespace std;
using json = nlohmann::json;



/*
------------------------------------------------------------------------------
VARIABLES
------------------------------------------------------------------------------
*/

bool savingPinchRoller = false, pinchRollerRect = false;
bool savingBrand = false;
cv::Mat myFrame;
float mediaPrevFrame = 0;
bool firstBrand = true;	// The first frame containing brands on tape must be saved
float firstBrandInstant = 0;

// config.json parameters
bool brands;
std::string irregularityFileInputPath;
std::string outputPath;
std::string videoPath;
float speed;
float tapeThresholdPercentual;
float capstanThresholdPercentual;
// JSON files
json configurationFile;
json irregularityFileInput;
json irregularityFileOutput1;
json irregularityFileOutput2;
// RotatedRect identifying the processing area
RotatedRect rect, rectTape, rectCapstan;

bool debug = true;



bool frameDifference(cv::Mat prevFrame, cv::Mat currentFrame, int msToEnd) {

	/********************************** Capstan analysis *****************************************/

	// In the last minute of the video, check for pinchRoller position for endTape event
	if (msToEnd < 60000) {

		// Capstan area
		int capstanAreaPixels = rectCapstan.size.width * rectCapstan.size.height;
		float capstanDifferentPixelsThreshold = capstanAreaPixels * capstanThresholdPercentual / 100;
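		// Example with illustrative numbers: a 200x80 px capstan area and a 5 % threshold
		// give 16000 * 5 / 100 = 800 pixels; more than 800 changed pixels triggers the event.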
		
		// CODE FROM https://answers.opencv.org/question/497/extract-a-rotatedrect-area/

		// matrices we'll use
		Mat M, rotatedPrevFrame, croppedPrevFrame, rotatedCurrentFrame, croppedCurrentFrame;
		// get angle and size from the bounding box
		float angle = rectCapstan.angle;
		Size rect_size = rectCapstan.size;
		// thanks to http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
		if (rectCapstan.angle < -45.) {
			angle += 90.0;
			swap(rect_size.width, rect_size.height);
		}
		// get the rotation matrix
		M = getRotationMatrix2D(rectCapstan.center, angle, 1.0);
		// perform the affine transformation
		warpAffine(prevFrame, rotatedPrevFrame, M, prevFrame.size(), INTER_CUBIC);
		warpAffine(currentFrame, rotatedCurrentFrame, M, currentFrame.size(), INTER_CUBIC);
		// crop the resulting image
		getRectSubPix(rotatedPrevFrame, rect_size, rectCapstan.center, croppedPrevFrame);
		getRectSubPix(rotatedCurrentFrame, rect_size, rectCapstan.center, croppedCurrentFrame);

		// imshow("Current frame", currentFrame);
		// imshow("Cropped Current Frame", croppedCurrentFrame);
		// waitKey();

		// END CODE FROM https://answers.opencv.org/question/497/extract-a-rotatedrect-area/

		cv::Mat differenceFrame = difference(croppedPrevFrame, croppedCurrentFrame);

		int blackPixelsCapstan = 0;

		for (int i = 0; i < croppedCurrentFrame.rows; i++) {
			for (int j = 0; j < croppedCurrentFrame.cols; j++) {
				if (differenceFrame.at<cv::Vec3b>(i, j)[0] == 0) {
					// There is a black pixel, then there is a difference between previous and current frames
					blackPixelsCapstan++;
				}
			}
		}

		if (blackPixelsCapstan > capstanDifferentPixelsThreshold) {
			savingPinchRoller = true;
			return true;
		} else {
			savingPinchRoller = false;
		}
	}
	
	/************************************ Tape analysis ******************************************/

	// Tape area
    int tapeAreaPixels = rectTape.size.width * rectTape.size.height;
	float tapeDifferentPixelsThreshold = tapeAreaPixels * tapeThresholdPercentual / 100;

	/***************** Extract matrices corresponding to the processing area *********************/

	// Tape area
	// CODE FROM https://answers.opencv.org/question/497/extract-a-rotatedrect-area/

	// matrices we'll use
	Mat M, rotatedPrevFrame, croppedPrevFrame, rotatedCurrentFrame, croppedCurrentFrame;
	// get angle and size from the bounding box
	float angle = rectTape.angle;
	Size rect_size = rectTape.size;
	// thanks to http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
	if (rectTape.angle < -45.) {
		angle += 90.0;
		swap(rect_size.width, rect_size.height);
	}
	// get the rotation matrix
	M = getRotationMatrix2D(rectTape.center, angle, 1.0);
	// perform the affine transformation
	warpAffine(prevFrame, rotatedPrevFrame, M, prevFrame.size(), INTER_CUBIC);
	warpAffine(currentFrame, rotatedCurrentFrame, M, currentFrame.size(), INTER_CUBIC);
	// crop the resulting image
	getRectSubPix(rotatedPrevFrame, rect_size, rectTape.center, croppedPrevFrame);
	getRectSubPix(rotatedCurrentFrame, rect_size, rectTape.center, croppedCurrentFrame);

	// imshow("Current frame", currentFrame);
	// imshow("Cropped Current Frame", croppedCurrentFrame);
	// waitKey();

	// END CODE FROM https://answers.opencv.org/question/497/extract-a-rotatedrect-area/

	cv::Mat differenceFrame = difference(croppedPrevFrame, croppedCurrentFrame);

	int decEnd = (msToEnd % 1000) / 100;
	int secEnd = (msToEnd - (msToEnd % 1000)) / 1000;
	int minEnd = secEnd / 60;
	secEnd = secEnd % 60;


	/****************************** Segment analysis ****************************************/
			
  	int blackPixels = 0;
	float mediaCurrFrame;
	int totColoreCF = 0;

	for (int i = 0; i < croppedCurrentFrame.rows; i++) {
		for (int j = 0; j < croppedCurrentFrame.cols; j++) {
			totColoreCF += croppedCurrentFrame.at<cv::Vec3b>(i, j)[0] + croppedCurrentFrame.at<cv::Vec3b>(i, j)[1] + croppedCurrentFrame.at<cv::Vec3b>(i, j)[2];
			if (differenceFrame.at<cv::Vec3b>(i, j)[0] == 0) {
				blackPixels++;
			}
		}
	}
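	// mediaCurrFrame below is the mean summed BGR intensity per pixel over the tape area
	// (sum of the three channels divided by the area); it is used to detect brand changes on the tape.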
	mediaCurrFrame = totColoreCF/tapeAreaPixels;

	if (blackPixels > tapeDifferentPixelsThreshold) {
		if (brands) {
			if (mediaPrevFrame > (mediaCurrFrame + 10) || mediaPrevFrame < (mediaCurrFrame - 10)) { // The color averages differ by more than 10
				// Update mediaPrevFrame
				mediaPrevFrame = mediaCurrFrame;
				firstBrandInstant = msToEnd;
				return true;
			}
			// If the condition above is not met, update mediaPrevFrame anyway
			mediaPrevFrame = mediaCurrFrame;
			// At the beginning of the video, wait at least 1 second before treating the next Irregularity as a brand.
			// It is not guaranteed to be the first brand, but it is generally a safe way to obtain a correct image.
			if (firstBrand && (firstBrandInstant - msToEnd > 1000)) {
				firstBrand = false;
				savingBrand = true;
				return true;
			}
		} else {
			return true;
		}
	}
	
	return false;
}


int processing(cv::VideoCapture videoCapture, std::string fileName) {

	// Video duration
	int frameNumbers_v = videoCapture.get(CAP_PROP_FRAME_COUNT);
	float fps_v = videoCapture.get(CAP_PROP_FPS); // FPS can be non-integers!!!
	float videoLength = (float) frameNumbers_v / fps_v; // [s]
	int videoLength_ms = videoLength * 1000;
	
    int savedFrames = 0, unsavedFrames = 0;
	float lastSaved = -160;
	int savingRate = 0; // [ms]
	// Whenever we find an Irregularity, we want to skip a length equal to the reading head (3 cm = 1.18 inches)
	if (speed == 7.5)
		savingRate = 157; // Time taken to cross 3 cm at 7.5 ips
	else if (speed == 15)
		savingRate = 79; // Time taken to cross 3 cm at 15 ips
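	// Sanity check on the two rates above (3 cm = 1.18 in):
	//   at 7.5 ips: 1.18 / 7.5 = 0.157 s = 157 ms;  at 15 ips: 1.18 / 15 = 0.079 s = 79 ms.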

	// The first frame of the video won't be processed
    cv::Mat prevFrame;
	videoCapture >> prevFrame;
	firstBrandInstant = videoLength_ms - videoCapture.get(CAP_PROP_POS_MSEC);

    while (videoCapture.isOpened()) {

		cv::Mat frame;
        videoCapture >> frame;

        if (!frame.empty()) {

			int ms = videoCapture.get(CAP_PROP_POS_MSEC);
			int msToEnd = videoLength_ms - ms;
			if (ms == 0) // With OpenCV, this can happen on the last few frames of the video, before the library realises that "frame" is empty.
				break;
			int secToEnd = msToEnd / 1000;
			int minToEnd = (secToEnd / 60) % 60;
			secToEnd = secToEnd % 60;

			std::string secStrToEnd = std::to_string(secToEnd), minStrToEnd = std::to_string(minToEnd);
			if (minToEnd < 10)
				minStrToEnd = "0" + minStrToEnd;
			if (secToEnd < 10)
				secStrToEnd = "0" + secStrToEnd;

			std::cout << "\rIrregularities: " << savedFrames << ".   ";
			std::cout << "Remaining video time [mm:ss]: " << minStrToEnd << ":" << secStrToEnd << std::flush;

			if ((ms - lastSaved > savingRate) && frameDifference(prevFrame, frame, msToEnd)) {
				
				// An Irregularity is found!

				// De-interlacing frame
				cv::Mat oddFrame(frame.rows/2, frame.cols, CV_8UC3);
				cv::Mat evenFrame(frame.rows/2, frame.cols, CV_8UC3);
				separateFrame(frame, oddFrame, evenFrame);
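				// separateFrame (from utility.h) is assumed here to split the interlaced frame
				// by scan-line parity into two half-height fields (odd and even rows).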

				// Finding an image containing the whole tape
				Point2f pts[4];
				if (savingPinchRoller)
					rectCapstan.points(pts);
				else
					rectTape.points(pts);
				cv::Mat subImage(frame, cv::Rect(100, min(pts[1].y, pts[2].y), frame.cols - 100, static_cast<int>(rectTape.size.height)));

				// De-interlacing the image with the whole tape
				cv::Mat oddSubImage(subImage.rows/2, subImage.cols, CV_8UC3);
				int evenSubImageRows = subImage.rows/2;
				if (subImage.rows % 2 != 0) // If the detected rectangle has odd height, evenSubImage must be one row taller, otherwise we get a segmentation fault
					evenSubImageRows += 1;
				cv::Mat evenSubImage(evenSubImageRows, subImage.cols, CV_8UC3);
				separateFrame(subImage, oddSubImage, evenSubImage);

				std::string timeLabel = getTimeLabel(ms);
				std::string safeTimeLabel = getSafeTimeLabel(ms);

				saveIrregularityImage(safeTimeLabel, fileName, oddFrame, oddSubImage, savingPinchRoller);

				// Append Irregularity information to JSON
				boost::uuids::uuid uuid = boost::uuids::random_generator()();
				irregularityFileOutput1["Irregularities"] += {{
						"IrregularityID", boost::lexical_cast<std::string>(uuid)
					}, {
						"Source", "v"
					}, {
						"TimeLabel", timeLabel
					}
				};
				irregularityFileOutput2["Irregularities"] += {{
						"IrregularityID", boost::lexical_cast<std::string>(uuid)
					}, {
						"Source", "v"
					}, {
						"TimeLabel", timeLabel
					}, {
						"ImageURI", pathTape224 + "/"+ fileName + "_" + safeTimeLabel + ".jpg"
					}
				};

				lastSaved = ms;
				savedFrames++;

			} else {
				unsavedFrames++;
			}

			prevFrame = frame;

	    } else {
			std::cout << "\nEmpty frame!" << std::endl;
	    	videoCapture.release();
	    	break;
	    }
	}

	ofstream myFile;
	myFile.open("log.txt", ios::app);
	myFile << "Saved frames are: " << savedFrames << std::endl;
	myFile.close();

    return 0;

}


bool findProcessingAreas(json configurationFile) {

	/******************************************* JSON PARAMETERS *******************************************/

	// Read parameters from JSON
	int minDist, angleThresh, scaleThresh, posThresh,
	    minDistTape, angleThreshTape, scaleThreshTape, posThreshTape,
	    minDistCapstan, angleThreshCapstan, scaleThreshCapstan, posThreshCapstan;
	try {
		minDist = configurationFile["MinDist"];
		angleThresh = configurationFile["AngleThresh"];
		scaleThresh = configurationFile["ScaleThresh"];
		posThresh = configurationFile["PosThresh"];
		minDistTape = configurationFile["MinDistTape"];
		angleThreshTape = configurationFile["AngleThreshTape"];
		scaleThreshTape = configurationFile["ScaleThreshTape"];
		posThreshTape = configurationFile["PosThreshTape"];
		minDistCapstan = configurationFile["MinDistCapstan"];
		angleThreshCapstan = configurationFile["AngleThreshCapstan"];
		scaleThreshCapstan = configurationFile["ScaleThreshCapstan"];
		posThreshCapstan = configurationFile["PosThreshCapstan"];
	} catch (const nlohmann::detail::type_error& e) {
		std::cerr << "\033[1;31mconfig.json error!\033[0;31m\n" << e.what() << std::endl;
		return false; // This function returns bool; returning -1 here would convert to true
	}
	
	/******************************************* READING HEAD DETECTION *******************************************/

	// Obtain grayscale version of myFrame
	Mat myFrameGrayscale;
	cvtColor(myFrame, myFrameGrayscale, COLOR_BGR2GRAY);
	// Downsample myFrameGrayscale by half for performance reasons
	Mat myFrameGrayscaleHalf;
	pyrDown(myFrameGrayscale, myFrameGrayscaleHalf, Size(myFrame.cols/2, myFrame.rows/2));

	// Get input shape in grayscale
	Mat templateImage = imread("../input/readingHead.png", IMREAD_GRAYSCALE);
	// Downsample templateImage by half
	Mat templateImageHalf;
	pyrDown(templateImage, templateImageHalf, Size(templateImage.cols/2, templateImage.rows/2));

	// Select the image to process
	Mat processingImage = myFrameGrayscaleHalf;
	// Select the template to be detected
	Mat templateShape = templateImageHalf;

	// Algorithm and parameters
	Ptr<GeneralizedHoughGuil> alg = createGeneralizedHoughGuil();
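	// Rough meaning of the GeneralizedHoughGuil parameters set below (per the OpenCV documentation):
	//   MinDist                   - minimal distance between detected objects
	//   Levels / Dp               - feature-table levels and inverse accumulator resolution
	//   AngleThresh / ScaleThresh - vote thresholds for rotation and scale
	//   MinScale..MaxScale        - scale search range (here +/-10 % of the template size)
	//   PosThresh                 - position-vote threshold
	//   Canny low/high            - edge-extraction thresholds applied before voting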

	alg -> setMinDist(minDist);
	alg -> setLevels(360);
	alg -> setDp(2);
	alg -> setMaxBufferSize(1000);

	alg -> setAngleStep(1);
	alg -> setAngleThresh(angleThresh);
	
	alg -> setMinScale(0.9);
	alg -> setMaxScale(1.1);
	alg -> setScaleStep(0.1);
	alg -> setScaleThresh(scaleThresh);

	alg -> setPosThresh(posThresh);

	alg -> setCannyLowThresh(100);
	alg -> setCannyHighThresh(300);

	alg -> setTemplate(templateShape);

	vector<Vec4f> positionsPos, positionsNeg;
	Mat votesPos, votesNeg;
    TickMeter tm;
	int oldPosThresh = posThresh;

	std::cout << "\033[1;36mReading head\033[0m" << endl;
	tm.start();
	detectShape(alg, templateShape, posThresh, positionsPos, votesPos, positionsNeg, votesNeg, myFrameGrayscaleHalf);
	tm.stop();
    std::cout << "Reading head detection time : " << tm.getTimeMilli() << " ms" << endl;
	RotatedRect rectPos = drawShapes(myFrame, positionsPos, Scalar(0, 0, 255), templateImageHalf.cols, templateImageHalf.rows, 0, 0, 2);
	RotatedRect rectNeg = drawShapes(myFrame, positionsNeg, Scalar(128, 128, 255), templateImageHalf.cols, templateImageHalf.rows, 0, 0, 2);

	Point2f pts[4];
	if (rectPos.size.width > 0)
		if (rectNeg.size.width > 0)
			if (votesPos.at<int>(0) > votesNeg.at<int>(0)) {
				cout << "Positive is best" << endl;
				rect = rectPos;
			} else {
				cout << "Negative is best" << endl;
				rect = rectNeg;
			}
		else {
			cout << "Positive is the only choice." << endl;
			rect = rectPos;
		}
	else if (rectNeg.size.width > 0) {
		cout << "Negative is the only choice." << endl;
		rect = rectNeg;
	} else {
		return false;
	}
	cout << endl;

	rect.points(pts);

	/******************************************* TAPE AREA DETECTION *******************************************/
	
	// Defining the processing area for identifying the tape under the reading head.
	//
	// Parameters for extracting a rectangle that completely contains the detected rectangle (even if it is
	// slightly rotated) and has twice its height (since the tape is immediately below the detected rectangle).
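	// rect.points(pts) above filled pts[0..3] with the corners of the detected RotatedRect;
	// min/max over opposite corners gives an axis-aligned box covering it, and the Y origin
	// is then shifted down by 2/3 of its height, towards the tape.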
	int tapeProcessingAreaX = min(pts[0].x, pts[1].x);
	int tapeProcessingAreaY = min(pts[1].y, pts[2].y) + (max(pts[0].y, pts[3].y) - min(pts[1].y, pts[2].y)) * 2/3; // Shift down the area
	int tapeProcessingAreaWidth = max(pts[3].x-pts[1].x, pts[2].x-pts[0].x);
	int tapeProcessingAreaHeight = max(pts[3].y-pts[1].y, pts[0].y-pts[2].y);

	Rect tapeProcessingAreaRect(tapeProcessingAreaX, tapeProcessingAreaY, tapeProcessingAreaWidth, tapeProcessingAreaHeight);
	// Obtain grayscale version of tapeProcessingArea
	Mat tapeProcessingAreaGrayscale = myFrameGrayscale(tapeProcessingAreaRect);
	// Read template image - it is smaller than before, therefore there is no need to downsample
	templateShape = imread("../input/tapeArea.png", IMREAD_GRAYSCALE);

	// Reset algorithm and set parameters
	alg = createGeneralizedHoughGuil();

	alg -> setMinDist(minDistTape);
	alg -> setLevels(360);
	alg -> setDp(2);
	alg -> setMaxBufferSize(1000);

	alg -> setAngleStep(1);
	alg -> setAngleThresh(angleThreshTape);

	alg -> setMinScale(0.9);
	alg -> setMaxScale(1.1);
	alg -> setScaleStep(0.05);
	alg -> setScaleThresh(scaleThreshTape);

	alg -> setPosThresh(posThreshTape);

	alg -> setCannyLowThresh(100);
	alg -> setCannyHighThresh(300);

	alg -> setTemplate(templateShape);

	oldPosThresh = posThreshTape;

	vector<Vec4f> positionsTapePos, positionsTapeNeg;
	Mat votesTapePos, votesTapeNeg;

	std::cout << "\033[1;36mTape\033[0m" << endl;
	tm.reset();
	tm.start();
	detectShape(alg, templateShape, posThreshTape, positionsTapePos, votesTapePos, positionsTapeNeg, votesTapeNeg, tapeProcessingAreaGrayscale);
	tm.stop();
    std::cout << "Tape detection time : " << tm.getTimeMilli() << " ms" << endl;
	RotatedRect rectTapePos = drawShapes(myFrame, positionsTapePos, Scalar(0, 255, 0), templateShape.cols, templateShape.rows, tapeProcessingAreaX, tapeProcessingAreaY, 1);
	RotatedRect rectTapeNeg = drawShapes(myFrame, positionsTapeNeg, Scalar(128, 255, 128), templateShape.cols, templateShape.rows, tapeProcessingAreaX, tapeProcessingAreaY, 1);
	
	if (rectTapePos.size.width > 0)
		if (rectTapeNeg.size.width > 0)
			if (votesTapePos.at<int>(0) > votesTapeNeg.at<int>(0)) {
				cout << "Positive is best" << endl;
				rectTape = rectTapePos;
			} else {
				cout << "Negative is best" << endl;
				rectTape = rectTapeNeg;
			}
		else {
			cout << "Positive is the only choice." << endl;
			rectTape = rectTapePos;
		}
	else if (rectTapeNeg.size.width > 0) {
		cout << "Negative is the only choice." << endl;
		rectTape = rectTapeNeg;
	} else {
		return false;
	}
	cout << endl;

	/******************************************* CAPSTAN DETECTION *******************************************/

	// Process only the bottom-right portion of the image (rightmost quarter, lower half), where the capstan always appears
	int capstanProcessingAreaRectX = myFrame.cols*3/4;
	int capstanProcessingAreaRectY = myFrame.rows/2;
	int capstanProcessingAreaRectWidth = myFrame.cols/4;
	int capstanProcessingAreaRectHeight = myFrame.rows/2;
	Rect capstanProcessingAreaRect(capstanProcessingAreaRectX, capstanProcessingAreaRectY, capstanProcessingAreaRectWidth, capstanProcessingAreaRectHeight);
	Mat capstanProcessingAreaGrayscale = myFrameGrayscale(capstanProcessingAreaRect);
	// Read template image - it is smaller than before, therefore there is no need to downsample
	templateShape = imread("../input/capstanBERIO058prova.png", IMREAD_GRAYSCALE);

	// Reset algorithm and set parameters
	alg = createGeneralizedHoughGuil();

	alg -> setMinDist(minDistCapstan);
	alg -> setLevels(360);
	alg -> setDp(2);
	alg -> setMaxBufferSize(1000);

	alg -> setAngleStep(1);
	alg -> setAngleThresh(angleThreshCapstan);

	alg -> setMinScale(0.9);
	alg -> setMaxScale(1.1);
	alg -> setScaleStep(0.05);
	alg -> setScaleThresh(scaleThreshCapstan);

	alg -> setPosThresh(posThreshCapstan);

	alg -> setCannyLowThresh(100);
	alg -> setCannyHighThresh(250);

	alg -> setTemplate(templateShape);

	oldPosThresh = posThreshCapstan;

	vector<Vec4f> positionsC1Pos, positionsC1Neg;
	Mat votesC1Pos, votesC1Neg;

	std::cout << "\033[1;36mCapstan\033[0m" << endl;
	tm.reset();
	tm.start();
	detectShape(alg, templateShape, posThreshCapstan, positionsC1Pos, votesC1Pos, positionsC1Neg, votesC1Neg, capstanProcessingAreaGrayscale);
	tm.stop();
    std::cout << "Capstan detection time : " << tm.getTimeMilli() << " ms" << endl;
	RotatedRect rectCapstanPos = drawShapes(myFrame, positionsC1Pos, Scalar(255, 0, 0), templateShape.cols-22, templateShape.rows-92, capstanProcessingAreaRectX+11, capstanProcessingAreaRectY+46, 1);
	RotatedRect rectCapstanNeg = drawShapes(myFrame, positionsC1Neg, Scalar(255, 128, 0), templateShape.cols-22, templateShape.rows-92, capstanProcessingAreaRectX+11, capstanProcessingAreaRectY+46, 1);
	
	if (rectCapstanPos.size.width > 0)
		if (rectCapstanNeg.size.width > 0)
			if (votesC1Pos.at<int>(0) > votesC1Neg.at<int>(0)) {
				cout << "Positive is best" << endl;
				rectCapstan = rectCapstanPos;
			} else {
				cout << "Negative is best" << endl;
				rectCapstan = rectCapstanNeg;
			}
		else {
			cout << "Positive is the only choice." << endl;
			rectCapstan = rectCapstanPos;
		}
	else if (rectCapstanNeg.size.width > 0) {
		cout << "Negative is the only choice." << endl;
		rectCapstan = rectCapstanNeg;
	} else {
		return false;
	}
	cout << endl;

	// Shows the detected areas
	// imshow("Tape area(s)", myFrame);
	// waitKey();

	return true;
}


int main(int argc, char** argv) {

	/**************************************** CONFIGURATION FILE ****************************************/
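	// Illustrative config.json sketch (the keys match those read below and in
	// findProcessingAreas; the values are placeholders, not real settings):
	// {
	//   "Brands": true,
	//   "IrregularityFileInput": "../config/IrregularityFileInput.json",
	//   "OutputPath": "../output/",
	//   "PreservationAudioVisualFile": "../input/video.mov",
	//   "Speed": 7.5,
	//   "ThresholdPercentualTape": 5,
	//   "ThresholdPercentualCapstan": 5,
	//   "MinDist": 100, "AngleThresh": 10000, "ScaleThresh": 1000, "PosThresh": 100,
	//   "MinDistTape": 100, "AngleThreshTape": 10000, "ScaleThreshTape": 1000, "PosThreshTape": 100,
	//   "MinDistCapstan": 100, "AngleThreshCapstan": 10000, "ScaleThreshCapstan": 1000, "PosThreshCapstan": 100
	// }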

	// Read configuration file
	std::ifstream iConfig("../config/config.json");
	iConfig >> configurationFile;
	// Initialise parameters
	try {
		brands = configurationFile["Brands"];
		irregularityFileInputPath = configurationFile["IrregularityFileInput"];
		outputPath = configurationFile["OutputPath"];
		videoPath = configurationFile["PreservationAudioVisualFile"];
		speed = configurationFile["Speed"];
		tapeThresholdPercentual = configurationFile["ThresholdPercentualTape"];
		capstanThresholdPercentual = configurationFile["ThresholdPercentualCapstan"];
	} catch (const nlohmann::detail::type_error& e) {
		std::cerr << "\033[1;31mconfig.json error!\033[0;31m\n" << e.what() << std::endl;
		return -1;
	}
	// Input JSON check
	std::ifstream iJSON(irregularityFileInputPath);
	if (iJSON.fail()) {
		std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nIrregularityFileInput.json cannot be found or opened."  << std::endl;
		return -1;
	}
	std::string fileName, extension;
    if (findFileName(videoPath, fileName, extension) == -1) {
        std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nThe PreservationAudioVisualFile cannot be found or opened." << std::endl;
        return -1;
    }
	if (speed != 7.5 && speed != 15) {
		std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nSpeed parameter must be 7.5 or 15 ips."  << std::endl;
		return -1;
	}
	if (tapeThresholdPercentual < 0 || tapeThresholdPercentual > 100) {
		std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nThresholdPercentualTape parameter must be a percentage value (0-100)." << std::endl;
		return -1;
	}
	if (capstanThresholdPercentual < 0 || capstanThresholdPercentual > 100) {
		std::cerr << "\033[1;31mconfig.json error!\033[0;31m\nThresholdPercentualCapstan parameter must be a percentage value (0-100)." << std::endl;
		return -1;
	}

	// Speed reference = 7.5 ips
	// If the speed is 15 ips, increase the tape threshold percentage by 4 to obtain similar detection performance
	if (speed == 15)
		tapeThresholdPercentual += 4;

    std::cout << "\nParameters from config.json file:" << std::endl;
	std::cout << "  Brands: " << brands << std::endl;
	std::cout << "  Speed: " << speed << std::endl;
    std::cout << "  ThresholdPercentualTape: " << tapeThresholdPercentual << std::endl;
	std::cout << "  ThresholdPercentualCapstan: " << capstanThresholdPercentual << std::endl;
	std::cout << std::endl;

	// Read input JSON
	iJSON >> irregularityFileInput;

	/******************************************* PROCESSING AREA DETECTION *******************************************/

	cv::VideoCapture videoCapture(videoPath);
    if (!videoCapture.isOpened()) {
        std::cerr << "\033[31m" << "Video unreadable." << std::endl;
        return -1;
    }

	// Get total number of frames
	int totalFrames = videoCapture.get(CAP_PROP_FRAME_COUNT);
	// Set frame position to half video length
	videoCapture.set(CAP_PROP_POS_FRAMES, totalFrames/2);
	// Grab the frame used for detection
	videoCapture >> myFrame;
	
	// Find the processing area corresponding to the tape area over the reading head
	bool found = findProcessingAreas(configurationFile);

	// Reset frame position
	videoCapture.set(CAP_PROP_POS_FRAMES, 0);

	/**************************** WRITE USEFUL INFORMATION TO LOG FILE ***************************/

	// Get now time
	std::time_t t = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
    std::string ts = std::ctime(&t);
	ofstream myFile;
	myFile.open("log.txt", ios::app);
	myFile << endl << fileName << endl;
	myFile << "tsh: " << tapeThresholdPercentual << "   tshp: " << capstanThresholdPercentual << std::endl;
	myFile << ts; // No endline character needed: std::ctime already appends one, so adding another would leave a blank line.

	if (found) {
		std::cout << "Processing areas found!" << endl;
		myFile << "Processing areas found!" << endl;
		myFile.close();
	} else {
		std::cout << "Processing area not found. Try changing JSON parameters." << endl;
		myFile << "Processing area not found." << endl;
		myFile.close();
		return -1; // Program terminated early
	}

	/********************************* MAKE REQUIRED DIRECTORIES *********************************/
	
	makeDirectories(fileName, outputPath, brands);

	/**************************************** PROCESSING *****************************************/

	std::cout << "\n\033[32mStarting processing...\033[0m\n" << std::endl;

	// Processing timer
	time_t startTimer, endTimer;
	startTimer = time(NULL);

	processing(videoCapture, fileName);

	endTimer = time(NULL);
	float min = (endTimer - startTimer) / 60;
	float sec = (endTimer - startTimer) % 60;

	std::string result("Processing elapsed time: " + std::to_string((int)min) + ":" + std::to_string((int)sec));
	std::cout << endl << result << endl;

	myFile.open("log.txt", ios::app);
	myFile << result << std::endl << std::endl;
	myFile.close();

	/**************************************** IRREGULARITY FILES ****************************************/

	std::ofstream outputFile1;
	std::string outputFile1Name = outputPath + "IrregularityFileOutput1.json";
	outputFile1.open(outputFile1Name);
	outputFile1 << irregularityFileOutput1 << std::endl;

	// Irregularities to extract for the AudioAnalyser and to the TapeIrregularityClassifier
	extractIrregularityImagesForAudio(outputPath, videoPath, irregularityFileInput, irregularityFileOutput2);

	std::ofstream outputFile2;
	std::string outputFile2Name = outputPath + "IrregularityFileOutput2.json";
	outputFile2.open(outputFile2Name);
	outputFile2 << irregularityFileOutput2 << std::endl;
	
    return 0;

}