Commit 4f6e9774 authored by Matteo's avatar Matteo
Browse files

update

parent 864f7f04
......@@ -27,12 +27,12 @@ std::list<Irregularity> IrregularityFile::getIrregularities() const {
return this->irregularities;
}
/**
 * @brief Appends an irregularity to this file's list.
 *
 * Returns a reference (not a copy) so calls can be chained, e.g.
 * file.add(a).add(b).sort().
 *
 * @param irregularity The irregularity to append (taken by value).
 * @return Reference to *this, enabling call chaining.
 */
// NOTE(review): the diff had left the pre-change signature (returning
// IrregularityFile by value) alongside this one; only the
// reference-returning definition is kept — the by-value form would have
// mutated and returned a temporary, silently breaking chained mutation.
IrregularityFile& IrregularityFile::add(const Irregularity irregularity) {
    this->irregularities.push_back(irregularity);
    return *this;
}
IrregularityFile IrregularityFile::remove_by_id(const boost::uuids::uuid id) {
IrregularityFile& IrregularityFile::remove_by_id(const boost::uuids::uuid id) {
for (auto it = this->irregularities.begin(); it != this->irregularities.end(); ++it) {
if (it->id == id) {
this->irregularities.erase(it);
......@@ -42,7 +42,7 @@ IrregularityFile IrregularityFile::remove_by_id(const boost::uuids::uuid id) {
return *this;
}
IrregularityFile IrregularityFile::sort() {
IrregularityFile& IrregularityFile::sort() {
this->irregularities.sort([](const Irregularity& a, const Irregularity& b) {
return a.time_label < b.time_label;
});
......
......@@ -22,9 +22,9 @@ public:
uint16_t getOffset() const;
std::list<Irregularity> getIrregularities() const;
IrregularityFile add(const Irregularity irregularity);
IrregularityFile remove_by_id(const boost::uuids::uuid id);
IrregularityFile sort();
IrregularityFile& add(const Irregularity irregularity);
IrregularityFile& remove_by_id(const boost::uuids::uuid id);
IrregularityFile& sort();
};
#endif // IRREGULARITY_FILE_H
\ No newline at end of file
......@@ -51,6 +51,7 @@
using namespace cv;
using namespace std;
using utility::Frame;
using json = nlohmann::json;
namespace fs = std::filesystem;
namespace po = boost::program_options;
......@@ -69,8 +70,8 @@ float firstInstant = 0;
string fileName, extension;
// Path variables
fs::path outputPath {};
fs::path irregularityImagesPath {};
static fs::path outputPath {};
static fs::path irregularityImagesPath {};
// JSON files
static json configurationFile {};
static json irregularityFileOutput1 {};
......@@ -78,8 +79,6 @@ static json irregularityFileOutput2 {};
// RotatedRect identifying the processing area
RotatedRect rect, rectTape, rectCapstan;
// Structs
// config.json parameters
struct Config {
fs::path workingPath;
......@@ -105,7 +104,7 @@ static SceneObject tape;
static SceneObject capstan;
// Constants Paths
static const string READING_HEAD_IMG = "images/reading_head.png";
static const string READING_HEAD_IMG = "input/readingHead.png";
static const string CAPSTAN_TEMPLATE_IMG = "input/capstanBERIO058prova.png";
static const string CONFIG_FILE = "config/config.json";
......@@ -117,7 +116,7 @@ static const string CONFIG_FILE = "config/config.json";
* @return true if input configuration is valid;
* @return false otherwise.
*/
bool getArguments(int argc, const char** argv) {
bool getArguments(int argc, char** argv) {
// Read configuration file
ifstream iConfig(CONFIG_FILE);
iConfig >> configurationFile;
......@@ -210,29 +209,25 @@ bool findProcessingAreas(Mat myFrame) {
/*********************************** READING HEAD DETECTION **********************************/
/*********************************************************************************************/
utility::Frame myFrameUtility(myFrame);
myFrameUtility
.convertColor(COLOR_BGR2GRAY)
.downsample(2);
utility::Frame templateImageUtility(cv::imread(READING_HEAD_IMG, IMREAD_GRAYSCALE));
templateImageUtility.downsample(2);
// Save a grayscale version of myFrame in myFrameGrayscale and downsample it in half pixels for performance reasons
Mat myFrameGrayscale, myFrameGrayscaleHalf;
cvtColor(myFrame, myFrameGrayscale, COLOR_BGR2GRAY);
pyrDown(myFrameGrayscale, myFrameGrayscaleHalf, Size(myFrame.cols/2, myFrame.rows/2));
Frame gray_current_frame = Frame(myFrame)
.convertColor(COLOR_BGR2GRAY);
Frame halved_gray_current_frame = gray_current_frame
.clone()
.downsample(2);
// Get input shape in grayscale and downsample it in half pixels
Mat templateImage = cv::imread(READING_HEAD_IMG, IMREAD_GRAYSCALE);
Mat templateImageHalf;
pyrDown(templateImage, templateImageHalf, Size(templateImage.cols/2, templateImage.rows/2));
Frame reading_head_template = Frame(cv::imread(READING_HEAD_IMG, IMREAD_GRAYSCALE)).downsample(2);
// Process only the bottom-central portion of the input video -> best results with our videos
Rect readingHeadProcessingAreaRect(myFrameGrayscaleHalf.cols/4, myFrameGrayscaleHalf.rows/2, myFrameGrayscaleHalf.cols/2, myFrameGrayscaleHalf.rows/2);
Mat processingImage = myFrameGrayscaleHalf(readingHeadProcessingAreaRect);
// Select the template to be detected
Mat templateShape = templateImageHalf;
Rect readingHeadProcessingAreaRect(
halved_gray_current_frame.cols/4,
halved_gray_current_frame.rows/2,
halved_gray_current_frame.cols/2,
halved_gray_current_frame.rows/2
);
Mat processingImage = halved_gray_current_frame(readingHeadProcessingAreaRect);
// Algorithm and parameters
// for informations about the Generalized Hough Guild interface see the tutorial at https://docs.opencv.org/4.7.0/da/ddc/tutorial_generalized_hough_ballard_guil.html
......@@ -243,7 +238,6 @@ bool findProcessingAreas(Mat myFrame) {
int oldPosThresh = tape.threshold.pos;
RotatedRect rectPos, rectNeg;
ofstream myFile;
Point2f pts[4];
// Find the best matches for positive and negative angles
// If there are more than one shapes, then choose the one with the highest score
......@@ -269,11 +263,9 @@ bool findProcessingAreas(Mat myFrame) {
alg->setCannyLowThresh(150); // Old: 100
alg->setCannyHighThresh(240); // Old: 300
alg->setTemplate(templateShape);
alg->setTemplate(reading_head_template);
cout << DARK_CYAN << "Reading head" << END << endl;
utility::detectShape(alg, templateShape, tape.threshold.pos, positionsPos, votesPos, positionsNeg, votesNeg, processingImage);
utility::detectShape(alg, reading_head_template, tape.threshold.pos, positionsPos, votesPos, positionsNeg, votesNeg, processingImage);
for (int i = 0; i < votesPos.size().width; i++) {
if (votesPos.at<int>(i) >= maxValPos) {
......@@ -291,9 +283,9 @@ bool findProcessingAreas(Mat myFrame) {
// The color is progressively darkened to emphasize that the algorithm found more than one shape
if (positionsPos.size() > 0)
rectPos = utility::drawShapes(myFrame, positionsPos[indexPos], Scalar(0, 0, 255-indexPos*64), templateImageHalf.cols, templateImageHalf.rows, myFrameGrayscaleHalf.cols/4, myFrameGrayscaleHalf.rows/2, 2);
rectPos = utility::drawShapes(myFrame, positionsPos[indexPos], Scalar(0, 0, 255-indexPos*64), reading_head_template.cols, reading_head_template.rows, halved_gray_current_frame.cols/4, halved_gray_current_frame.rows/2, 2);
if (positionsNeg.size() > 0)
rectNeg = utility::drawShapes(myFrame, positionsNeg[indexNeg], Scalar(128, 128, 255-indexNeg*64), templateImageHalf.cols, templateImageHalf.rows, myFrameGrayscaleHalf.cols/4, myFrameGrayscaleHalf.rows/2, 2);
rectNeg = utility::drawShapes(myFrame, positionsNeg[indexNeg], Scalar(128, 128, 255-indexNeg*64), reading_head_template.cols, reading_head_template.rows, halved_gray_current_frame.cols/4, halved_gray_current_frame.rows/2, 2);
myFile.open("log.txt", ios::app);
......@@ -317,9 +309,6 @@ bool findProcessingAreas(Mat myFrame) {
myFile.close();
return false;
}
cout << endl;
rect.points(pts);
/*********************************************************************************************/
/************************************ TAPE AREA DETECTION ************************************/
......@@ -357,11 +346,9 @@ bool findProcessingAreas(Mat myFrame) {
/*********************************************************************************************/
// Read template image - it is smaller than before, therefore there is no need to downsample
templateShape = imread(CAPSTAN_TEMPLATE_IMG, IMREAD_GRAYSCALE);
Mat templateShape = imread(CAPSTAN_TEMPLATE_IMG, IMREAD_GRAYSCALE);
// templateShape = imread("../input/capstanBERIO058.png", IMREAD_GRAYSCALE);
cout << DARK_CYAN << "Capstan" << END << endl;
if (useSURF) {
// Step 1: Detect the keypoints using SURF Detector, compute the descriptors
......@@ -371,7 +358,7 @@ bool findProcessingAreas(Mat myFrame) {
Mat descriptors_object, descriptors_scene;
detector->detectAndCompute(templateShape, noArray(), keypoints_object, descriptors_object);
detector->detectAndCompute(myFrameGrayscale, noArray(), keypoints_scene, descriptors_scene);
detector->detectAndCompute(gray_current_frame, noArray(), keypoints_scene, descriptors_scene);
// Step 2: Matching descriptor vectors with a FLANN based matcher
// Since SURF is a floating-point descriptor NORM_L2 is used
......@@ -388,7 +375,7 @@ bool findProcessingAreas(Mat myFrame) {
}
// Draw matches
Mat img_matches;
drawMatches(templateShape, keypoints_object, myFrameGrayscale, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
drawMatches(templateShape, keypoints_object, halved_gray_current_frame, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
// Localize the object
vector<Point2f> obj;
vector<Point2f> scene;
......@@ -425,29 +412,29 @@ bool findProcessingAreas(Mat myFrame) {
int capstanProcessingAreaRectWidth = myFrame.cols/4;
int capstanProcessingAreaRectHeight = myFrame.rows/2;
Rect capstanProcessingAreaRect(capstanProcessingAreaRectX, capstanProcessingAreaRectY, capstanProcessingAreaRectWidth, capstanProcessingAreaRectHeight);
Mat capstanProcessingAreaGrayscale = myFrameGrayscale(capstanProcessingAreaRect);
Mat capstanProcessingAreaGrayscale = gray_current_frame(capstanProcessingAreaRect);
// Reset algorithm and set parameters
alg = createGeneralizedHoughGuil();
alg -> setMinDist(capstan.minDist);
alg -> setLevels(360);
alg -> setDp(2);
alg -> setMaxBufferSize(1000);
alg->setMinDist(capstan.minDist);
alg->setLevels(360);
alg->setDp(2);
alg->setMaxBufferSize(1000);
alg -> setAngleStep(1);
alg -> setAngleThresh(capstan.threshold.angle);
alg->setAngleStep(1);
alg->setAngleThresh(capstan.threshold.angle);
alg -> setMinScale(0.9);
alg -> setMaxScale(1.1);
alg -> setScaleStep(0.01);
alg -> setScaleThresh(capstan.threshold.scale);
alg->setMinScale(0.9);
alg->setMaxScale(1.1);
alg->setScaleStep(0.01);
alg->setScaleThresh(capstan.threshold.scale);
alg -> setPosThresh(capstan.threshold.pos);
alg->setPosThresh(capstan.threshold.pos);
alg -> setCannyLowThresh(150);
alg -> setCannyHighThresh(240);
alg->setCannyLowThresh(150);
alg->setCannyHighThresh(240);
alg -> setTemplate(templateShape);
alg->setTemplate(templateShape);
oldPosThresh = capstan.threshold.pos;
......
......@@ -3,6 +3,32 @@
using namespace cv;
using namespace std;
// Constructors
// Default constructor: an empty frame (empty cv::Mat, no pixel data).
utility::Frame::Frame() : Mat() {}
// Wraps an existing Mat as a Frame. Per cv::Mat copy semantics this
// presumably shares the underlying pixel buffer rather than deep-copying —
// use clone() when an independent copy is needed.
utility::Frame::Frame(const Mat& m) : Mat(m) {}
// Copy constructor; same (presumed) buffer-sharing semantics as above.
utility::Frame::Frame(const Frame& f) : Mat(f) {}
// Operators
// Assigns from a plain Mat by delegating to the base-class assignment.
// No deep copy is made here — cv::Mat assignment presumably shares the
// pixel buffer (reference-counted); use clone() for an independent copy.
// Returns *this to allow chained assignment.
utility::Frame& utility::Frame::operator=(const Mat& m) {
    Mat::operator=(m);
    return *this;
}
// Copy assignment from another Frame; delegates to Mat's assignment, which
// also handles self-assignment safely. Same sharing semantics as the
// Mat-based overload (no deep copy). Returns *this for chaining.
utility::Frame& utility::Frame::operator=(const Frame& f) {
    Mat::operator=(f);
    return *this;
}
// Methods
// Returns a deep copy of this frame. Unlike the copy constructor / copy
// assignment (which presumably share the pixel buffer per cv::Mat
// semantics), Mat::clone() allocates fresh storage, so the returned Frame
// can be modified independently of this one.
utility::Frame utility::Frame::clone() const {
    return utility::Frame(Mat::clone());
}
// Shrinks the frame in place by `factor` using one Gaussian-pyramid step
// (cv::pyrDown), then returns *this for chaining.
// NOTE(review): cv::pyrDown only accepts destination sizes of roughly half
// the input (+/-1 per dimension), so any factor other than 2 is likely to
// throw at runtime — confirm all callers pass 2 (they do in the code
// visible here, e.g. downsample(2) in findProcessingAreas).
utility::Frame& utility::Frame::downsample(int factor) {
    pyrDown(*this, *this, Size(size().width / factor, size().height / factor));
    return *this;
}
// In-place color-space conversion via cv::cvtColor; `code` is a
// cv::ColorConversionCodes value (e.g. COLOR_BGR2GRAY, as used by callers
// in this project). Returns *this so conversions can be chained with
// downsample() and friends.
utility::Frame& utility::Frame::convertColor(int code) {
    cvtColor(*this, *this, code);
    return *this;
}
void utility::detectShape(Ptr<GeneralizedHoughGuil> alg, Mat templateShape, int posThresh, vector<Vec4f> &positivePositions, Mat &positiveVotes, vector<Vec4f> &negativePositions, Mat &negativeVotes, Mat processingArea) {
alg -> setPosThresh(posThresh);
......
......@@ -9,30 +9,20 @@ namespace fs = std::filesystem;
namespace utility {
class Frame : Mat {
/**
* @brief Class that extends the OpenCV Mat class, adding some useful methods frequently used in the project.
*
*/
class Frame : public Mat {
public:
Frame() : Mat() {}
Frame(const Mat& m) : Mat(m) {}
Frame(const Frame& f) : Mat(f) {}
Frame& operator=(const Mat& m) {
Mat::operator=(m);
return *this;
}
Frame& operator=(const Frame& f) {
Mat::operator=(f);
return *this;
}
Frame clone() const {
return Frame(Mat::clone());
}
Frame& downsample(int factor) {
pyrDown(*this, *this, Size(size().width / factor, size().height / factor));
return *this;
}
Frame& convertColor(int code) {
cv::cvtColor(*this, *this, code);
return *this;
}
Frame();
Frame(const Mat& m);
Frame(const Frame& f);
Frame& operator=(const Mat& m);
Frame& operator=(const Frame& f);
Frame clone() const;
Frame& downsample(int factor);
Frame& convertColor(int code);
};
/**
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment