MPAI-Private / MPAI-CAE / arp / Video Analyzer · Commits

Commit 8f5a80ed
Authored Jun 04, 2023 by Matteo
major refactoring
Parent: df88fb26
Changes: 25 files
src/lib/time.cpp

-#include "time.h"
+#include "time.hpp"

 std::string getTimeLabel(int ms, std::string delim) {
   int mil = ms % 1000;
   ...
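The body of getTimeLabel is collapsed in the diff above; only the signature and the first millisecond computation are visible. As a point of reference, the sketch below shows what a formatter with this signature would typically do. It is an assumption about the behaviour, not the project's implementation, and the name formatTimeLabel is deliberately different to mark it as hypothetical.

    #include <string>

    // Hypothetical sketch of a getTimeLabel-style formatter ("hh:mm:ss.mmm"
    // with a configurable delimiter). Only "int mil = ms % 1000;" is visible
    // in the diff; everything else here is assumed.
    std::string formatTimeLabel(int ms, std::string delim = ":") {
      int mil = ms % 1000;          // milliseconds
      int sec = (ms / 1000) % 60;   // seconds
      int min = (ms / 60000) % 60;  // minutes
      int hour = ms / 3600000;      // hours
      auto pad = [](int v) {
        return (v < 10 ? "0" : "") + std::to_string(v);
      };
      return pad(hour) + delim + pad(min) + delim + pad(sec) + "." +
             std::to_string(mil);
    }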
src/lib/time.h → src/lib/time.hpp

@@ -16,4 +16,4 @@
  */
 std::string getTimeLabel(int ms, std::string delim = ":");
-#endif
\ No newline at end of file
+#endif  // GETTIMELABEL_H
\ No newline at end of file
src/main.cpp

@@ -23,59 +23,47 @@
  * @copyright 2023, Audio Innova S.r.l.
  * @credits Niccolò Pretto, Nadir Dalla Pozza, Sergio Canazza
  * @license GPL v3.0
- * @version 1.1.3
+ * @version 1.2
  * @status Production
  */
 #include <stdlib.h>
 #include <sys/timeb.h>
 #include <boost/lexical_cast.hpp>
 #include <boost/program_options.hpp>
-#include <boost/uuid/uuid.hpp>             // uuid class
-#include <boost/uuid/uuid_generators.hpp>  // generators
-#include <boost/uuid/uuid_io.hpp>          // streaming operators etc.
+#include <boost/uuid/uuid.hpp>
+#include <boost/uuid/uuid_generators.hpp>
+#include <boost/uuid/uuid_io.hpp>
 #include <filesystem>
 #include <fstream>
 #include <iostream>
 #include <nlohmann/json.hpp>
 #include <opencv2/calib3d.hpp>
 #include <opencv2/core/core.hpp>
 #include <opencv2/features2d.hpp>
 #include <opencv2/highgui.hpp>
 #include <opencv2/imgcodecs.hpp>
 #include <opencv2/imgproc.hpp>
 #include <opencv2/xfeatures2d.hpp>
 #include <variant>
+#include "lib/Irregularity.hpp"
+#include "lib/IrregularityFile.hpp"
+#include "lib/colors.hpp"
+#include "lib/core.hpp"
+#include "lib/detection.hpp"
+#include "lib/files.hpp"
-#include "forAudioAnalyser.h"
-#include "lib/Irregularity.h"
-#include "lib/IrregularityFile.h"
-#include "lib/colors.h"
-#include "lib/files.h"
-#include "lib/time.h"
-#include "utility.h"
-#define A_IRREG_FILE_1 "AudioAnalyser_IrregularityFileOutput1.json"
-#define V_IRREG_FILE_1 "VideoAnalyser_IrregularityFileOutput1.json"
-#define V_IRREG_FILE_2 "VideoAnalyser_IrregularityFileOutput2.json"
-using namespace cv;
+#include "lib/io.hpp"
+#include "lib/time.hpp"
+#include "utility.hpp"
 using namespace std;
-using utility::Frame;
 using namespace colors;
 using json = nlohmann::json;
+using videoanalyser::core::Frame;
+using videoanalyser::io::pprint;
+using videoanalyser::io::print_error_and_exit;
 namespace fs = std::filesystem;
 namespace po = boost::program_options;
+namespace va = videoanalyser;

 /**
  * @const bool g_use_surf
  * @brief If true, SURF is used for capstan detection, otherwise GHT is used.
  *
  * For capstan detection, there are two alternative approaches:
  * 1. Generalized Hough Transform
  * 2. SURF.
  */
 bool g_use_surf = true;
 bool g_end_tape_saved = false;
 bool g_first_brand = true;  // The first frame containing brands on tape must be saved
 float g_first_instant = 0;
 float g_mean_prev_frame_color = 0;  // Average frame color
@@ -84,33 +72,19 @@ static fs::path g_irregularity_images_path{};
 static json g_irregularity_file_1{};
 static json g_irregularity_file_2{};
 // RotatedRect identifying the processing area
-RotatedRect rect, g_rect_tape, g_rect_capstan;
-
-/**
- * @fn void pprint(string text, string color)
- * @brief Prints a text in a given color.
- *
- * @param text
- * @param color
- */
-void pprint(string text, string color) {
-  std::cout << color << text << END << endl;
-}
-
-void print_error_and_exit(string title, string message) {
-  std::cerr << RED << BOLD << title << END << endl;
-  std::cerr << RED << message << END << endl;
-  exit(EXIT_FAILURE);
-}
+RotatedRect g_rect_tape, g_rect_capstan;

 struct Args {
-  fs::path workingPath;  /**< The working path where all input files are stored and where all output files will be saved */
-  string filesName;      /**< The name of the preservation files to be considered */
-  bool brands;           /**< True if tape presents brands on its surface */
-  float speed;           /**< The speed at which the tape was read */
+  fs::path working_path; /**< The working path where all input files are stored and where all output files will be saved */
+  string files_name;     /**< The name of the preservation files to be considered */
+  bool brands;           /**< True if tape presents brands on its surface */
+  float speed;           /**< The speed at which the tape was read */

-  Args(fs::path workingPath, string filesName, bool brands, float speed) {
+  Args(fs::path working_path, string files_name, bool brands, float speed) {
     if (speed != 7.5 && speed != 15)
       throw invalid_argument("Speed must be 7.5 or 15");
-    this->workingPath = workingPath;
-    this->filesName = filesName;
+    this->working_path = working_path;
+    this->files_name = files_name;
     this->brands = brands;
     this->speed = speed;
   }
@@ -148,11 +122,11 @@ struct Args {
     }
     po::notify(vm);
   } catch (po::invalid_command_line_syntax& e) {
-    print_error_and_exit("Invalid command line syntax!", string(e.what()));
+    print_error_and_exit("Invalid command line syntax: " + string(e.what()));
   } catch (po::required_option& e) {
-    print_error_and_exit("Missing required option!", string(e.what()));
+    print_error_and_exit("Missing required option: " + string(e.what()));
   } catch (nlohmann::detail::type_error e) {
-    print_error_and_exit("config.json error!", string(e.what()));
+    print_error_and_exit("config.json error: " + string(e.what()));
   }
   return Args(fs::path(vm["working-path"].as<string>()),
               vm["files-name"].as<string>(),
               vm["brands"].as<bool>(),
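Args::from_cli (only its error handling is visible in this hunk) parses the command line with boost::program_options and routes parse failures to print_error_and_exit. A self-contained sketch of that pattern is shown below; the option names mirror the ones read from vm above ("working-path", "files-name", "brands"), while everything else is illustrative rather than taken from the repository.

    #include <boost/program_options.hpp>
    #include <iostream>
    #include <string>

    namespace po = boost::program_options;

    int main(int argc, char** argv) {
      po::options_description desc("Options");
      desc.add_options()
          ("working-path", po::value<std::string>()->required(), "working directory")
          ("files-name", po::value<std::string>()->required(), "preservation file name")
          ("brands", po::value<bool>()->default_value(false), "tape has brands on its surface");
      po::variables_map vm;
      try {
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);  // throws po::required_option if a required flag is missing
      } catch (const po::error& e) {
        std::cerr << "Invalid command line syntax: " << e.what() << std::endl;
        return 1;
      }
      std::cout << vm["working-path"].as<std::string>() << std::endl;
      return 0;
    }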
@@ -160,11 +134,6 @@ struct Args {
   }
 };

-// Constants Paths
-static const string READING_HEAD_IMG = "input/readingHead.png";
-static const string CAPSTAN_TEMPLATE_IMG = "input/capstanBERIO058prova.png";
-static const string CONFIG_FILE = "config/config.json";

 /**
  * @brief Get the next frame object.
  *
@@ -196,75 +165,6 @@ Frame get_next_frame(VideoCapture& cap, float speed, bool skip = false) {
 float rotated_rect_area(RotatedRect rect) {
   return rect.size.width * rect.size.height;
 }

-/**
- * @fn std::tuple<int, int, double, double, vector<Vec4f>, vector<Vec4f>>
- * find_object(Mat model, SceneObject object)
- * @brief Find the model in the scene using the Generalized Hough Transform.
- * It returns the best matches for positive and negative angles. If more than
- * one shape is found, the one with the highest score is chosen; if several
- * share the same highest score, the last one is arbitrarily chosen.
- *
- * For information about GeneralizedHoughGuil usage, see the tutorial at
- * https://docs.opencv.org/4.7.0/da/ddc/tutorial_generalized_hough_ballard_guil.html
- *
- * @param model the template image to be searched with the Generalized Hough
- * Transform
- * @param object the SceneObject struct containing the parameters for the
- * Generalized Hough Transform
- * @return std::tuple<int, int, double, double, vector<Vec4f>, vector<Vec4f>> a
- * tuple containing the best matches for positive and negative angles
- */
-std::tuple<int, int, double, double, vector<Vec4f>, vector<Vec4f>> find_object(
-    Mat model, SceneObject object, Mat processing_area) {
-  Ptr<GeneralizedHoughGuil> alg = createGeneralizedHoughGuil();
-  vector<Vec4f> positive_positions, negative_positions;
-  Mat votesPos, votesNeg;
-  double maxValPos = 0, maxValNeg = 0;
-  int indexPos = 0, indexNeg = 0;
-  alg->setMinDist(object.minDist);
-  alg->setLevels(360);
-  alg->setDp(2);
-  alg->setMaxBufferSize(1000);
-  alg->setAngleStep(1);
-  alg->setAngleThresh(object.threshold.angle);
-  alg->setMinScale(0.9);
-  alg->setMaxScale(1.1);
-  alg->setScaleStep(0.01);
-  alg->setScaleThresh(object.threshold.scale);
-  alg->setPosThresh(object.threshold.pos);
-  alg->setCannyLowThresh(150);   // Old: 100
-  alg->setCannyHighThresh(240);  // Old: 300
-  alg->setTemplate(model);
-  utility::detectShape(alg, model, object.threshold.pos, positive_positions,
-                       votesPos, negative_positions, votesNeg,
-                       processing_area);
-  for (int i = 0; i < votesPos.size().width; i++) {
-    if (votesPos.at<int>(i) >= maxValPos) {
-      maxValPos = votesPos.at<int>(i);
-      indexPos = i;
-    }
-  }
-  for (int i = 0; i < votesNeg.size().width; i++) {
-    if (votesNeg.at<int>(i) >= maxValNeg) {
-      maxValNeg = votesNeg.at<int>(i);
-      indexNeg = i;
-    }
-  }
-  return {indexPos, indexNeg, maxValPos, maxValNeg, positive_positions,
-          negative_positions};
-}

 /**
  * @fn bool find_processing_areas(Mat my_frame)
  * @brief Identifies the Regions Of Interest (ROIs) on the video,
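The removed find_object above is the project's wrapper around OpenCV's Generalized Hough transform (GeneralizedHoughGuil). For readers unfamiliar with that API, here is a minimal, self-contained usage sketch; the image paths are placeholders and the thresholds are illustrative, not the project's configured values.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>
    #include <iostream>
    #include <vector>

    int main() {
      // Placeholder inputs: a small grayscale template and a larger grayscale scene.
      cv::Mat templ = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
      cv::Mat scene = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
      if (templ.empty() || scene.empty()) return 1;

      cv::Ptr<cv::GeneralizedHoughGuil> alg = cv::createGeneralizedHoughGuil();
      alg->setMinDist(100);    // minimum distance between reported detections
      alg->setLevels(360);
      alg->setDp(2);
      alg->setMinAngle(0);     // search the full rotation range ...
      alg->setMaxAngle(360);
      alg->setAngleStep(1);    // ... in 1-degree steps
      alg->setMinScale(0.9);
      alg->setMaxScale(1.1);
      alg->setScaleStep(0.01);
      alg->setPosThresh(100);  // illustrative vote threshold
      alg->setTemplate(templ);

      std::vector<cv::Vec4f> positions;  // each entry: x, y, scale, angle
      cv::Mat votes;                     // vote counts for each detection
      alg->detect(scene, positions, votes);

      for (const auto& p : positions)
        std::cout << "x=" << p[0] << " y=" << p[1]
                  << " scale=" << p[2] << " angle=" << p[3] << std::endl;
      return 0;
    }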
@@ -277,172 +177,34 @@ std::tuple<int, int, double, double, vector<Vec4f>, vector<Vec4f>> find_object(M
  * @return true if some areas have been detected;
  * @return false otherwise.
  */
-bool find_processing_areas(Mat my_frame, SceneObject tape, SceneObject capstan) {
-  /************************** READING HEAD DETECTION ***********************/
-  // Save a grayscale version of my_frame in myFrameGrayscale and downsample it
-  // in half pixels for performance reasons
-  Frame gray_current_frame = Frame(my_frame).convertColor(COLOR_BGR2GRAY);
-  Frame halved_gray_current_frame = gray_current_frame.clone().downsample(2);
-  // Get input shape in grayscale and downsample it in half pixels
-  Frame reading_head_template =
-      Frame(cv::imread(READING_HEAD_IMG, IMREAD_GRAYSCALE)).downsample(2);
-  // Process only the bottom-central portion of the input video -> best
-  // results with our videos
-  Rect readingHeadProcessingAreaRect(
-      halved_gray_current_frame.cols / 4, halved_gray_current_frame.rows / 2,
-      halved_gray_current_frame.cols / 2, halved_gray_current_frame.rows / 2);
-  Mat processingImage = halved_gray_current_frame(readingHeadProcessingAreaRect);
-  RotatedRect rectPos, rectNeg;
-  auto [indexPos, indexNeg, maxValPos, maxValNeg, positionsPos, positionsNeg] =
-      find_object(reading_head_template, tape, processingImage);
-  // The color is progressively darkened to emphasize that the algorithm found
-  // more than one shape
-  if (positionsPos.size() > 0)
-    rectPos = utility::drawShapes(
-        my_frame, positionsPos[indexPos], Scalar(0, 0, 255 - indexPos * 64),
-        reading_head_template.cols, reading_head_template.rows,
-        halved_gray_current_frame.cols / 4, halved_gray_current_frame.rows / 2, 2);
-  if (positionsNeg.size() > 0)
-    rectNeg = utility::drawShapes(
-        my_frame, positionsNeg[indexNeg], Scalar(128, 128, 255 - indexNeg * 64),
-        reading_head_template.cols, reading_head_template.rows,
-        halved_gray_current_frame.cols / 4, halved_gray_current_frame.rows / 2, 2);
-  if (maxValPos > 0)
-    if (maxValNeg > 0)
-      if (maxValPos > maxValNeg) {
-        rect = rectPos;
-      } else {
-        rect = rectNeg;
-      }
-    else {
-      rect = rectPos;
-    }
-  else if (maxValNeg > 0) {
-    rect = rectNeg;
-  } else {
+bool find_processing_areas(Frame frame, SceneObject tape, SceneObject capstan) {
+  va::detection::SceneElement tape_element{
+      va::detection::ElementType::TAPE,
+      tape.minDist,
+      {tape.threshold.percentual, tape.threshold.angle, tape.threshold.scale,
+       tape.threshold.pos}};
+  auto tape_roi_result =
+      va::detection::find_roi(frame, va::detection::Algorithm::GHT, tape_element);
+  if (std::holds_alternative<va::Error>(tape_roi_result)) {
+    pprint(std::get<va::Error>(tape_roi_result), RED);
     return false;
   }
+  g_rect_tape = std::get<va::detection::Roi>(tape_roi_result);
-  /************************************ TAPE AREA DETECTION ****************/
-  // Compute area basing on reading head detection
-  Vec4f tape_position(rect.center.x,
-                      rect.center.y + rect.size.height / 2 +
-                          20 * (rect.size.width / 200),
-                      1, rect.angle);
-  g_rect_tape = utility::drawShapes(
-      my_frame, tape_position, Scalar(0, 255 - indexPos * 64, 0),
-      rect.size.width, 50 * (rect.size.width / 200), 0, 0, 1);
-  /************************************* CAPSTAN DETECTION ******************/
-  // Read template image - it is smaller than before, therefore there is no
-  // need to downsample
-  Mat capstan_template = cv::imread(CAPSTAN_TEMPLATE_IMG, IMREAD_GRAYSCALE);
-  if (g_use_surf) {
-    // Step 1: Detect the keypoints using SURF Detector, compute the
-    // descriptors
-    int min_hessian = 100;
-    Ptr<xfeatures2d::SURF> detector = xfeatures2d::SURF::create(min_hessian);
-    vector<KeyPoint> keypoints_object, keypoints_scene;
-    Mat descriptors_object, descriptors_scene;
-    detector->detectAndCompute(capstan_template, noArray(), keypoints_object,
-                               descriptors_object);
-    detector->detectAndCompute(gray_current_frame, noArray(), keypoints_scene,
-                               descriptors_scene);
-    // Step 2: Matching descriptor vectors with a FLANN based matcher
-    // Since SURF is a floating-point descriptor NORM_L2 is used
-    Ptr<DescriptorMatcher> matcher =
-        DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
-    vector<vector<DMatch>> knn_matches;
-    matcher->knnMatch(descriptors_object, descriptors_scene, knn_matches, 2);
-    //-- Filter matches using the Lowe's ratio test
-    const float RATIO_THRESH = 0.75f;
-    vector<DMatch> good_matches;
-    for (size_t i = 0; i < knn_matches.size(); i++) {
-      if (knn_matches[i][0].distance < RATIO_THRESH * knn_matches[i][1].distance) {
-        good_matches.push_back(knn_matches[i][0]);
-      }
-    }
-    // Draw matches
-    Mat img_matches;
-    cv::drawMatches(capstan_template, keypoints_object,
-                    halved_gray_current_frame, keypoints_scene, good_matches,
-                    img_matches, Scalar::all(-1), Scalar::all(-1),
-                    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
-    // Localize the object
-    vector<Point2f> obj;
-    vector<Point2f> scene;
-    for (size_t i = 0; i < good_matches.size(); i++) {
-      // Get the keypoints from the good matches
-      obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
-      scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
-    }
-    Mat H = cv::findHomography(obj, scene, RANSAC);
-    // Get the corners from the image_1 ( the object to be "detected" )
-    vector<Point2f> obj_corners(4);
-    obj_corners[0] = Point2f(0, 0);
-    obj_corners[1] = Point2f((float)capstan_template.cols, 0);
-    obj_corners[2] = Point2f((float)capstan_template.cols, (float)capstan_template.rows);
-    obj_corners[3] = Point2f(0, (float)capstan_template.rows);
-    vector<Point2f> scene_corners(4);
-    cv::perspectiveTransform(obj_corners, scene_corners, H);
-    // Find average
-    float capstanX = (scene_corners[0].x + scene_corners[1].x +
-                      scene_corners[2].x + scene_corners[3].x) / 4;
-    float capstanY = (scene_corners[0].y + scene_corners[1].y +
-                      scene_corners[2].y + scene_corners[3].y) / 4;
-    // In the following there are two alterations to cut the first 20
-    // horizontal pixels and the first 90 vertical pixels from the found
-    // rectangle: +10 in X for centering and -20 in width, +45 in Y for
-    // centering and -90 in height
-    Vec4f positionCapstan(capstanX + 10, capstanY + 45, 1, 0);
-    g_rect_capstan = utility::drawShapes(
-        my_frame, positionCapstan, Scalar(255 - indexPos * 64, 0, 0),
-        capstan_template.cols - 20, capstan_template.rows - 90, 0, 0, 1);
-  } else {
-    // Process only the right portion of the image, where the capstan always
-    // appears
-    int capstanProcessingAreaRectX = my_frame.cols * 3 / 4;
-    int capstanProcessingAreaRectY = my_frame.rows / 2;
-    int capstanProcessingAreaRectWidth = my_frame.cols / 4;
-    int capstanProcessingAreaRectHeight = my_frame.rows / 2;
-    Rect capstanProcessingAreaRect(
-        capstanProcessingAreaRectX, capstanProcessingAreaRectY,
-        capstanProcessingAreaRectWidth, capstanProcessingAreaRectHeight);
-    Mat capstanProcessingAreaGrayscale = gray_current_frame(capstanProcessingAreaRect);
-    // Reset algorithm and set parameters
-    auto [indexPos, indexNeg, maxValPos, maxValNeg, positionsC1Pos, positionsC1Neg] =
-        find_object(capstan_template, capstan, capstanProcessingAreaGrayscale);
-    RotatedRect rectCapstanPos, rectCapstanNeg;
-    if (positionsC1Pos.size() > 0)
-      rectCapstanPos = utility::drawShapes(
-          my_frame, positionsC1Pos[indexPos], Scalar(255 - indexPos * 64, 0, 0),
-          capstan_template.cols - 22, capstan_template.rows - 92,
-          capstanProcessingAreaRectX + 11, capstanProcessingAreaRectY + 46, 1);
-    if (positionsC1Neg.size() > 0)
-      rectCapstanNeg = utility::drawShapes(
-          my_frame, positionsC1Neg[indexNeg], Scalar(255 - indexNeg * 64, 128, 0),
-          capstan_template.cols - 22, capstan_template.rows - 92,
-          capstanProcessingAreaRectX + 11, capstanProcessingAreaRectY + 46, 1);
-    if (maxValPos > 0)
-      if (maxValNeg > 0)
-        if (maxValPos > maxValNeg) {
-          g_rect_capstan = rectCapstanPos;
-        } else {
-          g_rect_capstan = rectCapstanNeg;
-        }
-      else {
-        g_rect_capstan = rectCapstanPos;
-      }
-    else if (maxValNeg > 0) {
-      g_rect_capstan = rectCapstanNeg;
-    } else {
-      return false;
-    }
+  cv::rectangle(frame, g_rect_tape.boundingRect(), cv::Scalar(0, 255, 0), 2);
+  va::detection::SceneElement capstan_element{
+      va::detection::ElementType::CAPSTAN,
+      capstan.minDist,
+      {capstan.threshold.percentual, capstan.threshold.angle,
+       capstan.threshold.scale, capstan.threshold.pos}};
+  auto capstan_roi_result =
+      va::detection::find_roi(frame, va::detection::Algorithm::SURF, capstan_element);
+  if (std::holds_alternative<va::Error>(capstan_roi_result)) {
+    pprint(std::get<va::Error>(capstan_roi_result), RED);
+    return false;
+  }
+  g_rect_capstan = std::get<va::detection::Roi>(capstan_roi_result);
   // Save the image containing the ROIs
-  cv::imwrite(g_output_path.string() + "/tape_areas.jpg", my_frame);
+  cv::rectangle(frame, g_rect_capstan.boundingRect(), cv::Scalar(255, 0, 0), 2);
+  cv::imwrite(g_output_path.string() + "/my_tape_areas.jpg", frame);
   return true;
 }
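The removed SURF branch above follows the standard OpenCV feature-matching recipe: SURF keypoints and descriptors, FLANN k-NN matching, Lowe's ratio test, then a RANSAC homography that projects the template corners into the scene. Stripped of the project-specific drawing, offsets and globals, a minimal version of that recipe looks like the sketch below (placeholder image paths, illustrative thresholds; it is a point of reference, not the code the refactoring moved into videoanalyser::detection).

    #include <opencv2/calib3d.hpp>
    #include <opencv2/features2d.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/xfeatures2d.hpp>
    #include <vector>

    int main() {
      // Placeholder inputs: a grayscale template and a grayscale scene frame.
      cv::Mat templ = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
      cv::Mat scene = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
      if (templ.empty() || scene.empty()) return 1;

      // 1. SURF keypoints + descriptors (requires the opencv_contrib xfeatures2d module).
      cv::Ptr<cv::xfeatures2d::SURF> detector = cv::xfeatures2d::SURF::create(100);
      std::vector<cv::KeyPoint> kp_templ, kp_scene;
      cv::Mat desc_templ, desc_scene;
      detector->detectAndCompute(templ, cv::noArray(), kp_templ, desc_templ);
      detector->detectAndCompute(scene, cv::noArray(), kp_scene, desc_scene);

      // 2. FLANN-based k-NN matching (k = 2) filtered with Lowe's ratio test.
      cv::Ptr<cv::DescriptorMatcher> matcher =
          cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);
      std::vector<std::vector<cv::DMatch>> knn_matches;
      matcher->knnMatch(desc_templ, desc_scene, knn_matches, 2);
      std::vector<cv::Point2f> obj, scn;
      for (const auto& m : knn_matches) {
        if (m.size() == 2 && m[0].distance < 0.75f * m[1].distance) {
          obj.push_back(kp_templ[m[0].queryIdx].pt);
          scn.push_back(kp_scene[m[0].trainIdx].pt);
        }
      }
      if (obj.size() < 4) return 1;  // a homography needs at least 4 correspondences

      // 3. RANSAC homography and projection of the template corners into the scene.
      cv::Mat H = cv::findHomography(obj, scn, cv::RANSAC);
      std::vector<cv::Point2f> corners = {
          {0.f, 0.f},
          {(float)templ.cols, 0.f},
          {(float)templ.cols, (float)templ.rows},
          {0.f, (float)templ.rows}};
      std::vector<cv::Point2f> projected(4);
      cv::perspectiveTransform(corners, projected, H);
      return 0;
    }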
@@ -461,7 +223,7 @@ RotatedRect check_skew(RotatedRect roi) {
   float angle = roi.angle;
   if (roi.angle < -45.) {
     angle += 90.0;
-    swap(rect_size.width, rect_size.height);
+    std::swap(rect_size.width, rect_size.height);
   }
   return RotatedRect(roi.center, rect_size, angle);
 }
@@ -478,7 +240,7 @@ RotatedRect check_skew(RotatedRect roi) {
  * @return Frame the difference matrix between the two frames
  */
 Frame get_difference_for_roi(Frame previous, Frame current, RotatedRect roi) {
-  cv::Mat rotation_matrix = getRotationMatrix2D(roi.center, roi.angle, 1.0);
+  cv::Mat rotation_matrix = cv::getRotationMatrix2D(roi.center, roi.angle, 1.0);
   return previous.warp(rotation_matrix)
       .crop(roi.size, roi.center)
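Frame::warp and Frame::crop (thin wrappers over warpAffine and getRectSubPix, see utility.cpp further down) together extract an axis-aligned patch from a rotated ROI. The same two steps in plain OpenCV, without the Frame wrapper, look roughly like this; the function and variable names are illustrative only.

    #include <opencv2/imgproc.hpp>

    // Rotate the image so that `roi` becomes axis-aligned, then cut it out.
    cv::Mat extract_rotated_roi(const cv::Mat& image, const cv::RotatedRect& roi) {
      // 2x3 affine matrix rotating around the ROI centre by the ROI angle.
      cv::Mat rotation = cv::getRotationMatrix2D(roi.center, roi.angle, 1.0);
      cv::Mat rotated;
      cv::warpAffine(image, rotated, rotation, image.size(), cv::INTER_CUBIC);
      // Sub-pixel crop of the now axis-aligned rectangle.
      cv::Mat patch;
      cv::getRectSubPix(rotated, roi.size, roi.center, patch);
      return patch;
    }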
@@ -534,7 +296,7 @@ bool is_frame_different(cv::Mat prev_frame, cv::Mat current_frame, int ms_to_end
   RotatedRect corrected_tape_roi = check_skew(g_rect_tape);
   Frame cropped_current_frame =
       Frame(current_frame)
-          .warp(getRotationMatrix2D(corrected_tape_roi.center,
-                                    corrected_tape_roi.angle, 1.0))
+          .warp(cv::getRotationMatrix2D(corrected_tape_roi.center,
+                                        corrected_tape_roi.angle, 1.0))
           .crop(corrected_tape_roi.size, corrected_tape_roi.center);
   Frame difference_frame = get_difference_for_roi(
       Frame(prev_frame), Frame(current_frame), corrected_tape_roi);
@@ -634,11 +396,11 @@ void processing(cv::VideoCapture video_capture, SceneObject capstan, SceneObject
   int sec_to_end = ms_to_end / 1000;
   int min_to_end = (sec_to_end / 60) % 60;
   sec_to_end = sec_to_end % 60;
-  string secStrToEnd = (sec_to_end < 10 ? "0" : "") + to_string(sec_to_end);
-  string minStrToEnd = (min_to_end < 10 ? "0" : "") + to_string(min_to_end);
+  string sec_str_to_end = (sec_to_end < 10 ? "0" : "") + to_string(sec_to_end);
+  string min_str_to_end = (min_to_end < 10 ? "0" : "") + to_string(min_to_end);
   std::cout << "\rIrregularities: " << num_saved_frames << ". ";
-  std::cout << "Remaining video time [mm:ss]: " << minStrToEnd << ":" << secStrToEnd << flush;
+  std::cout << "Remaining video time [mm:ss]: " << min_str_to_end << ":" << sec_str_to_end << flush;
   irregularity_found = is_frame_different(prev_frame, frame, ms_to_end, capstan, tape, args);
   if (irregularity_found) {
@@ -680,23 +442,26 @@ void processing(cv::VideoCapture video_capture, SceneObject capstan, SceneObject
  * @return int program status.
  */
 int main(int argc, char** argv) {
+  const string CONFIG_FILE = "config/config.json";
+  const string A_IRREG_FILE_1 = "AudioAnalyser_IrregularityFileOutput1.json";
+  const string V_IRREG_FILE_1 = "VideoAnalyser_IrregularityFileOutput1.json";
+  const string V_IRREG_FILE_2 = "VideoAnalyser_IrregularityFileOutput2.json";
   SceneObject capstan = SceneObject::from_file(CONFIG_FILE, ROI::CAPSTAN);
   SceneObject tape = SceneObject::from_file(CONFIG_FILE, ROI::TAPE);
   Args args = argc > 1 ? Args::from_cli(argc, argv) : Args::from_file(CONFIG_FILE);
-  const fs::path VIDEO_PATH =
-      args.workingPath / "PreservationAudioVisualFile" / args.filesName;
+  const fs::path VIDEO_PATH =
+      args.working_path / "PreservationAudioVisualFile" / args.files_name;
   const auto [FILE_NAME, FILE_FORMAT] = files::get_filename_and_extension(VIDEO_PATH);
-  const fs::path AUDIO_IRR_FILE_PATH =
-      args.workingPath / "temp" / FILE_NAME / A_IRREG_FILE_1;
+  const fs::path AUDIO_IRR_FILE_PATH =
+      args.working_path / "temp" / FILE_NAME / A_IRREG_FILE_1;
   std::cout << "Video to be analysed: " << endl;
   std::cout << "\tFile name: " << FILE_NAME << endl;
   std::cout << "\tExtension: " << FILE_FORMAT << endl;
   if (FILE_FORMAT.compare("avi") != 0 && FILE_FORMAT.compare("mp4") != 0 &&
       FILE_FORMAT.compare("mov") != 0)
-    print_error_and_exit("Input error",
-                         "The input file must be an AVI, MP4 or MOV file.");
+    print_error_and_exit("Input error: The input file must be an AVI, MP4 or MOV file.");
   ifstream iJSON(AUDIO_IRR_FILE_PATH);
   if (iJSON.fail())
-    print_error_and_exit("config.json error",
-                         AUDIO_IRR_FILE_PATH.string() + " cannot be found or opened.");
+    print_error_and_exit("config.json error" + AUDIO_IRR_FILE_PATH.string() +
+                         " cannot be found or opened.");
   json audio_irr_file;
   iJSON >> audio_irr_file;
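The audio irregularity file is read with nlohmann::json's stream operator, as in the two lines above. A self-contained sketch of that pattern follows; the file name is the one named in the diff, while the field name queried at the end is hypothetical, since the JSON schema is not shown in this commit.

    #include <fstream>
    #include <iostream>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main() {
      std::ifstream in("AudioAnalyser_IrregularityFileOutput1.json");
      if (in.fail()) {
        std::cerr << "file cannot be found or opened" << std::endl;
        return 1;
      }
      json irregularities;
      in >> irregularities;  // throws nlohmann::detail::parse_error on malformed JSON
      // Hypothetical field access; the real schema is not visible in this diff.
      if (irregularities.contains("Irregularities"))
        std::cout << irregularities["Irregularities"].size() << std::endl;
      return 0;
    }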
@@ -707,14 +472,14 @@ int main(int argc, char** argv) {
   } else if (!args.brands)
     tape.threshold.percentual += 21;
-  g_output_path = args.workingPath / "temp" / FILE_NAME;
+  g_output_path = args.working_path / "temp" / FILE_NAME;
   fs::create_directory(g_output_path);
   g_irregularity_images_path = g_output_path / "IrregularityImages";
   fs::create_directory(g_irregularity_images_path);
   cv::VideoCapture video_capture(VIDEO_PATH);  // Open video file
-  if (!video_capture.isOpened())
-    print_error_and_exit("Video error", "Video file cannot be opened.");
+  if (!video_capture.isOpened())
+    print_error_and_exit("Video error: Video file cannot be opened.");
   video_capture.set(CAP_PROP_POS_FRAMES,
                     video_capture.get(CAP_PROP_FRAME_COUNT) / 2);  // Set frame position to half video length
@@ -724,7 +489,7 @@ int main(int argc, char** argv) {
   std::cout << "\tResolution: " << middle_frame.cols << "x" << middle_frame.rows << "\n\n";
   bool found = find_processing_areas(middle_frame, tape, capstan);
-  if (!found)
-    print_error_and_exit("Processing area not found", "Try changing JSON parameters.");
+  if (!found)
+    print_error_and_exit("Processing area not found: Try changing JSON parameters.");
   pprint("Processing...", CYAN);
   processing(video_capture, capstan, tape, args);
@@ -732,7 +497,7 @@ int main(int argc, char** argv) {
   files::save_file(g_output_path / V_IRREG_FILE_1, g_irregularity_file_1.dump(4));
   // Irregularities to extract for the AudioAnalyser and to the TapeIrregularityClassifier
-  extractIrregularityImagesForAudio(g_output_path, VIDEO_PATH, audio_irr_file,
-                                    g_irregularity_file_2);
+  extract_irregularity_images_for_audio(g_output_path, VIDEO_PATH, audio_irr_file,
+                                        g_irregularity_file_2);
   files::save_file(g_output_path / V_IRREG_FILE_2, g_irregularity_file_2.dump(4));
   return EXIT_SUCCESS;
src/utility.cpp

-#include "utility.h"
+#include "utility.hpp"
 #include <filesystem>
 #include <fstream>
@@ -10,134 +10,88 @@ namespace fs = std::filesystem;
 using json = nlohmann::json;

-// Constructors
-utility::Frame::Frame() : Mat() {}
-utility::Frame::Frame(const Mat& m) : Mat(m) {}
-utility::Frame::Frame(const Frame& f) : Mat(f) {}
-
-// Operators
-utility::Frame& utility::Frame::operator=(const Mat& m) {
-  Mat::operator=(m);
-  return *this;
-}
-utility::Frame& utility::Frame::operator=(const Frame& f) {
-  Mat::operator=(f);
-  return *this;
-}
-
-// Methods
-utility::Frame utility::Frame::clone() const {
-  return utility::Frame(Mat::clone());
-}
-utility::Frame& utility::Frame::downsample(int factor) {
-  pyrDown(*this, *this, Size(size().width / factor, size().height / factor));
-  return *this;
-}
-utility::Frame& utility::Frame::convertColor(int code) {
-  cvtColor(*this, *this, code);
-  return *this;
-}
-utility::Frame utility::Frame::difference(Frame& f) {
-  return Frame(utility::difference(*this, f));
-}
-utility::Frame& utility::Frame::crop(Size rect_size, Point2f center) {
-  cv::getRectSubPix(*this, rect_size, center, *this);
-  return *this;
-}
-utility::Frame& utility::Frame::warp(cv::Mat rotationMatrix) {
-  cv::warpAffine(*this, *this, rotationMatrix, this->size(), INTER_CUBIC);
-  return *this;
-}
-pair<utility::Frame, utility::Frame> utility::Frame::deinterlace() const {
-  Frame odd_frame(cv::Mat(this->rows / 2, this->cols, CV_8UC3));
-  Frame even_frame(cv::Mat(this->rows / 2, this->cols, CV_8UC3));
-  utility::separateFrame(*this, odd_frame, even_frame);
-  return make_pair(odd_frame, even_frame);
-}
-
-void utility::detectShape(Ptr<GeneralizedHoughGuil> alg, Mat templateShape,
-                          int posThresh, vector<Vec4f>& positivePositions,
-                          Mat& positiveVotes, vector<Vec4f>& negativePositions,
-                          Mat& negativeVotes, Mat processingArea) {
-  alg->setPosThresh(posThresh);
-  alg->setTemplate(templateShape);
+void utility::detect_shape(Ptr<GeneralizedHoughGuil> alg, int pos_thresh,
+                           vector<Vec4f>& positive_positions, Mat& positive_votes,
+                           vector<Vec4f>& negative_positions, Mat& negative_votes,
+                           Mat processing_area) {
+  alg->setPosThresh(pos_thresh);
-  int oldSizePositive = 0;
-  int i = 0;
-  int maxVote = 0;
+  int num_prev_matches = 0;
+  int threshold_increment = 0;
+  int max_match_score = 0;
   // Process shapes with positive angles
   alg->setMinAngle(0);
   alg->setMaxAngle(3);
   while (true) {
-    alg->detect(processingArea, positivePositions, positiveVotes);
-    int currentSize = positivePositions.size();
-    if (currentSize == 1) {
+    alg->detect(processing_area, positive_positions, positive_votes);
+    int current_matches = positive_positions.size();
+    if (current_matches == 1 ||
+        (current_matches == 0 && num_prev_matches == 0)) {
       // We detected the most interesting shape
+      // Impossible to find with these parameters
       break;
-    } else if (currentSize == 0 && oldSizePositive > 0) {
+    } else if (current_matches == 0 && num_prev_matches > 0) {
       // It is not possible to detect only one shape with the current
       // parameters
-      alg->setPosThresh(posThresh + i - 1);  // Decrease position value
-      alg->detect(processingArea, positivePositions, positiveVotes);  // Detect all available shapes
-      break;
-    } else if (currentSize == 0 && oldSizePositive == 0) {
-      // Impossible to find with these parameters
+      alg->setPosThresh(pos_thresh + threshold_increment - 1);  // Decrease position value
+      alg->detect(processing_area, positive_positions, positive_votes);  // Detect all available shapes
       break;
     }
-    oldSizePositive = currentSize;
+    num_prev_matches = current_matches;
     // Find maximum vote
-    for (int j = 0; j < positiveVotes.cols / 3; j++) {
-      if (positiveVotes.at<int>(3 * j) > maxVote)
-        maxVote = positiveVotes.at<int>(3 * j);
+    for (int j = 0; j < positive_votes.cols / 3; j++) {
+      if (positive_votes.at<int>(3 * j) > max_match_score)
+        max_match_score = positive_votes.at<int>(3 * j);
     }
-    if (currentSize > 10) {
-      i += 5;    // To speed up computation when there are too many matches
-    } else if (maxVote - (posThresh + i) > 100) {
-      i += 100;  // To speed up computation when there are few super high matches
+    if (current_matches > 10) {
+      threshold_increment += 5;    // To speed up computation when there are too many matches
+    } else if (max_match_score - (pos_thresh + threshold_increment) > 100) {
+      threshold_increment += 100;  // To speed up computation when there are few super high matches
     } else {
-      i++;
+      threshold_increment++;
     }
-    alg->setPosThresh(posThresh + i);
+    alg->setPosThresh(pos_thresh + threshold_increment);
   }

-  int oldSizeNegative = 0;
   // Reset incremental position value
-  i = 0;
-  maxVote = 0;
+  threshold_increment = 0;
+  num_prev_matches = 0;
+  max_match_score = 0;
   // Process shapes with negative angles
   alg->setMinAngle(357);
   alg->setMaxAngle(360);
   while (true) {
-    alg->detect(processingArea, negativePositions, negativeVotes);
-    int currentSize = negativePositions.size();
-    if (currentSize == 1) {
+    alg->detect(processing_area, negative_positions, negative_votes);
+    int current_matches = negative_positions.size();
+    if (current_matches == 1 ||
+        (current_matches == 0 && num_prev_matches == 0)) {
       // We detected the most interesting shape
+      // Impossible to find with these parameters
       break;
-    } else if (currentSize == 0 && oldSizeNegative > 0) {
+    } else if (current_matches == 0 && num_prev_matches > 0) {
       // It is not possible to detect only one shape with the current
       // parameters
-      alg->setPosThresh(posThresh + i - 1);  // Decrease position value
-      alg->detect(processingArea, negativePositions, negativeVotes);  // Detect all available shapes
-      break;
-    } else if (currentSize == 0 && oldSizeNegative == 0) {
-      // Impossible to find with these parameters
+      alg->setPosThresh(pos_thresh + threshold_increment - 1);  // Decrease position value
+      alg->detect(processing_area, negative_positions, negative_votes);  // Detect all available shapes
       break;
     }
-    oldSizeNegative = currentSize;
+    num_prev_matches = current_matches;
     // Find maximum vote
-    for (int j = 0; j < positiveVotes.cols / 3; j++) {
-      if (positiveVotes.at<int>(3 * j) > maxVote)
-        maxVote = positiveVotes.at<int>(3 * j);
+    for (int j = 0; j < positive_votes.cols / 3; j++) {
+      if (positive_votes.at<int>(3 * j) > max_match_score)
+        max_match_score = positive_votes.at<int>(3 * j);
     }
-    if (currentSize > 10) {
-      i += 5;    // To speed up computation when there are too many matches
-    } else if (maxVote - (posThresh + i) > 100) {
-      i += 100;  // To speed up computation when there are few super high matches
+    if (current_matches > 10) {
+      threshold_increment += 5;    // To speed up computation when there are too many matches
+    } else if (max_match_score - (pos_thresh + threshold_increment) > 100) {
+      threshold_increment += 100;  // To speed up computation when there are few super high matches
     } else {
-      i++;
+      threshold_increment++;
    }
-    alg->setPosThresh(posThresh + i);
+    alg->setPosThresh(pos_thresh + threshold_increment);
   }
 }
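The utility::Frame wrapper removed above (and, judging by the new using-declarations in main.cpp, re-homed under videoanalyser::core) exists mainly to let image pre-processing read as a chain of calls. The sketch below is a self-contained illustration of that design, not the project's class: a thin cv::Mat subclass whose processing methods mutate in place and return *this, exactly the pattern the removed convertColor/downsample methods follow.

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>

    // Minimal illustration of the fluent-wrapper idea behind utility::Frame.
    class ChainedMat : public cv::Mat {
     public:
      explicit ChainedMat(const cv::Mat& m) : cv::Mat(m) {}
      ChainedMat& convertColor(int code) {
        cv::cvtColor(*this, *this, code);  // in place, as in Frame::convertColor
        return *this;
      }
      ChainedMat& downsample(int factor) {
        cv::pyrDown(*this, *this,
                    cv::Size(size().width / factor, size().height / factor));
        return *this;
      }
    };

    int main() {
      cv::Mat input = cv::imread("frame.png");  // placeholder path
      if (input.empty()) return 1;
      // Calls compose left to right, mirroring the old main.cpp usage
      // Frame(my_frame).convertColor(COLOR_BGR2GRAY) followed by downsample(2).
      ChainedMat gray = ChainedMat(input).convertColor(cv::COLOR_BGR2GRAY).downsample(2);
      return 0;
    }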
@@ -164,51 +118,6 @@ RotatedRect utility::drawShapes(Mat frame, const Vec4f& positions, Scalar color,
   return rr;
 }

-void utility::separateFrame(const cv::Mat frame, cv::Mat& odd_frame, cv::Mat& even_frame) {
-  int i_odd_frame = 0;
-  int i_even_frame = 0;
-  for (int i = 0; i < frame.rows; i++) {
-    for (int j = 0; j < frame.cols; j++) {
-      if (i % 2 == 0) {
-        even_frame.at<cv::Vec3b>(i_even_frame, j)[0] = frame.at<cv::Vec3b>(i, j)[0];
-        even_frame.at<cv::Vec3b>(i_even_frame, j)[1] = frame.at<cv::Vec3b>(i, j)[1];
-        even_frame.at<cv::Vec3b>(i_even_frame, j)[2] = frame.at<cv::Vec3b>(i, j)[2];
-      } else {
-        odd_frame.at<cv::Vec3b>(i_odd_frame, j)[0] = frame.at<cv::Vec3b>(i, j)[0];
-        odd_frame.at<cv::Vec3b>(i_odd_frame, j)[1] = frame.at<cv::Vec3b>(i, j)[1];
-        odd_frame.at<cv::Vec3b>(i_odd_frame, j)[2] = frame.at<cv::Vec3b>(i, j)[2];
-      }
-    }
-    if (i % 2 == 0) {
-      i_even_frame++;
-    } else {
-      i_odd_frame++;
-    }
-  }
-  return;
-}
-
-cv::Mat utility::difference(cv::Mat& prevFrame, cv::Mat& currentFrame) {
-  cv::Mat diff = currentFrame.clone();
-  for (int i = 0; i < currentFrame.rows; i++) {
-    for (int j = 0; j < currentFrame.cols; j++) {
-      if (prevFrame.at<cv::Vec3b>(i, j)[0] != currentFrame.at<cv::Vec3b>(i, j)[0] ||
-          prevFrame.at<cv::Vec3b>(i, j)[1] != currentFrame.at<cv::Vec3b>(i, j)[1] ||
-          prevFrame.at<cv::Vec3b>(i, j)[2] != currentFrame.at<cv::Vec3b>(i, j)[2]) {
-        // Different pixels
-        diff.at<cv::Vec3b>(i, j)[0] = 0;
-      } else {
-        // Identical pixels
-        diff.at<cv::Vec3b>(i, j)[0] = 255;
-      }
-    }
-  }
-  return diff;
-}

 Threshold::Threshold(float percentual, int angle, int scale, int pos) {
   if (percentual < 0 || percentual > 100)
     throw std::invalid_argument("Percentual must be between 0 and 100");
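The removed utility::difference walks every pixel and writes a flag (0 where the frames differ, 255 where they match) into channel 0 of a cloned frame. An equivalent single-channel mask can be produced with OpenCV primitives; the sketch below is offered only as a point of comparison with the hand-rolled loop, not as the replacement the refactoring actually introduced.

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <vector>

    // Single-channel equality mask: 255 where the two BGR frames are identical,
    // 0 where any channel differs (same convention as the removed helper, but
    // returned as CV_8UC1 instead of being written into channel 0 of a clone).
    cv::Mat difference_mask(const cv::Mat& prev_frame, const cv::Mat& current_frame) {
      cv::Mat abs_diff;
      cv::absdiff(prev_frame, current_frame, abs_diff);  // per-channel |a - b|
      std::vector<cv::Mat> channels;
      cv::split(abs_diff, channels);                     // B, G, R difference planes
      cv::Mat any_diff = channels[0] | channels[1] | channels[2];
      cv::Mat mask;
      cv::threshold(any_diff, mask, 0, 255, cv::THRESH_BINARY_INV);  // 0 -> 255, >0 -> 0
      return mask;
    }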
src/utility.h → src/utility.hpp
@@ -13,87 +13,31 @@ namespace fs = std::filesystem;
 /**
  * @brief Namespace containing a set of utility functions used in the project.
  * The functions are mainly used to perform operations on images.
  *
  */
 namespace utility {

-/**
- * @class Frame
- * @brief Class that extends the OpenCV Mat class, adding some useful methods
- * frequently used in the project.
- *
- */
-class Frame : public Mat {
- public:
-  Frame();
-  Frame(const Mat& m);
-  Frame(const Frame& f);
-  Frame& operator=(const Mat& m);
-  Frame& operator=(const Frame& f);
-  Frame clone() const;
-  /**
-   * @brief Downsample the image by a given factor.
-   *
-   * @param factor The factor by which the image will be downsampled.
-   * @return Frame& The downsampled image.
-   */
-  Frame& downsample(int factor);
-  /**
-   * @brief Convert the image to a given color space.
-   *
-   * @param code The code of the color space to which the image will be
-   * converted.
-   * @return Frame& The converted image.
-   */
-  Frame& convertColor(int code);
-  Frame difference(Frame& f);
-  /**
-   * @brief Crop the image to a given size, centered in a given point.
-   *
-   * @param rect_size The size of the cropped image.
-   * @param center The center of the cropped image.
-   * @return Frame& The cropped image.
-   */
-  Frame& crop(Size rect_size, Point2f center);
-  /**
-   * @brief Warp the image using a given rotation matrix.
-   *
-   * @param rotationMatrix The rotation matrix used to warp the image.
-   * @return Frame& The warped image.
-   */
-  Frame& warp(cv::Mat rotationMatrix);
-  /**
-   * @brief Deinterlace the image, returning two images, one containing the
-   * odd lines and the other containing the even lines.
-   *
-   * @return std::pair<Frame, Frame> The two images containing the odd and
-   * even lines.
-   */
-  std::pair<Frame, Frame> deinterlace() const;
-};

 /**
- * @fn void detectShape(Ptr<GeneralizedHoughGuil> alg, Mat templateShape, int
- * posThresh, vector<Vec4f> &positivePositions, Mat &positiveVotes,
- * vector<Vec4f> &negativePositions, Mat &negativeVotes, Mat processingArea)
- * @brief Detects a given shape in an image, using the OpenCV algorithm
+ * @fn void detect_shape(Ptr<GeneralizedHoughGuil> alg, int
+ * pos_thresh, vector<Vec4f> &positive_positions, Mat &positive_votes,
+ * vector<Vec4f> &negative_positions, Mat &negative_votes, Mat processing_area)
+ * @brief Detects a shape in an image, using the OpenCV algorithm
  * GeneralizedHoughGuil.
  *
  * @param[in] alg the algorithm instance;
- * @param[in] templateShape the shape to detect;
- * @param[in] posThresh the position votes threshold;
- * @param[out] positivePositions vector representing the position assigned to
+ * @param[in] pos_thresh the position votes threshold, which determines the
+ * minimum number of votes required to consider a detection valid;
+ * @param[out] positive_positions vector representing the position assigned to
  * each found rectangle for positive angles;
- * @param[out] positiveVotes vector representing the vote assigned to each found
+ * @param[out] positive_votes vector representing the vote assigned to each found
  * rectangle for positive angles;
- * @param[out] negativePositions vector representing the position assigned to
+ * @param[out] negative_positions vector representing the position assigned to
  * each found rectangle for negative angles;
- * @param[out] negativeVotes vector representing the vote assigned to each found
+ * @param[out] negative_votes vector representing the vote assigned to each found
  * rectangle for negative angles;
- * @param[in] processingArea the image to be processed.
+ * @param[in] processing_area the image to be processed.
  */
-void detectShape(Ptr<GeneralizedHoughGuil> alg, Mat templateShape, int posThresh,
-                 vector<Vec4f>& positivePositions, Mat& positiveVotes,
-                 vector<Vec4f>& negativePositions, Mat& negativeVotes,
-                 Mat processingArea);
+void detect_shape(Ptr<GeneralizedHoughGuil> alg, int pos_thresh,
+                  vector<Vec4f>& positive_positions, Mat& positive_votes,
+                  vector<Vec4f>& negative_positions, Mat& negative_votes,
+                  Mat processing_area);

 /**
  * @fn RotatedRect drawShapes(Mat frame, Vec4f &positions, Scalar color, int
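Frame::deinterlace, declared above and removed in this commit, splits a frame into its odd and even line planes via the pixel-by-pixel separateFrame loop in utility.cpp. For reference, the same split can be expressed with OpenCV row views; this is a compact alternative sketch, not the code the project uses.

    #include <opencv2/core.hpp>
    #include <utility>

    // Split an interlaced BGR frame into its even-line and odd-line fields.
    // Each output has half the rows of the input.
    std::pair<cv::Mat, cv::Mat> split_fields(const cv::Mat& frame) {
      cv::Mat even(frame.rows / 2, frame.cols, frame.type());
      cv::Mat odd(frame.rows / 2, frame.cols, frame.type());
      for (int i = 0; i + 1 < frame.rows; i += 2) {
        frame.row(i).copyTo(even.row(i / 2));     // even source lines: 0, 2, 4, ...
        frame.row(i + 1).copyTo(odd.row(i / 2));  // odd source lines: 1, 3, 5, ...
      }
      return {even, odd};
    }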
@@ -112,29 +56,6 @@ void detectShape(Ptr<GeneralizedHoughGuil> alg, Mat templateShape, int posThresh
  */
 RotatedRect drawShapes(Mat frame, const Vec4f& positions, Scalar color, int width,
                        int height, int offsetX, int offsetY, float processingScale);

-/**
- * @fn void separateFrame(cv::Mat frame, cv::Mat &odd_frame, cv::Mat
- * &even_frame)
- * @brief Function to deinterlace the current image.
- *
- * @param[in] frame image to be processed;
- * @param[out] odd_frame odd plane;
- * @param[out] even_frame even plane.
- */
-void separateFrame(const cv::Mat frame, cv::Mat& odd_frame, cv::Mat& even_frame);
-
-/**
- * @fn cv::Mat difference(cv::Mat &prevFrame, cv::Mat &currentFrame)
- * @brief Compute the number of different pixels between two frames.
- *
- * @param prevFrame the first frame;
- * @param currentFrame the second frame.
- * @return cv::Mat A black and white frame, where black pixels represent a
- * difference, while white pixels represent an equality.
- */
-cv::Mat difference(cv::Mat& prevFrame, cv::Mat& currentFrame);

 }  // namespace utility

 /**
@@ -164,7 +85,7 @@ struct Threshold {
  * @brief Enum containing the possible objects to detect.
  *
  */
-enum ROI { TAPE, CAPSTAN };
+enum class ROI { TAPE, CAPSTAN };

 /**
  * @struct SceneObject
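Switching ROI from a plain enum to an enum class removes the unscoped TAPE/CAPSTAN names and the implicit conversion to int; call sites that already qualify the values, like SceneObject::from_file(CONFIG_FILE, ROI::CAPSTAN) in main.cpp, are unaffected. A minimal standalone illustration of the difference:

    enum class ROI { TAPE, CAPSTAN };

    int main() {
      ROI r = ROI::TAPE;            // qualified access works exactly as before
      // int n = r;                 // error: no implicit conversion from an enum class
      int n = static_cast<int>(r);  // an explicit cast is now required
      return n;
    }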