Hello,

I'm trying to determine where a small matrix would fit inside a big matrix.. But, the small matrix is not in the big matrix so I need to see how simular they are.. The one with the lowest value, is the best fit..

So my difference function is this:

``````float difference(int row, int col, vector<double> &matrix2, vector<double>& background)
{
int matrix2_width = 2;
int matrix2_height = 2;

int background_width = 4;
int background_hight = 4;

int i=0;
int j=0;
float diffSum = 0;
float percentDiff = 0;

if(col+matrix2_width >= background_width || row+matrix2_height >= background_hight)
return 1;

for(int i=0; (i < matrix2_width); i++)
{
for(int j=0; (j < matrix2_height); j++)
{
int sum1 = background[(row+j)*background_width+(col+i)];
int sum2 = matrix2[j*matrix2_width+i];
diffSum += abs(sum1 - sum2);
}
}
percentDiff = diffSum/(matrix2_width*matrix2_height*255);
return percentDiff;
}
``````

And the Algorithm that I'm using:

`````` int mat1Rows(4), mat1Cols(4), mat2Rows(2), mat2Cols(2);
const int ROW_BOUNDS(mat1Rows-mat2Rows+1);
const int COL_BOUNDS(mat1Cols-mat2Cols+1);
for(int i=0; (i < ROW_BOUNDS); i++) {
for(int j=0; (j < COL_BOUNDS); j++) {
for (int row(0); row < mat2Rows; row++){
for (int col(0); col < mat2Cols; col++){
float corr = difference(i, j, matrix2, matrix1);
}
}
}
}
``````

The only problem that I'm having is capturing the corrdiants of the matching block..

Thanks :)

2
Contributors
7
Replies
8
Views
6 Years
Discussion Span
Last Post by Lerner

add another variable (or two or three or however many it takes) to keep track of the lowest value of corr and the values of i and j that were used to calculate that value of cor. Use a control statement to help yourself out.

``````if current corr less the previous low cor
assign current corr to low cor
assign current i to low i
assign current j to low j
``````

Edited by Lerner

lp yours

Hey,

I've tried this:

``````for(int i=0; (i < ROW_BOUNDS); i++) {
for(int j=0; (j < COL_BOUNDS); j++) {
for (int row(0); row < mat2Rows; row++){
for (int col(0); col < mat2Cols; col++){
float corr = difference(i, j, matrix2, matrix1);
if(corr < corrVal || corr < corrVal2)
{
row = i;
col = j;
corrVal2 = corr;
}
}
}
}
}
``````

But it seems it doesn't find the right position.. E.g.

M1 =
0 1 1 0
0 1 1 1
1 0 0 0
0 0 0 0

M2 =
0 0
0 0

And it returns
row = 0;
col = 0;

Thanks

Edited by phorce: Updated Question

It's not row = i; it's lowRowSoFar = i;;

I don't understand, could you explain please?

fullMatrix (4x4)
targetMatrix(2x2)
lowestCorr = 300000
rowIndexULCofBestFitSubMatrixOfFullMatrix = 0
colIndexULCofBestFitSubMatrixOfFullMatrix = 0

//goal: find the indexes of fullMatrix that correspond to the upper left cell (ULC) of the subMatrix of fullMatrix with the best fit to targetMatrix
i = which row in fullMatrix for this subMatrix ULC
j = which col in fullMatrix for this subMatrix ULC
k= number of rows in targetMatrix
m = number of cols in targetMatrix

``````for(i ; i < 3; ++i)
for(j; j < 3; ++j)
corr = diff(i, j, k, m, fullMatrix, targetMatrix)
if(corr < lowestCorr)
lowestCorr = corr
rowIndexULCofBestFitSubMatrixOfFullMatrix = i
colIndexULCofBestFitSubMatrixOfFullMatrix  = j
``````

Edited by Lerner

Hey,

So here is the comparision function:

``````float compMatrix(vector<double>&matrix1, vector<double>&matrix2)
{
int sum1 = 0;
int sum2 = 0;
int diff = 0;
for(int i=0; (i < matrix2.size()); i++)
{
sum1 += matrix2[i];
}

for(int i=0; (i < matrix1.size()); i++)
{
sum2 += matrix1[i];
}
diff = sum1-sum2;

return diff;
}
``````

And the algorithm (in main)

``````int currentRow(0), currentCol(0),
minRow(0), minCol(0),
maxRow(0), maxCol(0);

int min_val = 0;

const int ROW_BOUNDS(mat1Rows-mat2Rows+1);
const int COL_BOUNDS(mat1Cols-mat2Cols+1);

for(int i=0; (i < ROW_BOUNDS); i++) {
for(int j=0; (j < COL_BOUNDS); j++) {
m3.clear();
for (int row(0); row < mat2Rows; row++){
for (int col(0); col < mat2Cols; col++){
//cout << matrix1[i*mat1Cols+row*mat1Cols+col+j] << ' ';
m3.push_back( matrix1[i*mat1Cols+row*mat1Cols+col+j] );
currentRow = i;
currentCol = j;
}
}
double comp = compMatrix(matrix2, m3);
if(comp < min_val)
{
minRow = currentRow;
minCol = currentCol;
m4 = m3;
}else if(comp == min_val)
{
minRow = currentRow;
minCol = currentCol;
m4 = m3;
}
}
}
``````

Here are the values I'm entering:

M1 =
0 0 1 0
0 0 1 0
1 1 1 0
0 1 0 0
M2 =
1 0
1 0

And when I run the program it says: Found the best between: (row): 2 And: (col): 2

But the resulting matrix is:
1 0
0 0

When it should be:
1 0
1 0

Because there is no difference at all.

Can you see where I'm going wrong?

Would really appriciate any advice :)

What are you doing with m3 and m4? I don't see a need for them.

In the compare function you are summing all the elements of M1, not just the appropriate subMatrix to compare with M2.

The sum of all elements of M2 will always be the same every time you want to use compare it to a given subMatrix, so just sum it once, and the current M2 has the sum of 2.

Using pencil and paper I get nine 2 by 2 subMatrixes of M1, with sums varying from 0-3, meaning if the compare calculates the difference of each subMatrix of M1 with M2, sumSubMatrixX - sumM2, then the range will be -2 to 1. If you don't care which sum is larger, you just want to know the difference between the two sums, then use the absolute value of the difference (using fabs() or negating the value of the difference if the difference is less than 1 or squaring the difference.

So I'd try something like this:

``````findPositionOfBestFit()
{
//find best fit of m2 within m1, using the cell in left upper corner of the appropriate
subMatrix as the identifier of the subMatrix.

Matrix m1, m2
//get values for m1 and m2

//set "constants" and defaults
minDiff = 300000; //or some very large number
minRow = 0
minCol = 0
rowBound = m1.numRows - m2.numRows + 1
colBound = m1.numCols - m2.numCols + 1
sumM2 = 0
diff

//calculate sumM2
for(i = 0; i < m2.numRows; ++i)
for(j = 0; j < m2.numCols; ++j)
sumM2 += m2[i][j]

for(i = 0; i < rowBound; ++i)
for(j = 0; j < colBound; ++j)   //get the left upper corner cell for a new subMatrix
diff = calculateSubMatrixSum(i, j, m2.numRows, m2.numCols, m1) - sumM2
if(diff < 0)
diff = -1 * diff
if(diff < minDiff)
minDiff = diff
minRow = i
minCol = j
}

calculateSubMatrixSum(int startRow, int startCol, int numRows, int numCols, Matrix & m1)
{
subMatrixSum = 0
for(int a = startRow; a < startRow + numRows; ++a)
for(int b = startCol; b < startCol + numCols; ++b)
subMatrixSum += m1[a][b]
return subMatrixSum
}
``````

Edited by Lerner

This topic has been dead for over six months. Start a new discussion instead.
Have something to contribute to this discussion? Please be thoughtful, detailed and courteous, and be sure to adhere to our posting rules.