diff --git a/src/HomogMatrix2twist.m b/src/HomogMatrix2twist.m
new file mode 100644
index 0000000..2c36e85
--- /dev/null
+++ b/src/HomogMatrix2twist.m
@@ -0,0 +1,44 @@
+function twist = HomogMatrix2twist(H)
+
+% HomogMatrix2twist Convert a 4x4 homogeneous matrix to twist coordinates
+%
+% Input:
+% -H(4,4): Euclidean transformation matrix (rigid body motion)
+%
+% Output:
+% -twist(6,1): twist coordinates, stacking the linear and angular parts [v;w]
+%
+% Observe that the same H might be represented by different twist vectors;
+% here, twist(4:6) is a rotation vector with norm in [0,pi].
+
+se_matrix = logm(H);
+
+% Careful with rotations by pi: in that case the top-left 3x3 submatrix of the
+% se_matrix returned by logm is not exactly skew-symmetric (bad).
+
+v = se_matrix(1:3,4);
+
+w = Matrix2Cross(se_matrix(1:3,1:3));
+
+twist = [v; w];
+
+end
+
+% MATRIX2CROSS Compute the 3D vector corresponding to an antisymmetric matrix
+%
+% Computes the 3D vector x corresponding to an antisymmetric matrix M such
+% that M*y = cross(x,y) for all 3D vectors y.
+%
+% Input:
+% - M(3,3) : antisymmetric matrix
+%
+% Output:
+% - x(3,1) : column vector
+%
+% See also CROSS2MATRIX
+
+function x = Matrix2Cross(M)
+
+x = [-M(2,3); M(1,3); -M(1,2)];
+
+end
diff --git a/src/createFigure.m b/src/createFigure.m
index 6254803..04843a3 100644
--- a/src/createFigure.m
+++ b/src/createFigure.m
@@ -35,6 +35,7 @@
 topViewLandmarksY = cell(1,numFrames);
 topViewCarX = cell(1,numFrames);
 topViewCarY = cell(1,numFrames);
+
 % landmark data is given in singles
 % coordinates of vehicle are given in doubles (default)
 for i = 1:numFrames
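A quick sanity check for HomogMatrix2twist (an illustrative sketch, not part of this change; it assumes src/ is on the MATLAB path):

    % Rigid-body motion: 90-degree rotation about z plus a translation
    theta = pi/2;
    R = [cos(theta) -sin(theta) 0; sin(theta) cos(theta) 0; 0 0 1];
    H = [R, [1; 2; 3]; 0 0 0 1];

    tau = HomogMatrix2twist(H);      % 6x1 twist [v; w]

    % The angular part is a rotation vector whose norm equals the rotation angle
    assert(abs(norm(tau(4:6)) - theta) < 1e-9);

    % Re-exponentiating the twist recovers the original transform
    skew = @(w) [0 -w(3) w(2); w(3) 0 -w(1); -w(2) w(1) 0];
    H_back = expm([skew(tau(4:6)), tau(1:3); zeros(1,4)]);
    assert(norm(H_back - H, 'fro') < 1e-9);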
diff --git a/src/runBA.m b/src/runBA.m
new file mode 100644
index 0000000..d421811
--- /dev/null
+++ b/src/runBA.m
@@ -0,0 +1,163 @@
+function hidden_state = runBA(hidden_state, observations, K)
+%RUNBA runs the bundle adjustment algorithm, finding the landmarks and extrinsic
+% parameters that minimize the reprojection error. It does so by finding a local
+% optimum of the nonlinear least squares problem.
+% INPUT:
+% - hidden_state: one-dimensional vector containing all the camera poses as 6D
+%                 twist vectors and the set of 3D landmarks, all unrolled.
+%                 These are all the values that we tune in order to reduce the
+%                 reprojection error:
+%                 [pose_1, pose_2, ..., pose_n, l_1, l_2, ..., l_m]
+% - observations: one-dimensional vector laid out as
+%                 [n, m, O_1, O_2, ..., O_n], where n is the number of camera
+%                 poses, m is the number of landmarks and O_i is the observation
+%                 obtained at pose i, containing
+%                 [k_i, p_1, p_2, ..., p_k, l_1, l_2, ..., l_k], where:
+%                 - k_i: the number of landmarks observed in pose i.
+%                 - p_j: 2D position in pixel space of landmark l_j.
+%                 - l_j: index pointing to the corresponding 3D landmark in
+%                        the set of all 3D landmarks.
+% - K: camera calibration matrix (3x3)
+% OUTPUT:
+% - hidden_state: one-dimensional vector containing all the camera poses and
+%                 landmarks, laid out as described above, after minimization of
+%                 the nonlinear least squares problem.
+
+debug = false;
+sparse_gradients = true;
+
+% Define the error function of the BA
+error_function = @(x) errorBA(x, observations, K);
+
+if sparse_gradients
+    % Get the sparse Jacobian pattern matrix
+    M = getSparseJacobianFilter(hidden_state, observations);
+
+    % Plot the sparsity pattern of M
+    if debug
+        figure(4);
+        spy(M);
+    end
+
+    % Tell the optimizer to only compute the gradients that are non-zero
+    options = optimoptions('lsqnonlin', ...
+        'JacobPattern', M, ...
+        'Display', 'iter', ...
+        'UseParallel', false, ...
+        'MaxIter', 20);
+else
+    options = optimoptions('lsqnonlin', ...
+        'Display', 'iter', ...
+        'UseParallel', false, ...
+        'MaxIter', 20);
+end
+
+hidden_state = lsqnonlin(error_function, hidden_state, [], [], options);
+
+end
+
+function e = errorBA(hidden_state, observations, K)
+%ERRORBA given the hidden state, the observations and the camera calibration
+%matrix, computes the reprojection error used as the objective of the bundle
+%adjustment.
+% INPUT:
+% - hidden_state: as described in the function RUNBA.
+% - observations: as described in the function RUNBA.
+% - K: camera calibration matrix (3x3)
+% OUTPUT:
+% - e: stacked reprojection error (reprojected minus observed pixel coordinates)
+
+debug = false;
+
+n = observations(1);
+m = observations(2);
+taus = hidden_state(1:(n*6));    % Camera twists (obtained from VO)
+taus = reshape(taus, 6, n);      % 6xn
+P = hidden_state((n*6+1):end);   % 3D Landmarks
+P = reshape(P, 3, m);            % 3xm
+
+% There is one observation block per frame
+curr_id = 3;
+Y = [];
+f_x = [];
+for f=1:n
+    % Get the camera pose as a homogeneous matrix in the world frame
+    T_WC_i = twist2HomogMatrix(taus(:,f));
+    T_CW_i = inv(T_WC_i);
+
+    % Get the number of keypoints for this frame
+    ki = observations(curr_id);
+    % Generate the vector of reference 2D points.
+    % The observed points are stored as (row, col) and must be flipped to (x, y)
+    aux_y = flipud(reshape(observations(curr_id+1:curr_id+2*ki), 2, ki));
+    Y = [Y; aux_y(:)];    % 2 numbers per point
+
+    % Generate the vector of reprojected 3D points from the camera twist
+    l_i = floor(observations(curr_id+1+2*ki:curr_id+3*ki));  % landmark ids
+    P_i = P(:,l_i);    % retrieve the observed landmarks, in order
+    p_i = K * T_CW_i(1:3,:) * [P_i; ones(1, size(P_i, 2))];  % Reprojection
+    p_i = p_i(1:2,:) ./ p_i(3,:);
+    f_x = [f_x; p_i(:)];
+
+    % DEBUG: This is used to check that the projections make sense
+    if debug
+        figure(3)
+        plot(p_i(1,:), p_i(2,:), 'x');
+        hold on;
+        plot(aux_y(1,:), aux_y(2,:), 'x');
+        hold off;
+        axis equal;
+        pause(0.01);
+    end
+
+    % Update the current id for the next iteration
+    curr_id = curr_id+1+3*ki;    % 2 pixel coordinates + landmark id per point
+end
+
+e = f_x - Y;
+
+end
+
+function M = getSparseJacobianFilter(hidden_state, observations)
+%GETSPARSEJACOBIANFILTER computes the sparsity pattern of the Jacobian of the
+%reprojection error, i.e. a sparse matrix that is non-zero exactly where the
+%Jacobian can be non-zero.
+% INPUT:
+% - hidden_state: as described in RUNBA.
+% - observations: as described in RUNBA.
+% OUTPUT:
+% - M: the sparse pattern matrix.
+
+% Populate the M matrix
+n = observations(1);
+% Each observed landmark contributes 3 numbers (2 pixel coordinates + 1 index)
+numel_obs = (numel(observations)-2-n)/3;
+% Prepare the sparse Jacobian pattern: 2 error rows per observation, each
+% depending on 6 pose parameters and 3 landmark parameters (9 non-zeros per row)
+M = spalloc(2*numel_obs, length(hidden_state), 18*numel_obs);
+
+curr_id = 3;
+n_obs = 1;
+for f=1:n
+    ki = observations(curr_id);
+    l_i = floor(observations(curr_id+1+2*ki:curr_id+3*ki));
+
+    % Dependence on the camera pose
+    M(n_obs:n_obs+ki*2-1, (f-1)*6+1:(f-1)*6+6) = 1;
+
+    % Dependence on the landmark
+    for l_id=1:numel(l_i)
+        M(n_obs+2*(l_id-1):n_obs-1+l_id*2, ...
+            n*6+3*(l_i(l_id)-1)+1:n*6+3*l_i(l_id)) = 1;
+    end
+
+    % Update the current id for the next iteration
+    n_obs = n_obs + ki*2;
+    curr_id = curr_id+1+3*ki;    % 2 pixel coordinates + landmark id per point
+end
+
+end
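To make the packed layouts of hidden_state and observations concrete, here is a small synthetic setup (an illustrative sketch; the values and variable names such as tau1 and noisy_state are made up for this example, the final call needs the Optimization Toolbox's lsqnonlin, and twist2HomogMatrix must be resolvable on the path, see the round-trip note at the end of this diff):

    % Two camera poses as 6x1 twists [v; w] and three 3D landmarks
    tau1 = zeros(6,1);                       % identity pose
    tau2 = [0.5; 0; 0; 0; 0; 0];             % 0.5 m translation along x
    P = [0 1 -1; 0 1 1; 5 6 7];              % landmarks, one per column (3x3)
    hidden_state = [tau1; tau2; P(:)];       % [pose_1, pose_2, l_1, ..., l_3], unrolled

    K = [500 0 320; 0 500 240; 0 0 1];       % example calibration matrix
    skew = @(w) [0 -w(3) w(2); w(3) 0 -w(1); -w(2) w(1) 0];

    % Build observations = [n, m, O_1, O_2] by projecting the landmarks;
    % each frame observes all m landmarks here
    n = 2;  m = 3;
    observations = [n; m];
    for f = 1:n
        tau = hidden_state((f-1)*6+1 : f*6);
        T_WC = expm([skew(tau(4:6)), tau(1:3); zeros(1,4)]);
        T_CW = inv(T_WC);
        p = K * T_CW(1:3,:) * [P; ones(1,m)];
        uv = p(1:2,:) ./ p(3,:);             % pixel (x; y) per landmark
        rc = flipud(uv);                     % stored as (row; col), as errorBA expects
        observations = [observations; m; rc(:); (1:m)'];   % O_f = [k_f, p_1..p_k, l_1..l_k]
    end

    % Perturb the landmarks and let bundle adjustment pull them back
    noisy_state = hidden_state + [zeros(12,1); 0.05*randn(3*m,1)];
    refined = runBA(noisy_state, observations, K);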
diff --git a/src/twist2MatOperations.m b/src/twist2MatOperations.m
new file mode 100644
index 0000000..6569cc2
--- /dev/null
+++ b/src/twist2MatOperations.m
@@ -0,0 +1,36 @@
+function H = twist2HomogMatrix(twist)
+
+% twist2HomogMatrix Convert twist coordinates to a 4x4 homogeneous matrix
+%
+% Input:
+% -twist(6,1): twist coordinates, stacking the linear and angular parts [v;w]
+% Output:
+% -H(4,4): Euclidean transformation matrix (rigid body motion)
+
+v = twist(1:3);   % linear part
+w = twist(4:6);   % angular part
+
+se_matrix = [Cross2Matrix(w), v; 0 0 0 0];   % Lie algebra matrix
+H = expm(se_matrix);
+
+end
+
+% CROSS2MATRIX Antisymmetric matrix corresponding to a 3D vector
+%
+% Computes the antisymmetric matrix M corresponding to a 3D vector x such
+% that M*y = cross(x,y) for all 3D vectors y.
+%
+% Input:
+% - x(3,1) : vector
+%
+% Output:
+% - M(3,3) : antisymmetric matrix
+%
+% See also MATRIX2CROSS
+
+function M = Cross2Matrix(x)
+
+M = [0,-x(3),x(2); x(3),0,-x(1); -x(2),x(1),0];
+
+end
+
diff --git a/src/updateFigure.m b/src/updateFigure.m
index 9f29cde..8c0cb13 100644
--- a/src/updateFigure.m
+++ b/src/updateFigure.m
@@ -36,7 +36,6 @@
 addpoints(fig.numLandmarksData, x-(max(x)+1), y);
 addpoints(fig.numLandmarksData, x(end)+1, numLandmarksPoint)
 
-% Bottom right corner: position of vehicle over time?
 addpoints(fig.fullTrajectoryData, fullTrajectoryPoints(:,1), fullTrajectoryPoints(:,2))
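A round-trip check for the two conversion helpers added in this change, HomogMatrix2twist.m and twist2MatOperations.m (again an illustrative sketch; note that MATLAB resolves function calls by file name, so the twist2HomogMatrix defined in twist2MatOperations.m is only callable under that name if the repository also ships it as its own file, or if the file is renamed accordingly):

    tau = [0.1; -0.2; 0.3; 0; 0; pi/4];   % twist with a 45-degree rotation about z
    H = twist2HomogMatrix(tau);           % 4x4 rigid-body transform
    tau_back = HomogMatrix2twist(H);
    % Twists whose rotation angle lies in (0, pi) round-trip exactly (up to numerics)
    assert(norm(tau - tau_back) < 1e-9);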