599 fix cob measurement noise #329

Merged: 5 commits, Oct 4, 2024 (changes shown from all commits)
@@ -39,30 +39,28 @@ def mapState(state, input_camera):
Kinv = np.linalg.inv(K)

rHat_BN_C = Kinv @ np.array([state[0], state[1], 1])
norm_COB_vector = np.linalg.norm(rHat_BN_C)
rHat_BN_C = -rHat_BN_C / np.linalg.norm(rHat_BN_C)

return rHat_BN_C
return rHat_BN_C, norm_COB_vector


def mapCovar(pixels, input_camera):
def mapCovar(pixels, input_camera, norm_COB_vector):
"""Secondary method to map the covariance in pixel space to position"""
K = compute_camera_calibration_matrix(input_camera)
d_x = K[0, 0]
d_y = K[1, 1]
X = 1 / d_x
Y = 1 / d_y

if pixels > 0:
scale_factor = np.sqrt(pixels) / (2 * np.pi)
else:
scale_factor = 1 # prevent division by zero
scale_factor = np.sqrt(pixels / (4 * np.pi)) / (norm_COB_vector ** 2)

covar = np.zeros([3, 3])
covar[0, 0] = X ** 2
covar[1, 1] = Y ** 2
covar[2, 2] = 1

return 1 / scale_factor * covar
return scale_factor * covar


def compute_camera_calibration_matrix(input_camera):
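For readability, here is a consolidated sketch of how the two updated helpers read after this change (a sketch assuming only numpy and the compute_camera_calibration_matrix helper defined in this file):

import numpy as np

def mapState(state, input_camera):
    """Map the COB pixel location to a unit vector in the camera frame."""
    K = compute_camera_calibration_matrix(input_camera)
    Kinv = np.linalg.inv(K)

    rHat_BN_C = Kinv @ np.array([state[0], state[1], 1])
    norm_COB_vector = np.linalg.norm(rHat_BN_C)  # norm before normalization, reused to scale the covariance
    rHat_BN_C = -rHat_BN_C / norm_COB_vector

    return rHat_BN_C, norm_COB_vector

def mapCovar(pixels, input_camera, norm_COB_vector):
    """Map the covariance in pixel space to position."""
    K = compute_camera_calibration_matrix(input_camera)
    X = 1 / K[0, 0]
    Y = 1 / K[1, 1]

    # new scaling: sqrt(pixels / (4*pi)) divided by the squared norm of the unnormalized camera-frame vector
    scale_factor = np.sqrt(pixels / (4 * np.pi)) / (norm_COB_vector ** 2)

    covar = np.diag([X ** 2, Y ** 2, 1.0])
    return scale_factor * covar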
@@ -121,7 +119,10 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
testProc = unitTestSim.CreateNewProcess(unitProcessName)
testProc.addTask(unitTestSim.CreateNewTask(unitTaskName, testProcessRate))
R_object = 25. * 1e3
att_sigma = 0.001
covar_att_B = np.diag([att_sigma**2, (0.9*att_sigma)**2, (0.95*att_sigma)**2])
module = cobConverter.CobConverter(method, R_object)
module.setAttitudeCovariance(covar_att_B)
unitTestSim.AddModelToTask(unitTaskName, module, module)

# Create the input messages.
@@ -183,13 +184,16 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
dcm_NC = np.dot(dcm_CB, dcm_BN).T

# Center of Brightness Unit Vector
rhat_COB_C_true = mapState(cob_true, inputCamera)
covar_COB_C_true = mapCovar(num_pixels, inputCamera)
[rhat_COB_C_true, norm_COB_vector] = mapState(cob_true, inputCamera)
covar_COB_C_true = mapCovar(num_pixels, inputCamera, norm_COB_vector)
rhat_COB_N_true = np.dot(dcm_NC, rhat_COB_C_true) * valid_COB_true # multiply by validity to get zero vector if bad
covar_COB_N_true = np.dot(dcm_NC, np.dot(covar_COB_C_true, dcm_NC.T)).flatten() * valid_COB_true
timeTag_true_ns = inputCob.timeTag * valid_COB_true
timeTag_true = timeTag_true_ns * macros.NANO2SEC

covar_COB_B_true = np.dot(dcm_CB.T, np.dot(covar_COB_C_true, dcm_CB))
covar_B_true = covar_COB_B_true + covar_att_B
covar_N_true = np.dot(dcm_BN.T, np.dot(covar_B_true, dcm_BN)).flatten() * valid_COB_true

# Center of Mass Message and Unit Vector
alpha = np.arccos(np.dot(r_BdyZero_N.T / np.linalg.norm(r_BdyZero_N), vehSunPntN)) # phase angle
gamma = phase_angle_correction(alpha, method) # COB/COM offset factor
@@ -202,7 +206,7 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
com_true = [None] * 2 # COM location in image
com_true[0] = cob_true[0] - gamma * Rc * np.cos(phi) * valid_COB_true
com_true[1] = cob_true[1] - gamma * Rc * np.sin(phi) * valid_COB_true
rhat_COM_C_true = mapState(com_true, inputCamera)
[rhat_COM_C_true, norm_COM_vector] = mapState(com_true, inputCamera)
if valid_COB_true and (method == binary or method == lambertian):
valid_COM_true = True
rhat_COM_N_true = np.dot(dcm_NC, rhat_COM_C_true)
@@ -212,7 +216,7 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN

# module output
rhat_COB_N = dataLogUnitVecCOB.rhat_BN_N[0]
covar_COB_N = dataLogUnitVecCOB.covar_N[0]
covar_N = dataLogUnitVecCOB.covar_N[0]
time_COB = dataLogUnitVecCOB.timeTag[0]
com = dataLogCOM.centerOfMass[0]
time_COM_ns = dataLogCOM.timeTag[0]
@@ -228,11 +232,11 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
err_msg='Variable: rhat_COB_N',
verbose=True)

np.testing.assert_allclose(covar_COB_N,
covar_COB_N_true,
np.testing.assert_allclose(covar_N,
covar_N_true,
rtol=0,
atol=tolerance,
err_msg='Variable: covar_COB_N',
err_msg='Variable: covar_N',
verbose=True)

np.testing.assert_allclose(time_COB,
78 changes: 50 additions & 28 deletions src/fswAlgorithms/opticalNavigation/cobConverter/cobConverter.cpp
@@ -138,49 +138,54 @@ void CobConverter::UpdateState(uint64_t CurrentSimNanos)
centerOfMass[2] = 1.0;

/*! - Get the heading in the image plane */
Eigen::Vector3d rhat_COB_C = cameraCalibrationMatrixInverse * centerOfBrightness;
Eigen::Vector3d rhat_COM_C = cameraCalibrationMatrixInverse * centerOfMass;
Eigen::Vector3d rhatCOB_C = cameraCalibrationMatrixInverse * centerOfBrightness;
Eigen::Vector3d rhatCOM_C = cameraCalibrationMatrixInverse * centerOfMass;

/*! - Retrieve the vector from target to camera and normalize */
rhat_COB_C *= - 1;
rhat_COB_C.normalize();
rhat_COM_C *= - 1;
rhat_COM_C.normalize();
rhatCOB_C *= - 1;
double rhatCOBNorm = rhatCOB_C.norm();
rhatCOB_C.normalize();
rhatCOM_C *= - 1;
rhatCOM_C.normalize();

/*! - Rotate the vector into frames of interest */
Eigen::Vector3d rhat_COB_N = dcm_NC * rhat_COB_C;
Eigen::Vector3d rhat_COB_B = dcm_CB.transpose() * rhat_COB_C;
Eigen::Vector3d rhat_COM_N = dcm_NC * rhat_COM_C;
Eigen::Vector3d rhat_COM_B = dcm_CB.transpose() * rhat_COM_C;

/*! - Define diagonal terms of the covariance */
Eigen::Matrix3d covar_C;
covar_C.setZero();
covar_C(0,0) = pow(X,2);
covar_C(1,1) = pow(Y,2);
covar_C(2,2) = 1;
/*! - define and rotate covariance using number of pixels found */
double scaleFactor = sqrt(cobMsgBuffer.pixelsFound)/(2*M_PI);
covar_C *= 1./scaleFactor;
Eigen::Matrix3d covar_N = dcm_NC * covar_C * dcm_NC.transpose();
Eigen::Matrix3d covar_B = dcm_CB.transpose() * covar_C * dcm_CB;
Eigen::Vector3d rhatCOB_N = dcm_NC * rhatCOB_C;
Eigen::Vector3d rhatCOB_B = dcm_CB.transpose() * rhatCOB_C;
Eigen::Vector3d rhatCOM_N = dcm_NC * rhatCOM_C;
Eigen::Vector3d rhatCOM_B = dcm_CB.transpose() * rhatCOM_C;

/*! - define diagonal terms of the COB covariance */
Eigen::Matrix3d covarCob_C;
covarCob_C.setZero();
covarCob_C(0,0) = pow(X,2);
covarCob_C(1,1) = pow(Y,2);
covarCob_C(2,2) = 1;
/*! - scale covariance using number of pixels found and rotate into B frame */
double scaleFactor = sqrt(cobMsgBuffer.pixelsFound / (4 * M_PI)) / pow(rhatCOBNorm, 2);
covarCob_C *= scaleFactor;
Eigen::Matrix3d covarCob_B = dcm_CB.transpose() * covarCob_C * dcm_CB;
/*! - add attitude error covariance in B frame to get total covariance of unit vector measurements */
Eigen::Matrix3d covar_B = covarCob_B + this->covarAtt_BN_B;
/*! - rotate total covariance into all remaining frames */
Eigen::Matrix3d covar_N = dcm_BN.transpose() * covar_B * dcm_BN;
Eigen::Matrix3d covar_C = dcm_CB * covar_B * dcm_CB.transpose();

/*! - output messages */
eigenMatrix3d2CArray(covar_N, uVecCOBMsgBuffer.covar_N);
eigenMatrix3d2CArray(covar_C, uVecCOBMsgBuffer.covar_C);
eigenMatrix3d2CArray(covar_B, uVecCOBMsgBuffer.covar_B);
eigenVector3d2CArray(rhat_COB_N, uVecCOBMsgBuffer.rhat_BN_N);
eigenVector3d2CArray(rhat_COB_C, uVecCOBMsgBuffer.rhat_BN_C);
eigenVector3d2CArray(rhat_COB_B, uVecCOBMsgBuffer.rhat_BN_B);
eigenVector3d2CArray(rhatCOB_N, uVecCOBMsgBuffer.rhat_BN_N);
eigenVector3d2CArray(rhatCOB_C, uVecCOBMsgBuffer.rhat_BN_C);
eigenVector3d2CArray(rhatCOB_B, uVecCOBMsgBuffer.rhat_BN_B);
uVecCOBMsgBuffer.timeTag = (double) cobMsgBuffer.timeTag * NANO2SEC;
uVecCOBMsgBuffer.valid = true;

eigenMatrix3d2CArray(covar_N, uVecCOMMsgBuffer.covar_N);
eigenMatrix3d2CArray(covar_C, uVecCOMMsgBuffer.covar_C);
eigenMatrix3d2CArray(covar_B, uVecCOMMsgBuffer.covar_B);
eigenVector3d2CArray(rhat_COM_N, uVecCOMMsgBuffer.rhat_BN_N);
eigenVector3d2CArray(rhat_COM_C, uVecCOMMsgBuffer.rhat_BN_C);
eigenVector3d2CArray(rhat_COM_B, uVecCOMMsgBuffer.rhat_BN_B);
eigenVector3d2CArray(rhatCOM_N, uVecCOMMsgBuffer.rhat_BN_N);
eigenVector3d2CArray(rhatCOM_C, uVecCOMMsgBuffer.rhat_BN_C);
eigenVector3d2CArray(rhatCOM_B, uVecCOMMsgBuffer.rhat_BN_B);
uVecCOMMsgBuffer.timeTag = (double) cobMsgBuffer.timeTag * NANO2SEC;
uVecCOMMsgBuffer.valid = validCOM;
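The frame bookkeeping above can be summarized in a few lines of numpy (an illustrative sketch, not the module source; dcm_CB maps B to C and dcm_BN maps N to B, matching the conventions used in this file):

import numpy as np

def compose_total_covariance(covar_cob_C, covar_att_B, dcm_CB, dcm_BN):
    """Combine COB measurement noise (camera frame) with attitude error noise (body frame)."""
    covar_cob_B = dcm_CB.T @ covar_cob_C @ dcm_CB   # rotate COB covariance from C to B
    covar_B = covar_cob_B + covar_att_B             # add attitude error covariance in B
    covar_N = dcm_BN.T @ covar_B @ dcm_BN           # rotate total covariance from B to N
    covar_C = dcm_CB @ covar_B @ dcm_CB.T           # rotate total covariance from B to C
    return covar_B, covar_N, covar_C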

@@ -215,3 +220,20 @@ void CobConverter::setRadius(const double radius){
double CobConverter::getRadius() const {
return this->objectRadius;
}

/*! Set the attitude error covariance matrix in body frame B, used for the unit vector measurements
@param covAtt_BN_B attitude error covariance matrix of B relative to N, expressed in the B frame
@return void
*/
void CobConverter::setAttitudeCovariance(const Eigen::Matrix3d covAtt_BN_B)
{
this->covarAtt_BN_B = covAtt_BN_B;
}

/*! Get the attitude error covariance matrix in body frame B, used for the unit vector measurements
@return Eigen::Matrix3d covAtt_BN_B
*/
Eigen::Matrix3d CobConverter::getAttitudeCovariance() const
{
return this->covarAtt_BN_B;
}
@@ -51,6 +51,8 @@ class CobConverter: public SysModel {

void setRadius(const double radius);
double getRadius() const;
void setAttitudeCovariance(const Eigen::Matrix3d covAtt_BN_B);
Eigen::Matrix3d getAttitudeCovariance() const;

public:
Message<OpNavUnitVecMsgPayload> opnavUnitVecCOBOutMsg;
Expand All @@ -67,6 +69,7 @@ class CobConverter: public SysModel {
private:
PhaseAngleCorrectionMethod phaseAngleCorrectionMethod;
double objectRadius{};
Eigen::Matrix3d covarAtt_BN_B{};
};

#endif
@@ -29,6 +29,7 @@ from Basilisk.architecture.swig_common_model import *
%include "std_string.i"
%include "sys_model.h"
%include "swig_conly_data.i"
%include "swig_eigen.i"

%include "cobConverter.h"

@@ -75,15 +75,17 @@ where :math:`\mathbf{\bar{u}}_{COB} = [\mathrm{cob}_x, \mathrm{cob}_y, 1]^T` with
center-of-brightness pixel coordinates :math:`\mathrm{cob}_x` and :math:`\mathrm{cob}_y`, :math:`[K]` is the camera calibration matrix and
:math:`\mathbf{r}_{COB}^C` is the unit vector describing the physical heading to the target in the camera frame.

The covariance is found using the number of detected pixels and the camera parameters, given by:
The covariance of the COB error is found using the number of detected pixels and the camera parameters, given by:

.. math::

P = \frac{2 \pi}{\sqrt{\mathrm{numPixels}}} \left( \begin{bmatrix} d_x^2 & 0 & 0 \\ 0 & d_y^2 & 0
P = \frac{\mathrm{numPixels}}{4 \pi \cdot ||\mathbf{\bar{u}}_{COB}||^2} \left( \begin{bmatrix} d_x^2 & 0 & 0 \\ 0 & d_y^2 & 0
\\ 0 & 0 & 1 \end{bmatrix}\right)

where :math:`d_x` and :math:`d_y` are the first and second diagonal elements of the camera calibration matrix
:math:`[K]`.
:math:`[K]`. This covariance matrix is then transformed into the body frame and added to the covariance of the attitude
error. Both individual covariance matrices, and thus the total covariance matrix, describe the measurement noise of a
unit vector.
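For reference, the total covariance implemented in the module can be written compactly, with :math:`P` the COB covariance above, :math:`P_{att}^B` the attitude error covariance, :math:`[CB]` the camera-frame-relative-to-body DCM and :math:`[BN]` the body-relative-to-inertial DCM:

.. math::

    P^B = [CB]^T P [CB] + P_{att}^B, \qquad P^N = [BN]^T P^B [BN]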

By reading the camera orientation and the current body attitude in the inertial frame, the final step is to rotate
the covariance and heading vector into all the relevant frames for modules downstream. This is done simply by
@@ -142,6 +144,10 @@ This section is to outline the steps needed to setup a center of brightness conv
# module = cobConverter.CobConverter(cobConverter.PhaseAngleCorrectionMethod_Lambertian, R_obj) # Lambertian method
# module = cobConverter.CobConverter(cobConverter.PhaseAngleCorrectionMethod_Binary, R_obj) # Binary method

#. The attitude error covariance matrix is set by::

module.setAttitudeCovariance(covar_att_BN_B)

#. The object radius in units of meters for the phase angle correction can be updated by::

module.setRadius(R_obj)