
Commit 099e23d

Merge pull request #329 from lasp/599-fix-cob-measurement-noise
599 fix cob measurement noise
2 parents 5dbcd3f + 6f6603d commit 099e23d

File tree

5 files changed: +82 −46 lines changed

src/fswAlgorithms/opticalNavigation/cobConverter/_UnitTest/test_cobConverter.py

+19 −15

@@ -39,30 +39,28 @@ def mapState(state, input_camera):
     Kinv = np.linalg.inv(K)
 
     rHat_BN_C = Kinv @ np.array([state[0], state[1], 1])
+    norm_COB_vector = np.linalg.norm(rHat_BN_C)
     rHat_BN_C = -rHat_BN_C / np.linalg.norm(rHat_BN_C)
 
-    return rHat_BN_C
+    return rHat_BN_C, norm_COB_vector
 
 
-def mapCovar(pixels, input_camera):
+def mapCovar(pixels, input_camera, norm_COB_vector):
     """Secondary method to map the covariance in pixel space to position"""
     K = compute_camera_calibration_matrix(input_camera)
     d_x = K[0, 0]
     d_y = K[1, 1]
     X = 1 / d_x
     Y = 1 / d_y
 
-    if pixels > 0:
-        scale_factor = np.sqrt(pixels) / (2 * np.pi)
-    else:
-        scale_factor = 1 # prevent division by zero
+    scale_factor = np.sqrt(pixels / (4 * np.pi)) / (norm_COB_vector ** 2)
 
     covar = np.zeros([3, 3])
     covar[0, 0] = X ** 2
     covar[1, 1] = Y ** 2
     covar[2, 2] = 1
 
-    return 1 / scale_factor * covar
+    return scale_factor * covar
 
 
 def compute_camera_calibration_matrix(input_camera):

@@ -121,7 +119,10 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
     testProc = unitTestSim.CreateNewProcess(unitProcessName)
     testProc.addTask(unitTestSim.CreateNewTask(unitTaskName, testProcessRate))
     R_object = 25. * 1e3
+    att_sigma = 0.001
+    covar_att_B = np.diag([att_sigma**2, (0.9*att_sigma)**2, (0.95*att_sigma)**2])
     module = cobConverter.CobConverter(method, R_object)
+    module.setAttitudeCovariance(covar_att_B)
     unitTestSim.AddModelToTask(unitTaskName, module, module)
 
     # Create the input messages.

@@ -183,13 +184,16 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
     dcm_NC = np.dot(dcm_CB, dcm_BN).T
 
     # Center of Brightness Unit Vector
-    rhat_COB_C_true = mapState(cob_true, inputCamera)
-    covar_COB_C_true = mapCovar(num_pixels, inputCamera)
+    [rhat_COB_C_true, norm_COB_vector] = mapState(cob_true, inputCamera)
+    covar_COB_C_true = mapCovar(num_pixels, inputCamera, norm_COB_vector)
     rhat_COB_N_true = np.dot(dcm_NC, rhat_COB_C_true) * valid_COB_true # multiple by validity to get zero vector if bad
-    covar_COB_N_true = np.dot(dcm_NC, np.dot(covar_COB_C_true, dcm_NC.T)).flatten() * valid_COB_true
     timeTag_true_ns = inputCob.timeTag * valid_COB_true
     timeTag_true = timeTag_true_ns * macros.NANO2SEC
 
+    covar_COB_B_true = np.dot(dcm_CB.T, np.dot(covar_COB_C_true, dcm_CB))
+    covar_B_true = covar_COB_B_true + covar_att_B
+    covar_N_true = np.dot(dcm_BN.T, np.dot(covar_B_true, dcm_BN)).flatten() * valid_COB_true
+
     # Center of Mass Message and Unit Vector
     alpha = np.arccos(np.dot(r_BdyZero_N.T / np.linalg.norm(r_BdyZero_N), vehSunPntN)) # phase angle
     gamma = phase_angle_correction(alpha, method) # COB/COM offset factor

@@ -202,7 +206,7 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
     com_true = [None] * 2 # COM location in image
     com_true[0] = cob_true[0] - gamma * Rc * np.cos(phi) * valid_COB_true
     com_true[1] = cob_true[1] - gamma * Rc * np.sin(phi) * valid_COB_true
-    rhat_COM_C_true = mapState(com_true, inputCamera)
+    [rhat_COM_C_true, norm_COM_vector] = mapState(com_true, inputCamera)
     if valid_COB_true and (method == binary or method == lambertian):
         valid_COM_true = True
         rhat_COM_N_true = np.dot(dcm_NC, rhat_COM_C_true)

@@ -212,7 +216,7 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
 
     # module output
     rhat_COB_N = dataLogUnitVecCOB.rhat_BN_N[0]
-    covar_COB_N = dataLogUnitVecCOB.covar_N[0]
+    covar_N = dataLogUnitVecCOB.covar_N[0]
     time_COB = dataLogUnitVecCOB.timeTag[0]
     com = dataLogCOM.centerOfMass[0]
     time_COM_ns = dataLogCOM.timeTag[0]

@@ -228,11 +232,11 @@ def cob_converter_test_function(show_plots, cameraResolution, sigma_CB, sigma_BN
                                err_msg='Variable: rhat_COB_N',
                                verbose=True)
 
-    np.testing.assert_allclose(covar_COB_N,
-                               covar_COB_N_true,
+    np.testing.assert_allclose(covar_N,
+                               covar_N_true,
                                rtol=0,
                                atol=tolerance,
-                               err_msg='Variable: covar_COB_N',
+                               err_msg='Variable: covar_N',
                                verbose=True)
 
     np.testing.assert_allclose(time_COB,
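As a quick numerical illustration of the updated mapCovar scaling (all values below are hypothetical and not taken from the unit test), the new scale factor and covariance diagonal follow directly from the formula sqrt(numPixels / 4π) / ||u_COB||²::

    import numpy as np

    # Hypothetical numbers for illustration only (not from test_cobConverter.py)
    d_x = d_y = 500.0          # first two diagonal terms of the calibration matrix K
    num_pixels = 100           # pixels attributed to the target by the COB algorithm
    norm_COB_vector = 1.02     # norm of Kinv @ [cob_x, cob_y, 1] before normalization

    X, Y = 1.0 / d_x, 1.0 / d_y
    scale_factor = np.sqrt(num_pixels / (4 * np.pi)) / (norm_COB_vector ** 2)

    covar = scale_factor * np.diag([X ** 2, Y ** 2, 1.0])
    print(scale_factor)        # ~2.71
    print(np.diag(covar))      # ~[1.08e-05, 1.08e-05, 2.71]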

src/fswAlgorithms/opticalNavigation/cobConverter/cobConverter.cpp

+50 −28

@@ -138,49 +138,54 @@ void CobConverter::UpdateState(uint64_t CurrentSimNanos)
         centerOfMass[2] = 1.0;
 
         /*! - Get the heading in the image plane */
-        Eigen::Vector3d rhat_COB_C = cameraCalibrationMatrixInverse * centerOfBrightness;
-        Eigen::Vector3d rhat_COM_C = cameraCalibrationMatrixInverse * centerOfMass;
+        Eigen::Vector3d rhatCOB_C = cameraCalibrationMatrixInverse * centerOfBrightness;
+        Eigen::Vector3d rhatCOM_C = cameraCalibrationMatrixInverse * centerOfMass;
 
         /*! - Retrieve the vector from target to camera and normalize */
-        rhat_COB_C *= - 1;
-        rhat_COB_C.normalize();
-        rhat_COM_C *= - 1;
-        rhat_COM_C.normalize();
+        rhatCOB_C *= - 1;
+        double rhatCOBNorm = rhatCOB_C.norm();
+        rhatCOB_C.normalize();
+        rhatCOM_C *= - 1;
+        rhatCOM_C.normalize();
 
         /*! - Rotate the vector into frames of interest */
-        Eigen::Vector3d rhat_COB_N = dcm_NC * rhat_COB_C;
-        Eigen::Vector3d rhat_COB_B = dcm_CB.transpose() * rhat_COB_C;
-        Eigen::Vector3d rhat_COM_N = dcm_NC * rhat_COM_C;
-        Eigen::Vector3d rhat_COM_B = dcm_CB.transpose() * rhat_COM_C;
-
-        /*! - Define diagonal terms of the covariance */
-        Eigen::Matrix3d covar_C;
-        covar_C.setZero();
-        covar_C(0,0) = pow(X,2);
-        covar_C(1,1) = pow(Y,2);
-        covar_C(2,2) = 1;
-        /*! - define and rotate covariance using number of pixels found */
-        double scaleFactor = sqrt(cobMsgBuffer.pixelsFound)/(2*M_PI);
-        covar_C *= 1./scaleFactor;
-        Eigen::Matrix3d covar_N = dcm_NC * covar_C * dcm_NC.transpose();
-        Eigen::Matrix3d covar_B = dcm_CB.transpose() * covar_C * dcm_CB;
+        Eigen::Vector3d rhatCOB_N = dcm_NC * rhatCOB_C;
+        Eigen::Vector3d rhatCOB_B = dcm_CB.transpose() * rhatCOB_C;
+        Eigen::Vector3d rhatCOM_N = dcm_NC * rhatCOM_C;
+        Eigen::Vector3d rhatCOM_B = dcm_CB.transpose() * rhatCOM_C;
+
+        /*! - define diagonal terms of the COB covariance */
+        Eigen::Matrix3d covarCob_C;
+        covarCob_C.setZero();
+        covarCob_C(0,0) = pow(X,2);
+        covarCob_C(1,1) = pow(Y,2);
+        covarCob_C(2,2) = 1;
+        /*! - scale covariance using number of pixels found and rotate into B frame */
+        double scaleFactor = sqrt(cobMsgBuffer.pixelsFound / (4 * M_PI)) / pow(rhatCOBNorm, 2);
+        covarCob_C *= scaleFactor;
+        Eigen::Matrix3d covarCob_B = dcm_CB.transpose() * covarCob_C * dcm_CB;
+        /*! - add attitude error covariance in B frame to get total covariance of unit vector measurements */
+        Eigen::Matrix3d covar_B = covarCob_B + this->covarAtt_BN_B;
+        /*! - rotate total covariance into all remaining frames */
+        Eigen::Matrix3d covar_N = dcm_BN.transpose() * covar_B * dcm_BN;
+        Eigen::Matrix3d covar_C = dcm_CB.transpose() * covar_B * dcm_CB;
 
         /*! - output messages */
         eigenMatrix3d2CArray(covar_N, uVecCOBMsgBuffer.covar_N);
         eigenMatrix3d2CArray(covar_C, uVecCOBMsgBuffer.covar_C);
         eigenMatrix3d2CArray(covar_B, uVecCOBMsgBuffer.covar_B);
-        eigenVector3d2CArray(rhat_COB_N, uVecCOBMsgBuffer.rhat_BN_N);
-        eigenVector3d2CArray(rhat_COB_C, uVecCOBMsgBuffer.rhat_BN_C);
-        eigenVector3d2CArray(rhat_COB_B, uVecCOBMsgBuffer.rhat_BN_B);
+        eigenVector3d2CArray(rhatCOB_N, uVecCOBMsgBuffer.rhat_BN_N);
+        eigenVector3d2CArray(rhatCOB_C, uVecCOBMsgBuffer.rhat_BN_C);
+        eigenVector3d2CArray(rhatCOB_B, uVecCOBMsgBuffer.rhat_BN_B);
         uVecCOBMsgBuffer.timeTag = (double) cobMsgBuffer.timeTag * NANO2SEC;
         uVecCOBMsgBuffer.valid = true;
 
         eigenMatrix3d2CArray(covar_N, uVecCOMMsgBuffer.covar_N);
         eigenMatrix3d2CArray(covar_C, uVecCOMMsgBuffer.covar_C);
         eigenMatrix3d2CArray(covar_B, uVecCOMMsgBuffer.covar_B);
-        eigenVector3d2CArray(rhat_COM_N, uVecCOMMsgBuffer.rhat_BN_N);
-        eigenVector3d2CArray(rhat_COM_C, uVecCOMMsgBuffer.rhat_BN_C);
-        eigenVector3d2CArray(rhat_COM_B, uVecCOMMsgBuffer.rhat_BN_B);
+        eigenVector3d2CArray(rhatCOM_N, uVecCOMMsgBuffer.rhat_BN_N);
+        eigenVector3d2CArray(rhatCOM_C, uVecCOMMsgBuffer.rhat_BN_C);
+        eigenVector3d2CArray(rhatCOM_B, uVecCOMMsgBuffer.rhat_BN_B);
         uVecCOMMsgBuffer.timeTag = (double) cobMsgBuffer.timeTag * NANO2SEC;
         uVecCOMMsgBuffer.valid = validCOM;
 

@@ -215,3 +220,20 @@ void CobConverter::setRadius(const double radius){
 double CobConverter::getRadius() const {
     return this->objectRadius;
 }
+
+/*! Set the attitude error covariance matrix in body frame B, for unit vector measurements
+    @param cov_att_BN_B
+    @return void
+*/
+void CobConverter::setAttitudeCovariance(const Eigen::Matrix3d covAtt_BN_B)
+{
+    this->covarAtt_BN_B = covAtt_BN_B;
+}
+
+/*! Get the attitude error covariance matrix in body frame B, for unit vector measurements
+    @return Eigen::Matrix3d cov_att_BN_B
+*/
+Eigen::Matrix3d CobConverter::getAttitudeCovariance() const
+{
+    return this->covarAtt_BN_B;
+}
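The covariance chain introduced above (scale the COB covariance in the camera frame, rotate it into the body frame, add the attitude error covariance, then express the total in the inertial frame) can be sketched in numpy; the rotations, attitude covariance, and COB covariance values below are placeholders chosen purely for illustration::

    import numpy as np

    # Placeholders for illustration only: identity rotations and a small attitude covariance
    dcm_CB = np.eye(3)                                    # maps body frame B into camera frame C
    dcm_BN = np.eye(3)                                    # maps inertial frame N into body frame B
    covar_att_BN_B = np.diag([1e-6, 0.81e-6, 0.9025e-6])  # attitude error covariance in B

    covar_cob_C = 2.71 * np.diag([4e-6, 4e-6, 1.0])       # scaled COB covariance in C (example values)

    covar_cob_B = dcm_CB.T @ covar_cob_C @ dcm_CB         # rotate COB covariance into B
    covar_B = covar_cob_B + covar_att_BN_B                # add attitude error covariance in B
    covar_N = dcm_BN.T @ covar_B @ dcm_BN                 # express the total covariance in N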

src/fswAlgorithms/opticalNavigation/cobConverter/cobConverter.h

+3

@@ -51,6 +51,8 @@ class CobConverter: public SysModel {
 
     void setRadius(const double radius);
     double getRadius() const;
+    void setAttitudeCovariance(const Eigen::Matrix3d covAtt_BN_B);
+    Eigen::Matrix3d getAttitudeCovariance() const;
 
 public:
     Message<OpNavUnitVecMsgPayload> opnavUnitVecCOBOutMsg;

@@ -67,6 +69,7 @@
 private:
     PhaseAngleCorrectionMethod phaseAngleCorrectionMethod;
     double objectRadius{};
+    Eigen::Matrix3d covarAtt_BN_B{};
 };
 
 #endif

src/fswAlgorithms/opticalNavigation/cobConverter/cobConverter.i

+1

@@ -29,6 +29,7 @@ from Basilisk.architecture.swig_common_model import *
 %include "std_string.i"
 %include "sys_model.h"
 %include "swig_conly_data.i"
+%include "swig_eigen.i"
 
 %include "cobConverter.h"
 

src/fswAlgorithms/opticalNavigation/cobConverter/cobConverter.rst

+9 −3

@@ -75,15 +75,17 @@ where :math:`\mathbf{\bar{u}}_{COB} = [\mathrm{cob}_x, \mathrm{cob}_y, 1]^T` wit
 brightness :math:`\mathrm{cob}_x` and :math:`\mathrm{cob}_y`, :math:`[K]` is the camera calibration matrix and
 :math:`\mathbf{r}_{COB}^N` is the unit vector describing the physical heading to the target in the camera frame.
 
-The covariance is found using the number of detected pixels and the camera parameters, given by:
+The covariance of the COB error is found using the number of detected pixels and the camera parameters, given by:
 
 .. math::
 
-    P = \frac{2 \pi}{\sqrt{\mathrm{numPixels}}} \left( \begin{bmatrix} d_x^2 & 0 & 0 \\ 0 & d_y^2 & 0
+    P = \frac{\mathrm{numPixels}}{4 \pi \cdot ||\mathbf{\bar{u}}_{COB}||^2} \left( \begin{bmatrix} d_x^2 & 0 & 0 \\ 0 & d_y^2 & 0
     \\ 0 & 0 & 1 \end{bmatrix}\right)
 
 where :math:`d_x` and :math:`d_y` are the first and second diagonal elements of the camera calibration matrix
-:math:`[K]`.
+:math:`[K]`. This covariance matrix is then transformed into the body frame and added to the covariance of the attitude
+error. Both individual covariance matrices, and thus the total covariance matrix, describe the measurement noise of a
+unit vector.
 
 By reading the camera orientation and the current body attitude in the inertial frame, the final step is to rotate
 the covariance and heading vector in all the relevant frames for modules downstream. This is done simply by

@@ -142,6 +144,10 @@ This section is to outline the steps needed to setup a center of brightness conv
     # module = cobConverter.CobConverter(cobConverter.PhaseAngleCorrectionMethod_Lambertian, R_obj) # Lambertian method
     # module = cobConverter.CobConverter(cobConverter.PhaseAngleCorrectionMethod_Binary, R_obj) # Binary method
 
+#. The attitude error covariance matrix is set by::
+
+    module.setAttitudeCovariance(covar_att_BN_B)
+
 #. The object radius in units of meters for the phase angle correction can be updated by::
 
     module.setRadius(R_obj)
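Putting the documented setup steps together, a minimal sketch of the new configuration (mirroring the values used in test_cobConverter.py above, with the Lambertian phase-angle correction chosen as an example and the usual Basilisk import path assumed) looks like::

    import numpy as np
    from Basilisk.fswAlgorithms import cobConverter

    R_object = 25. * 1e3    # target radius [m], as in the unit test
    att_sigma = 0.001       # 1-sigma attitude error [rad], as in the unit test
    covar_att_B = np.diag([att_sigma**2, (0.9*att_sigma)**2, (0.95*att_sigma)**2])

    module = cobConverter.CobConverter(cobConverter.PhaseAngleCorrectionMethod_Lambertian, R_object)
    module.setAttitudeCovariance(covar_att_B)
    module.setRadius(R_object)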
