Replies: 2 comments 1 reply
-
The current code in |
Beta Was this translation helpful? Give feedback.
-
I want to implement such function of wapper in C++: class _FullyFusedProjection(torch.autograd.Function):
"""Projects Gaussians to 2D."""
@staticmethod
def forward(
ctx,
means: Tensor, # [N, 3]
covars: Tensor, # [N, 6] or None
quats: Tensor, # [N, 4] or None
scales: Tensor, # [N, 3] or None
viewmats: Tensor, # [C, 4, 4]
Ks: Tensor, # [C, 3, 3]
width: int,
height: int,
eps2d: float,
near_plane: float,
far_plane: float,
radius_clip: float,
calc_compensations: bool,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
# "covars" and {"quats", "scales"} are mutually exclusive
radii, means2d, depths, conics, compensations = _make_lazy_cuda_func(
"fully_fused_projection_fwd"
)(
means,
covars,
quats,
scales,
viewmats,
Ks,
width,
height,
eps2d,
near_plane,
far_plane,
radius_clip,
calc_compensations,
)
if not calc_compensations:
compensations = None
ctx.save_for_backward(
means, covars, quats, scales, viewmats, Ks, radii, conics, compensations
)
ctx.width = width
ctx.height = height
ctx.eps2d = eps2d
return radii, means2d, depths, conics, compensations
@staticmethod
def backward(ctx, v_radii, v_means2d, v_depths, v_conics, v_compensations):
(
means,
covars,
quats,
scales,
viewmats,
Ks,
radii,
conics,
compensations,
) = ctx.saved_tensors
width = ctx.width
height = ctx.height
eps2d = ctx.eps2d
if v_compensations is not None:
v_compensations = v_compensations.contiguous()
v_means, v_covars, v_quats, v_scales, v_viewmats = _make_lazy_cuda_func(
"fully_fused_projection_bwd"
)(
means,
covars,
quats,
scales,
viewmats,
Ks,
width,
height,
eps2d,
radii,
conics,
compensations,
v_means2d.contiguous(),
v_depths.contiguous(),
v_conics.contiguous(),
v_compensations,
ctx.needs_input_grad[4], # viewmats_requires_grad
)
if not ctx.needs_input_grad[0]:
v_means = None
if not ctx.needs_input_grad[1]:
v_covars = None
if not ctx.needs_input_grad[2]:
v_quats = None
if not ctx.needs_input_grad[3]:
v_scales = None
if not ctx.needs_input_grad[4]:
v_viewmats = None
return (
v_means,
v_covars,
v_quats,
v_scales,
v_viewmats,
None,
None,
None,
None,
None,
None,
None,
None,
) But I notice that variable_list
FullRenderGaussians::forward(AutogradContext *ctx, torch::Tensor means, at::optional<torch::Tensor> covars, torch::Tensor quats,
                             torch::Tensor scales, torch::Tensor viewmats, torch::Tensor Ks, int width, int height,
                             float eps2d, float near_plane, float far_plane, float radius_clip,
                             bool calc_compensations) {
    // Projects Gaussians to 2D: C++ port of _FullyFusedProjection.forward.
    // Returns {radii, means2d, depths, conics, compensations}.
    auto t = fully_fused_projection_fwd_tensor(means,
                                               covars,
                                               quats,
                                               scales,
                                               viewmats,
                                               Ks,
                                               width,
                                               height,
                                               eps2d,
                                               near_plane,
                                               far_plane,
                                               radius_clip,
                                               calc_compensations);
    torch::Tensor radii = std::get<0>(t);
    torch::Tensor means2d = std::get<1>(t);
    torch::Tensor depths = std::get<2>(t);
    torch::Tensor conics = std::get<3>(t);
    torch::Tensor compensations = std::get<4>(t);
    // save_for_backward only accepts tensors, so a 1-element CPU tensor is
    // stored as a sentinel wherever the Python code would save None.
    // backward() must recognise this sentinel when unpacking the saved list.
    if (!calc_compensations)
        compensations = torch::tensor({0});
    torch::Tensor saved_covars = torch::tensor({0});
    if (covars.has_value())
        saved_covars = covars.value();
    // NOTE: saved order is fixed; backward() indexes into it positionally
    // (compensations is slot 8, the last entry).
    ctx->save_for_backward(
        {means, saved_covars, quats, scales, viewmats, Ks, radii, conics, compensations}
    );
    // Non-tensor arguments travel through saved_data (IValue map).
    ctx->saved_data["width"] = width;
    ctx->saved_data["height"] = height;
    ctx->saved_data["eps2d"] = eps2d;
    return {radii, means2d, depths, conics, compensations};
}

also in

tensor_list FullRenderGaussians::backward(AutogradContext *ctx, tensor_list grad_outputs) {
torch::Tensor v_radii = grad_outputs[0];
torch::Tensor v_means2d = grad_outputs[1];
torch::Tensor v_depths = grad_outputs[2];
torch::Tensor v_conics = grad_outputs[3];
at::optional<torch::Tensor> v_compensations;
if (!torch::allclose(grad_outputs[4], torch::tensor({0})))
v_compensations = grad_outputs[4].values();
variable_list saved = ctx->get_saved_variables();
torch::Tensor means = saved[0];
at::optional<torch::Tensor> covars, compensations;
if (!torch::allclose(saved[1], torch::tensor({0})))
covars = saved[1].values();
torch::Tensor quats = saved[2];
torch::Tensor scales = saved[3];
torch::Tensor viewmats = saved[4];
torch::Tensor Ks = saved[5];
torch::Tensor radii = saved[6];
torch::Tensor conics = saved[7];
if (!torch::allclose(saved[8], torch::tensor({0})))
compensations = saved[9].values();
int width = ctx->saved_data["width"].toInt();
int height = ctx->saved_data["height"].toInt();
float eps2d = float(ctx->saved_data["eps2d"].toDouble());
bool viewmats_requires_grad = viewmats.requires_grad();
auto t = fully_fused_projection_bwd_tensor(means,
covars,
quats,
scales,
viewmats,
Ks,
width,
height,
eps2d,
radii,
conics,
compensations,
v_means2d.contiguous(),
v_depths.contiguous(),
v_conics.contiguous(),
v_compensations,
viewmats_requires_grad);
torch::Tensor none, v_means, v_covars, v_quats, v_scales, v_viewmats;
if (means.requires_grad())
v_means = std::get<0>(t);
if (!torch::allclose(saved[1], torch::tensor({0})) && covars.value().requires_grad())
v_covars = std::get<1>(t);
if (quats.requires_grad())
v_quats = std::get<2>(t);
if (scales.requires_grad())
v_scales = std::get<3>(t);
if (viewmats.requires_grad())
v_viewmats = std::get<4>(t);
return {
v_means,
v_covars,
v_quats,
v_scales,
v_viewmats,
none,
none,
none,
none,
none,
none,
none,
none,
};
} I do not know if my understanding of |
Beta Was this translation helpful? Give feedback.
-
Hello, I noticed that the rasterizer module is based on gsplat, and gsplat offers many extra features, including depth rendering (see Rasterization for details). How can I edit the current rasterizer module to enable depth rendering and optimization like the original gsplat does?
Beta Was this translation helpful? Give feedback.
All reactions