
Commit b1a02f1

Fixes for clang17 errors/warnings (#815)
1 parent 4445bc2 commit b1a02f1

9 files changed, +13 -15 lines changed

include/matx/operators/base_operator.h

Lines changed: 1 addition & 1 deletion
@@ -73,7 +73,7 @@ namespace matx
         tp->Exec(ex);
       }
       else if constexpr (is_matx_set_op<T>()) {
-        if constexpr (static_cast<const T *>(this)->IsTransformSet()) {
+        if constexpr (is_matx_transform_op<typename T::op_type>() && is_tensor_view_v<typename T::tensor_type>) {
          tp->TransformExec(tp->Shape(), ex);
        }
        else {
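
Clang 17 rejects the use of `this` in the constant expression an if constexpr condition must be, which is likely what broke the old instance-level check; the fix queries the set op's type instead. A minimal reduction of the pattern, not MatX code (std::is_same_v stands in for the is_matx_transform_op / is_tensor_view_v traits, and all names here are illustrative):

#include <type_traits>

template <typename T>
struct BaseOp {
  void Exec() {
    // clang 17 rejects the old form: `this` cannot appear in the constant
    // expression required by if constexpr inside a non-constexpr function.
    //   if constexpr (static_cast<const T *>(this)->IsTransformSet()) { ... }
    // Querying the derived type's nested types keeps the test compile-time only:
    if constexpr (std::is_same_v<typename T::op_type, int>) {
      // transform path (TransformExec in the real code)
    } else {
      // generic path (Exec in the real code)
    }
  }
};

struct SetOp : BaseOp<SetOp> {
  using op_type = int;  // stand-in for the wrapped transform's op_type
};

int main() {
  SetOp{}.Exec();
  return 0;
}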

include/matx/operators/concat.h

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ namespace matx
     {
       static_assert(RANK > 0, "Cannot concatenate rank-0 tensors");
       static_assert(sizeof...(Ts) > 1, "Must have more than one tensor to concatenate");
-      static_assert((... && (RANK == ts.Rank())), "concatenated ops must have the same rank");
+      static_assert((... && (RANK == Ts::Rank())), "concatenated ops must have the same rank");

       for (int32_t i = 0; i < RANK; i++) {
         if(i == axis_) {
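
A static_assert condition must be a constant expression, and clang 17 refuses to name the constructor parameters ts inside one, even though Rank() is a static constexpr member; routing the call through the pack's types needs no object at all. A minimal reduction under that assumption (OpA/OpB are illustrative, not MatX operators):

// Rank() is a static constexpr member, as on MatX operators.
struct OpA { static constexpr int Rank() { return 2; } };
struct OpB { static constexpr int Rank() { return 2; } };

template <typename... Ts>
void check(const Ts&... /*ts*/) {
  constexpr int RANK = 2;
  // clang 17 rejects the object form used before this commit:
  //   static_assert((... && (RANK == ts.Rank())), "...");
  // Calling through the types compiles everywhere:
  static_assert((... && (RANK == Ts::Rank())), "ops must have the same rank");
}

int main() {
  check(OpA{}, OpB{});
  return 0;
}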

include/matx/operators/isclose.h

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ namespace matx
     __MATX_INLINE__ IsCloseOp(const Op1 &op1, const Op2 &op2, double rtol, double atol) :
       op1_(op1), op2_(op2), rtol_(static_cast<inner_type>(rtol)), atol_(static_cast<inner_type>(atol))
     {
-      static_assert(op1.Rank() == op2.Rank(), "Operator ranks must match in isclose()");
+      static_assert(Op1::Rank() == Op2::Rank(), "Operator ranks must match in isclose()");
       ASSERT_COMPATIBLE_OP_SIZES(op1);
       ASSERT_COMPATIBLE_OP_SIZES(op2);
     }
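
Same root cause as in concat.h: op1.Rank() names a constructor parameter inside a constant expression, so the check moves to the template parameters Op1/Op2. The ASSERT_COMPATIBLE_OP_SIZES calls are untouched, presumably because they check the objects outside of a constant-expression context.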

include/matx/operators/stack.h

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ namespace matx
     __MATX_INLINE__ StackOp(int axis, const Ts&... ts) : ops_(ts...), axis_(axis)
     {
       static_assert(sizeof...(Ts) > 1, "Must have more than one tensor to stack");
-      static_assert((... && (RANK == ts.Rank())), "stacked ops must have the same rank");
+      static_assert((... && (RANK == Ts::Rank())), "stacked ops must have the same rank");

       for (int32_t i = 0; i < RANK; i++) {
         MATX_ASSERT_STR(((ts.Size(i) == pp_get<0>(ts).Size(i)) && ...)
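
Note that the MATX_ASSERT_STR in the surrounding context still reads ts.Size(i) from the parameter objects; that is fine because it is a runtime check, and only the constant expression inside the static_assert had to stop naming ts.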

include/matx/transforms/fft/fft_cuda.h

Lines changed: 1 addition & 1 deletion
@@ -643,7 +643,7 @@ __MATX_INLINE__ auto getCufft1DSupportedTensor( const Op &in, cudaStream_t strea
 template <typename Op>
 __MATX_INLINE__ auto getCufft2DSupportedTensor( const Op &in, cudaStream_t stream) {
   // This would be better as a templated lambda, but we don't have those in C++17 yet
-  const auto support_func = [&in]() {
+  const auto support_func = [&]() {
     if constexpr (is_tensor_view_v<Op>) {
       if ( in.Stride(Op::Rank()-2) != in.Stride(Op::Rank()-1) * in.Size(Op::Rank()-1)) {
         return false;
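
With the explicit [&in] capture, clang 17's -Wunused-lambda-capture fires whenever the if constexpr branch that reads in is discarded, since the capture is then never used; the implicit [&] captures only what the instantiated body actually touches. A minimal reduction of the pattern (std::is_class_v stands in for is_tensor_view_v; the names are illustrative):

#include <type_traits>

template <typename Op>
bool is_supported(const Op &in) {
  // Before: [&in] -- warns (fatal under -Werror) when Op is not a "view",
  // because the only use of `in` sits in the discarded constexpr branch.
  const auto support_func = [&]() {
    if constexpr (std::is_class_v<Op>) {
      return in.contiguous;  // `in` is captured only in this instantiation
    } else {
      return true;           // `in` never used, so nothing is captured
    }
  };
  return support_func();
}

struct View { bool contiguous = true; };

int main() {
  View v;
  is_supported(v);   // class type: lambda captures in
  is_supported(42);  // int: discarded branch, no capture, no warning
  return 0;
}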

include/matx/transforms/fft/fft_fftw.h

Lines changed: 2 additions & 2 deletions
@@ -500,7 +500,7 @@ template<typename OutTensorType, typename InTensorType> class matxFFTWPlan_t {
 template <typename Op>
 __MATX_INLINE__ auto getFFTW1DSupportedTensor(const Op &in) {
   // This would be better as a templated lambda, but we don't have those in C++17 yet
-  const auto support_func = [&in]() {
+  const auto support_func = [&]() {
     if constexpr (is_tensor_view_v<Op>) {
       if constexpr (Op::Rank() >= 2) {
         if (in.Stride(Op::Rank() - 2) != in.Stride(Op::Rank() - 1) * in.Size(Op::Rank() - 1)) {

@@ -527,7 +527,7 @@ template<typename OutTensorType, typename InTensorType> class matxFFTWPlan_t {
 template <typename Op>
 __MATX_INLINE__ auto getFFTW2DSupportedTensor( const Op &in) {
   // This would be better as a templated lambda, but we don't have those in C++17 yet
-  const auto support_func = [&in]() {
+  const auto support_func = [&]() {
     if constexpr (is_tensor_view_v<Op>) {
       if ( in.Stride(Op::Rank()-2) != in.Stride(Op::Rank()-1) * in.Size(Op::Rank()-1)) {
         return false;

include/matx/transforms/matmul/matmul_cblas.h

Lines changed: 1 addition & 1 deletion
@@ -438,7 +438,7 @@ __MATX_INLINE__ void matmul_dispatch(TensorTypeC &c,
 template <typename Op>
 __MATX_INLINE__ auto getCBLASSupportedTensor( const Op &in) {
   // This would be better as a templated lambda, but we don't have those in C++17 yet
-  const auto support_func = [&in]() {
+  const auto support_func = [&]() {
     if constexpr (is_tensor_view_v<Op>) {
       return !(
         (in.Stride(Op::Rank() - 1) != (index_t)1 && in.Stride(Op::Rank() - 2) != (index_t)1) ||

include/matx/transforms/matmul/matmul_cuda.h

Lines changed: 1 addition & 1 deletion
@@ -1102,7 +1102,7 @@ using gemm_cuda_cache_t = std::unordered_map<MatMulCUDAParams_t, std::any, MatM
 template <typename Op>
 __MATX_INLINE__ auto getCublasSupportedTensor( const Op &in, cudaStream_t stream) {
   // This would be better as a templated lambda, but we don't have those in C++17 yet
-  const auto support_func = [&in]() {
+  const auto support_func = [&]() {
     if constexpr (is_tensor_view_v<Op>) {
       return !(
         (in.Stride(Op::Rank()-1) != (index_t)1 && in.Stride(Op::Rank()-2) != (index_t)1) ||
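
The same one-line capture fix repeats across the cuFFT, FFTW, CBLAS, and cuBLAS helpers above: each support_func only reads in inside an if constexpr (is_tensor_view_v<Op>) branch, so the explicit capture is unused for every non-tensor-view instantiation.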

include/matx/transforms/svd/svd_cuda.h

Lines changed: 4 additions & 6 deletions
@@ -551,13 +551,11 @@ static __MATX_INLINE__ SVDMethod GetCUDASVDMethod(const ATensor &a) {
   static constexpr int RANK = ATensor::Rank();
   index_t m = a.Size(RANK - 2);
   index_t n = a.Size(RANK - 1);
-  SVDMethod method;

-  // This assumes the matrix sizes are fairly large, in which case gesvd should win out on speed
-  if (a.Rank() == 2) {
-    method = detail::SVDMethod::GESVD;
-  }
-  else {
+  // gesvd is a good default for non-batched
+  SVDMethod method = detail::SVDMethod::GESVD;
+
+  if (a.Rank() != 2) {
     if (a.Size(RANK-2) <= 32 &&
         a.Size(RANK-1) <= 32) {
       if constexpr (is_tensor_view_v<ATensor>) {
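
Besides reading better, initializing method at its declaration removes any path clang 17 would flag with -Wsometimes-uninitialized when a branch fails to assign it. A minimal sketch of the reshaped selection logic; the 32x32 cutoff comes from the context above, but the batched-Jacobi enumerator and everything else here are assumptions, not the actual MatX code:

enum class SVDMethod { GESVD, GESVDJ_BATCHED };

// Hypothetical stand-in for GetCUDASVDMethod's selection logic.
SVDMethod pick_method(int rank, int m, int n) {
  // Before: SVDMethod method;  // only assigned on some paths -> warning
  SVDMethod method = SVDMethod::GESVD;    // gesvd is a good default for non-batched

  if (rank != 2) {                        // batched input
    if (m <= 32 && n <= 32) {
      method = SVDMethod::GESVDJ_BATCHED; // small matrices favor batched Jacobi
    }
  }
  return method;
}

int main() {
  return pick_method(3, 16, 16) == SVDMethod::GESVDJ_BATCHED ? 0 : 1;
}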
