From e5957ac3d7135eca95b4019eaa394703ad6112a6 Mon Sep 17 00:00:00 2001
From: Tung Le Duc
Date: Tue, 24 Dec 2019 16:02:10 +0900
Subject: [PATCH] [mlir] Fix the wrong computation of dynamic strides for
 lowering AllocOp to LLVM

Leftover change from before the MLIR merge, reviewed and accepted at
https://github.com/tensorflow/mlir/pull/338.
---
 .../Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp | 9 +++++----
 .../Conversion/StandardToLLVM/convert-memref-ops.mlir   | 6 +++---
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
index 0c96cc5e9c7d..c456b00987d7 100644
--- a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
@@ -1054,14 +1054,15 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
     // Iterate strides in reverse order, compute runningStride and strideValues.
     auto nStrides = strides.size();
     SmallVector strideValues(nStrides, nullptr);
-    for (auto indexedStride : llvm::enumerate(llvm::reverse(strides))) {
-      int64_t index = nStrides - 1 - indexedStride.index();
+    for (unsigned i = 0; i < nStrides; ++i) {
+      int64_t index = nStrides - 1 - i;
       if (strides[index] == MemRefType::getDynamicStrideOrOffset())
         // Identity layout map is enforced in the match function, so we compute:
-        //   `runningStride *= sizes[index]`
+        //   `runningStride *= sizes[index + 1]`
         runningStride =
             runningStride
-                ? rewriter.create<LLVM::MulOp>(loc, runningStride, sizes[index])
+                ? rewriter.create<LLVM::MulOp>(loc, runningStride,
+                                               sizes[index + 1])
                 : createIndexConstant(rewriter, loc, 1);
       else
         runningStride = createIndexConstant(rewriter, loc, strides[index]);
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir
index d92ded7f3aa9..706e2de2691f 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir
@@ -101,8 +101,8 @@ func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
 // CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
 // CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
 // CHECK-NEXT: %[[st2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[st1:.*]] = llvm.mul %{{.*}}, %[[c42]] : !llvm.i64
-// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[M]] : !llvm.i64
+// CHECK-NEXT: %[[st1:.*]] = llvm.mul %{{.*}}, %[[N]] : !llvm.i64
+// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[c42]] : !llvm.i64
 // CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
 // CHECK-NEXT: llvm.insertvalue %[[st0]], %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
 // CHECK-NEXT: llvm.insertvalue %[[c42]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
@@ -142,7 +142,7 @@ func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 // CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
 // CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 // CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
-// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[M]] : !llvm.i64
+// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[N]] : !llvm.i64
 // CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 // CHECK-NEXT: llvm.insertvalue %[[st0]], %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
 // CHECK-NEXT: llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
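
For reference, the corrected recurrence makes stride[index] the product of the sizes of all dimensions to the right of dimension index, which is why the multiplication now uses sizes[index + 1]. Below is a minimal standalone C++ sketch of that reverse iteration; it is not part of the patch, and the concrete shape {5, 42, 7} (standing in for memref<?x42x?xf32> with M = 5 and N = 7) is made up for illustration.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // Hypothetical sizes for memref<?x42x?xf32> with M = 5 and N = 7.
      std::vector<int64_t> sizes = {5, 42, 7};
      std::vector<int64_t> strides(sizes.size());

      // Same reverse-order recurrence as the patched lowering: the innermost
      // stride is 1, and each outer stride multiplies the running product by
      // the size of the dimension to its right, i.e. sizes[index + 1].
      int64_t runningStride = 0; // 0 stands in for the "not yet set" value
      for (std::size_t i = 0, n = sizes.size(); i < n; ++i) {
        std::size_t index = n - 1 - i;
        // The false branch is taken on the first iteration, so the
        // out-of-range sizes[n] is never read.
        runningStride =
            runningStride ? runningStride * sizes[index + 1] : int64_t(1);
        strides[index] = runningStride;
      }

      // Prints "294 7 1": st0 = 42 * N, st1 = N, st2 = 1, matching the
      // updated CHECK lines for @mixed_alloc.
      for (int64_t s : strides)
        std::cout << s << " ";
      std::cout << "\n";
    }

With the pre-patch recurrence (runningStride *= sizes[index]) the same loop would print "210 42 1", i.e. each stride would be built from the sizes of the dimensions themselves rather than the dimensions to their right, which is exactly what the old, incorrect CHECK lines (st1 multiplied by c42, st0 by M) reflected.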