Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
c859a16
[Fence] Add fence options for barrier_blocks
tzj-fxz Jan 30, 2026
cd4e509
[Feature] Add remote atomic-add and more scopes/semantics for wait op
tzj-fxz Feb 4, 2026
e37fea4
[Misc] Remove unused code
tzj-fxz Feb 4, 2026
12a98d0
[Example] Remove redundant buffer
tzj-fxz Feb 4, 2026
231dad1
[Example] Remove direction-related buffer
tzj-fxz Feb 4, 2026
b496a54
[Refactor] Unified scope and semantic representation in tilescale lan…
tzj-fxz Feb 4, 2026
6f072ab
[Misc] Add fence options
tzj-fxz Feb 4, 2026
268f54a
[BugFix] Intermediate buffer for each path
tzj-fxz Feb 4, 2026
e8d036a
[Lint] Block_M for alltoall
tzj-fxz Feb 4, 2026
fc98be0
[Lint]
tzj-fxz Feb 4, 2026
8404728
[Lint]
tzj-fxz Feb 4, 2026
7b00d85
[BugFix] Add fence for inner CTA memory op
tzj-fxz Feb 5, 2026
067511c
[BugFix] Fence and debug
tzj-fxz Feb 5, 2026
31a4643
[Lint]
tzj-fxz Feb 5, 2026
67065ab
[BugFix] Restore the signal to avoid duplicated sum of finish barrier
tzj-fxz Feb 5, 2026
e2d8ee3
[Lint]
tzj-fxz Feb 5, 2026
b00bdd8
[BugFix] Warp-level scheduling with active blocks and correct synchro…
tzj-fxz Feb 6, 2026
2822ced
[Example] Add benchmark options
tzj-fxz Feb 6, 2026
81af526
[Enhancement] Fully utilize blocks to send/recv data
tzj-fxz Feb 6, 2026
5f584e5
[Routing] Optimize for balanced routing direction
tzj-fxz Feb 8, 2026
1d4bbd3
[BugFix] Reinitialize the signal before benchmark
tzj-fxz Feb 9, 2026
b065199
[Feature] Add return value of wait op
tzj-fxz Feb 9, 2026
3845d36
[Routing] New version of routing
tzj-fxz Feb 10, 2026
6f570f4
[BugFix] Transfer source index before put data
tzj-fxz Feb 10, 2026
5abf3f1
[BugFix] Interface for benchmark
tzj-fxz Feb 10, 2026
a9dae4c
[Misc] Remove log
tzj-fxz Feb 10, 2026
0d1dc1c
[BugFix] Warp level communication with robust per-slot signal
tzj-fxz Feb 14, 2026
baf1fc4
[Routing] AOT routing and signal slot assignment
tzj-fxz Feb 14, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion examples/distributed/example_allgather_gemm_overlapped.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def main(
tid = T.get_thread_binding(0)
T.clear(C_local)
if tid == 0:
T.wait_eq(signal_buffer[pid_m * block_M // M_per_rank], 1)
T.wait_eq(signal_buffer[pid_m * block_M // M_per_rank], 1, dtype="uint32")
for k in T.Pipelined(T.ceildiv(K, block_K), num_stages=3):
T.copy(A[pid_m * block_M, k * block_K], A_shared)
T.copy(B[k * block_K, pid_n * block_N], B_shared)
Expand Down
114 changes: 114 additions & 0 deletions examples/distributed/intranode/example_alltoall.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
import tilelang
import tilelang.language as T
from tilelang.distributed import init_dist
import torch
import torch.distributed as dist
import argparse


def alltoall(PE_num, M, N, block_M, block_N, threads):
    """Build a tilelang intra-node all-to-all kernel.

    Each thread block pushes one (block_M, block_N) tile of this rank's data
    to its destination PE with ``T.put_block``. Tiling along N is not
    supported, hence the ``block_N == N`` assertion.

    Args:
        PE_num: number of participating PEs (also the grid's x dimension).
        M, N: per-PE chunk shape; full tensors are (PE_num * M, N).
        block_M, block_N: tile shape; block_N must equal N.
        threads: threads per block.

    Returns:
        The compiled-ready ``T.prim_func`` kernel.
    """
    # Tiled copy along N is not supported yet; require one tile per row chunk.
    assert block_N == N

    @T.prim_func
    def main(
            src: T.Tensor((PE_num * M, N), "float16"),
            dst: T.Tensor((PE_num * M, N), "float16"),
            # NOTE(review): `barrier` is accepted but never referenced in the
            # kernel body, while callers still pass it. Confirm whether an
            # inter-rank barrier (e.g. T.barrier_blocks) is intended after the
            # put, or whether the parameter should be dropped from both the
            # signature and all call sites.
            barrier: T.Tensor((PE_num), "int32"),
    ):
        # Currently not support tiled copy
        with T.Kernel(
                PE_num, T.ceildiv(M, block_M), T.ceildiv(N, block_N),
                threads=threads) as (bx, by, bz):  # bz unused: block_N == N forces grid z == 1
            rank = T.alloc_local([1], "int32")
            num_ranks = T.alloc_local([1], "int32")

            # Grid x-index selects the destination PE; (dst_rank, by) together
            # address one block_M x block_N tile of the exchange.
            dst_rank = bx
            rank[0] = T.get_rank()
            num_ranks[0] = T.get_num_ranks()

            # Push this rank's slice destined for `dst_rank` into the remote
            # dst buffer at the row range owned by the sending rank.
            T.put_block(
                src=T.address_of(src[dst_rank * M + by * block_M, 0]),
                dst=T.address_of(dst[rank[0] * M + by * block_M, 0]),
                size=block_M * block_N,
                dst_pe=dst_rank,
            )
            # Release-semantics system fence: presumably orders the put before
            # later observers, but it does NOT synchronize ranks by itself —
            # completion is awaited host-side (torch.cuda.synchronize +
            # dist.barrier in the caller).
            T.fence_sys(sem=T.MemorySemantic.RELEASE)

    return main
Comment on lines +12 to +37
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Unused barrier parameter in kernel signature.

The barrier parameter is declared in the kernel signature but never used within the kernel body. This could indicate either:

  1. A missing synchronization step that should use this barrier
  2. An unnecessary parameter that should be removed

Given that this is an all-to-all operation, a barrier or other synchronization mechanism is typically needed to ensure all ranks have completed their transfers before the kernel returns. Currently, only T.fence_sys is called, which provides memory ordering but not inter-rank synchronization.

💡 Suggested fix: Either use the barrier or remove it

Option 1 - Add barrier synchronization:

             T.put_block(
                 src=T.address_of(src[dst_rank * M + by * block_M, 0]),
                 dst=T.address_of(dst[rank[0] * M + by * block_M, 0]),
                 size=block_M * block_N,
                 dst_pe=dst_rank,
             )
             T.fence_sys(sem=T.MemorySemantic.RELEASE)
+            T.barrier_blocks(barrier)

     return main

Option 2 - Remove unused parameter:

     `@T.prim_func`
     def main(
             src: T.Tensor((PE_num * M, N), "float16"),
             dst: T.Tensor((PE_num * M, N), "float16"),
-            barrier: T.Tensor((PE_num), "int32"),
     ):
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
@T.prim_func
def main(
src: T.Tensor((PE_num * M, N), "float16"),
dst: T.Tensor((PE_num * M, N), "float16"),
barrier: T.Tensor((PE_num), "int32"),
):
# Currently not support tiled copy
with T.Kernel(
PE_num, T.ceildiv(M, block_M), T.ceildiv(N, block_N),
threads=threads) as (bx, by, bz):
rank = T.alloc_local([1], "int32")
num_ranks = T.alloc_local([1], "int32")
dst_rank = bx
rank[0] = T.get_rank()
num_ranks[0] = T.get_num_ranks()
T.put_block(
src=T.address_of(src[dst_rank * M + by * block_M, 0]),
dst=T.address_of(dst[rank[0] * M + by * block_M, 0]),
size=block_M * block_N,
dst_pe=dst_rank,
)
T.fence_sys(sem=T.MemorySemantic.RELEASE)
return main
`@T.prim_func`
def main(
src: T.Tensor((PE_num * M, N), "float16"),
dst: T.Tensor((PE_num * M, N), "float16"),
):
# Currently not support tiled copy
with T.Kernel(
PE_num, T.ceildiv(M, block_M), T.ceildiv(N, block_N),
threads=threads) as (bx, by, bz):
rank = T.alloc_local([1], "int32")
num_ranks = T.alloc_local([1], "int32")
dst_rank = bx
rank[0] = T.get_rank()
num_ranks[0] = T.get_num_ranks()
T.put_block(
src=T.address_of(src[dst_rank * M + by * block_M, 0]),
dst=T.address_of(dst[rank[0] * M + by * block_M, 0]),
size=block_M * block_N,
dst_pe=dst_rank,
)
T.fence_sys(sem=T.MemorySemantic.RELEASE)
return main
🧰 Tools
🪛 Ruff (0.14.14)

[warning] 16-16: Unused function argument: barrier

(ARG001)


[warning] 21-21: Unpacked variable bz is never used

Prefix it with an underscore or any other dummy variable pattern

(RUF059)

🤖 Prompt for AI Agents
In `@examples/distributed/intranode/example_alltoall.py` around lines 12 - 37, The
kernel declares a barrier parameter that is never used in main; either remove it
from the kernel signature (and corresponding callers) or implement inter-rank
synchronization using that barrier instead of relying solely on T.fence_sys. If
you choose to use the barrier, after T.put_block and T.fence_sys update the
barrier tensor to signal this rank’s completion and then wait/spin until all
PE_num entries are set (i.e., implement a full all-ranks barrier using the
barrier buffer), referencing the symbols main, barrier, T.put_block and
T.fence_sys; if you choose removal, delete the barrier parameter from main and
all call sites.



def run_alltoall(local_rank, num_ranks, args):
    """Per-process entry point: compile the all-to-all kernel, benchmark it,
    and verify the result against ``torch.distributed.all_to_all_single``.

    Args:
        local_rank: process index supplied by ``torch.multiprocessing.spawn``.
        num_ranks: total number of processes (the caller passes args.PE_num).
        args: parsed CLI namespace (PE_num, M, N, warmup, iter).
    """
    PE_num = args.PE_num
    M = args.M
    N = args.N
    block_M = 32
    block_N = N  # kernel requires block_N == N (no tiling along N)
    threads = 256

    # init_dist rebinds (local_rank, num_ranks) and yields the process group.
    local_rank, num_ranks, group_size = init_dist(local_rank, num_ranks)
    allocator = tilelang.get_allocator(
        size=2**34,  # 16 GiB allocation pool — presumably sized for the largest case; confirm
        device="cuda",
        is_distributed=True,
        local_rank=local_rank,
        num_local_ranks=num_ranks,
        group=group_size,
    )
    kernel = tilelang.compile(alltoall(PE_num, M, N, block_M, block_N, threads))
    kernel.initialize(allocator=allocator)
    # Symmetric tensors: src is random input, dst the receive buffer, barrier
    # a per-PE signal buffer (currently unused inside the kernel).
    src = tilelang.tensor((PE_num * M, N), torch.float16, allocator=allocator).random_()
    dst = tilelang.tensor((PE_num * M, N), torch.float16, allocator=allocator).zero_()
    barrier = tilelang.tensor((PE_num), torch.int32, allocator=allocator).zero_()

    torch.cuda.synchronize()
    dist.barrier(group_size)

    # Warmup
    for _ in range(args.warmup):
        kernel(src, dst, barrier)
    # Reset the output after warmup so verification only sees benchmark runs.
    # NOTE(review): reconstructed as post-loop from the flattened diff — confirm.
    dst.zero_()
    torch.cuda.synchronize()
    dist.barrier(group_size)

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(args.iter):
        kernel(src, dst, barrier)
    torch.cuda.synchronize()
    dist.barrier(group_size)
    # end is recorded after the device sync + inter-rank barrier, so the
    # measured time includes completion across all ranks, not just local launch.
    end.record()
    torch.cuda.synchronize()
    dist.barrier(group_size)
    elapsed_time = start.elapsed_time(end) / args.iter
    # Bandwidth: PE_num * M * N fp16 elements moved, 2 bytes each.
    print(
        f"Rank {local_rank} Average Kernel execution time: {elapsed_time:.3f} ms, Bandwidth: {2 * PE_num * M * N / (elapsed_time * 1e6):.3f} GB/s"
    )

    # Torch Reference
    torch.cuda.synchronize()
    dst_ref = torch.zeros((PE_num * M, N), dtype=torch.float16, device="cuda")
    dist.all_to_all_single(dst_ref, src, group=group_size)
    torch.cuda.synchronize()

    if torch.allclose(dst, dst_ref, atol=1e-2, rtol=1e-2):
        print(f"Rank {local_rank} Verification Passed! ✅")
    else:
        max_diff = (dst - dst_ref).abs().max()
        print(f"Rank {local_rank} Verification Failed! ❌ Max diff: {max_diff}")
        print(f"dst: {dst}")
        print(f"dst_ref: {dst_ref}")

    dist.destroy_process_group()


if __name__ == "__main__":
    # CLI entry: parse benchmark settings, then launch one process per PE.
    cli = argparse.ArgumentParser()
    for flag, options in (
        ("--PE_num", {"type": int, "default": 8}),
        ("--M", {"type": int, "default": 8192}),
        ("--N", {"type": int, "default": 7168}),
        ("--warmup", {"type": int, "default": 5, "help": "Number of warmup iterations"}),
        ("--iter", {"type": int, "default": 10, "help": "Number of benchmark iterations"}),
    ):
        cli.add_argument(flag, **options)

    args = cli.parse_args()
    # spawn passes the process index as the first positional argument
    # (local_rank); (args.PE_num, args) fill run_alltoall's remaining params.
    torch.multiprocessing.spawn(run_alltoall, args=(args.PE_num, args), nprocs=args.PE_num)
Loading