Add mini weight kernel #160

Draft: wants to merge 9 commits into base: develop
config/igemm_fwd_gtc_gfx1030_nchwc_fp16x8_fsr.config (83 additions, 0 deletions)
@@ -0,0 +1,83 @@
[codegen]
arch = 'gfx1030'
code_object = 'cov3'
mode = 'flat'

#########################################################################################
#--------------------------- 16x512x256
[igemm_fwd_gtc]
gemm_m_per_block = 16
gemm_n_per_block = 512
gemm_k_per_block = 256
lanegroup_tile_m = 8
lanegroup_wave_m = 1
lanegroup_repeat_m = 2
lanegroup_tile_n = 8
lanegroup_wave_n = 8
lanegroup_repeat_n = 1
tensor_a_thread_lengths = [1, 1, 1, 8] # 1xCEx1xK/Vec-c
tensor_a_cluster_lengths = [1,32, 1, 16] # 1xCEx1xK
tensor_b_thread_lengths = [1, 1, 1, 8] # 1xCExNB0xVec-c
tensor_b_cluster_lengths = [1, 1, 1,512] # 1xCEx1xNB1
direction = "fwd"
precision = "fp16"
tensor_layout = 'nchwc_cyxkc'
nxb = 0
nxe = 1
wavefront_size = 64
cumode = 0
vector_c = 8
mini_weights = 1
tensor_b_pass_through = 1

#--------------------------- 16x256x128
[igemm_fwd_gtc]
gemm_m_per_block = 16
gemm_n_per_block = 256
gemm_k_per_block = 128
lanegroup_tile_m = 8
lanegroup_wave_m = 1
lanegroup_repeat_m = 2
lanegroup_tile_n = 8
lanegroup_wave_n = 8
lanegroup_repeat_n = 1
tensor_a_thread_lengths = [1, 1, 1, 8] # 1xCEx1xK/Vec-c
tensor_a_cluster_lengths = [1,16, 1, 16] # 1xCEx1xK
tensor_b_thread_lengths = [1, 1, 1, 8] # 1xCExNB0xVec-c
tensor_b_cluster_lengths = [1, 1, 1,256] # 1xCEx1xNB1
direction = "fwd"
precision = "fp16"
tensor_layout = 'nchwc_cyxkc'
nxb = 0
nxe = 1
wavefront_size = 32
cumode = 0
vector_c = 8
mini_weights = 1
tensor_b_pass_through = 1

#--------------------------- 16x64x32
[igemm_fwd_gtc]
gemm_m_per_block = 16
gemm_n_per_block = 64
gemm_k_per_block = 32
lanegroup_tile_m = 8
lanegroup_wave_m = 1
lanegroup_repeat_m = 2
lanegroup_tile_n = 8
lanegroup_wave_n = 8
lanegroup_repeat_n = 1
tensor_a_thread_lengths = [1, 1, 1, 8] # 1xCEx1xK/Vec-c
tensor_a_cluster_lengths = [1, 4, 1, 16] # 1xCEx1xK
tensor_b_thread_lengths = [1, 1, 1, 8] # 1xCExNB0xVec-c
tensor_b_cluster_lengths = [1, 1, 1, 64] # 1xCEx1xNB1
direction = "fwd"
precision = "fp16"
tensor_layout = 'nchwc_cyxkc'
nxb = 0
nxe = 1
wavefront_size = 64
cumode = 0
vector_c = 8
mini_weights = 1
tensor_b_pass_through = 1
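
The three tile sections above follow a consistent pattern: the last entry of each thread-length list equals vector_c, the CE cluster length of tensor A times vector_c covers gemm_k_per_block, the NB1 cluster length of tensor B equals gemm_n_per_block, and the lanegroup m tiling (tile x wave x repeat) covers gemm_m_per_block. The sketch below is a hypothetical sanity check of those relationships for the first section; the check_tile_section helper and the exact set of invariants are inferred from this file, not taken from the generator.

```python
# Hypothetical sanity check for one [igemm_fwd_gtc] section of this config.
# The invariants are inferred from the three sections above, not from the
# generator itself.

def check_tile_section(s):
    # gemm_m_per_block is covered by the lanegroup m tiling.
    assert (s["lanegroup_tile_m"] * s["lanegroup_wave_m"] * s["lanegroup_repeat_m"]
            == s["gemm_m_per_block"])
    # The CE cluster length of tensor A times vector_c covers gemm_k_per_block.
    assert s["tensor_a_cluster_lengths"][1] * s["vector_c"] == s["gemm_k_per_block"]
    # Tensor B clusters one lane per output point along NB1.
    assert s["tensor_b_cluster_lengths"][3] == s["gemm_n_per_block"]
    # Each global load moves one vector_c-wide chunk per thread.
    assert s["tensor_a_thread_lengths"][3] == s["vector_c"]
    assert s["tensor_b_thread_lengths"][3] == s["vector_c"]

# First section (16x512x256) of this file, written out as a dict.
check_tile_section({
    "gemm_m_per_block": 16, "gemm_n_per_block": 512, "gemm_k_per_block": 256,
    "lanegroup_tile_m": 8, "lanegroup_wave_m": 1, "lanegroup_repeat_m": 2,
    "tensor_a_thread_lengths": [1, 1, 1, 8], "tensor_a_cluster_lengths": [1, 32, 1, 16],
    "tensor_b_thread_lengths": [1, 1, 1, 8], "tensor_b_cluster_lengths": [1, 1, 1, 512],
    "vector_c": 8,
})
```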
python/igemm/igemm_base.py (11 additions, 1 deletion)
@@ -235,6 +235,10 @@ def __init__(self, tunable_dict):
self.vector_c = utility_dict_with_default_t(tunable_dict)('vector_c', 1)
self.wavefront_size = utility_dict_with_default_t(tunable_dict)('wavefront_size', 64)
self.cumode = utility_dict_with_default_t(tunable_dict)('cumode', 0)

self.mini_weights = utility_dict_with_default_t(tunable_dict)('mini_weights', 0)
if self.mini_weights == 1:
self.tensor_b_pass_through = 1

assert type(self.tensor_a_thread_lengths) is list and type(self.tensor_a_cluster_lengths) is list
assert type(self.tensor_b_thread_lengths) is list and type(self.tensor_b_cluster_lengths) is list
@@ -383,7 +387,8 @@ def _unmerge_x1_from_e(unroll_k, nxe):
gemm_msg = f"gemm_m_per_block:{self.gemm_m_per_block} - {self.wave_tile_m}x{self.wave_step_m}x{self.wave_repeat_m}, gemm_n_per_block:{self.gemm_n_per_block} - {self.wave_tile_n}x{self.wave_step_n}x{self.wave_repeat_n}, gemm_k_per_block:{self.gemm_k_per_block}"

assert self.num_global_load_a * self.block_size == self.gemm_m_per_block * self.gemm_k_per_block, gemm_msg
assert self.num_global_load_b * self.block_size == self.gemm_n_per_block * self.gemm_k_per_block, gemm_msg
if self.mini_weights != 1:
assert self.num_global_load_b * self.block_size == self.gemm_n_per_block * self.gemm_k_per_block, gemm_msg

# LDS size
self.lds_pad_m, self.lds_pad_n = self.get_lds_pad() # LDS pad
@@ -409,6 +414,11 @@ def _unmerge_x1_from_e(unroll_k, nxe):
self.lds_total = self.lds_buffer_num * self.lds_single
# print(f"lds_a:{self.lds_a}, lds_b:{self.lds_b}, lds_a_np2:{self.lds_a_np2}, lds_b_np2:{self.lds_b_np2}, lds_single:{self.lds_single}, lds_total:{self.lds_total}")
# TODO: LDS size check

if self.mini_weights == 1:
self.lds_single = 8 * 1024
self.lds_total = 8 * 1024
self.lds_buffer_num = 1

# some parameter not in modular_conv
if self.fma_type in (IGEMM_GTC_TUNABLE_FMA_TYPE_MAC, IGEMM_GTC_TUNABLE_FMA_TYPE_DLOPS):
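
Taken together, the igemm_base.py changes make mini_weights imply tensor_b_pass_through, skip the tensor B global-load assertion, and pin LDS to a single 8 KiB buffer. Below is a minimal standalone sketch of that combined behavior applied to a plain tunable dict; the apply_mini_weights name and the dict layout are assumptions for illustration, not part of the generator.

```python
# Minimal sketch of the mini_weights handling added in this PR, applied to a
# plain dict of tunables. Helper name and dict layout are assumptions.

def apply_mini_weights(tunable):
    if tunable.get('mini_weights', 0) == 1:
        # mini_weights implies tensor B bypasses LDS entirely.
        tunable['tensor_b_pass_through'] = 1
        # LDS is pinned to a single 8 KiB buffer instead of the derived size.
        tunable['lds_single'] = 8 * 1024
        tunable['lds_total'] = 8 * 1024
        tunable['lds_buffer_num'] = 1
    return tunable

# Example: a tunable dict with mini_weights enabled.
t = apply_mini_weights({'mini_weights': 1})
assert t['tensor_b_pass_through'] == 1 and t['lds_total'] == 8 * 1024
```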