Mojo struct
VBuffer
struct VBuffer[mut: Bool, dtype: DType, layout: Layout, address_space: AddressSpace, alignment: Int, origin: Origin[mut], masked: Bool, //, mma_shape: IndexList[3], k_group_size: Int, BN: Int, BK: Int, depth: Int, num_threads: Int]
Fields

- load_tile (Self.LoadTileType)
- mma_tile (Self.MMATileType)
- smem_iter (Self.SharedIterType)
- global_iterator (Self.GlobalTiledIteratorType)
- global_base_tile (Self.GlobalTensorType)
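Read from the declared address spaces, the fields span the GPU memory hierarchy: load_tile and mma_tile are thread-private register tiles (AddressSpace(5)), smem_iter is a circular iterator over a shared-memory tile (AddressSpace(3)), and global_iterator and global_base_tile view the source tensor in its original address space.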
Implemented traits
AnyType, UnknownDestructibility
Aliases
__del__is_trivial
alias __del__is_trivial = True
base_layout
alias base_layout = Layout.row_major(Self.pad[depth](), Self.simd_width)
depth_tile_size
alias depth_tile_size = min(depth, 128)
GlobalTensorType
alias GlobalTensorType = LayoutTensor[dtype, layout, origin, address_space=address_space, masked=masked, alignment=alignment]
GlobalTiledIteratorType
alias GlobalTiledIteratorType = LayoutTensorIter[dtype, Self.GlobalTensorType._compute_tile_layout[BK, depth]()[0], origin, address_space=address_space, axis=OptionalReg[Int](0), layout_int_type=_get_layout_type(layout, address_space), linear_idx_type=_get_index_type(layout, address_space), masked=masked if masked else _tile_is_masked[layout, BK, depth]()]
load_width
alias load_width = 4 if (depth == 64) else simd_width_of[dtype]()
loads_per_thread_per_depth_tile
alias loads_per_thread_per_depth_tile = (Self.depth_tile_size * BK) // (Self.load_width * num_threads)
LoadTileType
alias LoadTileType = LayoutTensor[dtype, Layout.row_major(Self.loads_per_thread_per_depth_tile * (depth // Self.depth_tile_size), Self.load_width), MutableAnyOrigin, address_space=AddressSpace(5)]
MMA_K
alias MMA_K = mma_shape[2]
MMA_M
alias MMA_M = mma_shape[0]
MMATileType
alias MMATileType = LayoutTensor[dtype, Layout.row_major(Self.num_k_tiles * Self.num_depth_tiles, Self.simd_width), MutableAnyOrigin, address_space=AddressSpace(5)]
num_depth_tiles
alias num_depth_tiles = depth // Self.MMA_M
num_k_tiles
alias num_k_tiles = ceildiv(BK, Self.MMA_K * k_group_size)
num_repeats
alias num_repeats = BK // Self.simd_width
SharedIterType
alias SharedIterType = LayoutTensorIter[dtype, Self.smem_layout, MutableAnyOrigin, address_space=AddressSpace(3), circular=True]
SharedTileType
alias SharedTileType = LayoutTensor[dtype, Self.smem_layout, MutableAnyOrigin, address_space=AddressSpace(3), layout_int_type=_get_index_type(AddressSpace(3)), linear_idx_type=_get_index_type(AddressSpace(3))]
simd_width
alias simd_width = simd_width_of[dtype]()
smem_layout
alias smem_layout = blocked_product(Self.base_layout, Self.tiler_layout, True)
tiler_layout
alias tiler_layout = Layout.row_major(1, Self.num_repeats)
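As a quick sanity check on the derived sizes, the sketch below evaluates a few of the aliases for one hypothetical configuration. The values depth=64, BK=32, num_threads=256 and the assumption that simd_width_of[dtype]() is 8 are illustrative only, not defaults of this struct:

```mojo
fn main():
    # Hypothetical configuration, chosen only to make the arithmetic concrete.
    alias depth = 64
    alias BK = 32
    alias num_threads = 256
    alias simd_width = 8  # assumed result of simd_width_of[dtype]()

    alias depth_tile_size = min(depth, 128)  # 64
    alias load_width = 4 if depth == 64 else simd_width  # 4 (depth == 64 special case)
    alias loads_per_thread_per_depth_tile = (depth_tile_size * BK) // (load_width * num_threads)  # (64 * 32) // (4 * 256) = 2
    alias num_repeats = BK // simd_width  # 32 // 8 = 4

    print(depth_tile_size, load_width, loads_per_thread_per_depth_tile, num_repeats)
```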
Methods
__init__
__init__(out self, global_tile: LayoutTensor[dtype, layout, origin, address_space=address_space, masked=masked, alignment=alignment], shared_ptr: UnsafePointer[Scalar[dtype], address_space=AddressSpace(3), alignment=alignment, mut=mut, origin=origin])
pad
load_from_dram
load_from_dram(mut self)
get_mma_tile
get_mma_tile(self) -> Self.MMATileType
Returns:
copy_to_shared
copy_to_shared(self)
load_from_shared
load_from_shared(self)
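Taken together, the methods describe a staged load path: global memory into per-thread registers (load_from_dram), registers into the circular shared-memory tile (copy_to_shared), and shared memory into the MMA register tile (load_from_shared / get_mma_tile). The fragment below is one plausible way to drive that path from inside a kernel's K-loop; the constructor arguments, the surrounding names (v_global_tile, v_smem_ptr, num_k_iters), and the barrier() placement are assumptions for illustration, not part of this reference.

```mojo
# Hypothetical driver loop. Only the VBuffer method names come from this page;
# every other identifier is assumed kernel context.
var v = VBuffer[
    mma_shape=mma_shape,
    k_group_size=k_group_size,
    BN=BN,
    BK=BK,
    depth=depth,
    num_threads=num_threads,
](v_global_tile, v_smem_ptr)

for _ in range(num_k_iters):
    v.load_from_dram()      # global memory -> load_tile registers
    v.copy_to_shared()      # load_tile registers -> shared-memory tile
    barrier()               # assumed: publish the shared tile to the block
    v.load_from_shared()    # shared memory -> mma_tile registers
    var v_mma = v.get_mma_tile()
    # ... feed v_mma into the MMA accumulation for this K step ...
    barrier()               # assumed: guard the shared tile before it is reused
```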