
Mojo struct

VBuffer

struct VBuffer[mut: Bool, dtype: DType, layout: Layout, address_space: AddressSpace, alignment: Int, origin: Origin[mut], masked: Bool, //, mma_shape: IndexList[3], k_group_size: Int, BN: Int, BK: Int, depth: Int, num_threads: Int]
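Based on its fields and methods (a register-resident `load_tile`, a circular shared-memory iterator `smem_iter`, an MMA-fragment `mma_tile`, and the `load_from_dram` / `copy_to_shared` / `load_from_shared` pipeline), `VBuffer` appears to stage one `BK` x `depth` tile of what its name suggests is the V (value) operand for an MMA-based GPU kernel: each of the `num_threads` threads loads `load_width`-element vectors from global memory into registers, the block writes them to shared memory using the padded `smem_layout`, and threads then read back fragments sized by `mma_shape` for the tensor-core instructions. This summary is inferred from the API surface on this page rather than taken from a source description.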

Fields

Field types are written using the type aliases defined in the Aliases section below.

  • load_tile (Self.LoadTileType)
  • mma_tile (Self.MMATileType)
  • smem_iter (Self.SharedIterType)
  • global_iterator (Self.GlobalTiledIteratorType)
  • global_base_tile (Self.GlobalTensorType)

Implemented traits

AnyType, UnknownDestructibility

Aliases

__del__is_trivial

alias __del__is_trivial = ... (compiler-generated; the full expression is a nested conditional over the __del__is_trivial values of LoadTileType, MMATileType, SharedIterType, GlobalTiledIteratorType, and GlobalTensorType)

base_layout

alias base_layout = Layout.row_major(Self.pad[depth](), Self.simd_width)

depth_tile_size

alias depth_tile_size = min(depth, 128)

GlobalTensorType

alias GlobalTensorType = LayoutTensor[dtype, layout, origin, address_space=address_space, masked=masked, alignment=alignment]

GlobalTiledIteratorType

alias GlobalTiledIteratorType = LayoutTensorIter[dtype, LayoutTensor._compute_tile_layout[mut, dtype, layout, origin, address_space, Layout(IntTuple(1), IntTuple(1)), _get_layout_type(layout, address_space), _get_index_type(layout, address_space), masked, alignment, BK, depth]()[0], origin, address_space=address_space, axis=OptionalReg[Int](0), layout_int_type=_get_layout_type(layout, address_space), linear_idx_type=_get_index_type(layout, address_space), masked=masked if masked else _tile_is_masked[layout, BK, depth]()]

load_width

alias load_width = 4 if (depth == 64) else simd_width_of[dtype]()

loads_per_thread_per_depth_tile

alias loads_per_thread_per_depth_tile = ((Self.depth_tile_size * BK) // (Self.load_width * num_threads))
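For intuition, the following standalone snippet evaluates `depth_tile_size`, `load_width`, and `loads_per_thread_per_depth_tile` for one illustrative parameter set; the concrete values (a SIMD width of 8, `depth = 128`, `BK = 32`, `num_threads = 256`) are assumptions chosen for the example, not defaults of this struct.

```mojo
fn main():
    # Illustrative values only; these are assumptions for the example,
    # not defaults taken from the kernel library.
    var simd_width = 8  # stands in for simd_width_of[dtype]() on the target
    var depth = 128
    var BK = 32
    var num_threads = 256

    var depth_tile_size = min(depth, 128)              # 128
    var load_width = 4 if depth == 64 else simd_width  # 8, since depth != 64
    # Elements in one depth tile divided by the elements the whole thread
    # block moves per pass (each thread moves load_width elements).
    var loads_per_thread = (depth_tile_size * BK) // (load_width * num_threads)
    print(depth_tile_size, load_width, loads_per_thread)  # 128 8 2
```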

LoadTileType

alias LoadTileType = LayoutTensor[dtype, Layout.row_major((depth // Self.depth_tile_size) * Self.loads_per_thread_per_depth_tile, Self.load_width), MutableAnyOrigin, address_space=AddressSpace(5)]

MMA_K

alias MMA_K = mma_shape[2]

MMA_M

alias MMA_M = mma_shape[0]

MMATileType

alias MMATileType = LayoutTensor[dtype, Layout.row_major(Self.num_depth_tiles * Self.num_k_tiles, Self.simd_width), MutableAnyOrigin, address_space=AddressSpace(5)]

num_depth_tiles

alias num_depth_tiles = (depth // Self.MMA_M)

num_k_tiles

alias num_k_tiles = ceildiv(BK, (Self.MMA_K * k_group_size))
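Continuing with assumed values, the MMA-side tile counts follow directly from these two aliases; the snippet below uses a 16x16x16 `mma_shape` and `k_group_size = 1`, which are again assumptions for illustration only.

```mojo
from math import ceildiv


fn main():
    # Assumed values for illustration; not documented defaults.
    var MMA_M = 16  # mma_shape[0]
    var MMA_K = 16  # mma_shape[2]
    var depth = 128
    var BK = 32
    var k_group_size = 1

    var num_depth_tiles = depth // MMA_M                 # 8
    var num_k_tiles = ceildiv(BK, MMA_K * k_group_size)  # 2
    # MMATileType then has num_depth_tiles * num_k_tiles rows of simd_width
    # elements each (16 rows in this example).
    print(num_depth_tiles, num_k_tiles)                  # 8 2
```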

num_repeats

alias num_repeats = (BK // Self.simd_width)

SharedIterType

alias SharedIterType = LayoutTensorIter[dtype, Self.smem_layout, MutableAnyOrigin, address_space=AddressSpace(3), circular=True]

SharedTileType

alias SharedTileType = LayoutTensor[dtype, Self.smem_layout, MutableAnyOrigin, address_space=AddressSpace(3), layout_int_type=_get_index_type(AddressSpace(3)), linear_idx_type=_get_index_type(AddressSpace(3))]

simd_width

alias simd_width = simd_width_of[dtype]()

smem_layout

alias smem_layout = blocked_product(Self.base_layout, Self.tiler_layout, True)

tiler_layout

alias tiler_layout = Layout.row_major(1, (BK // Self.simd_width))

Methods

__init__

__init__(out self, global_tile: LayoutTensor[dtype, layout, origin, address_space=address_space, masked=masked, alignment=alignment], shared_ptr: UnsafePointer[Scalar[dtype], address_space=AddressSpace(3), mut=mut, origin=origin])

pad

static pad[dim: Int]() -> Int

Returns:

Int

load_from_dram

load_from_dram(mut self)

get_mma_tile

get_mma_tile(self) -> Self.MMATileType

Returns:

LayoutTensor

copy_to_shared

copy_to_shared(self)

load_from_shared

load_from_shared(self)
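The page does not show how these methods are meant to be sequenced, so the sketch below is an assumption-heavy illustration rather than library code: the import of `VBuffer` is left as a placeholder because its module path is not documented here, the kernel helper `stage_v_operand` and its parameters are invented for the example, and the call order (DRAM to registers, registers to shared memory, barrier, shared memory to MMA fragments) is inferred from the field and method names.

```mojo
# Hedged sketch only; verify against the kernel sources before relying on it.
from gpu.sync import barrier
from gpu.memory import AddressSpace
from layout import Layout, LayoutTensor
from memory import UnsafePointer
from utils.index import IndexList

# from ??? import VBuffer  # module path not documented on this page


fn stage_v_operand[
    dtype: DType,
    layout: Layout,
    mma_shape: IndexList[3],
    k_group_size: Int,
    BN: Int,
    BK: Int,
    depth: Int,
    num_threads: Int,
](
    v_tile: LayoutTensor[dtype, layout, MutableAnyOrigin],
    smem: UnsafePointer[Scalar[dtype], address_space = AddressSpace.SHARED],
):
    # The infer-only parameters before `//` in VBuffer's parameter list are
    # deduced from v_tile and smem; only the tiling parameters are spelled out.
    var v_buf = VBuffer[
        mma_shape=mma_shape,
        k_group_size=k_group_size,
        BN=BN,
        BK=BK,
        depth=depth,
        num_threads=num_threads,
    ](v_tile, smem)

    v_buf.load_from_dram()    # global memory -> per-thread registers (load_tile)
    v_buf.copy_to_shared()    # registers -> shared memory (smem_iter)
    barrier()                 # assumed: make the shared tile visible block-wide
    v_buf.load_from_shared()  # shared memory -> register MMA fragments (mma_tile)
    var v_frags = v_buf.get_mma_tile()
    # v_frags would then feed the tensor-core mma calls.
```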
