Skip to main content

Mojo function

naive_blockwise_scaled_fp8_grouped_matmul_kernel

naive_blockwise_scaled_fp8_grouped_matmul_kernel[
    c_layout: Layout,
    a_layout: Layout,
    b_layout: Layout,
    a_scale_layout: Layout,
    b_scale_layout: Layout,
    a_offsets_layout: Layout,
    expert_ids_layout: Layout,
    c_type: DType,
    a_type: DType,
    b_type: DType,
    scales_type: DType,
    s_type: DType,
    transpose_b: Bool = True,
    elementwise_lambda_fn: OptionalReg[fn[DType, Int, Int](IndexList[2], SIMD[$0, $1]) capturing -> None] = OptionalReg[fn[DType, Int, Int](IndexList[2], SIMD[$0, $1]) capturing -> None]({:i1 0, 1})
](
    c: LayoutTensor[c_type, c_layout, MutableAnyOrigin],
    a: LayoutTensor[a_type, a_layout, MutableAnyOrigin],
    b: LayoutTensor[b_type, b_layout, MutableAnyOrigin],
    a_offsets: LayoutTensor[uint32, a_offsets_layout, MutableAnyOrigin],
    expert_ids: LayoutTensor[int32, expert_ids_layout, MutableAnyOrigin],
    a_scales: LayoutTensor[scales_type, a_scale_layout, MutableAnyOrigin],
    b_scales: LayoutTensor[scales_type, b_scale_layout, MutableAnyOrigin]
)

Was this page helpful?