Skip to main content

Mojo function

naive_blockwise_scaled_fp8_grouped_matmul

naive_blockwise_scaled_fp8_grouped_matmul[
    c_type: DType,
    a_type: DType,
    b_type: DType,
    scales_type: DType,
    c_layout: Layout,
    a_layout: Layout,
    b_layout: Layout,
    a_scale_layout: Layout,
    b_scale_layout: Layout,
    a_offsets_layout: Layout,
    expert_ids_layout: Layout,
    *,
    BLOCK_DIM_N: Int = 32,
    BLOCK_DIM_M: Int = 16,
    transpose_b: Bool = True,
    elementwise_lambda_fn: OptionalReg[fn[DType, Int, Int](IndexList[2], SIMD[$0, $1]) capturing -> None] = OptionalReg[fn[DType, Int, Int](IndexList[2], SIMD[$0, $1]) capturing -> None]({:i1 0, 1}),
    s_type: DType = float32
](
    c: LayoutTensor[c_type, c_layout, MutableAnyOrigin],
    a: LayoutTensor[a_type, a_layout, MutableAnyOrigin],
    b: LayoutTensor[b_type, b_layout, MutableAnyOrigin],
    a_offsets: LayoutTensor[uint32, a_offsets_layout, MutableAnyOrigin],
    expert_ids: LayoutTensor[int32, expert_ids_layout, MutableAnyOrigin],
    a_scales: LayoutTensor[scales_type, a_scale_layout, MutableAnyOrigin],
    b_scales: LayoutTensor[scales_type, b_scale_layout, MutableAnyOrigin],
    max_num_tokens_per_expert: Int,
    num_active_experts: Int,
    ctx: DeviceContext,
)

Was this page helpful?