Skip to main content
Log in

Mojo function

generic_flash_attention_kv_cache_padded

generic_flash_attention_kv_cache_padded[
    collection_t: KVCollectionT,
    type: DType, //,
    *,
    target: StringSlice[StaticConstantOrigin],
    mask_str: StringSlice[StaticConstantOrigin],
    score_mod_str: StringSlice[StaticConstantOrigin],
    local_window_size: Int = -1,
    num_heads: Int = -1
](
    q: NDBuffer[type, 4, origin, shape, strides],
    kv_collection: collection_t,
    layer_idx: SIMD[uint32, 1],
    valid_lengths: ManagedTensorSlice[io_spec, static_spec=static_spec],
    scale: SIMD[float32, 1],
    output: NDBuffer[type, 4, origin, shape, strides],
    context: DeviceContextPtr
)