Just a Hundred Lines of Code to Make the H100 30% Faster: Stanford Open-Sources a New AI Acceleration Framework
Improving GPU utilization really is this simple.
ThunderKittens has three design goals:
- Simple: ThunderKittens is very simple to write.
- Extensible: if users need functionality that ThunderKittens does not provide, they can extend it themselves.
- Fast.
An H100 SXM GPU contains:
- 80 GB of HBM3 with 3 TB/s of bandwidth (in practice, somewhat less);
- 50 MB of L2 cache with 12 TB/s of bandwidth, split on the GPU into two 25 MB halves connected by a crossbar;
- 132 streaming multiprocessors (SMs).
A few lessons from programming the H100:
- WGMMA instructions are necessary, but they are also quite irritating to use.
- Shared memory is not actually that fast, and it demands a great deal of care.
- Address generation is expensive.
- Occupancy still helps, and registers are generally the critical resource.
ThunderKittens is built around four basic data types:
- Register tiles: 2D tensors in the register file.
- Register vectors: 1D tensors in the register file.
- Shared tiles: 2D tensors in shared memory.
- Shared vectors: 1D tensors in shared memory.

On top of these it provides operations (a minimal sketch follows this list):
- Unary ops, such as exp;
- Binary ops, such as mul;
- Row / column ops, such as row_sum.
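To make these primitives concrete, here is a small sketch, not taken from the article, of a kernel that loads one register tile, applies one operation of each kind, and writes the result back. It only uses calls that also appear in the attention kernels below; the header name kittens.cuh and the bf16-to-float direction of copy are assumptions.

#include "kittens.cuh"   // assumed header name for the ThunderKittens library
using namespace kittens;

// Sketch: one 16x64 register tile plus a per-row register vector, with one
// unary, one binary, and one row-reduction op applied to them.
__global__ void tk_primitives_demo(const bf16* __restrict__ x, bf16* __restrict__ y) {
    rt_bf_1x4<> a;                 // register tile: 16x64 bf16 in the register file
    rt_fl_1x4<> acc;               // float register tile used for the math
    rt_fl_1x4<>::col_vec sums;     // register vector: one float per row of the tile

    load(a, x, a.cols);            // pull one tile in from global memory
    mul(a, a, __float2bfloat16(0.5f)); // binary op: scale every element

    copy(acc, a);                  // widen to float (bf16 -> float copy is assumed to exist)
    exp(acc, acc);                 // unary op: elementwise exponential

    zero(sums);
    row_sum(sums, acc, sums);      // row/column op: reduce each row into the vector

    store(y, acc, acc.cols);       // store converts back down to bf16 on the way out
}

Shared tiles and shared vectors are carved out of CUDA shared memory with the library's allocator, exactly as the k_smem and v_smem buffers are in the kernel below. With those pieces in place, here is a simple FlashAttention-2-style forward kernel written in ThunderKittens for head dimension 64: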
using namespace kittens;
// this kernel only handles headdim=64 for simplicity. Also n should be a multiple of 256 here.
__global__ void attend_ker64(int n, const bf16* __restrict__ __q__, const bf16* __restrict__ __k__, const bf16* __restrict__ __v__, bf16* __o__) {

    auto warpid      = kittens::warpid();
    auto block_start = blockIdx.x*(n*64);
    const bf16 *_q = __q__ + block_start, *_k = __k__ + block_start, *_v = __v__ + block_start;
          bf16 *_o = __o__ + block_start;

    extern __shared__ alignment_dummy __shm[]; // this is the CUDA shared memory
    shared_allocator al((int*)&__shm[0]);

    // K and V live in shared memory -- this is about all that will fit.
    st_bf_1x4<ducks::st_layout::swizzle> (&k_smem)[NUM_WORKERS] = al.allocate<st_bf_1x4<ducks::st_layout::swizzle>, NUM_WORKERS>();
    st_bf_1x4<ducks::st_layout::swizzle> (&v_smem)[NUM_WORKERS] = al.allocate<st_bf_1x4<ducks::st_layout::swizzle>, NUM_WORKERS>();

    // Initialize all of the register tiles.
    rt_bf_1x4<> q_reg, k_reg, v_reg; // v_reg need to be swapped into col_l
    rt_fl_1x1<> att_block;
    rt_bf_1x1<> att_block_mma;
    rt_fl_1x4<> o_reg;
    rt_fl_1x1<>::col_vec max_vec_last,  max_vec;  // these are column vectors for the attention block
    rt_fl_1x1<>::col_vec norm_vec_last, norm_vec; // these are column vectors for the attention block

    int qo_blocks = n / (q_reg.rows*NUM_WORKERS), kv_blocks = n / (q_reg.rows*NUM_WORKERS);

    for(auto q_blk = 0; q_blk < qo_blocks; q_blk++) {

        // each warp loads its own Q tile of 16x64, and then multiplies by 1/sqrt(d)
        load(q_reg, _q + (q_blk*NUM_WORKERS + warpid)*q_reg.num_elements, q_reg.cols);
        mul(q_reg, q_reg, __float2bfloat16(0.125f)); // temperature adjustment

        // zero flash attention L, M, and O registers.
        neg_infty(max_vec); // zero registers for the Q chunk
        zero(norm_vec);
        zero(o_reg);

        // iterate over k, v for these q's that have been loaded
        for(auto kv_idx = 0; kv_idx < kv_blocks; kv_idx++) {

            // each warp loads its own chunk of k, v into shared memory
            load(v_smem[warpid], _v + (kv_idx*NUM_WORKERS + warpid)*q_reg.num_elements, q_reg.cols);
            load(k_smem[warpid], _k + (kv_idx*NUM_WORKERS + warpid)*q_reg.num_elements, q_reg.cols);
            __syncthreads(); // we need to make sure all memory is loaded before we can begin the compute phase

            // now each warp goes through all of the subtiles, loads them, and then does the flash attention internal alg.
            for(int subtile = 0; subtile < NUM_WORKERS; subtile++) {

                load(k_reg, k_smem[subtile]); // load k from shared into registers

                zero(att_block); // zero 16x16 attention tile
                mma_ABt(att_block, q_reg, k_reg, att_block); // q @ k^T
                copy(norm_vec_last, norm_vec);
                copy(max_vec_last,  max_vec);

                row_max(max_vec, att_block, max_vec); // accumulate onto the max_vec
                sub_row(att_block, att_block, max_vec); // subtract max from attention -- now all <=0
                exp(att_block, att_block); // exponentiate the block in-place.

                sub(max_vec_last, max_vec_last, max_vec); // subtract new max from old max to find the new normalization.
                exp(max_vec_last, max_vec_last); // exponentiate this vector -- this is what we need to normalize by.
                mul(norm_vec, norm_vec, max_vec_last); // and the norm vec is now normalized.

                row_sum(norm_vec, att_block, norm_vec); // accumulate the new attention block onto the now-rescaled norm_vec
                div_row(att_block, att_block, norm_vec); // now the attention block is correctly normalized

                mul(norm_vec_last, norm_vec_last, max_vec_last); // normalize the previous norm vec according to the new max
                div(norm_vec_last, norm_vec_last, norm_vec); // normalize the previous norm vec according to the new norm

                copy(att_block_mma, att_block); // convert to bf16 for mma_AB

                load(v_reg, v_smem[subtile]); // load v from shared into registers.
                rt_bf_1x4<ducks::rt_layout::col> &v_reg_col = swap_layout_inplace(v_reg); // this is a reference and the call has invalidated v_reg

                mul_row(o_reg, o_reg, norm_vec_last); // normalize o_reg in advance of mma_AB'ing onto it
                mma_AB(o_reg, att_block_mma, v_reg_col, o_reg); // mfma onto o_reg with the local attention@V matmul.
            }
            __syncthreads(); // we need to make sure all warps are done before we can start loading the next kv chunk
        }

        store(_o + (q_blk*NUM_WORKERS + warpid)*q_reg.num_elements, o_reg, q_reg.cols); // write out o. compiler has an issue with register usage if d is made constexpr q_reg.rows :/
    }
}
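As a usage note, here is how a host-side launch of this kernel might look. This is a hedged sketch rather than the article's own harness: the packed (batch*heads, n, 64) tensor layout, the shared-memory slack, and the value of NUM_WORKERS are assumptions.

// Hypothetical host-side launcher for attend_ker64 (not part of the article).
// Assumes NUM_WORKERS matches the constant the kernel was compiled with and
// that n is a multiple of 16*NUM_WORKERS, as the kernel's header comment requires.
void launch_attend_ker64(int batch_heads, int n,
                         const bf16* q, const bf16* k, const bf16* v, bf16* o) {
    // one K tile and one V tile of 16x64 bf16 per worker, plus slack for the
    // allocator's alignment (the slack amount is an assumption)
    constexpr int SMEM = 2 * NUM_WORKERS * 16 * 64 * sizeof(bf16) + 1024;
    // opt in to more than 48 KB of dynamic shared memory when needed
    cudaFuncSetAttribute(attend_ker64, cudaFuncAttributeMaxDynamicSharedMemorySize, SMEM);
    // one block per (batch, head); NUM_WORKERS warps of 32 threads per block
    attend_ker64<<<batch_heads, NUM_WORKERS * kittens::WARP_THREADS, SMEM>>>(n, q, k, v, o);
}

The second kernel below targets the H100 specifically, using TMA loads (via CUtensorMap) and warpgroup-level WGMMA primitives: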
template<int D>
__global__ __launch_bounds__((NUM_WORKERS)*kittens::WARP_THREADS, 2)
void fwd_attend_ker_dim(int N, const CUtensorMap* tma_q, const CUtensorMap* tma_k, const CUtensorMap* tma_v, CUtensorMap* tma_o) {
    extern __shared__ int __shm[]; // this is the CUDA shared memory
    tma_swizzle_allocator al((int*)&__shm[0]);

    constexpr int tile_width = fwd_attend_ker_tile_dims<D>::tile_width; // constants
    constexpr int qo_height  = fwd_attend_ker_tile_dims<D>::qo_height;
    constexpr int kv_height  = fwd_attend_ker_tile_dims<D>::kv_height;

    st_bf<qo_height, tile_width, layout_q> (&q_smem)   [NUM_WARPGROUPS] = al.allocate<st_bf<qo_height, tile_width, layout_q>,  NUM_WARPGROUPS>();
    st_bf<kv_height, tile_width, layout_k> (&k_smem)[2][NUM_WORKERS_KV] = al.allocate<st_bf<kv_height, tile_width, layout_k>, 2, NUM_WORKERS_KV>();
    st_bf<kv_height, tile_width, layout_v> (&v_smem)[2][NUM_WORKERS_KV] = al.allocate<st_bf<kv_height, tile_width, layout_v>, 2, NUM_WORKERS_KV>();

    int tic = 0, toc = 1;

    rt_fl<1, kv_height> att_block;
    rt_bf<1, kv_height> att_block_mma;
    rt_fl<1, qo_height> o_prev;
    col_vec<rt_fl<1, kv_height>> max_vec_last, max_vec;
    col_vec<rt_fl<1, kv_height>> norm_vec_last, norm_vec;

    int warpid      = kittens::warpid();
    int warpgroupid = warpid/kittens::WARPGROUP_WARPS;

    int kv_blocks = N / (NUM_WORKERS_KV*k_smem[0][0].rows);

    __shared__ uint64_t qsmem_barrier, kvsmem_barrier;//, vsmem_barrier;

    int q_phasebit = 0;
    int kv_phasebit = 0;

    if (threadIdx.x == 0) {
        tma::init_barrier<st_bf<qo_height, tile_width, layout_q>, NUM_WARPGROUPS>(qsmem_barrier, 1);
        tma::init_barrier<st_bf<kv_height, tile_width, layout_k>, NUM_WORKERS_KV*2>(kvsmem_barrier, 1);
    }

    if (warpid == 0) {
        for (int wg = 0; wg < NUM_WORKERS/kittens::WARPGROUP_WARPS; wg++) { // load q
            int tile_idx = (blockIdx.y * NUM_WARPGROUPS * gridDim.x) + (blockIdx.x * NUM_WARPGROUPS) + wg;
            tma::load_async((q_smem[wg]), tma_q, qsmem_barrier, tile_idx);
        }
        for (int w = 0; w < NUM_WORKERS_KV; w++) { // load k, v
            int tile_idx = (blockIdx.y * NUM_WORKERS_KV * kv_blocks) + (0 * NUM_WORKERS_KV) + w;
            tma::load_async((k_smem[tic][w]), tma_k, kvsmem_barrier, tile_idx);
            tma::load_async((v_smem[tic][w]), tma_v, kvsmem_barrier, tile_idx);
        }
    }

    neg_infty(max_vec); // zero registers for the Q chunk
    zero(norm_vec);
    zero(o_prev);
    __syncthreads();

    tma::arrive_and_wait(qsmem_barrier, q_phasebit);
    q_phasebit ^= 1;

    if constexpr (D == 64) { warpgroup::mul(q_smem[warpgroupid], q_smem[warpgroupid], __float2bfloat16(0.125f)); }
    else                   { warpgroup::mul(q_smem[warpgroupid], q_smem[warpgroupid], __float2bfloat16(0.08838834764f)); }

    for (auto kv_idx = 0; kv_idx < kv_blocks; kv_idx++, tic ^= 1, toc ^= 1) {
        tma::arrive_and_wait(kvsmem_barrier, kv_phasebit);
        kv_phasebit ^= 1;

        __syncthreads();
        if (warpid == 0) {
            tma::set_bytes(kvsmem_barrier, 2 * NUM_WORKERS_KV * k_smem[0][0].num_elements * sizeof(bf16));

            if (kv_idx + 1 < kv_blocks) {
                for (int w = 0; w < NUM_WORKERS_KV; w++) {
                    int tile_idx = (blockIdx.y * NUM_WORKERS_KV * kv_blocks) + ((kv_idx + 1) * NUM_WORKERS_KV) + w;
                    tma::load_async((k_smem[toc][w]), tma_k, kvsmem_barrier, tile_idx);
                    tma::load_async((v_smem[toc][w]), tma_v, kvsmem_barrier, tile_idx);
                }
            }
        }

        warpgroup::mma_fence(att_block);
        warpgroup::mm_ABt(att_block, q_smem[warpgroupid], k_smem[tic][0]);
        warpgroup::mma_commit_group();

        copy(norm_vec_last, norm_vec);
        copy(max_vec_last,  max_vec);

        warpgroup::mma_async_wait();

        row_max(max_vec, att_block, max_vec); // accumulate onto the max_vec
        sub_row(att_block, att_block, max_vec);
        exp(att_block, att_block);

        sub(max_vec_last, max_vec_last, max_vec);
        exp(max_vec_last, max_vec_last);
        mul(norm_vec, norm_vec, max_vec_last);

        row_sum(norm_vec, att_block, norm_vec); // accumulate onto the norm_vec
        div_row(att_block, att_block, norm_vec);

        mul(norm_vec_last, norm_vec_last, max_vec_last);
        div(norm_vec_last, norm_vec_last, norm_vec);

        copy(att_block_mma, att_block); // convert to bf16 for mma

        mul_row(o_prev, o_prev, norm_vec_last); // normalize o_prev in advance of mma'ing onto it

        warpgroup::mma_fence(o_prev);
        warpgroup::mma_AB(o_prev, att_block_mma, v_smem[tic][0]);
        warpgroup::mma_commit_group();
    }

    auto (*o_smem) = reinterpret_cast<st_bf<qo_height, tile_width, layout_o>(*)>(q_smem); // reuse q memory
    warpgroup::store(o_smem[warpgroupid], o_prev);
    __syncthreads();

    if (warpid % 4 == 0) { // store o
        int tile_idx = (blockIdx.y * NUM_WARPGROUPS * gridDim.x) + (blockIdx.x * NUM_WARPGROUPS) + warpgroupid;
        tma::store_async(tma_o, (o_smem[warpgroupid]), tile_idx);
        tma::store_commit_group();
    }

    tma::store_async_wait();
}
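For orientation, a possible host-side launch of this kernel is sketched below. It is an assumption-laden outline, not the article's code: it presumes the CUtensorMap descriptors were already built to match the kernel's tile shapes (for example with cuTensorMapEncodeTiled or a library helper), and the shared-memory budget and grid shape are set by hand to mirror how the kernel indexes blockIdx.x and blockIdx.y.

// Hypothetical launch sketch for fwd_attend_ker_dim<D> (not from the article).
template<int D>
void launch_fwd_attend(int batch_heads, int seq_blocks, int N,
                       const CUtensorMap* tma_q, const CUtensorMap* tma_k,
                       const CUtensorMap* tma_v, CUtensorMap* tma_o) {
    // most of the H100's per-SM shared memory; the exact figure is an assumption
    constexpr int SMEM_BYTES = 200 * 1024;
    cudaFuncSetAttribute(fwd_attend_ker_dim<D>,
                         cudaFuncAttributeMaxDynamicSharedMemorySize, SMEM_BYTES);
    // grid.x walks Q blocks along the sequence, grid.y walks batch*heads,
    // matching how the kernel computes tile_idx from blockIdx.x and blockIdx.y
    dim3 grid(seq_blocks, batch_heads);
    fwd_attend_ker_dim<D><<<grid, NUM_WORKERS * kittens::WARP_THREADS, SMEM_BYTES>>>(
        N, tma_q, tma_k, tma_v, tma_o);
}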
Comparison of FlashAttention-2 (PyTorch) against ThunderKittens across various configurations on an H100 SXM.
ThunderKittens also makes it very quick to implement linear attention.
Source: 机器之心 (Machine Heart)