After digging into quantization I had a reasonable grasp of both dynamic quant and static quant, and after reading an expert's write-up I also understood the W8A8 computation in TensorRT and why quantization speeds things up there. But for GPTQ's W8A16 / W4A16 I could not tell whether it counts as dynamic quant or static quant, and I was stuck on this for quite a while. Reading the GPTQ source code finally made the mechanism clear: the quantized weight is first dequantized back to fp16, and only then is the W*X multiplication carried out; see the GPTQ source code for the details.
But after reading the code I was puzzled again: if there is a dequantization step before the multiplication, doesn't that make things slower? Then why does everyone say it is faster? Where does the speedup come from? I was still stuck.
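To make the W8A16 / W4A16 idea concrete, here is a minimal CPU-side sketch of a weight-only-quantized GEMV. It is only an illustration with made-up names and a made-up storage layout (dequant_gemv, group_size, row-major weights), not the GPTQ storage format: the weights are kept as uint8 plus a per-group scale and zero point, and each weight is dequantized right before it is multiplied with the activation (fp16 in practice, float here for simplicity).

#include <cstdint>
#include <vector>

// Minimal sketch of a W8A16-style GEMV: y = W * x.
// W is stored as uint8 with one (scale, zero) pair per group of
// `group_size` input channels; names and layout are illustrative only.
std::vector<float> dequant_gemv(
    const std::vector<uint8_t>& qweight, // [rows * cols] quantized weights
    const std::vector<float>& scales,    // [rows * num_groups]
    const std::vector<uint8_t>& zeros,   // [rows * num_groups]
    const std::vector<float>& x,         // [cols] activation (fp16 in practice)
    int rows, int cols, int group_size) {
  std::vector<float> y(rows, 0.0f);
  int num_groups = cols / group_size;
  for (int r = 0; r < rows; ++r) {
    float acc = 0.0f;
    for (int c = 0; c < cols; ++c) {
      int g = c / group_size;
      float scale = scales[r * num_groups + g];
      float zero  = static_cast<float>(zeros[r * num_groups + g]);
      // dequantize the weight first ...
      float w_fp = scale * (static_cast<float>(qweight[r * cols + c]) - zero);
      // ... and only then do the floating-point multiply-accumulate
      acc += w_fp * x[c];
    }
    y[r] = acc;
  }
  return y;
}

The GPTQ CUDA code below does essentially the same thing per thread, with the extra twist that four 8-bit weights (or eight 4-bit weights for W4A16) are packed into each int32, and the scale/zero point of a channel are looked up through g_idx.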
void vecquant8matmul_cuda(
  torch::Tensor vec,
  torch::Tensor mat,
  torch::Tensor mul,
  torch::Tensor scales,
  torch::Tensor zeros,
  torch::Tensor g_idx
) {
  int batch = vec.size(0);
  int vec_height = vec.size(1);
  int height = mat.size(0);
  int width = mat.size(1);
  int zero_width = zeros.size(1);

  dim3 blocks(
    (height + BLOCKHEIGHT8 - 1) / BLOCKHEIGHT8,
    (width + BLOCKWIDTH - 1) / BLOCKWIDTH
  );
  dim3 threads(BLOCKWIDTH); // launch configuration: BLOCKWIDTH threads per block, one per output column

  AT_DISPATCH_FLOATING_TYPES(
    vec.type(), "vecquant8matmul_cuda", ([&] {
      VecQuant8MatMulKernel<<<blocks, threads>>>( // the actual CUDA kernel, launched with the grid/block computed above
        vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),
        scales.data<scalar_t>(), zeros.data<int>(), g_idx.data<int>(),
        batch, vec_height, height, width, zero_width
      );
    })
  );
}
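For completeness, a launcher like this is normally exposed to Python as a torch C++/CUDA extension through a thin wrapper plus a pybind11 module. The sketch below shows that standard pattern; the wrapper name and docstring are my assumption, and the real GPTQ binding file may differ in details (for example it may also set a CUDA device guard):

#include <torch/extension.h>

// forward declaration of the CUDA launcher shown above
void vecquant8matmul_cuda(
  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
  torch::Tensor scales, torch::Tensor zeros, torch::Tensor g_idx);

// hypothetical thin CPU-side wrapper around the CUDA launcher
void vecquant8matmul(
  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
  torch::Tensor scales, torch::Tensor zeros, torch::Tensor g_idx) {
  vecquant8matmul_cuda(vec, mat, mul, scales, zeros, g_idx);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("vecquant8matmul", &vecquant8matmul,
        "vector x 8-bit quantized matrix multiplication (CUDA)");
}

One detail worth noting on the Python side: because the kernel accumulates into mul with atomicAdd, the output tensor has to be zero-initialized before the call.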
template <typename scalar_t> // templated on the floating-point type of the activations/scales
__global__ void VecQuant8MatMulKernel(
  const scalar_t* __restrict__ vec,    // x: the activation vector(s)
  const int* __restrict__ mat,         // w: quantized weights, four 8-bit values packed per int32
  scalar_t* __restrict__ mul,          // output buffer for the w*x result
  const scalar_t* __restrict__ scales, // scales from quantizing w
  const int* __restrict__ zeros,       // zero points from quantizing w (packed like the weights)
  const int* __restrict__ g_idx,       // group index of each input channel
  int batch,
  int vec_height,
  int height,
  int width,
  int zero_width
) {
  int h = BLOCKHEIGHT8 * blockIdx.x;
  int w = BLOCKWIDTH * blockIdx.y + threadIdx.x;
  __shared__ scalar_t blockvec[BLOCKWIDTH];
  int i = width * h + w;
  int g_h = h * 4; // first input channel handled by this block (4 weights per int32)
  int k;
  unsigned int g;
  scalar_t w_tmp;
  int z_w = w / 4;         // which int32 holds this column's zero point
  int z_mod = (w % 4) * 8; // bit offset of that zero point inside the int32
  float weight[BLOCKWIDTH];
  for (k = 0; k < BLOCKWIDTH; ++k){
    int k_w = (k / 4);       // which packed int32 holds weight k
    int k_bit = (k % 4) * 8; // bit offset of weight k inside that int32
    g = as_int(g_idx[g_h + k]);
    scalar_t scale = scales[g * width + w]; // fetch the floating-point scale for this group and column
    scalar_t zero = scalar_t(((as_unsigned(zeros[g * zero_width + z_w]) >> z_mod) & 0xFF) + 1); // unpack the 8-bit zero point (stored off by one)
    w_tmp = ((as_unsigned(mat[i + (k_w * width)]) >> k_bit) & 0xFF); // unpack the 8-bit quantized weight
    weight[k] = scale * (w_tmp - zero); // dequantize: w_fp = scale * (q - zero)
  }
  scalar_t res;
  for (int b = 0; b < batch; ++b){
    res = 0;
    blockvec[threadIdx.x] = vec[b * vec_height + blockIdx.x * BLOCKWIDTH + threadIdx.x];
    __syncthreads();
    for (k = 0; k < BLOCKWIDTH; ++k){
      res += weight[k] * blockvec[k]; // multiply the dequantized weight with the activation
    }
    atomicAdd(&mul[b * width + w], res); // accumulate the partial result into the output
    __syncthreads();
  }
}
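The trickiest part of the kernel is the bit arithmetic: four 8-bit quantized values are packed into each int32 of mat and zeros, so k_w / z_w select the packed word and k_bit / z_mod select the byte inside it. Here is a small host-side illustration of that unpack-and-dequantize step (plain C++, values and names made up), mirroring what one thread does for its column:

#include <cstdint>
#include <cstdio>

int main() {
  // four 8-bit quantized weights packed into one 32-bit word, lowest byte first,
  // matching the `(k % 4) * 8` shifts used in the kernel
  uint8_t q[4] = {12, 200, 7, 255};
  uint32_t packed = 0;
  for (int k = 0; k < 4; ++k) packed |= (uint32_t)q[k] << (k * 8);

  float scale = 0.05f;  // illustrative per-group scale
  float zero  = 128.0f; // illustrative per-group zero point

  for (int k = 0; k < 4; ++k) {
    int k_bit = (k % 4) * 8;                    // bit offset, as in the kernel
    uint32_t w_tmp = (packed >> k_bit) & 0xFF;  // unpack one 8-bit weight
    float w_fp = scale * ((float)w_tmp - zero); // dequantize before the multiply
    printf("q=%3u -> w_fp=%.3f\n", (unsigned)w_tmp, w_fp);
  }
  return 0;
}

Only after this per-element dequantization does the floating-point multiply-accumulate with the activation happen, which is why W8A16 / W4A16 is usually described as weight-only quantization: the matmul itself still runs in fp16/fp32.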
From: https://www.cnblogs.com/chenenenen/p/18367849