Commit 6db6601

iq1_kt: basics

- iq1_kt: CUDA dequantize. Testing with LLaMA-3.1-8B-Instruct, we get almost the same PPL as iq2_xxs, so about 0.2 bpw fewer bits for the same quality.
- iq1_kt: CUDA MMQ
- iq1_kt: CUDA MMVQ
- iq1_kt: AVX2 GEMM/GEMV
- iq1_kt: convert/repack to q8_0_r8 (AVX2)
- iq1_kt: slightly faster GEMV (18.6 t/s -> 19.4 t/s)
- iq1_kt: NEON GEMM/GEMV. Pathetic as usual.
- iq1_kt: slightly faster NEON - still pathetic
- iq1_kt: tiny bit better GEMV on NEON
- iq1_kt: convert/repack to q8_0_r8 (NEON)
- iq1_kt: very slightly faster convert/repack to q8_0_r8 on NEON
- Adding forgotten file
- Update stable-diffusion.h
- Update IKL files, including IQ1_KT
- Update constants.py

Author: @ikawrakow

1 parent b6f70e4 · commit 6db6601

File tree: 23 files changed, +947 -14 lines changed

ggml/include/ggml.h

Lines changed: 2 additions & 0 deletions
```diff
@@ -434,6 +434,7 @@ extern "C" {
     GGML_TYPE_IQ4_KT = 155,
     GGML_TYPE_IQ3_KS = 156,
     GGML_TYPE_IQ2_KL = 157,
+    GGML_TYPE_IQ1_KT = 158,
 
     GGML_TYPE_IQ3_KS_V1 = 196,
 
@@ -525,6 +526,7 @@ extern "C" {
     GGML_FTYPE_MOSTLY_IQ4_KT = 144, // except 1d tensors
     GGML_FTYPE_MOSTLY_IQ3_KS = 145, // except 1d tensors
     GGML_FTYPE_MOSTLY_IQ2_KL = 146, // except 1d tensors
+    GGML_FTYPE_MOSTLY_IQ1_KT = 147, // except 1d tensors
 
     GGML_FTYPE_MOSTLY_IQ3_KS_V1 = 185, // except 1d tensors
     //
```

ggml/src/ggml-common.h

Lines changed: 7 additions & 0 deletions
```diff
@@ -728,6 +728,13 @@ typedef struct {
 } block_iq2_ks;
 static_assert(sizeof(block_iq2_ks) == sizeof(uint16_t) + QK_K/64 + QK_K/4, "wrong iq2_ks block size/padding");
 
+typedef struct {
+    uint8_t sh[QK_K/32]; // 4-bit scales + 13th bits for groups of 8
+    uint8_t ql[QK_K/8];  // low 8 bits for groups of 8
+    uint8_t qh[QK_K/16]; // high 4 bits for groups of 8
+} block_iq1_kt;
+static_assert(sizeof(block_iq1_kt) == QK_K/8 + QK_K/16 + QK_K/32, "wrong iq1_kt block size/padding");
+
 typedef struct {
     uint8_t scales[QK_K/64];
     uint8_t ql[QK_K/4];
```
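With QK_K = 256 this works out to 8 + 32 + 16 = 56 bytes per super-block of 256 weights, i.e. 1.75 bpw on top of one fp32 scale per row. Each group of 8 weights is encoded as a 13-bit trellis seed: 8 low bits in `ql`, 4 middle bits in `qh` (two groups share each byte), and the 13th bit in the high nibble of `sh`, whose low nibble is the 4-bit scale shared by four groups. A minimal sketch of how a group's seed can be reassembled, mirroring the shifts and masks used by the CUDA kernels further down (the helper name is ours, not the repo's):

```c
#include <stdint.h>

#define QK_K 256

typedef struct {
    uint8_t sh[QK_K/32]; // 4-bit scales + 13th bits for groups of 8
    uint8_t ql[QK_K/8];  // low 8 bits for groups of 8
    uint8_t qh[QK_K/16]; // high 4 bits for groups of 8
} block_iq1_kt;

// Hypothetical helper (not in the repo): rebuild the 13-bit trellis seed of
// group ib (0..31) with the same shifts and masks the CUDA kernels below
// use; the kernels then add 4096 before stepping the trellis.
static uint32_t iq1_kt_group_seed(const block_iq1_kt * x, int ib) {
    uint32_t lo  = x->ql[ib];                                   // bits 0..7
    uint32_t mid = (x->qh[ib % 16] << (8 - 4*(ib/16))) & 0xf00; // bits 8..11
    uint32_t hi  = (x->sh[ib / 4] << (8 - (ib % 4))) & 0x1000;  // bit 12
    return lo | mid | hi;
}
```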

ggml/src/ggml-cpu/ggml-cpu.c

Lines changed: 10 additions & 0 deletions
```diff
@@ -741,6 +741,16 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
         .vec_dot_type = GGML_TYPE_Q8_K,
         .nrows = 1,
     },
+    [GGML_TYPE_IQ1_KT] = {
+        .from_float = quantize_row_iq1_kt,
+        .vec_dot = vec_dot_iq1_kt_q8_k,
+#if defined __AVX2__
+        .vec_dot_type = GGML_TYPE_Q8_2_X4,
+#else
+        .vec_dot_type = GGML_TYPE_Q8_0_X4,
+#endif
+        .nrows = 1,
+    },
     [GGML_TYPE_IQ2_KT] = {
         .from_float = quantize_row_iq2_kt,
         .vec_dot = vec_dot_iq2_kt_q8_k,
```
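This entry routes IQ1_KT through the generic CPU path: weights are produced by `quantize_row_iq1_kt`, and mat-vec products call `vec_dot_iq1_kt_q8_k` after the f32 activations are repacked to `vec_dot_type` (Q8_2_X4 when AVX2 is available, Q8_0_X4 otherwise, presumably matching what the respective AVX2/NEON kernels expect). A toy miniature of the registration pattern, illustrative only and not repo code:

```c
#include <stdint.h>

// Toy miniature of the table above: one designated-initializer entry per
// tensor type, pointing at that type's quantization and dot-product kernels.
typedef void (*from_float_t)(const float * x, void * y, int64_t n);

struct toy_traits {
    from_float_t from_float;   // f32 -> quantized row
    int          vec_dot_type; // activations are repacked to this type first
    int          nrows;        // rows produced per vec_dot call
};

static void toy_quantize_row(const float * x, void * y, int64_t n) {
    (void)x; (void)y; (void)n; // stand-in body
}

enum { TOY_IQ1_KT, TOY_TYPE_COUNT };

static const struct toy_traits toy_table[TOY_TYPE_COUNT] = {
    [TOY_IQ1_KT] = { .from_float = toy_quantize_row, .vec_dot_type = 0, .nrows = 1 },
};
```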

ggml/src/ggml-cpu/ops.cpp

Lines changed: 7 additions & 0 deletions
```diff
@@ -1780,6 +1780,7 @@ void ggml_compute_forward_add(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
@@ -2264,6 +2265,7 @@ void ggml_compute_forward_add1(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
@@ -2445,6 +2447,7 @@ void ggml_compute_forward_acc(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
@@ -5444,6 +5447,7 @@ void ggml_compute_forward_out_prod(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
@@ -5966,6 +5970,7 @@ void ggml_compute_forward_set(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
@@ -6281,6 +6286,7 @@ void ggml_compute_forward_get_rows(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
@@ -7038,6 +7044,7 @@ void ggml_compute_forward_clamp(
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_K_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
```

ggml/src/ggml-cuda/common.cuh

Lines changed: 7 additions & 0 deletions
```diff
@@ -716,6 +716,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ2_KS> {
     static constexpr int qi = QI4_XS;
 };
 
+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ1_KT> {
+    static constexpr int qk = QK_K;
+    static constexpr int qr = QR4_XS;
+    static constexpr int qi = QI4_XS;
+};
+
 template<>
 struct ggml_cuda_type_traits<GGML_TYPE_IQ2_KT> {
     static constexpr int qk = QK_K;
```

ggml/src/ggml-cuda/convert.cu

Lines changed: 31 additions & 0 deletions
```diff
@@ -379,6 +379,26 @@ float __device__ __forceinline__ trellis_next(uint32_t& val) {
     return (float)(h[0]+h[1]);
 }
 
+template<typename dst_t>
+static __global__ void dequantize_block_iq1_kt(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+    int64_t ii  = blockIdx.x;
+    int64_t row = (QK_K * ii) / n_per_row;
+    const char * cx = (const char *)vx + row * row_size;
+    float scale = *(const float *)cx;
+    const block_iq1_kt * x = (const block_iq1_kt *)(cx + sizeof(float));
+    const int64_t i = ii - (row*n_per_row)/QK_K;
+
+    const int64_t tid = threadIdx.x;
+    const int64_t ib = tid; // 0...31
+    dst_t * y = yy + ii*QK_K + 8*ib;
+    uint32_t idx = (x[i].ql[ib] | ((x[i].qh[ib%16] << (8 - 4*(ib/16))) & 0xf00) | ((x[i].sh[ib/4] << (8 - (ib%4))) & 0x1000)) + 4096;
+    const float dl = scale * iq4k_values[x[i].sh[ib/4] & 0xf];
+    for (int j = 0; j < 8; ++j) {
+        y[j] = dl * trellis_next_int(idx);
+    }
+}
+
 template<typename dst_t>
 static __global__ void dequantize_block_iq2_kt(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
 
@@ -1585,6 +1605,13 @@ static void dequantize_row_tq2_0_cuda(const void * vx, dst_t * y, const int64_t
     dequantize_block_tq2_0<<<nb, 64, 0, stream>>>(vx, y);
 }
 
+template<typename dst_t>
+static void dequantize_row_iq1_kt_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+    const int64_t k = nrows * n_per_row;
+    const int nb = k / QK_K;
+    dequantize_block_iq1_kt<<<nb, 32, 0, stream>>>(vx, y, n_per_row, ggml_row_size(GGML_TYPE_IQ1_KT, n_per_row));
+}
+
 template<typename dst_t>
 static void dequantize_row_iq2_kt_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
     const int64_t k = nrows * n_per_row;
@@ -1995,6 +2022,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
             return dequantize_row_tq2_0_cuda;
         case GGML_TYPE_IQ2_XXS:
             return dequantize_row_iq2_xxs_cuda;
+        case GGML_TYPE_IQ1_KT:
+            return dequantize_row_iq1_kt_cuda;
         case GGML_TYPE_IQ2_KT:
             return dequantize_row_iq2_kt_cuda;
         case GGML_TYPE_IQ3_KT:
@@ -2099,6 +2128,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_tq2_0_cuda;
         case GGML_TYPE_IQ2_XXS:
             return dequantize_row_iq2_xxs_cuda;
+        case GGML_TYPE_IQ1_KT:
+            return dequantize_row_iq1_kt_cuda;
         case GGML_TYPE_IQ2_KT:
             return dequantize_row_iq2_kt_cuda;
         case GGML_TYPE_IQ3_KT:
```
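`dequantize_block_iq1_kt` launches one 32-thread block per 256-weight super-block (hence `<<<nb, 32>>>` in the launcher): thread `ib` rebuilds its group's 13-bit seed, offsets it by 4096, picks the group scale from the low nibble of `sh` via `iq4k_values`, and emits 8 weights by stepping the trellis. `trellis_next_int` itself is outside this hunk; a scalar reference consistent with the generator inlined in the MMVQ code below (our reconstruction, not repo code) would be:

```c
#include <stdint.h>

// Our scalar reconstruction of the iq1_kt trellis step; ka, km and the
// -126 bias come from the inlined generator in vec_dot_iq1_kt_q8_1 below.
int trellis_next_int_ref(uint32_t * val) {
    const uint32_t ka = 0xCBAC1FED; // multiplicative trellis constant
    const uint32_t km = 0x3f3f3f3f; // keep the low 6 bits of every byte
    *val *= ka;                     // advance the generator state
    uint32_t m = *val & km;
    // Sum of the four masked bytes minus 126 -- on the GPU this is
    // ggml_cuda_dp4a(val & km, 0x01010101, -126).
    return (int)((m & 0xff) + ((m >> 8) & 0xff) + ((m >> 16) & 0xff) + (m >> 24)) - 126;
}

// Per group of 8 weights, as in dequantize_block_iq1_kt:
//   uint32_t idx = packed_13_bit_seed + 4096;
//   for (int j = 0; j < 8; ++j) y[j] = dl * trellis_next_int_ref(&idx);
```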

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 1 addition & 0 deletions
```diff
@@ -3468,6 +3468,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
         case GGML_TYPE_IQ2_K:
         case GGML_TYPE_IQ2_KS:
         case GGML_TYPE_IQ3_KS_V1:
+        case GGML_TYPE_IQ1_KT:
         case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
```

ggml/src/ggml-cuda/iqk_mmvq.cu

Lines changed: 40 additions & 0 deletions
```diff
@@ -472,6 +472,39 @@ __device__ __forceinline__ void vec_dot_iq4_kt_q8_1(
     *result += dl * __low2float(bq8_1[ib32].ds) * sumi;
 }
 
+__device__ __forceinline__ void vec_dot_iq1_kt_q8_1(
+    const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
+
+    constexpr uint32_t ka = 0xCBAC1FED;
+    constexpr uint32_t km = 0x3f3f3f3f;
+
+    float scale = *(const float *)vbq;
+    const block_iq1_kt * bq1 = (const block_iq1_kt *)((const char *)vbq + sizeof(float)) + kbx;
+
+    // iqs is 0...28
+    const int ib32 = iqs/4;
+    const int32_t * q8 = (const int *)bq8_1[ib32].qs;
+    const int ls = iq4k_values[bq1->sh[ib32] & 0xf];
+    const float dl = scale * ls;
+    int sumi = 0;
+    for (int j = 0; j < 4; ++j) {
+        uint32_t val = bq1->ql[4*ib32+j] + 4096 + ((bq1->qh[4*(ib32%4)+j] << (8 - 4*(ib32/4))) & 0xf00) + ((bq1->sh[ib32] << (8 - j)) & 0x1000);
+        int v4 = 0;
+        for (int k = 0; k < 4; ++k) {
+            val *= ka;
+            v4 |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
+        }
+        sumi = ggml_cuda_dp4a(v4, q8[2*j+0], sumi);
+        v4 = 0;
+        for (int k = 0; k < 4; ++k) {
+            val *= ka;
+            v4 |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
+        }
+        sumi = ggml_cuda_dp4a(v4, q8[2*j+1], sumi);
+    }
+    *result += dl * __low2float(bq8_1[ib32].ds) * sumi;
+}
+
 __device__ __forceinline__ void vec_dot_iq2_kt_q8_1(
     const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
 
@@ -1419,6 +1452,13 @@ void mul_mat_vec_iq4_kt_q8_1_cuda(
     iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KT, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_kt_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
 }
 
+void mul_mat_vec_iq1_kt_q8_1_cuda(
+    const void * vx, const void * vy, float * dst,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
+
+    iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ1_KT, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq1_kt_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
+}
+
 void mul_mat_vec_iq2_kt_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
```
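In the MMVQ kernel, `iqs` steps over 0, 4, ..., 28, so `ib32 = iqs/4` selects one of the eight 32-value groups; for each of its four 8-weight trellis groups, the generated values are packed four at a time into `v4` and accumulated against the q8_1 activations with `dp4a`. A scalar model of one such `ib32` step, using `trellis_next_int_ref` from the sketch after convert.cu above (again our reconstruction, not repo code):

```c
#include <stdint.h>

// Scalar trellis step from the convert.cu sketch above.
int trellis_next_int_ref(uint32_t * val);

// Scalar model of one ib32 step of vec_dot_iq1_kt_q8_1: four trellis groups
// of 8 weights dotted with 32 int8 activations. The CUDA version instead
// packs each four generated weights into one 32-bit word and uses dp4a.
int iq1_kt_dot_ib32(const uint8_t * ql, const uint8_t * qh, const uint8_t * sh,
                    int ib32, const int8_t * q8) {
    int sumi = 0;
    for (int j = 0; j < 4; ++j) {
        uint32_t val = ql[4*ib32 + j] + 4096
                     + ((qh[4*(ib32 % 4) + j] << (8 - 4*(ib32 / 4))) & 0xf00)
                     + ((sh[ib32] << (8 - j)) & 0x1000);
        for (int k = 0; k < 8; ++k) {
            sumi += trellis_next_int_ref(&val) * q8[8*j + k];
        }
    }
    // The caller applies dl = row_scale * iq4k_values[sh[ib32] & 0xf] and the
    // q8_1 block scale: *result += dl * __low2float(bq8_1[ib32].ds) * sumi.
    return sumi;
}
```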

ggml/src/ggml-cuda/iqk_mmvq.cuh

Lines changed: 6 additions & 2 deletions
```diff
@@ -48,7 +48,7 @@ void mul_mat_vec_iq5_ks_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
 
-void mul_mat_vec_iq4_kt_q8_1_cuda(
+void mul_mat_vec_iq1_kt_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
 
@@ -59,6 +59,10 @@ void mul_mat_vec_iq2_kt_q8_1_cuda(
 void mul_mat_vec_iq3_kt_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
+
+void mul_mat_vec_iq4_kt_q8_1_cuda(
+    const void * vx, const void * vy, float * dst,
+    const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
 
 void mul_mat_vec_iq1_bn_q8_1_cuda(
     const void * vx, const void * vy, float * dst,
@@ -105,4 +109,4 @@ void mul_mat_vec_iq1_m_r4_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const char * ids_data,
     const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
     const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
-*/
\ No newline at end of file
+*/
```

ggml/src/ggml-cuda/mmq.cu

Lines changed: 4 additions & 0 deletions
```diff
@@ -113,6 +113,9 @@ void ggml_cuda_op_mul_mat_q(
         case GGML_TYPE_IQ4_KT:
             mul_mat_q_case<GGML_TYPE_IQ4_KT>(ctx, args, stream);
             break;
+        case GGML_TYPE_IQ1_KT:
+            mul_mat_q_case<GGML_TYPE_IQ1_KT>(ctx, args, stream);
+            break;
         case GGML_TYPE_IQ2_KT:
             mul_mat_q_case<GGML_TYPE_IQ2_KT>(ctx, args, stream);
             break;
@@ -220,6 +223,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
         case GGML_TYPE_IQ5_KS:
         case GGML_TYPE_IQ5_KS_R4:
         case GGML_TYPE_IQ2_KS:
+        case GGML_TYPE_IQ1_KT:
        case GGML_TYPE_IQ2_KT:
         case GGML_TYPE_IQ3_KT:
         case GGML_TYPE_IQ4_KT:
```
