Skip to content

Commit e1b5c21

Browse files
hazelnutcloud authored and ggerganov committed
ggml : remove old quantization functions (ggml-org#5942)
* ggml : remove old quantization functions ggml-ci * ggml : simplify ggml_quantize_chunk ggml-ci * ggml : restrict correctness ggml-ci * ggml : remove hist data from the quantization API ggml-ci * tests : remove hist usage in test-backend-ops ggml-ci * vulkan : remove hist and fix typo
1 parent 303515a commit e1b5c21

File tree

9 files changed

+131
-568
lines changed

9 files changed

+131
-568
lines changed

examples/benchmark/benchmark-matmult.cpp

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -189,12 +189,10 @@ int main(int argc, char ** argv) {
189189

190190
int32_t nelements = sizex*sizey;
191191

192-
std::vector<int64_t> hist_cur(1 << 4, 0);
193-
194192
// Set up a the benchmark matrices
195193
// printf("Creating new tensor q11 & Running quantize\n");
196194
struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
197-
ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], hist_cur.data(), nullptr);
195+
ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], nullptr);
198196

199197
// Set up a the compute graph
200198
// printf("Creating new tensor q31\n");
@@ -207,7 +205,7 @@ int main(int argc, char ** argv) {
207205
// Set up a second graph computation to make sure we override the CPU cache lines
208206
// printf("Creating new tensor q12 & Running quantize\n");
209207
struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
210-
ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], hist_cur.data(), nullptr);
208+
ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], nullptr);
211209

212210
// printf("Creating new tensor q32\n");
213211
struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2);

examples/llava/clip.cpp

Lines changed: 1 addition & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -1862,7 +1862,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
18621862

18631863
std::vector<uint8_t> work(512);
18641864
std::vector<float> conv_buf(512);
1865-
std::vector<int64_t> hist_all(1 << 4, 0);
18661865
size_t total_size_org = 0;
18671866
size_t total_size_new = 0;
18681867

@@ -1917,48 +1916,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
19171916
}
19181917
new_data = work.data();
19191918

1920-
std::vector<int64_t> hist_cur(1 << 4, 0);
1921-
1922-
switch (new_type) {
1923-
case GGML_TYPE_Q4_0: {
1924-
new_size = ggml_quantize_q4_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1925-
} break;
1926-
case GGML_TYPE_Q4_1: {
1927-
new_size = ggml_quantize_q4_1(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1928-
} break;
1929-
case GGML_TYPE_Q5_0: {
1930-
new_size = ggml_quantize_q5_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1931-
} break;
1932-
case GGML_TYPE_Q5_1: {
1933-
new_size = ggml_quantize_q5_1(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1934-
} break;
1935-
case GGML_TYPE_Q8_0: {
1936-
new_size = ggml_quantize_q8_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1937-
} break;
1938-
case GGML_TYPE_Q2_K: {
1939-
new_size = ggml_quantize_q2_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1940-
} break;
1941-
case GGML_TYPE_Q3_K: {
1942-
new_size = ggml_quantize_q3_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1943-
} break;
1944-
case GGML_TYPE_Q4_K: {
1945-
new_size = ggml_quantize_q4_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1946-
} break;
1947-
case GGML_TYPE_Q5_K: {
1948-
new_size = ggml_quantize_q5_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1949-
} break;
1950-
case GGML_TYPE_Q6_K: {
1951-
new_size = ggml_quantize_q6_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
1952-
} break;
1953-
default: {
1954-
fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, new_type);
1955-
return false;
1956-
}
1957-
}
1958-
1959-
for (size_t j = 0; j < hist_cur.size(); ++j) {
1960-
hist_all[j] += hist_cur[j];
1961-
}
1919+
new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
19621920
} else {
19631921
new_type = cur->type;
19641922
new_data = cur->data;
@@ -1993,17 +1951,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
19931951
{
19941952
printf("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
19951953
printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
1996-
1997-
int64_t sum_all = 0;
1998-
for (size_t i = 0; i < hist_all.size(); ++i) {
1999-
sum_all += hist_all[i];
2000-
}
2001-
2002-
printf("%s: hist: ", __func__);
2003-
for (size_t i = 0; i < hist_all.size(); ++i) {
2004-
printf("%5.3f ", hist_all[i] / (float)sum_all);
2005-
}
2006-
printf("\n");
20071954
}
20081955

20091956
return true;

0 commit comments

Comments (0)