Commit 77c1d3a

Merge branch 'ggml-org:master' into master
2 parents: 9f7c0d2 + 9a390c4


23 files changed: +432 additions, -329 deletions


common/arg.cpp

Lines changed: 7 additions & 0 deletions
@@ -2451,6 +2451,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
         }
     ));
+    add_opt(common_arg(
+        {"--no-op-offload"},
+        string_format("disable offloading host tensor operations to device (default: %s)", params.no_op_offload ? "true" : "false"),
+        [](common_params & params) {
+            params.no_op_offload = true;
+        }
+    ));
     add_opt(common_arg(
         {"--lora"}, "FNAME",
         "path to LoRA adapter (can be repeated to use multiple adapters)",

common/common.cpp

Lines changed: 1 addition & 0 deletions
@@ -1113,6 +1113,7 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.offload_kqv = !params.no_kv_offload;
     cparams.flash_attn = params.flash_attn;
     cparams.no_perf = params.no_perf;
+    cparams.op_offload = !params.no_op_offload;
 
     if (params.reranking) {
         cparams.embeddings = true;

common/common.h

Lines changed: 1 addition & 0 deletions
@@ -336,6 +336,7 @@ struct common_params {
     bool no_kv_offload = false; // disable KV offloading
     bool warmup = true; // warmup run
     bool check_tensors = false; // validate tensor data
+    bool no_op_offload = false; // globally disable offload host tensor operations to device
 
     bool single_turn = false; // single turn chat conversation
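
Taken together, the three common/ hunks add a plain boolean and thread it through to the context parameters. A minimal, hedged sketch of the intended flow, using only names that appear in the diffs (how op_offload is consumed further down is assumed to match the ggml_backend_sched_new() change below):

    common_params params;                     // no_op_offload defaults to false
    params.no_op_offload = true;              // what the --no-op-offload handler sets
    llama_context_params cparams = common_context_params_to_llama(params);
    // cparams.op_offload == !params.no_op_offload == false, presumably passed on so that
    // the backend scheduler is created with op_offload disabled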

ggml/include/ggml-backend.h

Lines changed: 2 additions & 2 deletions
@@ -248,7 +248,7 @@ extern "C" {
     // preferrably to run on the same backend as the buffer
     ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
-    sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
+    sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false, true);
 
     // initialize buffers from a max size graph (optional)
     reserve_graph = build_graph(sched, max_batch_size);

@@ -289,7 +289,7 @@ extern "C" {
     typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
 
     // Initialize a backend scheduler, backends with low index are given priority over backends with high index
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
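
Since ggml_backend_sched_new() now takes a trailing bool op_offload, every existing call site needs one extra argument. A hedged sketch of an updated caller (variable names are illustrative, not from the diff); passing true keeps the previous behavior, false disables the offload heuristic for that scheduler:

    // minimal call-site sketch, assuming backends/n_backends are already set up
    ggml_backend_sched_t sched = ggml_backend_sched_new(
        backends, /*bufts     =*/ NULL,
        n_backends, GGML_DEFAULT_GRAPH_SIZE,
        /*parallel  =*/ false,
        /*op_offload=*/ true);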

ggml/src/ggml-backend.cpp

Lines changed: 6 additions & 2 deletions
@@ -674,6 +674,8 @@ struct ggml_backend_sched {
     char * context_buffer;
     size_t context_buffer_size;
 
+    bool op_offload;
+
     int debug;
 };
 

@@ -766,7 +768,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
-            if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
+            if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
                 for (int b = 0; b < src_backend_id; b++) {
                     if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                         SET_CAUSE(tensor, "1.off");

@@ -1452,7 +1454,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
         ggml_backend_buffer_type_t * bufts,
         int n_backends,
         size_t graph_size,
-        bool parallel) {
+        bool parallel,
+        bool op_offload) {
     GGML_ASSERT(n_backends > 0);
     GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
     GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);

@@ -1497,6 +1500,7 @@ ggml_backend_sched_new(
     }
 
     sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
+    sched->op_offload = op_offload;
 
     ggml_backend_sched_reset(sched);
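
In the scheduler the new argument is stored once at construction and consulted in a single place, the first pass of backend assignment. A condensed, hedged restatement of the gate (same expressions as the hunk above, pulled out for readability; the local name is illustrative):

    // offloading an op whose weight lives in a host buffer on the last (CPU) backend
    // is only considered when the scheduler was created with op_offload enabled
    const bool may_offload = sched->op_offload
        && src_backend_id == sched->n_backends - 1
        && ggml_backend_buffer_is_host(src->buffer);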

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 8 additions & 2 deletions
@@ -1909,13 +1909,19 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co
 static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft);
 
+    // If src0 is a temporary compute buffer it may have some padding that needs to be cleared for mul_mat_vec_q or mul_mat_q.
+    // But if src0 is also a view of another tensor then this cannot be done safely because it may overwrite valid tensor data.
+    // Therefore, in such cases use cuBLAS.
+    const bool bad_padding_clear = ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE
+        && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0) && src0->view_src;
+
     bool use_mul_mat_vec = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % 2 == 0 && src1->ne[1] == 1;
-    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
+    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
-    bool use_mul_mat_q = ggml_is_quantized(src0->type)
+    bool use_mul_mat_q = ggml_is_quantized(src0->type) && !bad_padding_clear
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
 
     bool any_gpus_with_slow_fp16 = false;
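
The new bad_padding_clear flag combines three facts about src0; a hedged decomposition of the expression above (the intermediate names are illustrative only):

    // src0 lives in a temporary compute buffer ...
    const bool in_compute_buffer = ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE;
    // ... its allocation is larger than its data, i.e. there is padding that would need clearing ...
    const bool has_padding = ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0);
    // ... and it is a view, so zeroing past ggml_nbytes(src0) could overwrite the parent tensor's data
    const bool is_view = src0->view_src != NULL;
    const bool bad_padding_clear = in_compute_buffer && has_padding && is_view;
    // when true, mul_mat_vec_q and mul_mat_q are ruled out and the multiplication
    // falls back to cuBLAS, which does not require the padding to be cleared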

ggml/src/ggml-cuda/mmq.cu

Lines changed: 2 additions & 2 deletions
@@ -91,11 +91,11 @@ void ggml_cuda_mul_mat_q(
 
     // If src0 is a temporary compute buffer, clear any potential padding.
     if (ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
-        GGML_ASSERT(ggml_is_contiguously_allocated(src0));
-        GGML_ASSERT(!src0->view_src);
         const size_t size_data = ggml_nbytes(src0);
         const size_t size_alloc = ggml_backend_buffer_get_alloc_size(src0->buffer, src0);
         if (size_alloc > size_data) {
+            GGML_ASSERT(ggml_is_contiguously_allocated(src0));
+            GGML_ASSERT(!src0->view_src);
             CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream));
         }
     }

ggml/src/ggml-cuda/mmvq.cu

Lines changed: 2 additions & 2 deletions
@@ -515,11 +515,11 @@ void ggml_cuda_mul_mat_vec_q(
 
     // If src0 is a temporary compute buffer, clear any potential padding.
    if (ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
-        GGML_ASSERT(ggml_is_contiguously_allocated(src0));
-        GGML_ASSERT(!src0->view_src);
         const size_t size_data = ggml_nbytes(src0);
         const size_t size_alloc = ggml_backend_buffer_get_alloc_size(src0->buffer, src0);
         if (size_alloc > size_data) {
+            GGML_ASSERT(ggml_is_contiguously_allocated(src0));
+            GGML_ASSERT(!src0->view_src);
             CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream));
         }
     }
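
In both mmq.cu and mmvq.cu the contiguity and non-view asserts move inside the size_alloc > size_data branch: the invariants only matter when there is actually padding to clear, and views with padding are now diverted to cuBLAS upstream by bad_padding_clear, so the relocated asserts are not expected to fire. A sketch of the resulting guard with the reasoning spelled out as comments (not verbatim code):

    if (size_alloc > size_data) {
        // padding exists: clearing it with cudaMemsetAsync is only safe when src0 is
        // contiguously allocated and not a view of another tensor
        GGML_ASSERT(ggml_is_contiguously_allocated(src0));
        GGML_ASSERT(!src0->view_src);
        CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream));
    }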

gguf-py/gguf/constants.py

Lines changed: 6 additions & 0 deletions
@@ -483,7 +483,9 @@ class MODEL_TENSOR(IntEnum):
     V_ENC_EMBD_PATCH = auto()
     V_ENC_EMBD_POS = auto()
     V_ENC_ATTN_Q = auto()
+    V_ENC_ATTN_Q_NORM = auto()
     V_ENC_ATTN_K = auto()
+    V_ENC_ATTN_K_NORM = auto()
     V_ENC_ATTN_V = auto()
     V_ENC_INPUT_NORM = auto()
     V_ENC_OUTPUT = auto()

@@ -742,7 +744,9 @@ class MODEL_TENSOR(IntEnum):
     MODEL_TENSOR.V_ENC_EMBD_PATCH: "v.patch_embd",
     MODEL_TENSOR.V_ENC_EMBD_POS: "v.position_embd",
     MODEL_TENSOR.V_ENC_ATTN_Q: "v.blk.{bid}.attn_q",
+    MODEL_TENSOR.V_ENC_ATTN_Q_NORM: "v.blk.{bid}.attn_q_norm",
     MODEL_TENSOR.V_ENC_ATTN_K: "v.blk.{bid}.attn_k",
+    MODEL_TENSOR.V_ENC_ATTN_K_NORM: "v.blk.{bid}.attn_k_norm",
     MODEL_TENSOR.V_ENC_ATTN_V: "v.blk.{bid}.attn_v",
     MODEL_TENSOR.V_ENC_INPUT_NORM: "v.blk.{bid}.ln1",
     MODEL_TENSOR.V_ENC_OUTPUT: "v.blk.{bid}.attn_out",

@@ -782,7 +786,9 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.V_ENC_EMBD_PATCH,
         MODEL_TENSOR.V_ENC_EMBD_POS,
         MODEL_TENSOR.V_ENC_ATTN_Q,
+        MODEL_TENSOR.V_ENC_ATTN_Q_NORM,
         MODEL_TENSOR.V_ENC_ATTN_K,
+        MODEL_TENSOR.V_ENC_ATTN_K_NORM,
         MODEL_TENSOR.V_ENC_ATTN_V,
         MODEL_TENSOR.V_ENC_INPUT_NORM,
         MODEL_TENSOR.V_ENC_OUTPUT,

gguf-py/gguf/tensor_mapping.py

Lines changed: 8 additions & 0 deletions
@@ -938,6 +938,10 @@ class TensorNameMap:
             "visual.blocks.{bid}.attn.q", # qwen2vl, generated
         ),
 
+        MODEL_TENSOR.V_ENC_ATTN_Q_NORM: (
+            "vision_tower.vision_model.encoder.layers.{bid}.attn.q_norm", # InternVL
+        ),
+
         MODEL_TENSOR.V_ENC_ATTN_K: (
             "vision_tower.vision_model.encoder.layers.{bid}.self_attn.k_proj",
             "vpm.encoder.layers.{bid}.self_attn.k_proj",

@@ -946,6 +950,10 @@ class TensorNameMap:
             "visual.blocks.{bid}.attn.k", # qwen2vl, generated
         ),
 
+        MODEL_TENSOR.V_ENC_ATTN_K_NORM: (
+            "vision_tower.vision_model.encoder.layers.{bid}.attn.k_norm", # InternVL
+        ),
+
         MODEL_TENSOR.V_ENC_ATTN_V: (
             "vision_tower.vision_model.encoder.layers.{bid}.self_attn.v_proj",
             "vpm.encoder.layers.{bid}.self_attn.v_proj",
