
Commit bdff33e (2 parents: f0564f9 + 21c0217)

Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.github/workflows/build.yml
#	README.md
#	ci/run.sh
#	docs/build.md
#	examples/CMakeLists.txt
#	examples/parallel/parallel.cpp
#	ggml/CMakeLists.txt
#	ggml/src/CMakeLists.txt
#	scripts/server-bench.py
#	src/llama-kv-cache-unified.cpp
#	tests/test-backend-ops.cpp
#	tools/batched-bench/batched-bench.cpp
#	tools/server/README.md


47 files changed: +3,116 additions, -497 deletions

common/arg.cpp

Lines changed: 37 additions & 0 deletions

@@ -1466,6 +1466,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.swa_full = true;
         }
     ).set_env("LLAMA_ARG_SWA_FULL"));
+    add_opt(common_arg(
+        {"--kv-unified", "-kvu"},
+        string_format("use single unified KV buffer for the KV cache of all sequences (default: %s)\n"
+            "[(more info)](https://github.com/ggml-org/llama.cpp/pull/14363)", params.kv_unified ? "true" : "false"),
+        [](common_params & params) {
+            params.kv_unified = true;
+        }
+    ).set_env("LLAMA_ARG_KV_SPLIT"));
     add_opt(common_arg(
         {"--no-context-shift"},
         string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
@@ -3425,5 +3433,34 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}));
 
+    // diffusion parameters
+    add_opt(common_arg(
+        { "--diffusion-steps" }, "N",
+        string_format("number of diffusion steps (default: %d)", params.diffusion.steps),
+        [](common_params & params, int value) { params.diffusion.steps = value; }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-eps" }, "F",
+        string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps),
+        [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-algorithm" }, "N",
+        string_format("diffusion algorithm: 0=ORIGIN, 1=MASKGIT_PLUS, 2=TOPK_MARGIN, 3=ENTROPY (default: %d)",
+                      params.diffusion.algorithm),
+        [](common_params & params, int value) { params.diffusion.algorithm = value; }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-alg-temp" }, "F",
+        string_format("algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp),
+        [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-visual" },
+        string_format("enable visual diffusion mode (show progressive generation) (default: %s)",
+                      params.diffusion.visual_mode ? "true" : "false"),
+        [](common_params & params) { params.diffusion.visual_mode = true; }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+
     return ctx_arg;
 }
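
Note that --diffusion-algorithm takes a bare integer, so the name-to-value mapping exists only in the help string above. A minimal C++ sketch of that mapping, with enum names lifted verbatim from the help text (the actual enum in the diffusion CLI source may be named differently):

#include <cstdio>

// Hypothetical mirror of the --diffusion-algorithm values; the names follow
// the help string above, not necessarily the real diffusion-cli source.
enum diffusion_alg {
    DIFFUSION_ALG_ORIGIN       = 0,
    DIFFUSION_ALG_MASKGIT_PLUS = 1,
    DIFFUSION_ALG_TOPK_MARGIN  = 2,
    DIFFUSION_ALG_ENTROPY      = 3,
};

static const char * diffusion_alg_name(int value) {
    switch (value) {
        case DIFFUSION_ALG_ORIGIN:       return "ORIGIN";
        case DIFFUSION_ALG_MASKGIT_PLUS: return "MASKGIT_PLUS";
        case DIFFUSION_ALG_TOPK_MARGIN:  return "TOPK_MARGIN";
        case DIFFUSION_ALG_ENTROPY:      return "ENTROPY";
        default:                         return "unknown";
    }
}

int main() {
    // e.g. the value passed via --diffusion-algorithm 2
    printf("algorithm 2 = %s\n", diffusion_alg_name(2));
    return 0;
}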

common/common.cpp

Lines changed: 13 additions & 6 deletions

@@ -1013,15 +1013,21 @@ struct common_init_result common_init_from_params(common_params & params) {
         params.sampling.ignore_eos = false;
     }
 
-    if (params.sampling.ignore_eos) {
-        for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
-            if (llama_vocab_is_eog(vocab, i)) {
-                LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
-                params.sampling.logit_bias.push_back({i, -INFINITY});
-            }
+    // initialize once
+    for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
+        if (llama_vocab_is_eog(vocab, i)) {
+            LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
+            params.sampling.logit_bias_eog.push_back({i, -INFINITY});
         }
     }
 
+    if (params.sampling.ignore_eos) {
+        // add EOG biases to the active set of logit biases
+        params.sampling.logit_bias.insert(
+                params.sampling.logit_bias.end(),
+                params.sampling.logit_bias_eog.begin(), params.sampling.logit_bias_eog.end());
+    }
+
     if (params.sampling.penalty_last_n == -1) {
         LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
         params.sampling.penalty_last_n = llama_n_ctx(lctx);
@@ -1165,6 +1171,7 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.no_perf    = params.no_perf;
     cparams.op_offload = !params.no_op_offload;
     cparams.swa_full   = params.swa_full;
+    cparams.kv_unified = params.kv_unified;
 
     cparams.type_k = params.cache_type_k;
     cparams.type_v = params.cache_type_v;
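
The first hunk splits EOG handling into two phases: the biases are computed once into logit_bias_eog, then spliced into the active logit_bias set only when ignore_eos is requested, presumably so the precomputed list can be reused without rescanning the vocabulary. A self-contained sketch of the same pattern, with a stub standing in for llama_vocab_is_eog():

#include <cmath>
#include <cstdio>
#include <vector>

struct logit_bias { int token; float bias; };

// Stub for llama_vocab_is_eog(); common.cpp queries the real vocab here.
static bool is_eog_stub(int token) { return token == 2 || token == 7; }

int main() {
    const int  n_vocab    = 16;
    const bool ignore_eos = true;

    std::vector<logit_bias> bias_active; // corresponds to sampling.logit_bias
    std::vector<logit_bias> bias_eog;    // corresponds to sampling.logit_bias_eog

    // Phase 1: precompute the EOG biases once, regardless of ignore_eos.
    for (int i = 0; i < n_vocab; i++) {
        if (is_eog_stub(i)) {
            bias_eog.push_back({i, -INFINITY});
        }
    }

    // Phase 2: activate them only when EOG tokens should be suppressed.
    if (ignore_eos) {
        bias_active.insert(bias_active.end(), bias_eog.begin(), bias_eog.end());
    }

    printf("%zu EOG biases active\n", bias_active.size());
    return 0;
}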

common/common.h

Lines changed: 13 additions & 1 deletion

@@ -77,6 +77,7 @@ enum llama_example {
     LLAMA_EXAMPLE_LOOKUP,
     LLAMA_EXAMPLE_PARALLEL,
     LLAMA_EXAMPLE_TTS,
+    LLAMA_EXAMPLE_DIFFUSION,
 
     LLAMA_EXAMPLE_COUNT,
 };
@@ -173,7 +174,8 @@ struct common_params_sampling {
     std::vector<common_grammar_trigger> grammar_triggers; // optional triggers (for lazy grammars)
     std::set<llama_token> preserved_tokens;
 
-    std::vector<llama_logit_bias> logit_bias; // logit biases to apply
+    std::vector<llama_logit_bias> logit_bias;     // logit biases to apply
+    std::vector<llama_logit_bias> logit_bias_eog; // pre-calculated logit biases for EOG tokens
 
     // print the parameters into a string
     std::string print() const;
@@ -213,6 +215,14 @@ struct common_params_vocoder {
     bool use_guide_tokens = false; // enable guide tokens to improve TTS accuracy // NOLINT
 };
 
+struct common_params_diffusion {
+    int32_t steps       = 64;    // number of diffusion steps
+    float   eps         = 1e-3f; // epsilon for timesteps
+    int32_t algorithm   = 0;     // diffusion algorithm (0=ORIGIN, 1=MASKGIT_PLUS, 2=TOPK_MARGIN, 3=ENTROPY)
+    float   alg_temp    = 0.0f;  // algorithm temperature
+    bool    visual_mode = false; // show progressive diffusion on screen
+};
+
 enum common_reasoning_format {
     COMMON_REASONING_FORMAT_NONE,
     COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY, // Extract thinking tag contents and return as `message.reasoning_content`, or leave inline in <think> tags in stream mode
@@ -264,6 +274,7 @@ struct common_params {
     struct common_params_sampling    sampling;
     struct common_params_speculative speculative;
     struct common_params_vocoder     vocoder;
+    struct common_params_diffusion   diffusion;
 
     struct common_params_model model;
 
@@ -326,6 +337,7 @@ struct common_params {
     bool no_perf    = false; // disable performance metrics
     bool ctx_shift  = true;  // context shift on infinite text generation
     bool swa_full   = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
+    bool kv_unified = false; // enable unified KV cache
 
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
     bool use_mmap = true; // use mmap for faster loads
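
The defaults in common_params_diffusion are what the --diffusion-* help strings in common/arg.cpp echo back. A standalone mirror of the struct (duplicated here only so the snippet compiles without the llama.cpp headers) that prints those defaults:

#include <cstdint>
#include <cstdio>

// Local copy of common_params_diffusion as added above; the real definition
// lives in common/common.h.
struct common_params_diffusion {
    int32_t steps       = 64;    // number of diffusion steps
    float   eps         = 1e-3f; // epsilon for timesteps
    int32_t algorithm   = 0;     // 0=ORIGIN, 1=MASKGIT_PLUS, 2=TOPK_MARGIN, 3=ENTROPY
    float   alg_temp    = 0.0f;  // algorithm temperature
    bool    visual_mode = false; // show progressive diffusion on screen
};

int main() {
    common_params_diffusion d;
    printf("steps=%d eps=%.6f algorithm=%d alg_temp=%.3f visual=%s\n",
           d.steps, (double) d.eps, d.algorithm, (double) d.alg_temp,
           d.visual_mode ? "true" : "false");
    return 0;
}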

convert_hf_to_gguf.py

Lines changed: 100 additions & 30 deletions

@@ -669,6 +669,36 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
         # or pull the latest version of the model from Huggingface
         # don't edit the hashes manually!
+        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
+            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
+            res = "chatglm-bpe"
+        if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
+            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
+            res = "chatglm-bpe"
+        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
+            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
+            res = "glm4"
+        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
+            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
+            res = "minerva-7b"
+        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
+            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
+            res = "hunyuan"
+        if chkhsh == "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6":
+            # ref: https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base
+            res = "falcon-h1"
+        if chkhsh == "60476e1243776c4fb1b993dbd7a5f15ac22f83c80afdf425fa5ae01c8d44ef86":
+            # ref: https://huggingface.co/tiiuae/Falcon-H1-1B-Base
+            res = "falcon-h1"
+        if chkhsh == "3eda48b4c4dc7de733d1a8b3e3b4a85243dbbf704da2ee9d42c6beced8897896":
+            # ref: https://huggingface.co/tiiuae/Falcon-H1-7B-Base
+            res = "falcon-h1"
+        if chkhsh == "48f8e02c0359c0bbdd82f26909171fac1c18a457bb47573ed1fe3bbb2c1cfd4b":
+            # ref: https://huggingface.co/tiiuae/Falcon-H1-34B-Base
+            res = "falcon-h1"
+        if chkhsh == "81212dc7cdb7e0c1074ca62c5aeab0d43c9f52b8a737be7b12a777c953027890":
+            # ref: https://huggingface.co/moonshotai/Kimi-K2-Base
+            res = "kimi-k2"
         if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
             # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B
             res = "llama-bpe"
@@ -804,45 +834,15 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
             # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
             res = "seed-coder"
-        if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
-            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
-            res = "chatglm-bpe"
-        if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
-            # ref: https://huggingface.co/THUDM/glm-4-9b-chat
-            res = "chatglm-bpe"
-        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
-            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
-            res = "glm4"
-        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
-            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
-            res = "minerva-7b"
-        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
-            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
-            res = "hunyuan"
         if chkhsh == "b0a6b1c0bd5998ebd9df08611efde34a4ff03faed45ae09c43e6b31ebd4b94cf":
             # ref: https://huggingface.co/skt/A.X-4.0
             res = "a.x-4.0"
-        if chkhsh == "a6b57017d60e6edb4d88ecc2845188e0eb333a70357e45dcc9b53964a73bbae6":
-            # ref: https://huggingface.co/tiiuae/Falcon-H1-0.5B-Base
-            res = "falcon-h1"
-        if chkhsh == "60476e1243776c4fb1b993dbd7a5f15ac22f83c80afdf425fa5ae01c8d44ef86":
-            # ref: https://huggingface.co/tiiuae/Falcon-H1-1B-Base
-            res = "falcon-h1"
-        if chkhsh == "3eda48b4c4dc7de733d1a8b3e3b4a85243dbbf704da2ee9d42c6beced8897896":
-            # ref: https://huggingface.co/tiiuae/Falcon-H1-7B-Base
-            res = "falcon-h1"
-        if chkhsh == "48f8e02c0359c0bbdd82f26909171fac1c18a457bb47573ed1fe3bbb2c1cfd4b":
-            # ref: https://huggingface.co/tiiuae/Falcon-H1-34B-Base
-            res = "falcon-h1"
         if chkhsh == "f6791d196f87ce6b56a7d234be618e0d58f8cda3549416635b2bebcd22cd95c4":
             # ref: https://huggingface.co/K-intelligence/Midm-2.0-Base-Instruct
             res = "midm-2.0"
         if chkhsh == "169bf0296a13c4d9b7672313f749eb36501d931022de052aad6e36f2bf34dd51":
             # ref: https://huggingface.co/LiquidAI/LFM2-Tokenizer
             res = "lfm2"
-        if chkhsh == "81212dc7cdb7e0c1074ca62c5aeab0d43c9f52b8a737be7b12a777c953027890":
-            # ref: https://huggingface.co/moonshotai/Kimi-K2-Base
-            res = "kimi-k2"
 
         if res is None:
             logger.warning("\n")
@@ -2778,6 +2778,76 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         yield from super().modify_tensors(data_torch, name, bid)
 
 
+@ModelBase.register("DreamModel")
+class DreamModel(TextModel):
+    model_arch = gguf.MODEL_ARCH.DREAM
+
+    def get_vocab_base(self) -> tuple[list[str], list[int], str]:
+        tokens: list[str] = []
+        toktypes: list[int] = []
+
+        from transformers import AutoTokenizer
+        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
+
+        vocab_dict = tokenizer.get_vocab()
+        vocab_size = self.hparams.get("vocab_size", len(vocab_dict))
+        assert max(vocab_dict.values()) < vocab_size
+
+        tokpre = self.get_vocab_base_pre(tokenizer)
+
+        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in vocab_dict.items()}
+        added_vocab = tokenizer.get_added_vocab()
+
+        for i in range(vocab_size):
+            if i not in reverse_vocab:
+                tokens.append(f"[PAD{i}]")
+                toktypes.append(gguf.TokenType.UNUSED)
+            elif reverse_vocab[i] in added_vocab:
+                tokens.append(reverse_vocab[i])
+                # Check if it's a special token - treat special tokens as CONTROL tokens
+                if hasattr(tokenizer, 'added_tokens_decoder') and i in tokenizer.added_tokens_decoder:
+                    if tokenizer.added_tokens_decoder[i].special:
+                        toktypes.append(gguf.TokenType.CONTROL)
+                    else:
+                        toktypes.append(gguf.TokenType.USER_DEFINED)
+                else:
+                    # Fallback: treat all added vocab as control tokens for special tokens like <|im_start|>
+                    toktypes.append(gguf.TokenType.CONTROL)
+            else:
+                tokens.append(reverse_vocab[i])
+                toktypes.append(gguf.TokenType.NORMAL)
+
+        return tokens, toktypes, tokpre
+
+    def set_vocab(self):
+        try:
+            self._set_vocab_sentencepiece()
+        except FileNotFoundError:
+            self._set_vocab_gpt2()
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        self._try_set_pooling_type()
+
+        # Dream models use non-causal attention for diffusion
+        self.gguf_writer.add_causal_attention(False)
+
+        # Handle RoPE scaling similar to Qwen2
+        rope_scaling = self.hparams.get("rope_scaling") or {}
+        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
+            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
+
+        # Add Dream-specific parameters
+        mask_token_id = self.hparams.get("mask_token_id")
+        if mask_token_id is not None:
+            self.gguf_writer.add_mask_token_id(mask_token_id)
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # Dream model tensors should be mapped directly since it's the base model
+        yield from super().modify_tensors(data_torch, name, bid)
+
+
 @ModelBase.register("Ernie4_5_ForCausalLM")
 class Ernie4_5Model(TextModel):
     model_arch = gguf.MODEL_ARCH.ERNIE4_5
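
DreamModel records two pieces of diffusion-specific metadata: non-causal attention and, when the checkpoint provides one, mask_token_id. A hedged sketch of inspecting the latter in a converted file through ggml's public gguf C API, assuming the conventional key name "tokenizer.ggml.mask_token_id" for what add_mask_token_id() writes (header path and key name may vary by ggml version):

#include <cstdio>

#include "gguf.h" // ggml's gguf C API

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    // read metadata only; do not allocate tensor data
    struct gguf_init_params params = { /*no_alloc =*/ true, /*ctx =*/ nullptr };
    struct gguf_context * ctx = gguf_init_from_file(argv[1], params);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to read %s\n", argv[1]);
        return 1;
    }

    // assumed key name; see the set_gguf_parameters() hunk above
    const int64_t kid = gguf_find_key(ctx, "tokenizer.ggml.mask_token_id");
    if (kid >= 0) {
        printf("mask_token_id = %u\n", gguf_get_val_u32(ctx, kid));
    } else {
        printf("no mask_token_id stored\n");
    }

    gguf_free(ctx);
    return 0;
}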

convert_hf_to_gguf_update.py

Lines changed: 7 additions & 6 deletions

@@ -232,19 +232,14 @@ def get_existing_models(convert_py):
 # generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function:
 
 src_ifs = ""
-for model in [*all_models, *pre_computed_hashes]:
+for model in [*pre_computed_hashes, *all_models]:
     name = model["name"]
     tokt = model["tokt"]
    chkhsh = model.get("chkhsh")
 
     if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM:
         continue
 
-    # Skip if the tokenizer folder does not exist or there are other download issues previously
-    if not os.path.exists(f"models/tokenizers/{name}"):
-        logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
-        continue
-
     # create the tokenizer
     if chkhsh is not None:
         # if the model has a pre-computed hash, use it
@@ -254,6 +249,12 @@ def get_existing_models(convert_py):
         chkhsh = existing_models[name]
     else:
         # otherwise, compute the hash of the tokenizer
+
+        # Skip if the tokenizer folder does not exist or there are other download issues previously
+        if not os.path.exists(f"models/tokenizers/{name}"):
+            logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+            continue
+
         try:
             logger.info(f"Loading tokenizer from {f'models/tokenizers/{name}'}...")
             if name == "t5":

examples/diffusion/CMakeLists.txt

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+set(TARGET llama-diffusion-cli)
+add_executable(${TARGET} diffusion-cli.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE llama common ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
