Commit 5a9753b

llama : remove sampling from llama_context

ggml-ci

1 parent 97ab664

25 files changed: +75 -137 lines
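The common thread across the hunks below: sampling state no longer lives inside llama_context, so callers create a llama_sampling object from the model and manage its lifetime themselves, and the RNG seed moves out of llama_context_params / gpt_params into the sampling parameters. A minimal sketch of the resulting lifecycle, using only calls that appear in this diff (error handling omitted; this is an illustration, not part of the commit):

#include "llama.h"

// Sketch of the post-commit pattern, assuming a loaded model; the calls
// mirror examples/batched/batched.cpp below.
static void sampling_lifecycle(llama_model * model) {
    llama_context_params cparams = llama_context_default_params();
    // cparams.seed is gone -- the seed now belongs to the sampling side

    llama_context * ctx = llama_new_context_with_model(model, cparams);

    // before: llama_sampling * smpl = llama_get_sampling(ctx);
    // after:  the sampler is created independently of the context
    llama_sampling * smpl = llama_sampling_init(model, /*grammar*/ nullptr, /*root*/ nullptr);

    // ... decode with ctx, sample with smpl ...

    llama_sampling_free(smpl); // the caller now owns the sampler
    llama_free(ctx);
}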

common/common.cpp

Lines changed: 5 additions & 5 deletions

@@ -264,6 +264,10 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
         params.kv_overrides.back().key[0] = 0;
     }
 
+    if (params.sparams.seed == LLAMA_DEFAULT_SEED) {
+        params.sparams.seed = time(NULL);
+    }
+
     return true;
 }
 
@@ -294,8 +298,6 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
 
     if (arg == "-s" || arg == "--seed") {
         CHECK_ARG
-        // TODO: this is temporary, in the future the sampling state will be moved fully to llama_sampling_context.
-        params.seed = std::stoul(argv[i]);
         sparams.seed = std::stoul(argv[i]);
         return true;
     }
@@ -1414,7 +1416,6 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", " --verbose-prompt", "print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false" });
     options.push_back({ "*", " --no-display-prompt", "don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false" });
     options.push_back({ "*", "-co, --color", "colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false" });
-    options.push_back({ "*", "-s, --seed SEED", "RNG seed (default: %d, use random seed for < 0)", params.seed });
     options.push_back({ "*", "-t, --threads N", "number of threads to use during generation (default: %d)", params.n_threads });
     options.push_back({ "*", "-tb, --threads-batch N", "number of threads to use during batch and prompt processing (default: same as --threads)" });
     options.push_back({ "speculative", "-td, --threads-draft N", "number of threads to use during generation (default: same as --threads)" });
@@ -1465,6 +1466,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_param
         " --spm-infill", "use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: %s)", params.spm_infill ? "enabled" : "disabled" });
 
     options.push_back({ "sampling" });
+    options.push_back({ "*", "-s, --seed SEED", "RNG seed (default: %d, use random seed for < 0)", sparams.seed });
     options.push_back({ "*", " --samplers SAMPLERS", "samplers that will be used for generation in the order, separated by \';\'\n"
                        "(default: %s)", sampler_type_names.c_str() });
     options.push_back({ "*", " --sampling-seq SEQUENCE",
@@ -2237,7 +2239,6 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.n_ubatch = params.n_ubatch;
     cparams.n_threads = params.n_threads;
     cparams.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
-    cparams.seed = params.seed;
     cparams.logits_all = params.logits_all;
     cparams.embeddings = params.embedding;
     cparams.rope_scaling_type = params.rope_scaling_type;
@@ -3247,7 +3248,6 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
 
     fprintf(stream, "rope_freq_base: %f # default: 10000.0\n", params.rope_freq_base);
     fprintf(stream, "rope_freq_scale: %f # default: 1.0\n", params.rope_freq_scale);
-    fprintf(stream, "seed: %u # default: -1 (random seed)\n", params.seed);
     fprintf(stream, "simple_io: %s # default: false\n", params.simple_io ? "true" : "false");
     fprintf(stream, "cont_batching: %s # default: false\n", params.cont_batching ? "true" : "false");
     fprintf(stream, "flash_attn: %s # default: false\n", params.flash_attn ? "true" : "false");

common/common.h

Lines changed: 0 additions & 2 deletions

@@ -68,8 +68,6 @@ enum dimre_method {
 };
 
 struct gpt_params {
-    uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed
-
     int32_t n_threads = cpu_get_num_math();
     int32_t n_threads_draft = -1;
    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)

common/sampling.cpp

Lines changed: 2 additions & 13 deletions

@@ -3,19 +3,10 @@
 #include <random>
 
 struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params, const struct llama_model * model) {
-    auto result = llama_sampling_init(params, llama_sampling_init(model, params.grammar.c_str(), "root"));
-
-    result->owned = true;
-
-    return result;
-}
-
-struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params, struct llama_sampling * smpl) {
     struct llama_sampling_context * result = new llama_sampling_context();
 
     result->params = params;
-    result->owned = false;
-    result->smpl = smpl;
+    result->smpl = llama_sampling_init(model, params.grammar.c_str(), "root");
 
     result->prev.resize(params.n_prev);
 
@@ -27,9 +18,7 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_
 }
 
 void llama_sampling_free(struct llama_sampling_context * ctx) {
-    if (ctx->owned) {
-        llama_sampling_free(ctx->smpl);
-    }
+    llama_sampling_free(ctx->smpl);
 
     delete ctx;
 }

common/sampling.h

Lines changed: 0 additions & 3 deletions

@@ -71,8 +71,6 @@ struct llama_sampling_context {
     // mirostat sampler state
     float mirostat_mu;
 
-    bool owned;
-
     llama_sampling * smpl;
 
     // TODO: replace with ring-buffer
@@ -86,7 +84,6 @@ struct llama_sampling_context {
 
 // Create a new sampling context instance.
 struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params, const struct llama_model * model);
-struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_params & params, struct llama_sampling * smpl);
 
 void llama_sampling_free(struct llama_sampling_context * ctx);
 
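With the overload that accepted an existing llama_sampling removed, the common-layer context is always constructed from a model and always owns its internal sampler, which is why the owned flag goes away. A hedged sketch of the surviving entry point, matching the call shape used by infill.cpp below (the sampling loop itself is elided):

#include "sampling.h"

// Sketch only: sparams and model are assumed to come from gpt_params,
// as in the examples.
static void common_sampling_lifecycle(const llama_sampling_params & sparams, const llama_model * model) {
    struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams, model);

    // ... the usual sampling loop over ctx_sampling ...

    // now unconditionally frees the internal llama_sampling as well,
    // since the context always owns it
    llama_sampling_free(ctx_sampling);
}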

examples/batched.swift/Sources/main.swift

Lines changed: 9 additions & 5 deletions

@@ -27,7 +27,6 @@ guard let model = llama_load_model_from_file(modelPath.cString(using: .utf8), mo
     print("Failed to load model")
     exit(1)
 }
-
 defer {
     llama_free_model(model)
 }
@@ -37,24 +36,29 @@ var tokens = tokenize(text: prompt, add_bos: true)
 let n_kv_req = UInt32(tokens.count) + UInt32((n_len - Int(tokens.count)) * n_parallel)
 
 var context_params = llama_context_default_params()
-context_params.seed = 1234
 context_params.n_ctx = n_kv_req
 context_params.n_batch = UInt32(max(n_len, n_parallel))
 context_params.n_threads = 8
 context_params.n_threads_batch = 8
 
 let context = llama_new_context_with_model(model, context_params)
-let smpl = llama_get_sampling(context)
-
 guard context != nil else {
     print("Failed to initialize context")
     exit(1)
 }
-
 defer {
     llama_free(context)
 }
 
+let smpl = llama_sampling_init(model, nil, nil)
+guard smpl != nil else {
+    print("Failed to initialize sampling")
+    exit(1)
+}
+defer {
+    llama_sampling_free(smpl)
+}
+
 let n_ctx = llama_n_ctx(context)
 
 print("\nn_len = \(n_len), n_ctx = \(n_ctx), n_batch = \(context_params.n_batch), n_parallel = \(n_parallel), n_kv_req = \(n_kv_req)\n")

examples/batched/batched.cpp

Lines changed: 1 addition & 1 deletion

@@ -64,7 +64,7 @@ int main(int argc, char ** argv) {
     ctx_params.n_batch = std::max(n_predict, n_parallel);
 
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
-    llama_sampling * smpl = llama_get_sampling(ctx);
+    llama_sampling * smpl = llama_sampling_init(model, nullptr, nullptr);
 
     if (ctx == NULL) {
         fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);

examples/embedding/embedding.cpp

Lines changed: 1 addition & 7 deletions

@@ -68,13 +68,7 @@ int main(int argc, char ** argv) {
 
     print_build_info();
 
-    if (params.seed == LLAMA_DEFAULT_SEED) {
-        params.seed = time(NULL);
-    }
-
-    fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
-
-    std::mt19937 rng(params.seed);
+    LOG_TEE("%s: seed = %u\n", __func__, params.sparams.seed);
 
     llama_backend_init();
     llama_numa_init(params.numa);

examples/eval-callback/eval-callback.cpp

Lines changed: 0 additions & 2 deletions

@@ -151,8 +151,6 @@ int main(int argc, char ** argv) {
 
     print_build_info();
 
-    std::mt19937 rng(params.seed);
-
     llama_backend_init();
     llama_numa_init(params.numa);
 
examples/gritlm/gritlm.cpp

Lines changed: 6 additions & 4 deletions

@@ -92,11 +92,10 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
     return result;
 }
 
-static std::string generate(llama_context * ctx, const std::string & prompt, bool stream) {
+static std::string generate(llama_context * ctx, llama_sampling * smpl, const std::string & prompt, bool stream) {
     std::string result;
 
     const llama_model * model = llama_get_model(ctx);
-    llama_sampling * smpl = llama_get_sampling(ctx);
     llama_token eos_token = llama_token_eos(model);
 
     llama_kv_cache_clear(ctx);
@@ -117,7 +116,7 @@ static std::string generate(llama_context * ctx, const std::string & prompt, boo
         inputs.clear();
 
         llama_decode(ctx, bat);
-        auto logits = llama_get_logits_ith(ctx, bat.n_tokens - 1);
+        auto * logits = llama_get_logits_ith(ctx, bat.n_tokens - 1);
 
         auto candidates = std::vector<llama_token_data>(llama_n_vocab(model));
         auto n_candidates = (int32_t)candidates.size();
@@ -173,6 +172,8 @@ int main(int argc, char * argv[]) {
     // create generation context
     llama_context * ctx = llama_new_context_with_model(model, cparams);
 
+    llama_sampling * smpl = llama_sampling_init(model, nullptr, nullptr);
+
     // ### Embedding/Representation ###
     // samples taken from: https://github.com/ContextualAI/gritlm#basic
     {
@@ -209,9 +210,10 @@ int main(int argc, char * argv[]) {
     // GritLM models are not finetuned with system prompts, as you can just include system-like instructions together with your user instruction
     {
         const std::string prompt = "<|user|>\nPlease write me a poem about my recent hike of Mt. Fuji at midnight in the style of Shakespeare.\n<|assistant|>\n";
-        std::string response = generate(ctx, prompt, true);
+        std::string response = generate(ctx, smpl, prompt, true);
     }
 
+    llama_sampling_free(smpl);
     llama_free(ctx);
     llama_free_model(model);
     llama_backend_free();
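The gritlm hunks show the knock-on effect for helpers: a function that used to pull the sampler out of the context now takes it as an explicit parameter, and main() threads it through. A sketch of that shape, with generate standing in for any such helper (its definition lives in examples/gritlm/gritlm.cpp and is elided here):

#include <string>
#include "llama.h"

// declaration as changed by this commit; definition elided
static std::string generate(llama_context * ctx, llama_sampling * smpl,
                            const std::string & prompt, bool stream);

static void run(llama_model * model, llama_context_params cparams) {
    llama_context  * ctx  = llama_new_context_with_model(model, cparams);
    llama_sampling * smpl = llama_sampling_init(model, nullptr, nullptr);

    std::string response = generate(ctx, smpl, "<|user|>\nhello\n<|assistant|>\n", true);

    llama_sampling_free(smpl); // sampler lifetime is now independent of ctx
    llama_free(ctx);
}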

examples/infill/infill.cpp

Lines changed: 3 additions & 10 deletions

@@ -156,16 +156,9 @@ int main(int argc, char ** argv) {
         LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
     }
 
-    LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
-    LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);
+    print_build_info();
 
-    if (params.seed == LLAMA_DEFAULT_SEED) {
-        params.seed = time(NULL);
-    }
-
-    LOG_TEE("%s: seed = %u\n", __func__, params.seed);
-
-    std::mt19937 rng(params.seed);
+    LOG_TEE("%s: seed = %u\n", __func__, params.sparams.seed);
 
     LOG("%s: llama backend init\n", __func__);
     llama_backend_init();
@@ -351,7 +344,7 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd;
 
-    ctx_sampling = llama_sampling_init(sparams, llama_get_sampling(ctx));
+    ctx_sampling = llama_sampling_init(sparams, model);
 
     while (n_remain != 0 || params.interactive) {
         // predict
