Skip to content

Commit 6a41816

Browse files
committed
examples: suppress Windows compiler warnings
This commit suppresses Windows compiler warnings in the examples. With this commit and the previous one that handled compiler warnings in ggml, there are now no warnings generated on Windows.
1 parent 55c07c2 commit 6a41816

25 files changed

+147
-141
lines changed

examples/command/command.cpp

+7-7
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ static std::string transcribe(
186186
}
187187
}
188188

189-
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
189+
if (whisper_full(ctx, wparams, pcmf32.data(), (int)pcmf32.size()) != 0) {
190190
return "";
191191
}
192192

@@ -368,10 +368,10 @@ static int process_command_list(struct whisper_context * ctx, audio_async &audio
368368
wparams.audio_ctx = params.audio_ctx;
369369

370370
wparams.prompt_tokens = k_tokens.data();
371-
wparams.prompt_n_tokens = k_tokens.size();
371+
wparams.prompt_n_tokens = (int)k_tokens.size();
372372

373373
// run the transformer and a single decoding pass
374-
if (whisper_full(ctx, wparams, pcmf32_cur.data(), pcmf32_cur.size()) != 0) {
374+
if (whisper_full(ctx, wparams, pcmf32_cur.data(), (int)pcmf32_cur.size()) != 0) {
375375
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
376376
break;
377377
}
@@ -415,7 +415,7 @@ static int process_command_list(struct whisper_context * ctx, audio_async &audio
415415

416416
// normalize
417417
for (auto & p : probs_id) {
418-
p.first /= psum;
418+
p.first = (float)(p.first / psum);
419419
}
420420

421421
// sort descending
@@ -474,7 +474,7 @@ static int always_prompt_transcription(struct whisper_context * ctx, audio_async
474474

475475
const std::string k_prompt = params.prompt;
476476

477-
const int k_prompt_length = get_words(k_prompt).size();
477+
const int k_prompt_length = (int)get_words(k_prompt).size();
478478

479479
fprintf(stderr, "\n");
480480
fprintf(stderr, "%s: always-prompt mode\n", __func__);
@@ -624,7 +624,7 @@ static int process_general_transcription(struct whisper_context * ctx, audio_asy
624624
//printf("len command: %.4f\n", pcmf32_cur.size() / (float) WHISPER_SAMPLE_RATE);
625625

626626
// prepend 3 second of silence
627-
pcmf32_cur.insert(pcmf32_cur.begin(), 3.0f*WHISPER_SAMPLE_RATE, 0.0f);
627+
pcmf32_cur.insert(pcmf32_cur.begin(), (size_t)(3.0f*WHISPER_SAMPLE_RATE), 0.0f);
628628

629629
// prepend the prompt audio
630630
pcmf32_cur.insert(pcmf32_cur.begin(), pcmf32_prompt.begin(), pcmf32_prompt.end());
@@ -639,7 +639,7 @@ static int process_general_transcription(struct whisper_context * ctx, audio_asy
639639
// find the prompt in the text
640640
float best_sim = 0.0f;
641641
size_t best_len = 0;
642-
for (size_t n = 0.8*k_prompt.size(); n <= 1.2*k_prompt.size(); ++n) {
642+
for (size_t n = (size_t)(0.8*k_prompt.size()); n <= 1.2*k_prompt.size(); ++n) {
643643
if (n >= txt.size()) {
644644
break;
645645
}

examples/common-sdl.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -194,9 +194,9 @@ void audio_async::get(int ms, std::vector<float> & result) {
194194

195195
result.resize(n_samples);
196196

197-
int s0 = m_audio_pos - n_samples;
197+
int s0 = (int)(m_audio_pos - n_samples);
198198
if (s0 < 0) {
199-
s0 += m_audio.size();
199+
s0 += (int)m_audio.size();
200200
}
201201

202202
if (s0 + n_samples > m_audio.size()) {

examples/common.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -264,7 +264,7 @@ float similarity(const std::string & s0, const std::string & s1);
264264
static int rgb2xterm256(int r, int g, int b) {
265265
unsigned char cube[] = {0, 0137, 0207, 0257, 0327, 0377};
266266
int av, ir, ig, ib, il, qr, qg, qb, ql;
267-
av = r * .299 + g * .587 + b * .114 + .5;
267+
av = (int)(r * .299 + g * .587 + b * .114 + .5);
268268
ql = (il = av > 238 ? 23 : (av - 3) / 10) * 10 + 8;
269269
qr = cube[(ir = UNCUBE(r))];
270270
qg = cube[(ig = UNCUBE(g))];

examples/lsp/CMakeLists.txt

+2
Original file line numberDiff line numberDiff line change
@@ -6,4 +6,6 @@ if (WHISPER_SDL2)
66
include(DefaultTargetOptions)
77

88
target_link_libraries(${TARGET} PRIVATE common json_cpp common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
9+
10+
target_compile_definitions(lsp PRIVATE _CRT_SECURE_NO_WARNINGS)
911
endif ()

examples/lsp/lsp.cpp

+10-10
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ static uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_pa
122122
std::this_thread::sleep_for(milliseconds(500 - (time_now - start_time)));
123123
time_now = time_point_cast<milliseconds>(system_clock::now()).time_since_epoch().count();
124124
} else if (time_now - start_time > 1000) {
125-
audio.get(time_now-start_time, pcmf32);
125+
audio.get((int)(time_now-start_time), pcmf32);
126126
size_t max_offset = pcmf32.size() - WHISPER_SAMPLE_RATE;
127127
for(size_t offset=0;offset < max_offset;offset+=WHISPER_SAMPLE_RATE/10) {
128128
std::vector<float> audio_chunk(&pcmf32[offset], &pcmf32[offset+WHISPER_SAMPLE_RATE]);
@@ -138,17 +138,17 @@ static uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_pa
138138
}
139139
}
140140
size_t window_duration = std::max((uint64_t)1000, time_now-start_time);
141-
audio.get(window_duration, pcmf32);
141+
audio.get((int)window_duration, pcmf32);
142142
while (!::vad_simple(pcmf32, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
143143
std::this_thread::sleep_for(milliseconds(100));
144144
time_now = time_point_cast<milliseconds>(system_clock::now()).time_since_epoch().count();
145145
window_duration = std::max((uint64_t)1000,time_now-start_time);
146-
audio.get(window_duration, pcmf32);
146+
audio.get((int)window_duration, pcmf32);
147147
}
148148
if (time_now - start_time > maxlength_ms) {
149-
audio.get(maxlength_ms, pcmf32);
149+
audio.get((int)maxlength_ms, pcmf32);
150150
} else {
151-
audio.get(time_now - start_time, pcmf32);
151+
audio.get((int)(time_now - start_time), pcmf32);
152152
}
153153

154154
return time_now;
@@ -168,7 +168,7 @@ static json unguided_transcription(struct whisper_context * ctx, audio_async &au
168168
prompt_tokens.resize(n);
169169

170170
wparams.prompt_tokens = prompt_tokens.data();
171-
wparams.prompt_n_tokens = prompt_tokens.size();
171+
wparams.prompt_n_tokens = (int)prompt_tokens.size();
172172
}
173173
wparams.print_progress = false;
174174
wparams.print_special = params.print_special;
@@ -184,7 +184,7 @@ static json unguided_transcription(struct whisper_context * ctx, audio_async &au
184184
wparams.audio_ctx = params.audio_ctx;
185185
wparams.suppress_nst = true;
186186
// run the transformer and a single decoding pass
187-
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
187+
if (whisper_full(ctx, wparams, pcmf32.data(), (int)pcmf32.size()) != 0) {
188188
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
189189
throw json{
190190
{"code", -32803},
@@ -224,12 +224,12 @@ static json guided_transcription(struct whisper_context * ctx, audio_async &audi
224224
// TODO: Do some time testing. Does an overly long prompt slow down processing?
225225
// Set up command sets/precompute prompts
226226
wparams.prompt_tokens = cs.prompt_tokens.data();
227-
wparams.prompt_n_tokens = cs.prompt_tokens.size();
227+
wparams.prompt_n_tokens = (int)cs.prompt_tokens.size();
228228
// TODO: properly expose as option
229229
wparams.suppress_nst = true;
230230

231231
// run the transformer and a single decoding pass
232-
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
232+
if (whisper_full(ctx, wparams, pcmf32.data(), (int)pcmf32.size()) != 0) {
233233
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
234234
throw json{
235235
{"code", -32803},
@@ -322,7 +322,7 @@ static json register_commandset(struct whisper_context * ctx, json jparams, std:
322322
int n = whisper_tokenize(ctx, k_prompt.c_str(), cs.prompt_tokens.data(), 1024);
323323
cs.prompt_tokens.resize(n);
324324
// prepare response
325-
int index = commandset_list.size();
325+
int index = (int)commandset_list.size();
326326
commandset_list.push_back(cs);
327327
return json{{"index",index}};
328328
}

examples/server/CMakeLists.txt

+1
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_L
77

88
if (WIN32)
99
target_link_libraries(${TARGET} PRIVATE ws2_32)
10+
target_compile_definitions(${TARGET} PRIVATE _CRT_SECURE_NO_WARNINGS)
1011
endif()
1112

1213
install(TARGETS ${TARGET} RUNTIME)

examples/stream/CMakeLists.txt

+2
Original file line numberDiff line numberDiff line change
@@ -7,4 +7,6 @@ if (WHISPER_SDL2)
77
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
88

99
install(TARGETS ${TARGET} RUNTIME)
10+
11+
target_compile_definitions(${TARGET} PRIVATE _CRT_SECURE_NO_WARNINGS)
1012
endif ()

examples/stream/stream.cpp

+8-8
Original file line numberDiff line numberDiff line change
@@ -125,10 +125,10 @@ int main(int argc, char ** argv) {
125125
params.keep_ms = std::min(params.keep_ms, params.step_ms);
126126
params.length_ms = std::max(params.length_ms, params.step_ms);
127127

128-
const int n_samples_step = (1e-3*params.step_ms )*WHISPER_SAMPLE_RATE;
129-
const int n_samples_len = (1e-3*params.length_ms)*WHISPER_SAMPLE_RATE;
130-
const int n_samples_keep = (1e-3*params.keep_ms )*WHISPER_SAMPLE_RATE;
131-
const int n_samples_30s = (1e-3*30000.0 )*WHISPER_SAMPLE_RATE;
128+
const int n_samples_step = (int)(1e-3*params.step_ms )*WHISPER_SAMPLE_RATE;
129+
const int n_samples_len = (int)(1e-3*params.length_ms)*WHISPER_SAMPLE_RATE;
130+
const int n_samples_keep = (int)(1e-3*params.keep_ms )*WHISPER_SAMPLE_RATE;
131+
const int n_samples_30s = (int)(1e-3*30000.0 )*WHISPER_SAMPLE_RATE;
132132

133133
const bool use_vad = n_samples_step <= 0; // sliding window mode uses VAD
134134

@@ -265,7 +265,7 @@ int main(int argc, char ** argv) {
265265
std::this_thread::sleep_for(std::chrono::milliseconds(1));
266266
}
267267

268-
const int n_samples_new = pcmf32_new.size();
268+
const int n_samples_new = (int)pcmf32_new.size();
269269

270270
// take up to params.length_ms audio from previous iteration
271271
const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_keep + n_samples_len - n_samples_new));
@@ -328,9 +328,9 @@ int main(int argc, char ** argv) {
328328
wparams.temperature_inc = params.no_fallback ? 0.0f : wparams.temperature_inc;
329329

330330
wparams.prompt_tokens = params.no_context ? nullptr : prompt_tokens.data();
331-
wparams.prompt_n_tokens = params.no_context ? 0 : prompt_tokens.size();
331+
wparams.prompt_n_tokens = params.no_context ? 0 : (int)prompt_tokens.size();
332332

333-
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
333+
if (whisper_full(ctx, wparams, pcmf32.data(), (int)pcmf32.size()) != 0) {
334334
fprintf(stderr, "%s: failed to process audio\n", argv[0]);
335335
return 6;
336336
}
@@ -346,7 +346,7 @@ int main(int argc, char ** argv) {
346346
printf("\33[2K\r");
347347
} else {
348348
const int64_t t1 = (t_last - t_start).count()/1000000;
349-
const int64_t t0 = std::max(0.0, t1 - pcmf32.size()*1000.0/WHISPER_SAMPLE_RATE);
349+
const int64_t t0 = (int64_t)std::max(0.0, t1 - pcmf32.size()*1000.0/WHISPER_SAMPLE_RATE);
350350

351351
printf("\n");
352352
printf("### Transcription %d START | t0 = %d ms | t1 = %d ms\n", n_iter, (int) t0, (int) t1);

examples/talk-llama/CMakeLists.txt

+1
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ if (WHISPER_SDL2)
3030
if(WIN32)
3131
# It requires Windows 8.1 or later for PrefetchVirtualMemory
3232
target_compile_definitions(${TARGET} PRIVATE -D_WIN32_WINNT=0x0602)
33+
target_compile_definitions(${TARGET} PRIVATE _CRT_SECURE_NO_WARNINGS)
3334
endif()
3435

3536
include(DefaultTargetOptions)

examples/talk-llama/llama-adapter.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ bool llama_adapter_cvec::init(const llama_model & model) {
6464
tensors.reserve(hparams.n_layer);
6565
tensors.push_back(nullptr); // there's never a tensor for layer 0
6666
for (size_t il = 1; il < hparams.n_layer; il++) {
67-
ggml_backend_buffer_type_t buft = model.select_buft(il);
67+
ggml_backend_buffer_type_t buft = model.select_buft((int)il);
6868
ggml_context * ctx = ctx_for_buft(buft);
6969
if (!ctx) {
7070
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
@@ -165,11 +165,11 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
165165
// check metadata
166166
{
167167
auto get_kv_str = [&](const std::string & key) -> std::string {
168-
int id = gguf_find_key(ctx_gguf.get(), key.c_str());
168+
int id = (int)gguf_find_key(ctx_gguf.get(), key.c_str());
169169
return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id));
170170
};
171171
auto get_kv_f32 = [&](const std::string & key) -> float {
172-
int id = gguf_find_key(ctx_gguf.get(), key.c_str());
172+
int id = (int)gguf_find_key(ctx_gguf.get(), key.c_str());
173173
return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf.get(), id);
174174
};
175175
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
@@ -193,7 +193,7 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
193193
adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA));
194194
}
195195

196-
int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
196+
int n_tensors = (int)gguf_get_n_tensors(ctx_gguf.get());
197197

198198
// contexts for each buffer type
199199
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;

examples/talk-llama/llama-batch.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -127,10 +127,10 @@ void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & s
127127
}
128128
}
129129
if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
130-
ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
130+
ubatch.n_seq_tokens = ubatch.equal_seqs ? (int)length : 1;
131131
}
132-
ubatch.n_tokens += length;
133-
ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
132+
ubatch.n_tokens += (int)length;
133+
ubatch.n_seqs += ubatch.equal_seqs ? 1 : (int)length; // virtual sequences for simple splits
134134
seq.offset += length;
135135
seq.length -= length;
136136
n_tokens -= length;
@@ -286,7 +286,7 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0
286286
if (!batch.n_seq_id) {
287287
n_seq_id.resize(batch.n_tokens);
288288
for (int32_t i = 0; i < batch.n_tokens; i++) {
289-
n_seq_id[i] = seq_id_0.size();
289+
n_seq_id[i] = (llama_seq_id)seq_id_0.size();
290290
}
291291
batch.n_seq_id = n_seq_id.data();
292292
}

examples/talk-llama/llama-chat.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -571,7 +571,7 @@ int32_t llm_chat_apply_template(
571571
return -1;
572572
}
573573
dest = ss.str();
574-
return dest.size();
574+
return (int32_t)dest.size();
575575
}
576576

577577
// public interface

0 commit comments

Comments
 (0)