@@ -42,6 +42,34 @@ function(llama_test target)
     set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
 endfunction()
 
+function(llama_test_cmd target)
+    include(CMakeParseArguments)
+    set(options)
+    set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
+    set(multiValueArgs ARGS)
+    cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+    if (NOT DEFINED LLAMA_TEST_LABEL)
+        set(LLAMA_TEST_LABEL "main")
+    endif()
+    if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
+        set(LLAMA_TEST_WORKING_DIRECTORY .)
+    endif()
+    if (DEFINED LLAMA_TEST_NAME)
+        set(TEST_NAME ${LLAMA_TEST_NAME})
+    else()
+        set(TEST_NAME ${target})
+    endif()
+
+    add_test(
+        NAME ${TEST_NAME}
+        WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
+        COMMAND ${target}
+        ${LLAMA_TEST_ARGS})
+
+    set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
+endfunction()
+
 # Builds and runs a test source file.
 # Optional args:
 # - NAME: name of the executable & test target (defaults to the source file name without extension)
@@ -97,8 +125,14 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE
 llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact    ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
 llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
 
-# TODO: missing HF tokenizer for this model in convert_hf_to_gguf_update.py, see https://github.com/ggml-org/llama.cpp/pull/13847
-# llama_test(test-tokenizer-0 NAME test-tokenizer-0-nomic-bert-moe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-nomic-bert-moe.gguf)
+if (NOT WIN32)
+    llama_test_cmd(
+        ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh
+        NAME test-tokenizers-ggml-vocabs
+        WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
+        ARGS https://huggingface.co/ggml-org/vocabs ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocabs
+    )
+endif()
 
 
 if (LLAMA_LLGUIDANCE)
     llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
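
For reference, the new `llama_test_cmd` helper registers an arbitrary command (such as the shell script above) with ctest, whereas `llama_build_and_test` compiles a source file first. A minimal usage sketch follows; the script name, test name, and arguments are placeholders for illustration and are not part of this change — only the NAME, LABEL, WORKING_DIRECTORY, and ARGS keywords come from the helper itself:

# Hypothetical use of llama_test_cmd; the script and its arguments are made up.
llama_test_cmd(
    ${CMAKE_CURRENT_SOURCE_DIR}/run-extra-checks.sh         # command passed to add_test()
    NAME              test-extra-checks                      # defaults to the command path if omitted
    LABEL             main                                   # defaults to "main"
    WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}      # defaults to "."
    ARGS              --verbose                              # forwarded verbatim to the command
)

With the default label, `ctest -L main` picks such a test up together with the existing ones.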