@@ -111,31 +111,31 @@ endfunction()
# build test-tokenizer-0 target once and add many tests
llama_build(test-tokenizer-0.cpp)

-llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge       ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r      ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm   ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon         ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2          ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe      ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm      ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt            ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3          ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2          ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact         ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder      ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge       ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r      ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm   ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon         ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2          ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe      ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm      ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt            ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3          ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2          ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact         ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder      ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)

if (NOT WIN32)
    llama_test_cmd(
        ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh
        NAME test-tokenizers-ggml-vocabs
        WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-        ARGS https://huggingface.co/ggml-org/vocabs ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocabs
+        ARGS https://huggingface.co/ggml-org/vocabs ${PROJECT_SOURCE_DIR}/models/ggml-vocabs
    )
endif()

if (LLAMA_LLGUIDANCE)
-    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
+    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
endif()

if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
@@ -147,8 +147,8 @@ if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
    llama_build_and_test(test-chat.cpp)
    # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
    if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
-        llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
-        target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../tools/server)
+        llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
+        target_include_directories(test-json-schema-to-grammar PRIVATE ${PROJECT_SOURCE_DIR}/tools/server)
    endif()

    if (NOT GGML_BACKEND_DL)
@@ -161,20 +161,20 @@ if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
    llama_build(test-tokenizer-1-bpe.cpp)

    # TODO: disabled due to slowness
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila    ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon    ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2     ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox  ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt       ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact    ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-aquila.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2     ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox  ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-neox.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf --ignore-merges)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt       ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact    ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)

    # build test-tokenizer-1-spm target once and add many tests
    llama_build(test-tokenizer-1-spm.cpp)

-    llama_test(test-tokenizer-1-spm  NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
-    #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan  ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
+    llama_test(test-tokenizer-1-spm  NAME test-tokenizer-1-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
+    #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan  ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-baichuan.gguf)

    # llama_build_and_test(test-double-float.cpp) # SLOW
endif()
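
A note on the substitution this diff makes, for readers unfamiliar with CMake's path variables: ${CMAKE_CURRENT_SOURCE_DIR} is the directory of the CMakeLists.txt currently being processed (tests/ here), while ${PROJECT_SOURCE_DIR} is the top-level source directory of the enclosing project() call. Assuming tests/ sits directly under the project root, the two spellings resolve to the same models/ directory; a minimal sketch of that equivalence, not part of the diff:

    # Hypothetical check, assuming this file is <root>/tests/CMakeLists.txt:
    #   ${CMAKE_CURRENT_SOURCE_DIR}/../models  ->  <root>/tests/../models
    #   ${PROJECT_SOURCE_DIR}/models           ->  <root>/models
    message(STATUS "old path: ${CMAKE_CURRENT_SOURCE_DIR}/../models")
    message(STATUS "new path: ${PROJECT_SOURCE_DIR}/models")

The new spelling avoids the relative `..` hop, so the test paths stay correct even if this logic is later included from another directory.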