Skip to content

Commit c4c0a4d

Browse files
committed
Merge branch 'master' into gg/llama-kv-cache
2 parents 3753b30 + 51f311e commit c4c0a4d

File tree

23 files changed

+810
-29
lines changed

23 files changed

+810
-29
lines changed

CONTRIBUTING.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
- Verify that the perplexity and the performance are not affected negatively by your changes (use `llama-perplexity` and `llama-bench`)
77
- If you modified the `ggml` source, run the `test-backend-ops` tool to check whether different backend implementations of the `ggml` operators produce consistent results (this requires access to at least two different `ggml` backends)
88
- If you modified a `ggml` operator or added a new one, add the corresponding test cases to `test-backend-ops`
9+
- Create separate PRs for each feature or fix. Avoid combining unrelated changes in a single PR
910
- Consider allowing write access to your branch for faster reviews, as reviewers can push commits directly
1011
- If your PR becomes stale, don't hesitate to ping the maintainers in the comments
1112

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -847,7 +847,7 @@ ifdef GGML_MUSA
847847
CXX := $(MUSA_PATH)/bin/clang++
848848
MCC := $(CCACHE) $(MUSA_PATH)/bin/mcc
849849

850-
MUSAFLAGS = -x musa -mtgpu
850+
MUSAFLAGS = -fsigned-char -x musa -mtgpu
851851
MUSAFLAGS += $(foreach arch,$(subst ;, ,$(MUSA_ARCHITECTURES)),--cuda-gpu-arch=mp_$(arch))
852852

853853
ifdef GGML_CUDA_FORCE_MMQ

docs/build.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,14 @@ This provides GPU acceleration using the MUSA cores of your Moore Threads MTT GP
206206
cmake --build build --config Release
207207
```
208208

209+
For a static build:
210+
211+
```bash
212+
cmake -B build -DGGML_MUSA=ON \
213+
-DBUILD_SHARED_LIBS=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON
214+
cmake --build build --config Release
215+
```
216+
209217
The environment variable [`MUSA_VISIBLE_DEVICES`](https://docs.mthreads.com/musa-sdk/musa-sdk-doc-online/programming_guide/Z%E9%99%84%E5%BD%95/) can be used to specify which GPU(s) will be used.
210218

211219
The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enable unified memory in Linux. This allows swapping to system RAM instead of crashing when the GPU VRAM is exhausted.

examples/llava/clip.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2712,9 +2712,13 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
27122712

27132713
if (!ctx->has_glm_projector) {
27142714
struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
2715+
// The patches vector is used to get rows to index into the embeds with;
2716+
// we should skip dim 0 only if we have CLS to avoid going out of bounds
2717+
// when retrieving the rows.
2718+
int patch_offset = ctx->has_class_embedding ? 1 : 0;
27152719
int* patches_data = (int*)malloc(ggml_nbytes(patches));
27162720
for (int i = 0; i < num_patches; i++) {
2717-
patches_data[i] = i + 1;
2721+
patches_data[i] = i + patch_offset;
27182722
}
27192723
ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
27202724
free(patches_data);

examples/server/public/index.html.gz

37 Bytes
Binary file not shown.

examples/server/webui/src/components/ChatScreen.tsx

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -228,6 +228,7 @@ export default function ChatScreen() {
228228
value={inputMsg}
229229
onChange={(e) => setInputMsg(e.target.value)}
230230
onKeyDown={(e) => {
231+
if (e.nativeEvent.isComposing || e.keyCode === 229) return;
231232
if (e.key === 'Enter' && e.shiftKey) return;
232233
if (e.key === 'Enter' && !e.shiftKey) {
233234
e.preventDefault();

examples/server/webui/src/utils/llama-vscode.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ export const useVSCodeContext = (
4040

4141
window.addEventListener('message', handleMessage);
4242
return () => window.removeEventListener('message', handleMessage);
43-
}, []);
43+
}, [inputRef, setInputMsg]);
4444

4545
// Add a keydown listener that sends the "escapePressed" message to the parent window
4646
useEffect(() => {

ggml/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@ endif()
102102

103103
option(GGML_CPU_HBM "ggml: use memkind for CPU HBM" OFF)
104104
option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON)
105+
option(GGML_CPU_KLEIDIAI "ggml: use KleidiAI optimized kernels if applicable" OFF)
105106
option(GGML_AVX "ggml: enable AVX" ${INS_ENB})
106107
option(GGML_AVX_VNNI "ggml: enable AVX-VNNI" OFF)
107108
option(GGML_AVX2 "ggml: enable AVX2" ${INS_ENB})

ggml/include/ggml-cpu.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,7 @@ extern "C" {
9595
GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void);
9696
GGML_BACKEND_API int ggml_cpu_has_sve (void);
9797
GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes
98+
GGML_BACKEND_API int ggml_cpu_has_sme (void);
9899
// other
99100
GGML_BACKEND_API int ggml_cpu_has_riscv_v (void);
100101
GGML_BACKEND_API int ggml_cpu_has_vsx (void);

ggml/src/ggml-cpu/CMakeLists.txt

Lines changed: 96 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -111,21 +111,23 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
111111
function(check_arm_feature tag code)
112112
set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
113113
set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+${tag}")
114-
check_cxx_source_runs(
115-
"${code}"
116-
GGML_MACHINE_SUPPORTS_${tag}
117-
)
114+
check_cxx_source_runs("${code}" GGML_MACHINE_SUPPORTS_${tag})
118115
if (GGML_MACHINE_SUPPORTS_${tag})
119116
set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+${tag}" PARENT_SCOPE)
120117
else()
121-
set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+no${tag}" PARENT_SCOPE)
118+
set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+no${tag}")
119+
check_cxx_source_compiles("int main() { return 0; }" GGML_MACHINE_SUPPORTS_no${tag})
120+
if (GGML_MACHINE_SUPPORTS_no${tag})
121+
set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+no${tag}" PARENT_SCOPE)
122+
endif()
122123
endif()
123124
set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
124125
endfunction()
125126

126127
check_arm_feature(dotprod "#include <arm_neon.h>\nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }")
127128
check_arm_feature(i8mm "#include <arm_neon.h>\nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }")
128129
check_arm_feature(sve "#include <arm_sve.h>\nint main() { svfloat32_t _a, _b; volatile svfloat32_t _c = svadd_f32_z(svptrue_b8(), _a, _b); return 0; }")
130+
check_arm_feature(sme "#include <arm_sme.h>\n__arm_locally_streaming int main() { __asm__ volatile(\"smstart; smstop;\"); return 0; }")
129131

130132
list(APPEND ARCH_FLAGS "${ARM_MCPU_FLAG}${ARM_MCPU_FLAG_FIX}")
131133
else()
@@ -150,7 +152,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
150152
if (ARM_FEATURE_RESULT)
151153
message(WARNING "Failed to get ARM features")
152154
else()
153-
foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC)
155+
foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC SME)
154156
string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos)
155157
if (NOT ${feature_pos} EQUAL -1)
156158
message(STATUS "ARM feature ${feature} enabled")
@@ -316,6 +318,94 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
316318
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64)
317319
endif()
318320

321+
if (GGML_CPU_KLEIDIAI)
322+
message(STATUS "Using KleidiAI optimized kernels if applicable")
323+
324+
# Disable the KleidiAI tests
325+
set(KLEIDIAI_BUILD_TESTS OFF)
326+
327+
# Fetch KleidiAI sources:
328+
include(FetchContent)
329+
set(KLEIDIAI_COMMIT_TAG "v1.3.0")
330+
set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz")
331+
set(KLEIDIAI_ARCHIVE_MD5 "060bd2dc64642b091f461cc8dd7426d9")
332+
333+
if (POLICY CMP0135)
334+
cmake_policy(SET CMP0135 NEW)
335+
endif()
336+
337+
FetchContent_Declare(KleidiAI_Download
338+
URL ${KLEIDIAI_DOWNLOAD_URL}
339+
DOWNLOAD_EXTRACT_TIMESTAMP NEW
340+
URL_HASH MD5=${KLEIDIAI_ARCHIVE_MD5})
341+
342+
FetchContent_MakeAvailable(KleidiAI_Download)
343+
FetchContent_GetProperties(KleidiAI_Download
344+
SOURCE_DIR KLEIDIAI_SRC
345+
POPULATED KLEIDIAI_POPULATED)
346+
347+
if (NOT KLEIDIAI_POPULATED)
348+
message(FATAL_ERROR "KleidiAI source download failed.")
349+
endif()
350+
351+
add_compile_definitions(GGML_USE_CPU_KLEIDIAI)
352+
353+
# Remove kleidiai target after fetching it
354+
if (TARGET kleidiai)
355+
set_target_properties(kleidiai PROPERTIES EXCLUDE_FROM_ALL TRUE)
356+
endif()
357+
358+
list(APPEND GGML_CPU_SOURCES
359+
ggml-cpu/kleidiai/kleidiai.cpp
360+
ggml-cpu/kleidiai/kernels.cpp
361+
ggml-cpu/kleidiai/kleidiai.h
362+
ggml-cpu/kleidiai/kernels.h
363+
)
364+
365+
# KleidiAI
366+
include_directories(
367+
${KLEIDIAI_SRC}/
368+
${KLEIDIAI_SRC}/kai/
369+
${KLEIDIAI_SRC}/kai/ukernels/
370+
${KLEIDIAI_SRC}/kai/ukernels/matmul/
371+
${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/
372+
${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/)
373+
374+
set(ARCH_FLAGS_TEMP "${ARCH_FLAGS}")
375+
if (NOT ARCH_FLAGS_TEMP)
376+
string(REGEX MATCH "-march=[^ ]+" ARCH_FLAGS_TEMP "${CMAKE_C_FLAGS}")
377+
endif()
378+
string(FIND "${ARCH_FLAGS_TEMP}" "+dotprod" DOTPROD_ENABLED)
379+
string(FIND "${ARCH_FLAGS_TEMP}" "+i8mm" I8MM_ENABLED)
380+
string(FIND "${ARCH_FLAGS_TEMP}" "+sme" SME_ENABLED)
381+
382+
set(PRIVATE_ARCH_FLAGS ${ARCH_FLAGS})
383+
384+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32.c)
385+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.c)
386+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32_neon.c)
387+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.c)
388+
389+
if (NOT DOTPROD_ENABLED MATCHES -1)
390+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.c)
391+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.c)
392+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.c)
393+
endif()
394+
395+
if (NOT I8MM_ENABLED MATCHES -1)
396+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.c)
397+
endif()
398+
399+
if (NOT SME_ENABLED MATCHES -1)
400+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa.c)
401+
list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.c)
402+
set(PRIVATE_ARCH_FLAGS "${PRIVATE_ARCH_FLAGS}+sve+sve2")
403+
endif()
404+
405+
set_source_files_properties(${GGML_KLEIDIAI_SOURCES} PROPERTIES COMPILE_OPTIONS "${PRIVATE_ARCH_FLAGS}")
406+
list(APPEND GGML_CPU_SOURCES ${GGML_KLEIDIAI_SOURCES})
407+
endif()
408+
319409
message(STATUS "Adding CPU backend variant ${GGML_CPU_NAME}: ${ARCH_FLAGS} ${ARCH_DEFINITIONS}")
320410
target_sources(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_SOURCES})
321411
target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS})

0 commit comments

Comments
 (0)