Skip to content

ggml: adds CONV_2D op and direct GEMM Vulkan implementation #14316

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 9 commits into
base: master
Choose a base branch
from
2 changes: 2 additions & 0 deletions ggml/include/ggml-backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -340,6 +340,8 @@ extern "C" {

// Compare the output of two backends
GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node);
// Compare the output of two backends; the graphs may differ, and only the selected output nodes (matched by name) are compared
GGML_API bool ggml_backend_compare_graph_backend_node(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph1, struct ggml_cgraph * graph2, ggml_backend_eval_callback callback, void * user_data, char* op_name_out_1, char* op_name_out_2);

// Tensor initialization
GGML_API enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
Expand Down
49 changes: 49 additions & 0 deletions ggml/src/ggml-backend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1882,6 +1882,55 @@ bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t
return true;
}

bool ggml_backend_compare_graph_backend_node(
ggml_backend_t backend1,
ggml_backend_t backend2,
struct ggml_cgraph * graph1,
struct ggml_cgraph * graph2,
ggml_backend_eval_callback callback, void * user_data, char* op_name_out_1, char* op_name_out_2) {

ggml_tensor * out1 = NULL;
ggml_tensor * out2 = NULL;

struct ggml_cgraph * g1 = graph1;
struct ggml_cgraph * g2 = graph2;

for (int i = 0; i < g1->n_nodes; i++) {
struct ggml_tensor * t1 = g1->nodes[i];
struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1);
ggml_backend_graph_compute(backend1, &g1v);
if (ggml_is_view_op(t1->op)) {
continue;
}
if(strcmp(t1 -> name, op_name_out_1) == 0){
out1 = t1;
}
}

for (int i = 0; i < g2->n_nodes; i++) {
struct ggml_tensor * t2 = g2->nodes[i];
struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1);
ggml_backend_graph_compute(backend2, &g2v);
if (ggml_is_view_op(t2->op)) {
continue;
}
if(strcmp(t2 -> name, op_name_out_2) == 0){
out2 = t2;
}
}

assert(out1 != NULL);
assert(out2 != NULL);
assert(ggml_are_same_layout(out1, out2));

// compare results, calculate rms etc
if (!callback(0, out1, out2, user_data)) {
return false;
}

return true;
}

// CPU backend - buffer

static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
Expand Down
Loading
Loading