
Commit 3666c86

Ming Xie authored and flynnjiang committed
ggml/kompute: Rename ggml_kompute_context to ggml_backend_kompute_context
Signed-off-by: Ming Xie <xieming@kylinos.cn>
1 parent fc54ef0 commit 3666c86

File tree

1 file changed: +11 -11 lines changed

ggml/src/ggml-kompute.cpp

Lines changed: 11 additions & 11 deletions
@@ -66,20 +66,20 @@ static std::string ggml_kompute_format_name(int device) {
     return "Kompute" + std::to_string(device);
 }

-struct ggml_kompute_context {
+struct ggml_backend_kompute_context {
     int device;
     std::string name;
     std::shared_ptr<vk::DescriptorPool> pool;

-    ggml_kompute_context(int device)
+    ggml_backend_kompute_context(int device)
         : device(device), name(ggml_kompute_format_name(device)) {}
 };

 // FIXME: It would be good to consolidate the kompute manager and the kompute context into one object
 // and consolidate the init functions and simplify object lifetime management. As it currently stands,
 // we *have* to have the kompute manager no matter what for device discovery, but the kompute context
 // is only created when a device is set and vulkan is explicitly turned on.
-static ggml_kompute_context *s_kompute_context = nullptr;
+static ggml_backend_kompute_context *s_kompute_context = nullptr;

 class kompute_manager {
     kp::Manager *s_mgr = nullptr;
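Note (not part of the patch): the FIXME above and the init/free hunks further down describe the lifetime the renamed type keeps: a single context that stays null until a device is chosen and Vulkan is explicitly turned on. A minimal sketch of that pattern using the new name; get_or_create_kompute_context is a hypothetical helper for illustration only, the real file keeps this logic inside ggml_backend_kompute_init and ggml_backend_kompute_free:

    // single lazily created context, as declared in the hunk above
    static ggml_backend_kompute_context * s_kompute_context = nullptr;

    // hypothetical helper (not in ggml-kompute.cpp): allocate the context on first use
    static ggml_backend_kompute_context * get_or_create_kompute_context(int device) {
        if (s_kompute_context == nullptr) {
            s_kompute_context = new ggml_backend_kompute_context(device);
        }
        return s_kompute_context;
    }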
@@ -348,7 +348,7 @@ ggml_vk_device ggml_vk_current_device() {
 }

 static
-void ggml_vk_allocate_descriptor_pool(struct ggml_kompute_context * ctx, size_t size) {
+void ggml_vk_allocate_descriptor_pool(struct ggml_backend_kompute_context * ctx, size_t size) {
     std::vector<vk::DescriptorPoolSize> descriptorPoolSizes = {
         vk::DescriptorPoolSize(
             vk::DescriptorType::eStorageBuffer,
@@ -370,7 +370,7 @@ void ggml_vk_allocate_descriptor_pool(struct ggml_kompute_context * ctx, size_t
 }

 static
-void ggml_vk_free_descriptor_pool(struct ggml_kompute_context * ctx) {
+void ggml_vk_free_descriptor_pool(struct ggml_backend_kompute_context * ctx) {
     if (ctx->pool) {
         komputeManager()->device()->destroy(
             *ctx->pool,
@@ -1412,7 +1412,7 @@ static bool ggml_vk_supports_op(const struct ggml_tensor * op) {
     return false;
 }

-static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph * gf) {
+static void ggml_vk_graph_compute(struct ggml_backend_kompute_context * ctx, struct ggml_cgraph * gf) {
     const int n_seq = 8;

     // FIXME: Figure out if we can somehow optimize the size of the pool... right now we're setting
@@ -1935,12 +1935,12 @@ ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device) {
 // backend

 static const char * ggml_backend_kompute_name(ggml_backend_t backend) {
-    auto * ctx = static_cast<ggml_kompute_context *>(backend->context);
+    auto * ctx = static_cast<ggml_backend_kompute_context *>(backend->context);
     return ctx->name.c_str();
 }

 static void ggml_backend_kompute_free(ggml_backend_t backend) {
-    auto * ctx = static_cast<ggml_kompute_context *>(backend->context);
+    auto * ctx = static_cast<ggml_backend_kompute_context *>(backend->context);

     assert(ctx == s_kompute_context);
     s_kompute_context = nullptr;
@@ -1952,12 +1952,12 @@ static void ggml_backend_kompute_free(ggml_backend_t backend) {
 }

 static ggml_backend_buffer_type_t ggml_backend_kompute_get_default_buffer_type(ggml_backend_t backend) {
-    auto * ctx = static_cast<ggml_kompute_context *>(backend->context);
+    auto * ctx = static_cast<ggml_backend_kompute_context *>(backend->context);
     return ggml_backend_kompute_buffer_type(ctx->device);
 }

 static ggml_status ggml_backend_kompute_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
-    auto * ctx = static_cast<ggml_kompute_context *>(backend->context);
+    auto * ctx = static_cast<ggml_backend_kompute_context *>(backend->context);
     ggml_vk_graph_compute(ctx, cgraph);
     return GGML_STATUS_SUCCESS;
 }
@@ -2002,7 +2002,7 @@ static ggml_guid_t ggml_backend_kompute_guid() {

 ggml_backend_t ggml_backend_kompute_init(int device) {
     GGML_ASSERT(s_kompute_context == nullptr);
-    s_kompute_context = new ggml_kompute_context(device);
+    s_kompute_context = new ggml_backend_kompute_context(device);

     ggml_backend_t kompute_backend = new ggml_backend {
         /* .guid = */ ggml_backend_kompute_guid(),
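Note (not part of the patch): because only the internal context type is renamed, the public entry points are unchanged. A hedged usage sketch of bringing the backend up and tearing it down, assuming the usual ggml-backend API (ggml_backend_name, ggml_backend_free, ggml_backend_graph_compute) and the ggml-kompute.h header:

    #include <cstdio>
    #include "ggml-backend.h"
    #include "ggml-kompute.h"

    int main() {
        // allocates the (renamed) ggml_backend_kompute_context behind backend->context
        ggml_backend_t backend = ggml_backend_kompute_init(/*device =*/ 0);
        if (backend == nullptr) {
            fprintf(stderr, "failed to initialize the Kompute backend\n");
            return 1;
        }
        printf("initialized backend: %s\n", ggml_backend_name(backend));  // e.g. "Kompute0"

        // ... build a ggml_cgraph and run it with ggml_backend_graph_compute(backend, graph) ...

        ggml_backend_free(backend);  // invokes ggml_backend_kompute_free, which resets s_kompute_context
        return 0;
    }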

0 commit comments
