
Commit cdcb324

Author: Iwan Kawrakow
Better strategy for GPU offload
1 parent 3f54b49

File tree: 3 files changed (+30, -4 lines)


ggml/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -125,6 +125,8 @@ option(GGML_CUDA_F16 "ggml: use 16 bit floats for some ca
 set (GGML_CUDA_KQUANTS_ITER "2" CACHE STRING
      "ggml: iters./thread per block for Q2_K/Q6_K")
 set (GGML_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
      "ggml: max. batch size for using peer access")
+set (GGML_CUDA_MIN_BATCH_OFFLOAD "32" CACHE STRING
+     "ggml: min batch size for GPU offload")
 option(GGML_CUDA_NO_PEER_COPY "ggml: do not use peer to peer copies" OFF)
 option(GGML_CUDA_NO_VMM "ggml: do not try to use CUDA VMM" OFF)

ggml/src/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -382,6 +382,7 @@ if (GGML_CUDA)
     add_compile_definitions(GGML_CUDA_MMV_Y=${GGML_CUDA_MMV_Y})
     add_compile_definitions(K_QUANTS_PER_ITERATION=${GGML_CUDA_KQUANTS_ITER})
     add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE})
+    add_compile_definitions(GGML_CUDA_MIN_BATCH_OFFLOAD=${GGML_CUDA_MIN_BATCH_OFFLOAD})

     if (GGML_CUDA_USE_GRAPHS)
         add_compile_definitions(GGML_CUDA_USE_GRAPHS)
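
Note that the macro is only injected by the CMake CUDA branch above. As a sketch (an assumption on my part, not shown in this commit), a consumer such as ggml-cuda.cu would typically guard the macro with a default so that builds which bypass this CMake path still compile:

    // Hypothetical fallback: use the same default as the CMake cache variable (32)
    // when the build system does not pass -DGGML_CUDA_MIN_BATCH_OFFLOAD=<n>.
    #ifndef GGML_CUDA_MIN_BATCH_OFFLOAD
    #define GGML_CUDA_MIN_BATCH_OFFLOAD 32
    #endif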

ggml/src/ggml-cuda.cu

Lines changed: 27 additions & 4 deletions
@@ -3656,10 +3656,33 @@ GGML_CALL static bool ggml_backend_cuda_supports_buft(ggml_backend_t backend, gg
 }

 GGML_CALL static bool ggml_backend_cuda_offload_op(ggml_backend_t backend, const ggml_tensor * op) {
-    const int min_batch_size = 32;
-
-    return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
-           (op->ne[2] >= min_batch_size && (op->op == GGML_OP_MUL_MAT_ID || op->op == GGML_OP_MOE_FUSED_UP_GATE));
+    constexpr int min_batch_size = GGML_CUDA_MIN_BATCH_OFFLOAD;
+
+    // Why do we want to do this? The heuristic that a batch must have at least min_batch_size tokens
+    // for offloading the required model weights to be worthwhile comes from dense models. For MoE models,
+    // the average number of tokens each expert deals with in a batch is (active_experts / total_experts) * batch_size.
+    // Hence, according to the learned heuristic, we need (active_experts / total_experts) * batch_size >= min_batch_size.
+    // Rearranging we get
+    //
+    //     batch_size * active_experts >= min_batch_size * total_experts
+    //
+    // as the condition for offloading model weights residing in RAM to the GPU.
+    // For these ops the number of tokens is not in op->ne[1] as usual, but in op->ne[2].
+    if (op->op == GGML_OP_MUL_MAT_ID || op->op == GGML_OP_MOE_FUSED_UP_GATE) {
+        auto ids = op->op == GGML_OP_MUL_MAT_ID ? op->src[2] : op->src[3];
+        int64_t batch_size = op->ne[2];
+        if (batch_size < min_batch_size) return false;
+        int64_t n_experts_tot = op->src[0]->ne[2];
+        int64_t n_experts_active = ids->ne[0];
+        //printf("%s(%s): op->ne[2] = %ld, n_experts_tot = %ld, n_experts_active = %ld, ids: %s, %ld x %ld x %ld x %ld\n", __func__, op->name, op->ne[2], n_experts_tot, n_experts_active, ids->name, ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3]);
+        return batch_size*n_experts_active >= min_batch_size*n_experts_tot;
+    }
+
+    return op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS;
+
+    // Original:
+    //return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
+    //       (op->ne[2] >= min_batch_size && (op->op == GGML_OP_MUL_MAT_ID || op->op == GGML_OP_MOE_FUSED_UP_GATE));

     GGML_UNUSED(backend);
 }
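
To illustrate the rearranged condition with hypothetical numbers (not taken from the commit): with min_batch_size = 32 and a model that activates 8 of 256 experts, each expert sees on average batch_size * 8/256 tokens, so offloading only pays off once the batch holds at least 32 * 256/8 = 1024 tokens. A minimal self-contained C++ sketch of just this check, with an assumed helper name:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the MoE branch above: offload expert weights only when
    // batch_size * active_experts >= min_batch_size * total_experts.
    static bool moe_offload_worth_it(int64_t batch_size, int64_t n_experts_active,
                                     int64_t n_experts_tot, int64_t min_batch_size = 32) {
        if (batch_size < min_batch_size) return false;
        return batch_size * n_experts_active >= min_batch_size * n_experts_tot;
    }

    int main() {
        printf("512 tokens:  %d\n", moe_offload_worth_it(512,  8, 256));  // 0: keep weights in RAM
        printf("1024 tokens: %d\n", moe_offload_worth_it(1024, 8, 256));  // 1: offload to the GPU
    }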
