
Commit 95d576b

metal : pad n_ctx by 32 (#6177)
* metal : require ne00 >= 128 for mat-mat kernels ggml-ci
* llama : pad n_ctx by 32 ggml-ci
1 parent 59c17f0 commit 95d576b
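For context, ne00 in ggml is the first dimension of src0, i.e. the shared k dimension of a matrix multiplication, so the first change raises the minimum k at which the Metal backend picks its matrix-matrix kernel instead of the matrix-vector path. Below is a rough, illustrative sketch of that kind of dispatch check; it is not the actual ggml-metal.m condition, which also inspects tensor types, broadcasting and batch sizes.

    #include <cstdint>

    // Illustrative dispatch heuristic only, not the real ggml-metal.m logic:
    // prefer the mat-mat kernel when the shared dimension is large enough and
    // src1 has more than one column to multiply against.
    static bool use_mat_mat_kernel(int64_t ne00, int64_t ne11) {
        return ne00 >= 128 && ne11 > 1;
    }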

File tree

4 files changed: +14 -2 lines changed


common/common.cpp (+1 -1)

@@ -101,7 +101,7 @@ int32_t get_num_physical_cores() {
     return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
 }

-void process_escapes(std::string& input) {
+void process_escapes(std::string & input) {
     std::size_t input_len = input.length();
     std::size_t output_idx = 0;

examples/batched/batched.cpp (+3 -1)

@@ -48,6 +48,8 @@ int main(int argc, char ** argv) {
         params.prompt = "Hello my name is";
     }

+    process_escapes(params.prompt);
+
     // init LLM

     llama_backend_init();
@@ -78,7 +80,7 @@ int main(int argc, char ** argv) {
     llama_context_params ctx_params = llama_context_default_params();

     ctx_params.seed = 1234;
-    ctx_params.n_ctx = n_kv_req;
+    ctx_params.n_ctx = n_kv_req;
     ctx_params.n_batch = std::max(n_len, n_parallel);
     ctx_params.n_seq_max = n_parallel;
     ctx_params.n_threads = params.n_threads;
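The new process_escapes(params.prompt) call means a prompt passed on the command line with literal backslash escapes (for example "Hello\nworld") has those sequences expanded before tokenization. A minimal sketch of the idea, assuming a simplified escape set rather than the full list handled by the real function in common/common.cpp:

    #include <string>

    // Simplified sketch: expand "\n", "\t" and "\\" in place.
    // The real process_escapes in common/common.cpp handles more sequences.
    static void process_escapes_sketch(std::string & input) {
        std::size_t out = 0;
        for (std::size_t in = 0; in < input.length(); ++in) {
            if (input[in] == '\\' && in + 1 < input.length()) {
                switch (input[++in]) {
                    case 'n':  input[out++] = '\n'; break;
                    case 't':  input[out++] = '\t'; break;
                    case '\\': input[out++] = '\\'; break;
                    default:   input[out++] = '\\'; input[out++] = input[in]; break;
                }
            } else {
                input[out++] = input[in];
            }
        }
        input.resize(out);
    }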

llama.cpp (+3)

@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
     cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
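GGML_PAD rounds its first argument up to the next multiple of the second, so any user-requested context size becomes a multiple of 32 before the KV cache is sized. A small self-contained example of the effect on cparams.n_ctx; the macro body is reproduced here for illustration and is believed to match ggml.h:

    #include <cstdint>
    #include <cstdio>

    // Round x up to the next multiple of n (same formula as GGML_PAD in ggml.h).
    #define GGML_PAD(x, n) (((x) + (n) - 1) / (n) * (n))

    int main() {
        // a requested n_ctx of 1000 is padded to 1024; 2048 is already aligned
        uint32_t a = GGML_PAD(1000u, 32);  // 1024
        uint32_t b = GGML_PAD(2048u, 32);  // 2048
        std::printf("%u %u\n", a, b);
        return 0;
    }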

tests/test-backend-ops.cpp (+7)

@@ -2091,6 +2091,13 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
         }
     }

+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2, 128, { 8, 1}, {1, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2, 128, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2,  64, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2,  64, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64, 45, 128, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45,  64, { 8, 1}, {4, 1}));
+
     for (ggml_type type_a : all_types) {
         for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
             for (int n_mats : {2, 4, 8}) {
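The added test_mul_mat cases exercise shapes with ne00 (the shared k dimension) of 64 and 128, on either side of the new Metal mat-mat threshold from the commit message. For reference, a minimal standalone sketch of how one of these shapes maps onto ggml tensors, assuming the usual ggml_mul_mat convention that src0 is [k, m, ...] and src1 is [k, n, ...]:

    #include "ggml.h"

    int main() {
        struct ggml_init_params ip = {
            /*.mem_size   =*/ 16 * 1024 * 1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(ip);

        // mirrors test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 128, { 8, 1}, {1, 1}):
        // m = 64, n = 2, k = 128 (ne00), with a batch of 8 matrices
        struct ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 128, 64, 8);
        struct ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 128,  2, 8);
        struct ggml_tensor * c = ggml_mul_mat(ctx, a, b); // result is [64, 2, 8]
        (void) c;

        ggml_free(ctx);
        return 0;
    }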
