
Commit 567051f

llama : expose model's rope_freq_scale in the API
so it can be scaled further before creating a context.
1 parent f5ef5cf commit 567051f

File tree: 2 files changed, +7 -0 lines changed

llama.cpp (+4 lines)

@@ -6890,6 +6890,10 @@ int llama_n_embd(const struct llama_model * model) {
     return model->hparams.n_embd;
 }
 
+float llama_rope_freq_scale_train(const struct llama_model * model) {
+    return model->hparams.rope_freq_scale_train;
+}
+
 int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
     return snprintf(buf, buf_size, "%s %s %s",
             llama_model_arch_name(model->arch).c_str(),

llama.h (+3 lines)

@@ -282,6 +282,9 @@ extern "C" {
     LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
     LLAMA_API int llama_n_embd     (const struct llama_model * model);
 
+    // Get the model's RoPE frequency scaling factor
+    LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
+
     // Get a string describing the model type
     LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
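
The commit message notes that the factor can be scaled further before a context is created. A minimal sketch of how a caller might use the new getter for that, assuming the llama.h API of roughly this period (separate llama_model_params / llama_context_params); the file name "model.gguf" and the extra 0.5f factor are illustrative assumptions, not part of the commit:

    #include "llama.h"
    #include <stdio.h>

    int main(void) {
        llama_backend_init(false);  // no NUMA

        struct llama_model_params mparams = llama_model_default_params();
        struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
        if (model == NULL) {
            fprintf(stderr, "failed to load model\n");
            return 1;
        }

        // RoPE frequency scaling factor the model was trained with (exposed by this commit)
        const float freq_scale_train = llama_rope_freq_scale_train(model);
        printf("rope_freq_scale_train = %g\n", freq_scale_train);

        // scale it further before creating the context; the 0.5f factor is purely illustrative
        struct llama_context_params cparams = llama_context_default_params();
        cparams.rope_freq_scale = freq_scale_train * 0.5f;

        struct llama_context * ctx = llama_new_context_with_model(model, cparams);

        // ... use ctx ...

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return 0;
    }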

Comments (0)