
lora : update API names #11167


Merged · 1 commit · Jan 11, 2025
25 changes: 13 additions & 12 deletions common/common.cpp
@@ -910,12 +910,13 @@ struct common_init_result common_init_from_params(common_params & params) {
             return iparams;
         }

-        int err = llama_control_vector_apply(lctx,
-                                             cvec.data.data(),
-                                             cvec.data.size(),
-                                             cvec.n_embd,
-                                             params.control_vector_layer_start,
-                                             params.control_vector_layer_end);
+        int err = llama_apply_adapter_cvec(
+                lctx,
+                cvec.data.data(),
+                cvec.data.size(),
+                cvec.n_embd,
+                params.control_vector_layer_start,
+                params.control_vector_layer_end);
         if (err) {
             llama_free(lctx);
             llama_model_free(model);
@@ -926,8 +927,8 @@ struct common_init_result common_init_from_params(common_params & params) {

     // load and optionally apply lora adapters
     for (auto & la : params.lora_adapters) {
-        llama_lora_adapter_ptr lora;
-        lora.reset(llama_lora_adapter_init(model, la.path.c_str()));
+        llama_adapter_lora_ptr lora;
+        lora.reset(llama_adapter_lora_init(model, la.path.c_str()));
         if (lora == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
@@ -940,7 +941,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     }

     if (!params.lora_init_without_apply) {
-        common_lora_adapters_apply(lctx, params.lora_adapters);
+        common_set_adapter_lora(lctx, params.lora_adapters);
     }

     if (params.sampling.ignore_eos && llama_token_eos(vocab) == LLAMA_TOKEN_NULL) {
@@ -1008,11 +1009,11 @@ struct common_init_result common_init_from_params(common_params & params) {
     return iparams;
 }

-void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_info> & lora) {
-    llama_lora_adapter_clear(ctx);
+void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
+    llama_clear_adapter_lora(ctx);
     for (auto & la : lora) {
         if (la.scale != 0.0f) {
-            llama_lora_adapter_set(ctx, la.ptr, la.scale);
+            llama_set_adapter_lora(ctx, la.ptr, la.scale);
         }
     }
 }
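For orientation, a minimal sketch of calling the renamed helper (the wrapper function and scale values below are illustrative; `ctx` and the adapter list are assumed to come from an earlier `common_init_from_params()` call):

```cpp
#include <vector>

#include "common.h"

// Re-weight adapters that were loaded at init time, then re-apply them.
// common_set_adapter_lora() first clears the context's adapters and then
// applies every entry whose scale is non-zero.
static void reapply_loras(llama_context * ctx,
                          std::vector<common_adapter_lora_info> & adapters) {
    for (auto & la : adapters) {
        la.scale = 0.5f; // illustrative: halve each adapter's influence
    }
    common_set_adapter_lora(ctx, adapters);
}
```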
12 changes: 6 additions & 6 deletions common/common.h
@@ -24,11 +24,11 @@

 #define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"

-struct common_lora_adapter_info {
+struct common_adapter_lora_info {
     std::string path;
     float scale;

-    struct llama_lora_adapter * ptr;
+    struct llama_adapter_lora * ptr;
 };

 using llama_tokens = std::vector<llama_token>;
@@ -246,8 +246,8 @@ struct common_params {
     std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
     std::vector<llama_model_kv_override> kv_overrides;

-    bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply)
-    std::vector<common_lora_adapter_info> lora_adapters; // lora adapter path with user defined scale
+    bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_adapter_lora_apply)
+    std::vector<common_adapter_lora_info> lora_adapters; // lora adapter path with user defined scale

     std::vector<common_control_vector_load_info> control_vectors; // control vector with user defined scale

@@ -481,7 +481,7 @@ struct common_init_result {
     llama_model_ptr model;
     llama_context_ptr context;

-    std::vector<llama_lora_adapter_ptr> lora;
+    std::vector<llama_adapter_lora_ptr> lora;
 };

 struct common_init_result common_init_from_params(common_params & params);
@@ -503,7 +503,7 @@ struct llama_model * common_load_model_from_hf(
         const struct llama_model_params & params);

 // clear LoRA adapters from context, then apply new list of adapters
-void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_info> & lora);
+void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);

 //
 // Batch utils
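As a sketch of how a caller might populate the renamed fields (the adapter path and values are hypothetical):

```cpp
#include "common.h"

// Load the adapter into memory at init time without applying it; it can be
// attached later with common_set_adapter_lora().
static common_params make_lora_params() {
    common_params params;
    params.lora_init_without_apply = true;

    common_adapter_lora_info la;
    la.path  = "loras/style.gguf"; // hypothetical adapter file
    la.scale = 1.0f;               // user-defined scale, used once applied
    la.ptr   = nullptr;            // filled in by common_init_from_params()
    params.lora_adapters.push_back(la);
    return params;
}
```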
2 changes: 1 addition & 1 deletion examples/export-lora/export-lora.cpp
@@ -130,7 +130,7 @@ struct lora_merge_ctx {

     lora_merge_ctx(
             std::string & base_fname,
-            std::vector<common_lora_adapter_info> & lora_files,
+            std::vector<common_adapter_lora_info> & lora_files,
             std::string & outfile,
             int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
         fout.exceptions(std::ofstream::failbit); // fail fast on write errors
8 changes: 4 additions & 4 deletions examples/server/server.cpp
@@ -98,7 +98,7 @@ struct slot_params {
     int64_t t_max_prompt_ms = -1; // TODO: implement
     int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit

-    std::vector<common_lora_adapter_info> lora;
+    std::vector<common_adapter_lora_info> lora;

     std::vector<std::string> antiprompt;
     std::vector<std::string> response_fields;
@@ -198,7 +198,7 @@ struct server_task {
     bool metrics_reset_bucket = false;

     // used by SERVER_TASK_TYPE_SET_LORA
-    std::vector<common_lora_adapter_info> set_lora;
+    std::vector<common_adapter_lora_info> set_lora;

     server_task(server_task_type type) : type(type) {}

@@ -1133,7 +1133,7 @@ struct server_slot {

     common_speculative * spec = nullptr;

-    std::vector<common_lora_adapter_info> lora;
+    std::vector<common_adapter_lora_info> lora;

     // the index relative to completion multi-task request
     size_t index = 0;
@@ -2934,7 +2934,7 @@ struct server_context {
             // make sure we're in the right embedding mode
             llama_set_embeddings(ctx, slot_batched->is_non_causal());
             // apply lora, only need to do it once per batch
-            common_lora_adapters_apply(ctx, slot_batched->lora);
+            common_set_adapter_lora(ctx, slot_batched->lora);
         }

         // process the created batch of tokens
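Since adapters only need to be applied once per batch, the guard the server relies on can be sketched like this (names are illustrative; `are_lora_equal` is the helper from `examples/server/utils.hpp` shown below):

```cpp
#include <vector>

#include "utils.hpp"

// Re-apply the adapter set only when the batch actually requests a different
// one, so the clear-and-set work happens at most once per batch.
static void apply_lora_if_changed(llama_context * ctx,
                                  std::vector<common_adapter_lora_info> & active,
                                  const std::vector<common_adapter_lora_info> & requested) {
    if (!are_lora_equal(active, requested)) {
        active = requested;
        common_set_adapter_lora(ctx, active);
    }
}
```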
10 changes: 5 additions & 5 deletions examples/server/utils.hpp
@@ -804,8 +804,8 @@ static std::vector<llama_token_data> get_token_probabilities(llama_context * ctx
 }

 static bool are_lora_equal(
-        const std::vector<common_lora_adapter_info> & l1,
-        const std::vector<common_lora_adapter_info> & l2) {
+        const std::vector<common_adapter_lora_info> & l1,
+        const std::vector<common_adapter_lora_info> & l2) {
     if (l1.size() != l2.size()) {
         return false;
     }
@@ -819,10 +819,10 @@ static bool are_lora_equal(
 }

 // parse lora config from JSON request, returned a copy of lora_base with updated scale
-static std::vector<common_lora_adapter_info> parse_lora_request(
-        const std::vector<common_lora_adapter_info> & lora_base,
+static std::vector<common_adapter_lora_info> parse_lora_request(
+        const std::vector<common_adapter_lora_info> & lora_base,
         const json & data) {
-    std::vector<common_lora_adapter_info> lora(lora_base);
+    std::vector<common_adapter_lora_info> lora(lora_base);
     int max_idx = lora.size();

     // clear existing value
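For illustration, a sketch of feeding a request body through `parse_lora_request` (the `{"id", "scale"}` entry shape and the `json` alias from `utils.hpp` are assumptions; the literal values are made up):

```cpp
#include <vector>

#include "utils.hpp"

// Build a per-request adapter list: copy the adapters loaded at startup,
// then override the scale of adapter 0 from the JSON payload.
static std::vector<common_adapter_lora_info> lora_for_request(
        const std::vector<common_adapter_lora_info> & lora_base) {
    json body = json::parse(R"({"lora": [{"id": 0, "scale": 0.5}]})");
    return parse_lora_request(lora_base, body.at("lora"));
}
```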
6 changes: 3 additions & 3 deletions include/llama-cpp.h
@@ -20,11 +20,11 @@ struct llama_sampler_deleter {
     void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); }
 };

-struct llama_lora_adapter_deleter {
-    void operator()(llama_lora_adapter * lora_adapter) { llama_lora_adapter_free(lora_adapter); }
+struct llama_adapter_lora_deleter {
+    void operator()(llama_adapter_lora * adapter) { llama_adapter_lora_free(adapter); }
 };

 typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr;
 typedef std::unique_ptr<llama_context, llama_context_deleter> llama_context_ptr;
 typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr;
-typedef std::unique_ptr<llama_lora_adapter, llama_lora_adapter_deleter> llama_lora_adapter_ptr;
+typedef std::unique_ptr<llama_adapter_lora, llama_adapter_lora_deleter> llama_adapter_lora_ptr;
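A minimal sketch of the renamed RAII wrapper in use (the adapter path is hypothetical; `model` is assumed to be already loaded):

```cpp
#include "llama-cpp.h"

// The deleter invokes llama_adapter_lora_free() automatically, so the
// adapter is released as soon as `lora` goes out of scope.
static bool try_load_adapter(llama_model * model) {
    llama_adapter_lora_ptr lora(llama_adapter_lora_init(model, "loras/style.gguf"));
    return lora != nullptr;
}
```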
35 changes: 15 additions & 20 deletions include/llama.h
@@ -385,8 +385,7 @@ extern "C" {
 } llama_chat_message;

 // lora adapter
-// TODO: rename to llama_adapter_lora
-struct llama_lora_adapter;
+struct llama_adapter_lora;

 // Helpers for getting default parameters
 // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172)
@@ -520,44 +519,40 @@ extern "C" {
     //

     // Load a LoRA adapter from file
-    // TODO: rename to llama_adapter_lora_init
-    LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init(
+    LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init(
             struct llama_model * model,
             const char * path_lora);

+    // Manually free a LoRA adapter
+    // Note: loaded adapters will be free when the associated model is deleted
+    LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter);
+
+    // The following functions operate on a llama_context, hence the naming: llama_verb_...
+
     // Add a loaded LoRA adapter to given context
     // This will not modify model's weight
-    // TODO: rename to llama_set_adapter_lora
-    LLAMA_API int32_t llama_lora_adapter_set(
+    LLAMA_API int32_t llama_set_adapter_lora(
             struct llama_context * ctx,
-            struct llama_lora_adapter * adapter,
+            struct llama_adapter_lora * adapter,
             float scale);

     // Remove a specific LoRA adapter from given context
     // Return -1 if the adapter is not present in the context
-    // TODO: rename to llama_rm_adapter_lora
-    LLAMA_API int32_t llama_lora_adapter_remove(
+    LLAMA_API int32_t llama_rm_adapter_lora(
             struct llama_context * ctx,
-            struct llama_lora_adapter * adapter);
+            struct llama_adapter_lora * adapter);

     // Remove all LoRA adapters from given context
-    // TODO: rename to llama_clear_adapter_lora
-    LLAMA_API void llama_lora_adapter_clear(struct llama_context * ctx);
-
-    // Manually free a LoRA adapter
-    // Note: loaded adapters will be free when the associated model is deleted
-    // TODO: rename to llama_adapter_lora_free
-    LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter);
+    LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx);

     // Apply a loaded control vector to a llama_context, or if data is NULL, clear
     // the currently loaded vector.
     // n_embd should be the size of a single layer's control, and data should point
     // to an n_embd x n_layers buffer starting from layer 1.
     // il_start and il_end are the layer range the vector should apply to (both inclusive)
     // See llama_control_vector_load in common to load a control vector.
-    // TODO: rename to llama_adapter_cvec_apply
-    LLAMA_API int32_t llama_control_vector_apply(
-            struct llama_context * lctx,
+    LLAMA_API int32_t llama_apply_adapter_cvec(
+            struct llama_context * ctx,
             const float * data,
             size_t len,
             int32_t n_embd,
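Taken together, the renamed C API reads roughly like this (a sketch assuming `model` and `ctx` are valid; the adapter path is hypothetical and error handling is trimmed):

```cpp
#include "llama.h"

// Load an adapter, attach it to one context at 75% strength, then detach it.
static void demo_adapter_lifecycle(llama_model * model, llama_context * ctx) {
    llama_adapter_lora * adapter = llama_adapter_lora_init(model, "loras/style.gguf");
    if (adapter == nullptr) {
        return; // failed to load
    }
    llama_set_adapter_lora(ctx, adapter, 0.75f); // affects this context only
    llama_rm_adapter_lora(ctx, adapter);         // returns -1 if not attached
    llama_clear_adapter_lora(ctx);               // or drop everything at once
    llama_adapter_lora_free(adapter);            // optional: also freed with the model
}
```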