
Commit bebea65

Merge pull request ggml-org#13 from anon998/small-fixes
Small fixes.
2 parents 88cc7bb + abb7782 commit bebea65

2 files changed: +17 −36 lines

examples/server/CMakeLists.txt (+1 −1)

@@ -4,7 +4,7 @@ add_executable(${TARGET} server.cpp json.hpp httplib.h)
 target_compile_definitions(${TARGET} PRIVATE
     # single thread
     CPPHTTPLIB_THREAD_POOL_COUNT=1
-    # crash the server in the debug mode, otherwise send http 500 error
+    # crash the server in debug mode, otherwise send an http 500 error
     $<$<CONFIG:Debug>:
         CPPHTTPLIB_NO_EXCEPTIONS=1
     >
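The generator expression means CPPHTTPLIB_NO_EXCEPTIONS is defined only in Debug builds, which is what the reworded comment describes: in Debug a throwing handler takes the server down at the point of failure, while in Release the exception is caught and answered with HTTP 500. A minimal C++ sketch of that pattern (illustrative only; handle_completion and run_handler are made-up names, not cpp-httplib's internals):

#include <exception>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical request handler; stands in for the real completion endpoint.
std::pair<int, std::string> handle_completion(const std::string &body) {
    if (body.empty()) {
        throw std::runtime_error("empty request");
    }
    return {200, "ok"};
}

// Sketch of the behaviour the CMake comment describes: in a Debug build
// (CPPHTTPLIB_NO_EXCEPTIONS defined) an exception escapes and crashes the
// process; in a Release build it is turned into an HTTP 500 response.
std::pair<int, std::string> run_handler(const std::string &body) {
#ifdef CPPHTTPLIB_NO_EXCEPTIONS
    return handle_completion(body); // crash on error
#else
    try {
        return handle_completion(body);
    } catch (const std::exception &) {
        return {500, "Internal Server Error"};
    }
#endif
}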

examples/server/server.cpp (+16 −35)

@@ -55,8 +55,6 @@ struct llama_server_context
 
     size_t num_tokens_predicted = 0;
     size_t n_past = 0;
-    size_t n_consumed = 0;
-    size_t n_session_consumed = 0;
     size_t n_remain = 0;
 
     std::vector<llama_token> embd;
@@ -87,7 +85,6 @@ struct llama_server_context
 
         n_remain = 0;
         n_past = 0;
-        n_consumed = 0;
     }
 
     bool loadModel(const gpt_params &params_)
@@ -105,7 +102,7 @@ struct llama_server_context
         return true;
     }
 
-    bool loadPrompt() {
+    void loadPrompt() {
         params.prompt.insert(0, 1, ' '); // always add a first space
         std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
 
@@ -135,14 +132,11 @@ struct llama_server_context
             n_past--;
         }
         has_next_token = true;
-        return true;
     }
 
     void beginCompletion()
     {
         // number of tokens to keep when resetting context
-
-
         n_remain = params.n_predict;
         llama_set_rng_seed(ctx, params.seed);
     }
@@ -196,9 +190,8 @@ struct llama_server_context
         auto n_vocab = llama_n_vocab(ctx);
 
         // Apply params.logit_bias map
-        for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++)
-        {
-            logits[it->first] += it->second;
+        for (const auto &it : params.logit_bias) {
+            logits[it.first] += it.second;
         }
 
         std::vector<llama_token_data> candidates;
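The range-based rewrite is behaviour-preserving; it just iterates the logit_bias map directly instead of through explicit iterators. A standalone sketch of the same idea, with a made-up vocabulary and bias map in place of the server's real state:

#include <cstdio>
#include <unordered_map>
#include <vector>

int main() {
    // Pretend vocabulary of 8 tokens with uniform logits.
    std::vector<float> logits(8, 0.0f);

    // token id -> additive bias, in the same spirit as params.logit_bias.
    std::unordered_map<int, float> logit_bias = {
        {2, +5.0f},   // strongly favour token 2
        {5, -100.0f}, // effectively ban token 5
    };

    // Same shape as the refactored loop in the hunk above.
    for (const auto &it : logit_bias) {
        logits[it.first] += it.second;
    }

    for (size_t i = 0; i < logits.size(); ++i) {
        std::printf("token %zu: %.1f\n", i, logits[i]);
    }
    return 0;
}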
@@ -275,7 +268,7 @@ struct llama_server_context
             return result;
         }
 
-        has_next_token = params.n_predict == -1 ? true : n_remain != 0;
+        has_next_token = params.n_predict == -1 || n_remain != 0;
         return result;
     }
 
@@ -334,7 +327,7 @@ struct llama_server_context
     std::vector<float> embedding(std::string content, int threads) {
         content.insert(0, 1, ' ');
         std::vector<llama_token> tokens = ::llama_tokenize(ctx, content, true);
-        if (tokens.size() > 0)
+        if (!tokens.empty())
         {
             if (llama_eval(ctx, tokens.data(), tokens.size(), 0, threads))
             {
@@ -344,7 +337,7 @@ struct llama_server_context
             }
         }
         const int n_embd = llama_n_embd(ctx);
-        const auto embeddings = llama_get_embeddings(ctx);
+        auto *const embeddings = llama_get_embeddings(ctx);
         std::vector<float> embeddings_(embeddings, embeddings + n_embd);
         return embeddings_;
     }
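The `auto *const` spelling only makes explicit that llama_get_embeddings() hands back a raw float pointer, from which n_embd values are copied into the returned vector. A self-contained sketch of that copy, using a stub buffer instead of the real llama context:

#include <cstdio>
#include <vector>

// Stand-in for llama_get_embeddings(): returns a pointer into a
// model-owned buffer of n_embd floats.
static float g_buffer[4] = {0.1f, 0.2f, 0.3f, 0.4f};
float *get_embeddings_stub() { return g_buffer; }

int main() {
    const int n_embd = 4;
    auto *const embeddings = get_embeddings_stub();            // float *const
    std::vector<float> copy(embeddings, embeddings + n_embd);  // deep copy of n_embd values
    std::printf("copied %zu values, first = %.1f\n", copy.size(), copy[0]);
    return 0;
}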
@@ -392,7 +385,7 @@ void server_print_usage(int /*argc*/, char **argv, const gpt_params &params, con
     fprintf(stderr, "\n");
 }
 
-bool server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
+void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
 {
     gpt_params default_params;
     server_params default_sparams;
@@ -534,7 +527,6 @@ bool server_params_parse(int argc, char **argv, server_params &sparams, gpt_para
         server_print_usage(argc, argv, default_params, default_sparams);
         exit(1);
     }
-    return true;
 }
 
 json format_generation_settings(llama_server_context &llama) {
@@ -575,12 +567,12 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.stream = false;
     }
     if (!body["n_predict"].is_null()) {
-        llama.params.n_predict = body["n_predict"].get<int>();
+        llama.params.n_predict = body["n_predict"].get<int32_t>();
     } else {
         llama.params.n_predict = default_params.n_predict;
     }
     if (!body["top_k"].is_null()) {
-        llama.params.top_k = body["top_k"].get<int>();
+        llama.params.top_k = body["top_k"].get<int32_t>();
     } else {
         llama.params.top_k = default_params.top_k;
     }
@@ -600,7 +592,7 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.params.typical_p = default_params.typical_p;
     }
     if (!body["repeat_last_n"].is_null()) {
-        llama.params.repeat_last_n = body["repeat_last_n"].get<int>();
+        llama.params.repeat_last_n = body["repeat_last_n"].get<int32_t>();
     } else {
         llama.params.repeat_last_n = default_params.repeat_last_n;
     }
@@ -625,7 +617,7 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.params.frequency_penalty = default_params.frequency_penalty;
     }
     if (!body["mirostat"].is_null()) {
-        llama.params.mirostat = body["mirostat"].get<float>();
+        llama.params.mirostat = body["mirostat"].get<int>();
     } else {
         llama.params.mirostat = default_params.mirostat;
     }
@@ -640,17 +632,17 @@ bool parse_options_completion(json body, llama_server_context& llama, Response &
         llama.params.mirostat_eta = default_params.mirostat_eta;
     }
     if (!body["penalize_nl"].is_null()) {
-        llama.params.penalize_nl = body["penalize_nl"].get<float>();
+        llama.params.penalize_nl = body["penalize_nl"].get<bool>();
     } else {
         llama.params.penalize_nl = default_params.penalize_nl;
     }
     if (!body["n_keep"].is_null()) {
-        llama.params.n_keep = body["n_keep"].get<int>();
+        llama.params.n_keep = body["n_keep"].get<int32_t>();
     } else {
        llama.params.n_keep = default_params.n_keep;
     }
     if (!body["seed"].is_null()) {
-        llama.params.seed = body["seed"].get<int>();
+        llama.params.seed = body["seed"].get<int32_t>();
     } else {
         llama.params.seed = time(NULL);
     }
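These hunks only tighten the types requested from the JSON body so they match the underlying gpt_params fields (int32_t counters, an int mirostat mode, a bool penalize_nl); get<T>() converts the JSON value to exactly the requested type. A small standalone sketch of the same null-check-then-typed-get pattern, using nlohmann::json and a hypothetical options struct rather than the real gpt_params:

#include <cstdint>
#include <cstdio>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Hypothetical subset of the sampling options, for illustration only.
struct options {
    int32_t n_predict   = 128;
    int     mirostat    = 0;
    bool    penalize_nl = true;
};

int main() {
    options defaults, opts;
    json body = json::parse(R"({"n_predict": 64, "penalize_nl": false})");

    // Missing keys come back as null, so the defaults branch is taken.
    if (!body["n_predict"].is_null()) {
        opts.n_predict = body["n_predict"].get<int32_t>();
    } else {
        opts.n_predict = defaults.n_predict;
    }
    if (!body["mirostat"].is_null()) {
        opts.mirostat = body["mirostat"].get<int>();
    } else {
        opts.mirostat = defaults.mirostat;
    }
    if (!body["penalize_nl"].is_null()) {
        opts.penalize_nl = body["penalize_nl"].get<bool>();
    } else {
        opts.penalize_nl = defaults.penalize_nl;
    }

    std::printf("n_predict=%d mirostat=%d penalize_nl=%d\n",
                opts.n_predict, opts.mirostat, (int)opts.penalize_nl);
    return 0;
}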
@@ -717,10 +709,7 @@ int main(int argc, char **argv)
     llama_server_context llama;
     params.model = "ggml-model.bin";
 
-    if (server_params_parse(argc, argv, sparams, params) == false)
-    {
-        return 1;
-    }
+    server_params_parse(argc, argv, sparams, params);
 
     llama.verbose = sparams.verbose;
     llama.json_indent = sparams.verbose ? 4 : -1;
@@ -768,15 +757,7 @@ int main(int argc, char **argv)
             return;
         }
 
-        if (!llama.loadPrompt()) {
-            json data = {{"status", "error"}, {"reason", "Context too long."}};
-            res.set_content(
-                data.dump(llama.json_indent, ' ', false, json::error_handler_t::replace),
-                "application/json");
-            res.status = 400;
-            return;
-        }
-
+        llama.loadPrompt();
         llama.beginCompletion();
 
         if (!llama.stream) {
