From a55e8fc6f080664ed333128319b4d83d7f6d8d23 Mon Sep 17 00:00:00 2001
From: Pierrick HYMBERT
Date: Thu, 29 Feb 2024 11:35:49 +0100
Subject: [PATCH] server: allow to override threads server pool with
 --threads-http

---
 examples/server/README.md  |  1 +
 examples/server/server.cpp | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/examples/server/README.md b/examples/server/README.md
index 0e9bd7fd404ba..ad35306c60c4e 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -18,6 +18,7 @@ The project is under active development, and we are [looking for feedback and co
 
 - `--threads N`, `-t N`: Set the number of threads to use during generation.
 - `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation.
+- `--threads-http N`: number of threads in the http server pool to process requests (default: `std::thread::hardware_concurrency()`)
 - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
 - `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
 - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 080fa9bd5702c..076d395751043 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -44,6 +44,7 @@ struct server_params
     int32_t write_timeout = 600;
     bool slots_endpoint = true;
     bool metrics_endpoint = false;
+    int n_threads_http = -1;
 };
 
 bool server_verbose = false;
@@ -2065,6 +2066,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     printf("  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
     printf("  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
     printf("  -tb N, --threads-batch N  number of threads to use during batch and prompt processing (default: same as --threads)\n");
+    printf("  --threads-http N          number of threads in the http server pool to process requests (default: hardware concurrency)\n");
     printf("  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
     printf("  --rope-scaling {none,linear,yarn}\n");
     printf("                            RoPE frequency scaling method, defaults to linear unless specified by the model\n");
@@ -2351,6 +2353,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
             }
             params.n_threads_batch = std::stoi(argv[i]);
         }
+        else if (arg == "--threads-http")
+        {
+            if (++i >= argc)
+            {
+                invalid_param = true;
+                break;
+            }
+            sparams.n_threads_http = std::stoi(argv[i]);
+        }
         else if (arg == "-b" || arg == "--batch-size")
         {
             if (++i >= argc)
@@ -3509,6 +3520,11 @@ int main(int argc, char **argv)
                 }*/
         //);
 
+    if (sparams.n_threads_http > 0) {
+        log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
+        svr.new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };
+    }
+
     LOG_INFO("HTTP server listening", log_data);
     // run the HTTP server in a thread - see comment below
     std::thread t([&]()
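
For reference, here is a minimal standalone sketch of the cpp-httplib mechanism the patch relies on: assigning `Server::new_task_queue` replaces the default worker pool, which cpp-httplib sizes to `std::thread::hardware_concurrency()`, with an explicitly sized `httplib::ThreadPool`. The endpoint, address, and port below are illustrative choices, not taken from the patch:

    // sketch.cpp: the same override the patch applies, shown in isolation.
    #include "httplib.h"

    int main() {
        httplib::Server svr;

        // Equivalent of launching the server with `--threads-http 4`:
        // handle incoming requests with a pool of exactly 4 threads.
        svr.new_task_queue = [] { return new httplib::ThreadPool(4); };

        svr.Get("/health", [](const httplib::Request &, httplib::Response &res) {
            res.set_content("ok", "text/plain");
        });

        svr.listen("127.0.0.1", 8080); // illustrative address/port
        return 0;
    }

Guarding on `n_threads_http > 0` keeps the default (-1) meaning "do not override", so existing deployments keep cpp-httplib's hardware-concurrency pool unless the new flag is passed.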