
Commit 96ea727

Add interactive mode (#61)
* Initial work on interactive mode.
* Improve interactive mode. Make reverse prompt optional.
* Update README to explain interactive mode.
* Fix OS X build.
1 parent 9661954 commit 96ea727

File tree

- README.md
- main.cpp
- utils.cpp
- utils.h

4 files changed: +170, -10 lines


README.md (+23)

@@ -183,6 +183,29 @@ The number of files generated for each model is as follows:
 
 When running the larger models, make sure you have enough disk space to store all the intermediate files.
 
+### Interactive mode
+
+If you want a more ChatGPT-like experience, you can run in interactive mode by passing `-i` as a parameter.
+In this mode, you can always interrupt generation by pressing Ctrl+C and enter one or more lines of text, which will be converted into tokens and appended to the current context. You can also specify a *reverse prompt* with the parameter `-r "reverse prompt string"`: user input will then be prompted whenever the exact tokens of the reverse prompt string are encountered in the generation. A typical use is a prompt that makes LLaMA emulate a chat between multiple users, say Alice and Bob, combined with `-r "Alice:"`.
+
+Here is an example few-shot interaction, invoked with the command
+```
+./main -m ./models/13B/ggml-model-q4_0.bin -t 8 --repeat_penalty 1.2 --temp 0.9 --top_p 0.9 -n 256 \
+--color -i -r "User:" \
+-p \
+"Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
+
+User: Hello, Bob.
+Bob: Hello. How may I help you today?
+User: Please tell me the largest city in Europe.
+Bob: Sure. The largest city in Europe is London, the capital of the United Kingdom.
+User:"
+```
+Note the use of `--color` to distinguish between user input and generated text.
+
+![image](https://user-images.githubusercontent.com/401380/224572787-d418782f-47b2-49c4-a04e-65bfa7ad4ec0.png)
+
+
 ## Limitations
 
 - Not sure if my tokenizer is correct. There are a few places where we might have a mistake:
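
The reverse-prompt rule above reduces to a suffix comparison over token IDs: generation pauses as soon as the most recent tokens equal the tokenized reverse prompt. A minimal standalone sketch of that check, using plain `int` token IDs and made-up values in place of the repo's `gpt_vocab::id`:

```
#include <algorithm>
#include <cstdio>
#include <vector>

// True when the sliding window of recent tokens ends with the
// reverse-prompt tokens; compared back-to-front via reverse iterators.
bool hit_reverse_prompt(const std::vector<int> & recent,
                        const std::vector<int> & antiprompt) {
    if (antiprompt.empty() || antiprompt.size() > recent.size()) {
        return false;
    }
    return std::equal(antiprompt.rbegin(), antiprompt.rend(), recent.rbegin());
}

int main() {
    std::vector<int> recent     = {5, 9, 17, 42}; // last generated tokens
    std::vector<int> antiprompt = {17, 42};       // tokenized "User:" (made up)
    printf("%s\n", hit_reverse_prompt(recent, antiprompt)
                       ? "pause for user input" : "keep generating");
    return 0;
}
```

This is the same `std::equal`-over-reverse-iterators test used in the main.cpp diff below.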

main.cpp (+127, -10)

@@ -11,6 +11,18 @@
 #include <string>
 #include <vector>
 
+#include <signal.h>
+#include <unistd.h>
+
+#define ANSI_COLOR_RED     "\x1b[31m"
+#define ANSI_COLOR_GREEN   "\x1b[32m"
+#define ANSI_COLOR_YELLOW  "\x1b[33m"
+#define ANSI_COLOR_BLUE    "\x1b[34m"
+#define ANSI_COLOR_MAGENTA "\x1b[35m"
+#define ANSI_COLOR_CYAN    "\x1b[36m"
+#define ANSI_COLOR_RESET   "\x1b[0m"
+#define ANSI_BOLD          "\x1b[1m"
+
 // determine number of model parts based on the dimension
 static const std::map<int, int> LLAMA_N_PARTS = {
     { 4096, 1 },
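
These macros are ordinary string literals, so C++ adjacent-literal concatenation lets them be stacked inside a single printf format string. A quick self-contained check, assuming an ANSI/VT100-capable terminal:

```
#include <cstdio>

#define ANSI_COLOR_GREEN "\x1b[32m"
#define ANSI_COLOR_RESET "\x1b[0m"
#define ANSI_BOLD        "\x1b[1m"

int main() {
    // adjacent string literals concatenate at compile time,
    // so the macros stack into one escape-prefixed format string
    printf(ANSI_BOLD ANSI_COLOR_GREEN "styled like user input" ANSI_COLOR_RESET "\n");
    printf("back to the terminal's default style\n");
    return 0;
}
```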

@@ -733,6 +745,18 @@ bool llama_eval(
     return true;
 }
 
+static bool is_interacting = false;
+
+void sigint_handler(int signo) {
+    if (signo == SIGINT) {
+        if (!is_interacting) {
+            is_interacting = true;
+        } else {
+            _exit(130);
+        }
+    }
+}
+
 int main(int argc, char ** argv) {
     ggml_time_init();
     const int64_t t_main_start_us = ggml_time_us();
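
The handler gives Ctrl+C two stages: the first interrupt flips `is_interacting` so the main loop can hand control to the user, and a second interrupt exits with status 130, the conventional 128 + SIGINT. It calls `_exit` rather than `exit` because only the former is async-signal-safe. A standalone sketch of the same pattern; it uses a `volatile sig_atomic_t` flag, the strictly portable choice, where the commit itself uses a plain bool:

```
#include <signal.h>
#include <unistd.h>
#include <cstdio>

static volatile sig_atomic_t interrupted = 0;

// First Ctrl+C: raise a flag the main loop can poll and act on.
// Second Ctrl+C: leave immediately with 128 + SIGINT = 130.
void on_sigint(int /*signo*/) {
    if (interrupted) {
        _exit(130); // async-signal-safe, unlike exit()
    }
    interrupted = 1;
}

int main() {
    struct sigaction sa = {};
    sa.sa_handler = on_sigint;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGINT, &sa, NULL);

    while (!interrupted) {
        pause(); // wait for the first Ctrl+C
    }
    printf("interrupted once; Ctrl+C again exits with status 130\n");
    while (true) {
        pause();
    }
}
```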

@@ -787,13 +811,34 @@ int main(int argc, char ** argv) {
 
     params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());
 
+    // tokenize the reverse prompt
+    std::vector<gpt_vocab::id> antiprompt_inp = ::llama_tokenize(vocab, params.antiprompt, false);
+
     printf("\n");
     printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
     printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
     for (int i = 0; i < (int) embd_inp.size(); i++) {
         printf("%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
     }
     printf("\n");
+    if (params.interactive) {
+        struct sigaction sigint_action;
+        sigint_action.sa_handler = sigint_handler;
+        sigemptyset(&sigint_action.sa_mask);
+        sigint_action.sa_flags = 0;
+        sigaction(SIGINT, &sigint_action, NULL);
+
+        printf("%s: interactive mode on.\n", __func__);
+
+        if (antiprompt_inp.size()) {
+            printf("%s: reverse prompt: '%s'\n", __func__, params.antiprompt.c_str());
+            printf("%s: number of tokens in reverse prompt = %zu\n", __func__, antiprompt_inp.size());
+            for (int i = 0; i < (int) antiprompt_inp.size(); i++) {
+                printf("%6d -> '%s'\n", antiprompt_inp[i], vocab.id_to_token.at(antiprompt_inp[i]).c_str());
+            }
+            printf("\n");
+        }
+    }
     printf("sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
     printf("\n\n");
 
@@ -807,7 +852,28 @@ int main(int argc, char ** argv) {
     std::vector<gpt_vocab::id> last_n_tokens(last_n_size);
     std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
 
-    for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
+
+    if (params.interactive) {
+        printf("== Running in interactive mode. ==\n"
+               " - Press Ctrl+C to interject at any time.\n"
+               " - Press Return to return control to LLaMa.\n"
+               " - If you want to submit another line, end your input in '\\'.\n");
+    }
+
+    int remaining_tokens = params.n_predict;
+    int input_consumed = 0;
+    bool input_noecho = false;
+
+    // prompt user immediately after the starting prompt has been loaded
+    if (params.interactive_start) {
+        is_interacting = true;
+    }
+
+    if (params.use_color) {
+        printf(ANSI_COLOR_YELLOW);
+    }
+
+    while (remaining_tokens > 0) {
         // predict
         if (embd.size() > 0) {
             const int64_t t_start_us = ggml_time_us();
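
The rewrite trades the single loop index `i` for explicit bookkeeping: `input_consumed` tracks how much of `embd_inp` has been fed to the model, `remaining_tokens` is the sampling budget (interactive input can grow `embd_inp` mid-run, so only sampled tokens spend it), and `input_noecho` suppresses echoing tokens the user just typed. A hypothetical dummy-data skeleton of that control flow, not the real model code:

```
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> embd_inp = {1, 2, 3, 4, 5}; // pretend prompt tokens
    const size_t n_batch  = 2;  // max tokens per eval batch
    int remaining_tokens  = 4;  // sampling budget (n_predict)
    size_t input_consumed = 0;  // prompt tokens fed so far

    while (remaining_tokens > 0) {
        std::vector<int> batch;
        if (input_consumed < embd_inp.size()) {
            // still flushing the (possibly user-extended) prompt, batch-wise;
            // consuming prompt tokens does not spend the budget
            while (input_consumed < embd_inp.size() && batch.size() < n_batch) {
                batch.push_back(embd_inp[input_consumed++]);
            }
        } else {
            batch.push_back(100); // stand-in for a sampled token id
            --remaining_tokens;   // only generation spends the budget
        }
        for (int id : batch) printf("%d ", id);
        printf("| budget left: %d\n", remaining_tokens);
    }
    return 0;
}
```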

@@ -823,8 +889,8 @@
         n_past += embd.size();
         embd.clear();
 
-        if (i >= embd_inp.size()) {
-            // sample next token
+        if (embd_inp.size() <= input_consumed) {
+            // out of input, sample next token
             const float top_k = params.top_k;
             const float top_p = params.top_p;
             const float temp = params.temp;

@@ -847,24 +913,74 @@
 
             // add it to the context
             embd.push_back(id);
+
+            // echo this to console
+            input_noecho = false;
+
+            // decrement remaining sampling budget
+            --remaining_tokens;
         } else {
             // if here, it means we are still processing the input prompt
-            for (int k = i; k < embd_inp.size(); k++) {
-                embd.push_back(embd_inp[k]);
+            while (embd_inp.size() > input_consumed) {
+                embd.push_back(embd_inp[input_consumed]);
                 last_n_tokens.erase(last_n_tokens.begin());
-                last_n_tokens.push_back(embd_inp[k]);
+                last_n_tokens.push_back(embd_inp[input_consumed]);
+                ++input_consumed;
                 if (embd.size() > params.n_batch) {
                     break;
                 }
             }
-            i += embd.size() - 1;
+
+            if (params.use_color && embd_inp.size() <= input_consumed) {
+                printf(ANSI_COLOR_RESET);
+            }
         }
 
         // display text
-        for (auto id : embd) {
-            printf("%s", vocab.id_to_token[id].c_str());
+        if (!input_noecho) {
+            for (auto id : embd) {
+                printf("%s", vocab.id_to_token[id].c_str());
+            }
+            fflush(stdout);
+        }
+
+        // in interactive mode, and not currently processing queued inputs;
+        // check if we should prompt the user for more
+        if (params.interactive && embd_inp.size() <= input_consumed) {
+            // check for reverse prompt
+            if (antiprompt_inp.size() && std::equal(antiprompt_inp.rbegin(), antiprompt_inp.rend(), last_n_tokens.rbegin())) {
+                // reverse prompt found
+                is_interacting = true;
+            }
+            if (is_interacting) {
+                // currently being interactive
+                bool another_line = true;
+                while (another_line) {
+                    char buf[256] = {0};
+                    int n_read;
+                    if (params.use_color) printf(ANSI_BOLD ANSI_COLOR_GREEN);
+                    scanf("%255[^\n]%n%*c", buf, &n_read);
+                    if (params.use_color) printf(ANSI_COLOR_RESET);
+
+                    if (n_read > 0 && buf[n_read-1] == '\\') {
+                        another_line = true;
+                        buf[n_read-1] = '\n';
+                        buf[n_read] = 0;
+                    } else {
+                        another_line = false;
+                        buf[n_read] = '\n';
+                        buf[n_read+1] = 0;
+                    }
+
+                    std::vector<gpt_vocab::id> line_inp = ::llama_tokenize(vocab, buf, false);
+                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
+
+                    input_noecho = true; // do not echo this again
+                }
+
+                is_interacting = false;
+            }
         }
-        fflush(stdout);
 
         // end of text token
         if (embd.back() == 2) {
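
One fragile spot in the input loop above: `buf` is 256 bytes and `scanf` can read up to 255 characters, so in the no-backslash branch `buf[n_read+1]` writes `buf[256]`, one byte past the end, for a maximal line (and `n_read` is left unset when the user enters an empty line). A sketch of an equivalent reader built on `std::getline`, which avoids fixed buffers; `read_user_input` is a hypothetical helper, not part of this commit:

```
#include <iostream>
#include <string>

// Keep reading lines while they end in '\\', joining them with
// newlines, with no fixed-size buffer to overrun.
std::string read_user_input() {
    std::string input, line;
    bool another_line = true;
    while (another_line && std::getline(std::cin, line)) {
        if (!line.empty() && line.back() == '\\') {
            line.back() = '\n'; // drop the backslash, keep the line break
        } else {
            line += '\n';
            another_line = false;
        }
        input += line;
    }
    return input;
}

int main() {
    std::string text = read_user_input();
    std::cout << "would tokenize and append: " << text;
    return 0;
}
```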

@@ -873,6 +989,7 @@
         }
     }
 
+
     // report timing
     {
         const int64_t t_main_end_us = ggml_time_us();

utils.cpp (+14)

@@ -49,6 +49,15 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.n_batch = std::stoi(argv[++i]);
         } else if (arg == "-m" || arg == "--model") {
             params.model = argv[++i];
+        } else if (arg == "-i" || arg == "--interactive") {
+            params.interactive = true;
+        } else if (arg == "--interactive-start") {
+            params.interactive = true;
+            params.interactive_start = true;
+        } else if (arg == "--color") {
+            params.use_color = true;
+        } else if (arg == "-r" || arg == "--reverse-prompt") {
+            params.antiprompt = argv[++i];
         } else if (arg == "-h" || arg == "--help") {
             gpt_print_usage(argc, argv, params);
             exit(0);

@@ -67,6 +76,11 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
     fprintf(stderr, "\n");
     fprintf(stderr, "options:\n");
     fprintf(stderr, "  -h, --help            show this help message and exit\n");
+    fprintf(stderr, "  -i, --interactive     run in interactive mode\n");
+    fprintf(stderr, "  --interactive-start   run in interactive mode and poll user input at startup\n");
+    fprintf(stderr, "  -r PROMPT, --reverse-prompt PROMPT\n");
+    fprintf(stderr, "                        in interactive mode, poll user input upon seeing PROMPT\n");
+    fprintf(stderr, "  --color               colorise output to distinguish prompt and user input from generations\n");
     fprintf(stderr, "  -s SEED, --seed SEED  RNG seed (default: -1)\n");
     fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
     fprintf(stderr, "  -p PROMPT, --prompt PROMPT\n");

utils.h (+6)

@@ -28,6 +28,12 @@ struct gpt_params {
 
     std::string model = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt;
+
+    bool use_color = false; // use color to distinguish generations and inputs
+
+    bool interactive = false; // interactive mode
+    bool interactive_start = false; // reverse prompt immediately
+    std::string antiprompt = ""; // string upon seeing which more user input is prompted
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
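
To see the new options end to end, here is a hypothetical driver around the parser; it assumes utils.h from this repo is on the include path and that a false return from gpt_params_parse signals a parse failure:

```
#include "utils.h"
#include <cstdio>

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }
    // e.g. ./demo -i -r "User:" --color
    printf("interactive=%d color=%d reverse-prompt='%s'\n",
           params.interactive, params.use_color, params.antiprompt.c_str());
    return 0;
}
```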
