Enable prompt input through a pipe, as an alternative to the -p option.
This makes it easier to provide longer, multi-line prompts.
Test:
$ echo "This is an example" > prompt.txt
$ cat prompt.txt | ./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin
$ cat prompt.txt | ./bin/gpt-2 -m models/gpt-2-117M/ggml-model.bin
Note that the -p option and the case where no -p is specified keep working as before (a standalone sketch of the detection logic follows these commands):
$ ./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin -p "This is an example"
$ ./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin
$ ./bin/gpt-2 -m models/gpt-2-117M/ggml-model.bin -p "This is an example"
$ ./bin/gpt-2 -m models/gpt-2-117M/ggml-model.bin
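Both tools use the same mechanism: `isatty(STDIN_FILENO)` reports whether stdin is attached to a terminal, so a pipe or redirected file can be detected and read line by line. A minimal standalone sketch of the technique follows; the fallback prompt string is a placeholder standing in for gpt_random_prompt, not the actual helper:
```
// sketch: read a prompt from stdin when it is piped in, otherwise fall back
#include <iostream>
#include <string>
#include <unistd.h> // isatty, STDIN_FILENO

int main() {
    std::string prompt;

    if (!isatty(STDIN_FILENO)) {
        // stdin is a pipe or a redirected file: consume it line by line
        std::string line;
        while (std::getline(std::cin, line)) {
            if (!prompt.empty()) {
                prompt += "\n";
            }
            prompt += line;
        }
    } else {
        // stdin is an interactive terminal: use a fallback prompt
        prompt = "This is an example"; // placeholder for gpt_random_prompt
    }

    std::cout << "prompt: " << prompt << std::endl;
    return 0;
}
```
Checking isatty rather than adding a new command-line flag keeps the -p and no-argument invocations untouched; only an actual pipe or redirection changes the behavior.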
#include <map>
#include <string>
#include <vector>
+#include <iostream> // std::cin, std::getline
+#include <unistd.h> // isatty, STDIN_FILENO
// default hparams (GPT-2 117M)
struct gpt2_hparams {
    std::mt19937 rng(params.seed);
    if (params.prompt.empty()) {
-        params.prompt = gpt_random_prompt(rng);
+        // if stdin is not a terminal, the prompt is being piped in:
+        // read it line by line instead of generating a random prompt
+        if (!isatty(STDIN_FILENO)) {
+            std::string line;
+            while (std::getline(std::cin, line)) {
+                if (!params.prompt.empty()) {
+                    params.prompt += "\n";
+                }
+                params.prompt += line;
+            }
+        } else {
+            params.prompt = gpt_random_prompt(rng);
+        }
    }

    int64_t t_load_us = 0;
# Run the inference (requires 16GB of CPU RAM)
./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin -p "This is an example"
+
+# Input the prompt through a pipe and run the inference
+echo "This is an example" > prompt.txt
+cat prompt.txt | ./bin/gpt-j -m models/gpt-j-6B/ggml-model.bin
```
To run the `gpt-j` tool, you need the 12GB `ggml-model.bin` file which contains the GPT-J model in
#include <map>
#include <string>
#include <vector>
+#include <iostream> // std::cin, std::getline
+#include <unistd.h> // isatty, STDIN_FILENO
// default hparams (GPT-J 6B)
struct gptj_hparams {
    std::mt19937 rng(params.seed);
    if (params.prompt.empty()) {
-        params.prompt = gpt_random_prompt(rng);
+        // if stdin is not a terminal, the prompt is being piped in:
+        // read it line by line instead of generating a random prompt
+        if (!isatty(STDIN_FILENO)) {
+            std::string line;
+            while (std::getline(std::cin, line)) {
+                if (!params.prompt.empty()) {
+                    params.prompt += "\n";
+                }
+                params.prompt += line;
+            }
+        } else {
+            params.prompt = gpt_random_prompt(rng);
+        }
    }

    int64_t t_load_us = 0;