#include "ggml-cpu-impl.h"
#include "ggml-quants.h"
-#include <atomic>
#include <array>
#include <type_traits>
template <int RM, int RN, int BM>
NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) {
- static std::atomic<int64_t> current_chunk;
-
GGML_ASSERT(m % (RM * BM) == 0);
const int64_t ytiles = m / (RM * BM);
const int64_t xtiles = (n + RN - 1) / RN;
if (params->ith == 0) {
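// xtiles is split into NB_BN column blocks: jj_BN blocks of size SIZE_BN and the remaining NB_BN - jj_BN of size SIZE_BN - 1.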
GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles);
// Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
- std::atomic_store_explicit(&current_chunk, (int64_t)params->nth, std::memory_order_relaxed);
+ ggml_threadpool_chunk_set(params->threadpool, params->nth);
}
ggml_barrier(params->threadpool);
GGML_ASSERT(jj == jj2);
}
- // next step.
- job = std::atomic_fetch_add_explicit(&current_chunk, (int64_t)1, std::memory_order_relaxed);
+ job = ggml_threadpool_chunk_add(params->threadpool, 1);
}
ggml_barrier(params->threadpool);
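The two hunks above replace sgemm's file-local `static std::atomic<int64_t>` with a chunk counter owned by the ggml threadpool, so concurrent contexts no longer race on one process-wide static. The work distribution itself is a plain fetch-and-add queue; a minimal sketch of the consuming loop, with `process_tile` as a hypothetical stand-in for the real RM x RN tile kernel:

    int64_t job = params->ith;                    // every thread implicitly owns chunk ith at the start
    while (job < ytiles * xtiles) {
        process_tile(job / xtiles, job % xtiles); // hypothetical helper: compute one output tile
        // mirrors atomic_fetch_add: returns the pre-add value, i.e. the next unclaimed chunk
        job = ggml_threadpool_chunk_add(params->threadpool, 1);
    }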
// if using single GPU mode, remove all except the main GPU
if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
- if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
- LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
- llama_model_free(model);
- return nullptr;
+ if (params.main_gpu < 0) {
+ model->devices.clear();
+ } else {
+ if (params.main_gpu >= (int)model->devices.size()) {
+ LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size());
+ llama_model_free(model);
+ return nullptr;
+ }
+ ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
+ model->devices.clear();
+ model->devices.push_back(main_gpu);
}
- ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
- model->devices.clear();
- model->devices.push_back(main_gpu);
}
for (auto * dev : model->devices) {
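With this change, a negative `main_gpu` under `LLAMA_SPLIT_MODE_NONE` clears the device list and runs the model entirely on the CPU, while an out-of-range index still fails the load. A caller-side sketch (the model path is a placeholder):

    llama_model_params mparams = llama_model_default_params();
    mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
    mparams.main_gpu   = -1;  // negative: drop all devices, run fully on CPU
    // mparams.main_gpu = 0;  // non-negative: keep exactly that one device
    llama_model * model = llama_model_load_from_file("model.gguf", mparams);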
--- /dev/null
+// thread safety test
+// - Loads a copy of the same model on each GPU, plus a copy on the CPU
+// - Creates n_parallel (--parallel) contexts per model
+// - Runs inference in parallel on each context
+
+#include <thread>
+#include <vector>
+#include <atomic>
+#include "llama.h"
+#include "arg.h"
+#include "common.h"
+#include "log.h"
+#include "sampling.h"
+
+int main(int argc, char ** argv) {
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
+ return 1;
+ }
+
+ common_init();
+
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ LOG_INF("%s\n", common_params_get_system_info(params).c_str());
+
+ //llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
+ // if (level == GGML_LOG_LEVEL_ERROR) {
+ // common_log_add(common_log_main(), level, "%s", text);
+ // }
+ //}, NULL);
+
+ auto cparams = common_context_params_to_llama(params);
+
+ int dev_count = ggml_backend_dev_count();
+ int gpu_dev_count = 0;
+ for (int i = 0; i < dev_count; ++i) {
+ auto * dev = ggml_backend_dev_get(i);
+ if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
+ gpu_dev_count++;
+ }
+ }
+ const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split
+ //const int num_models = std::max(1, gpu_dev_count);
+ const int num_contexts = std::max(1, params.n_parallel);
+
+ std::vector<llama_model_ptr> models;
+ std::vector<std::thread> threads;
+ std::atomic<bool> failed = false;
+
+ for (int m = 0; m < num_models; ++m) {
+ auto mparams = common_model_params_to_llama(params);
+
+ if (m < gpu_dev_count) {
+ mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
+ mparams.main_gpu = m;
+ } else if (m == gpu_dev_count) {
+ mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
+ mparams.main_gpu = -1; // CPU model
+ } else {
+ mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;
+ }
+
+ llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
+ if (model == NULL) {
+ LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
+ return 1;
+ }
+
+ models.emplace_back(model);
+ }
+
+ for (int m = 0; m < num_models; ++m) {
+ auto * model = models[m].get();
+ for (int c = 0; c < num_contexts; ++c) {
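+ // one worker thread per (model, context) pair; `models` is declared before `threads`, so the captured raw model pointer stays valid until every worker is joined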
+ threads.emplace_back([&, m, c, model]() {
+ LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models);
+
+ llama_context_ptr ctx { llama_init_from_model(model, cparams) };
+ if (ctx == NULL) {
+ LOG_ERR("failed to create context\n");
+ failed.store(true);
+ return;
+ }
+
+ std::unique_ptr<common_sampler, decltype(&common_sampler_free)> sampler { common_sampler_init(model, params.sampling), common_sampler_free };
+ if (sampler == NULL) {
+ LOG_ERR("failed to create sampler\n");
+ failed.store(true);
+ return;
+ }
+
+ llama_batch batch = {};
+ {
+ auto prompt = common_tokenize(ctx.get(), params.prompt, true);
+ if (prompt.empty()) {
+ LOG_ERR("failed to tokenize prompt\n");
+ failed.store(true);
+ return;
+ }
+ batch = llama_batch_get_one(prompt.data(), prompt.size());
+ if (llama_decode(ctx.get(), batch)) {
+ LOG_ERR("failed to decode prompt\n");
+ failed.store(true);
+ return;
+ }
+ }
+
+ const auto * vocab = llama_model_get_vocab(model);
+ std::string result = params.prompt;
+
+ for (int i = 0; i < params.n_predict; i++) {
+ llama_token token;
+ if (batch.n_tokens > 0) {
+ token = common_sampler_sample(sampler.get(), ctx.get(), batch.n_tokens - 1);
+ } else {
+ token = llama_vocab_bos(vocab);
+ }
+
+ result += common_token_to_piece(ctx.get(), token);
+
+ if (llama_vocab_is_eog(vocab, token)) {
+ break;
+ }
+
+ batch = llama_batch_get_one(&token, 1);
+ if (llama_decode(ctx.get(), batch)) {
+ LOG_ERR("Model %d/%d, Context %d/%d: failed to decode\n", m + 1, num_models, c + 1, num_contexts);
+ failed.store(true);
+ return;
+ }
+ }
+
+ LOG_INF("Model %d/%d, Context %d/%d: %s\n\n", m + 1, num_models, c + 1, num_contexts, result.c_str());
+ });
+ }
+ }
+
+ for (auto & thread : threads) {
+ thread.join();
+ }
+
+ if (failed) {
+ LOG_ERR("One or more threads failed.\n");
+ return 1;
+ }
+
+ LOG_INF("All threads finished without errors.\n");
+ return 0;
+}
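A plausible invocation, using only the standard common-arg flags the test already parses (the model path is a placeholder; `-np` sets the number of contexts per model):

    ./test-thread-safety -m models/tiny.gguf -p "The meaning of life is" -n 32 -np 4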