+if (GGML_ALL_WARNINGS)
+ if (NOT MSVC)
+ set(cxx_flags
+ # TODO(marella): Add other warnings.
+ -Wunused-variable
+ -Wno-unused-function
+ -Wno-multichar
+ )
+ add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>")
+ endif()
+endif()
+
add_library(common STATIC common.cpp)
target_include_directories(common PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
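For reference, the generator expression in the hunk above restricts the warning flags to C++ translation units, so the project's C sources (e.g. ggml.c) keep their existing flags. A hypothetical per-target variant of the same idea, reusing the `common` target defined just above — a sketch, not part of this change:

```cmake
# Hypothetical sketch: attach the same C++-only warnings to the "common"
# target instead of the whole directory; flag list mirrors the hunk above.
if (GGML_ALL_WARNINGS AND NOT MSVC)
    set(cxx_flags -Wunused-variable -Wno-unused-function -Wno-multichar)
    target_compile_options(common PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>")
endif()
```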
const int n_embd = hparams.n_embd;
const int n_layer = hparams.n_layer;
- const int n_ctx = hparams.n_ctx;
const int n_vocab = hparams.n_vocab;
model.layers.resize(n_layer);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
- printf("%s: memory_size = %8.2f MB, n_mem = %lld\n", __func__, memory_size/1024.0/1024.0, n_mem);
+ printf("%s: memory_size = %8.2f MB, n_mem = %ld\n", __func__, memory_size/1024.0/1024.0, n_mem);
}
// load weights
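A note on the format change above: %ld silences the -Wformat warning only where the variable's underlying type is long (e.g. 64-bit Linux). If n_mem is an int64_t — its declaration is not shown in this excerpt — the portable spelling is PRId64 from <cinttypes>. A minimal standalone sketch:

```cpp
// Standalone sketch of the portable form; the int64_t type of n_mem is an
// assumption, as its declaration does not appear in this diff.
#include <cinttypes>
#include <cstdio>

int main() {
    const int64_t n_mem       = 24 * 1024;          // placeholder value
    const size_t  memory_size = 64u * 1024 * 1024;  // placeholder value
    // PRId64 expands to the right conversion for int64_t on every platform,
    // avoiding the %lld / %ld mismatch warnings this change works around.
    printf("memory_size = %8.2f MB, n_mem = %" PRId64 "\n",
           memory_size/1024.0/1024.0, n_mem);
    return 0;
}
```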
const int n_embd = hparams.n_embd;
const int n_layer = hparams.n_layer;
- const int n_ctx = hparams.n_ctx;
const int n_vocab = hparams.n_vocab;
model.layers.resize(n_layer);
const int n_vocab = hparams.n_vocab;
const int n_rot = hparams.n_rot;
- const int d_key = n_embd/n_head;
-
static size_t buf_size = 256u*1024*1024;
static void * buf = malloc(buf_size);
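For context, the static buf_size/buf pair above is the scratch buffer the examples hand to ggml when evaluating the graph. A rough sketch of how such a caller-owned buffer is passed to ggml_init() — the helper name is a placeholder, not taken from this excerpt:

```cpp
// Sketch: create a ggml context on top of a caller-owned buffer so that
// ggml does not allocate internally. Assumes ggml.h from this source tree.
#include "ggml.h"

static struct ggml_context * make_eval_ctx(size_t buf_size, void * buf) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf,   // pre-allocated arena; ggml will not free it
        /*.no_alloc   =*/ false,
    };
    return ggml_init(params);
}
```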
const int n_embd = hparams.n_embd;
const int n_layer = hparams.n_layer;
- const int n_ctx = hparams.n_ctx;
const int n_vocab = hparams.n_vocab;
model.layers.resize(n_layer);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
- printf("%s: memory_size = %8.2f MB, n_mem = %lld\n", __func__, memory_size/1024.0/1024.0, n_mem);
+ printf("%s: memory_size = %8.2f MB, n_mem = %ld\n", __func__, memory_size/1024.0/1024.0, n_mem);
}
// load weights
}
}
- const ggml_type wtype2 = GGML_TYPE_F32;
-
auto & ctx = model.ctx;
size_t ctx_size = 0;
fprintf(stderr, "Usage: %s models/mnist/ggml-model-f32.bin models/mnist/t10k-images.idx3-ubyte\n", argv[0]);
exit(0);
}
- const int64_t t_main_start_us = ggml_time_us();
mnist_hparams params;
int64_t t_load_us = 0;
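Finally, t_load_us above is typically filled in with ggml's own timer. A minimal sketch of that pattern (model_load() is a hypothetical stand-in for the example's real loader):

```cpp
// Sketch of the load-time measurement pattern used around model loading.
#include "ggml.h"
#include <cstdint>
#include <cstdio>

int main() {
    ggml_time_init();   // once, at program start

    int64_t t_load_us = 0;
    const int64_t t_start_us = ggml_time_us();
    // model_load(...) would run here (hypothetical helper).
    t_load_us = ggml_time_us() - t_start_us;

    printf("load time = %8.2f ms\n", t_load_us/1000.0);
    return 0;
}
```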