GGML_API size_t ggml_type_size(enum ggml_type type); // size in bytes for all elements in a block
GGML_API size_t ggml_row_size (enum ggml_type type, int64_t ne); // size in bytes for all elements in a row
- GGML_DEPRECATED(
- GGML_API double ggml_type_sizef(enum ggml_type type), // ggml_type_size()/ggml_blck_size() as float
- "use ggml_row_size() instead");
-
GGML_API const char * ggml_type_name(enum ggml_type type);
GGML_API const char * ggml_op_name (enum ggml_op op);
GGML_API const char * ggml_op_symbol(enum ggml_op op);
};
const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type) {
+ // Bounds-check the enum before indexing the file-scope type_traits table.
+ // NOTE(review): plain assert() is compiled out under NDEBUG, unlike the
+ // GGML_ASSERT it replaces — release builds perform no range check here.
+ assert(type >= 0);
+ assert(type < GGML_TYPE_COUNT);
return &type_traits[type];
}
}
int64_t ggml_blck_size(enum ggml_type type) {
+ // Number of elements per block for this type (1 for non-quantized types).
+ // NOTE(review): assert() vanishes under NDEBUG; debug-only range check.
+ assert(type >= 0);
+ assert(type < GGML_TYPE_COUNT);
return type_traits[type].blck_size;
}
size_t ggml_type_size(enum ggml_type type) {
+ // Size in bytes of one block of this type.
+ // NOTE(review): assert() vanishes under NDEBUG; debug-only range check.
+ assert(type >= 0);
+ assert(type < GGML_TYPE_COUNT);
return type_traits[type].type_size;
}
size_t ggml_row_size(enum ggml_type type, int64_t ne) {
+ // Size in bytes of a row of ne elements; ne must be a multiple of the
+ // block size. Debug-only range checks (assert is compiled out under NDEBUG).
+ assert(type >= 0);
+ assert(type < GGML_TYPE_COUNT);
assert(ne % ggml_blck_size(type) == 0);
- return ggml_type_size(type)*ne/ggml_blck_size(type);
+ // Divide before multiplying: ne is a multiple of the block size (asserted
+ // above) so the result is identical, but the intermediate product can no
+ // longer overflow size_t for large ne — consistent with the overflow
+ // hardening this patch adds elsewhere.
+ return ggml_type_size(type)*(ne/ggml_blck_size(type));
}
-double ggml_type_sizef(enum ggml_type type) {
- return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
-}
-
const char * ggml_type_name(enum ggml_type type) {
+ assert(type >= 0);
+ assert(type < GGML_TYPE_COUNT);
- return type < GGML_TYPE_COUNT ? type_traits[type].type_name : "NONE";
+ // Keep the out-of-range fallback alongside the new asserts: callers use
+ // this function to print names for possibly-invalid/unknown type values,
+ // and assert() is compiled out under NDEBUG — dropping the fallback would
+ // turn a bad enum into an out-of-bounds read in release builds.
+ return (type >= 0 && type < GGML_TYPE_COUNT) ? type_traits[type].type_name : "NONE";
}
bool ggml_is_quantized(enum ggml_type type) {
+ // True if this type stores elements in quantized blocks.
+ // NOTE(review): assert() vanishes under NDEBUG; debug-only range check.
+ assert(type >= 0);
+ assert(type < GGML_TYPE_COUNT);
return type_traits[type].is_quantized;
}
const size_t cur_end = cur_offs + cur_size;
// align to GGML_MEM_ALIGN
+ GGML_ASSERT(size <= SIZE_MAX - (GGML_MEM_ALIGN - 1));
size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN);
char * const mem_buffer = ctx->mem_buffer;
struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
+ // integer overflow checks
+ if (cur_end > SIZE_MAX - size_needed) {
+ GGML_LOG_WARN("%s: overflow detected in cur_end (%zu) + size_needed (%zu)\n", __func__, cur_end, size_needed);
+ return NULL;
+ }
+ if (cur_end + size_needed > SIZE_MAX - GGML_OBJECT_SIZE) {
+ GGML_LOG_WARN("%s: overflow detected in cur_end (%zu) + size_needed (%zu) + GGML_OBJECT_SIZE (%zu)\n", __func__,
+ cur_end, size_needed, (size_t) GGML_OBJECT_SIZE);
+ return NULL;
+ }
+
if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
GGML_LOG_WARN("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
__func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
obj_alloc_size = data_size;
}
+ GGML_ASSERT(GGML_TENSOR_SIZE <= SIZE_MAX - obj_alloc_size);
+
struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TYPE_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
GGML_ASSERT(obj_new);