llama : Changing off_t to size_t for Windows (#18204)
author    Julius Tischbein <redacted>
Fri, 19 Dec 2025 14:42:46 +0000 (15:42 +0100)
committer GitHub <redacted>
Fri, 19 Dec 2025 14:42:46 +0000 (16:42 +0200)
src/llama-model-loader.cpp

index 1da89515f7ba7e077851eeda8716acc919b5a89a..33a76dba4017d93cadfed3193c0cbe4a626ff5b5 100644 (file)
@@ -1086,10 +1086,10 @@ bool llama_model_loader::load_all_data(
             } else {
                 // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
                 if (upload_backend) {
-                    auto offset = (off_t) weight->offs;
+                    size_t offset = weight->offs;
                     alignment = file->read_alignment();
-                    off_t aligned_offset = offset & ~(alignment - 1);
-                    off_t offset_from_alignment = offset - aligned_offset;
+                    size_t aligned_offset = offset & ~(alignment - 1);
+                    size_t offset_from_alignment = offset - aligned_offset;
                     file->seek(aligned_offset, SEEK_SET);
 
                     // Calculate aligned read boundaries
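A minimal sketch of the aligned-offset arithmetic changed above, illustrating why a 64-bit unsigned type matters here: on Windows (MSVC) off_t is typically a 32-bit long, so tensor offsets beyond 2 GiB could overflow, whereas size_t is 64 bits on 64-bit targets. The offset and alignment values below are illustrative, not taken from a real model file, and the rounding trick assumes the alignment is a power of two.

    #include <cstddef>
    #include <cstdio>

    int main() {
        size_t offset    = 0x100000123ull; // hypothetical tensor offset just past 4 GiB
        size_t alignment = 4096;           // hypothetical read alignment (power of two)

        // Round down to the previous alignment boundary and keep the remainder,
        // mirroring the aligned_offset / offset_from_alignment computation above.
        size_t aligned_offset        = offset & ~(alignment - 1);
        size_t offset_from_alignment = offset - aligned_offset;

        std::printf("aligned_offset=%zu offset_from_alignment=%zu\n",
                    aligned_offset, offset_from_alignment);
        return 0;
    }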