git.djapps.eu Git - pkg/ggml/sources/ggml/commitdiff
ggml : sync llama.cpp (fix GCC 8 build, close #99)
author Georgi Gerganov <redacted>
Mon, 24 Apr 2023 15:52:25 +0000 (18:52 +0300)
committer Georgi Gerganov <redacted>
Mon, 24 Apr 2023 15:52:46 +0000 (18:52 +0300)
src/ggml.c

index f8f73af3e75388ca78ef058da1596f4bb62910e3..6e46c0e5ad1dab8dba7c7111faa6502011340189 100644 (file)
@@ -436,7 +436,7 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
 static inline __m128i bytes_from_nibbles_16(const uint8_t * rsi)
 {
     // Load 8 bytes from memory
-    __m128i tmp = _mm_loadu_si64( ( const __m128i* )rsi );
+    __m128i tmp = _mm_loadl_epi64( ( const __m128i* )rsi );
 
     // Expand bytes into uint16_t values
     __m128i bytes = _mm_cvtepu8_epi16( tmp );
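
Context beyond the diff itself: _mm_loadl_epi64 has been available since SSE2 and loads 64 bits
into the low half of an XMM register with the upper half zeroed, which is the behavior the
replaced _mm_loadu_si64 call relied on; older GCC releases such as GCC 8 do not ship the latter
intrinsic, hence the build fix. Below is a minimal standalone sketch (not part of the commit)
illustrating the load-and-widen pattern used by bytes_from_nibbles_16; the test array, the
_mm_storeu_si128 call, and the assumption of compiling with -msse4.1 (for _mm_cvtepu8_epi16)
are illustrative additions, not code from the repository.

    #include <stdint.h>
    #include <stdio.h>
    #include <emmintrin.h>  // SSE2:   _mm_loadl_epi64, _mm_storeu_si128
    #include <smmintrin.h>  // SSE4.1: _mm_cvtepu8_epi16

    int main(void) {
        const uint8_t src[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };

        // Load 8 bytes into the low 64 bits of an XMM register; upper 64 bits are zeroed
        __m128i tmp   = _mm_loadl_epi64( (const __m128i *) src );

        // Zero-extend each byte into a uint16_t lane, as in bytes_from_nibbles_16
        __m128i bytes = _mm_cvtepu8_epi16( tmp );

        uint16_t out[8];
        _mm_storeu_si128( (__m128i *) out, bytes );

        for (int i = 0; i < 8; ++i) {
            printf("%u ", out[i]);  // prints: 1 35 69 103 137 171 205 239
        }
        printf("\n");
        return 0;
    }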