git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
android: routine maintenance - Dec 2025 (#18338)
author Naco Siren <redacted>
Mon, 29 Dec 2025 13:51:13 +0000 (05:51 -0800)
committer GitHub <redacted>
Mon, 29 Dec 2025 13:51:13 +0000 (15:51 +0200)
* Fix `msg` typo

* Fix thread safety in `destroy()` to support aborting generation from lifecycle callbacks.

* UI polish: stack new messages from the bottom of the list; fix the GGUF metadata margin falling outside the viewport.

* Bug fixes: rare race condition when the main thread updated the view while the default dispatcher updated messages at the same time; user input not disabled during generation.

* Bump dependency versions; remove deprecated DSL usage.

examples/llama.android/app/build.gradle.kts
examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt
examples/llama.android/app/src/main/res/layout/activity_main.xml
examples/llama.android/gradle/libs.versions.toml
examples/llama.android/lib/src/main/cpp/ai_chat.cpp
examples/llama.android/lib/src/main/java/com/arm/aichat/InferenceEngine.kt
examples/llama.android/lib/src/main/java/com/arm/aichat/internal/InferenceEngineImpl.kt

diff --git a/examples/llama.android/app/build.gradle.kts b/examples/llama.android/app/build.gradle.kts
index 3524fe39c45285e29974aa7ff107f698fc45ef04..2edfe98845e5e11a0d550d7cb99a370a95e8acb2 100644
@@ -41,11 +41,8 @@ android {
         }
     }
     compileOptions {
-        sourceCompatibility = JavaVersion.VERSION_1_8
-        targetCompatibility = JavaVersion.VERSION_1_8
-    }
-    kotlinOptions {
-        jvmTarget = "1.8"
+        sourceCompatibility = JavaVersion.VERSION_17
+        targetCompatibility = JavaVersion.VERSION_17
     }
 }
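
The removed `kotlinOptions` block is the deprecated DSL mentioned in the commit message; with Kotlin 2.x the JVM target would instead be pinned through the `compilerOptions` DSL. A minimal sketch of that replacement, assuming the Kotlin Android Gradle plugin is applied (this block is illustrative, not part of the commit):

    // build.gradle.kts -- hypothetical non-deprecated equivalent of the
    // removed kotlinOptions block, if the target still needed pinning
    import org.jetbrains.kotlin.gradle.dsl.JvmTarget

    kotlin {
        compilerOptions {
            // Keep the Kotlin bytecode target in sync with compileOptions above
            jvmTarget.set(JvmTarget.JVM_17)
        }
    }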
 
diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt
index 52c5dc2154e3e849fc85fc234b242e009345ff40..872ec2b98ab40b94c47d1cab82ad37d22ee354cc 100644
@@ -6,6 +6,7 @@ import android.util.Log
 import android.widget.EditText
 import android.widget.TextView
 import android.widget.Toast
+import androidx.activity.addCallback
 import androidx.activity.enableEdgeToEdge
 import androidx.activity.result.contract.ActivityResultContracts
 import androidx.appcompat.app.AppCompatActivity
@@ -18,6 +19,7 @@ import com.arm.aichat.gguf.GgufMetadata
 import com.arm.aichat.gguf.GgufMetadataReader
 import com.google.android.material.floatingactionbutton.FloatingActionButton
 import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.Job
 import kotlinx.coroutines.flow.onCompletion
 import kotlinx.coroutines.launch
 import kotlinx.coroutines.withContext
@@ -36,6 +38,7 @@ class MainActivity : AppCompatActivity() {
 
     // Arm AI Chat inference engine
     private lateinit var engine: InferenceEngine
+    private var generationJob: Job? = null
 
     // Conversation states
     private var isModelReady = false
@@ -47,11 +50,13 @@ class MainActivity : AppCompatActivity() {
         super.onCreate(savedInstanceState)
         enableEdgeToEdge()
         setContentView(R.layout.activity_main)
+        // View model boilerplate and state management are beyond this basic sample's scope
+        onBackPressedDispatcher.addCallback { Log.w(TAG, "Ignore back press for simplicity") }
 
         // Find views
         ggufTv = findViewById(R.id.gguf)
         messagesRv = findViewById(R.id.messages)
-        messagesRv.layoutManager = LinearLayoutManager(this)
+        messagesRv.layoutManager = LinearLayoutManager(this).apply { stackFromEnd = true }
         messagesRv.adapter = messageAdapter
         userInputEt = findViewById(R.id.user_input)
         userActionFab = findViewById(R.id.fab)
@@ -157,33 +162,35 @@ class MainActivity : AppCompatActivity() {
      * Validate and send the user message into [InferenceEngine]
      */
     private fun handleUserInput() {
-        userInputEt.text.toString().also { userSsg ->
-            if (userSsg.isEmpty()) {
+        userInputEt.text.toString().also { userMsg ->
+            if (userMsg.isEmpty()) {
                 Toast.makeText(this, "Input message is empty!", Toast.LENGTH_SHORT).show()
             } else {
                 userInputEt.text = null
+                userInputEt.isEnabled = false
                 userActionFab.isEnabled = false
 
                 // Update message states
-                messages.add(Message(UUID.randomUUID().toString(), userSsg, true))
+                messages.add(Message(UUID.randomUUID().toString(), userMsg, true))
                 lastAssistantMsg.clear()
                 messages.add(Message(UUID.randomUUID().toString(), lastAssistantMsg.toString(), false))
 
-                lifecycleScope.launch(Dispatchers.Default) {
-                    engine.sendUserPrompt(userSsg)
+                generationJob = lifecycleScope.launch(Dispatchers.Default) {
+                    engine.sendUserPrompt(userMsg)
                         .onCompletion {
                             withContext(Dispatchers.Main) {
+                                userInputEt.isEnabled = true
                                 userActionFab.isEnabled = true
                             }
                         }.collect { token ->
-                            val messageCount = messages.size
-                            check(messageCount > 0 && !messages[messageCount - 1].isUser)
+                            withContext(Dispatchers.Main) {
+                                val messageCount = messages.size
+                                check(messageCount > 0 && !messages[messageCount - 1].isUser)
 
-                            messages.removeAt(messageCount - 1).copy(
-                                content = lastAssistantMsg.append(token).toString()
-                            ).let { messages.add(it) }
+                                messages.removeAt(messageCount - 1).copy(
+                                    content = lastAssistantMsg.append(token).toString()
+                                ).let { messages.add(it) }
 
-                            withContext(Dispatchers.Main) {
                                 messageAdapter.notifyItemChanged(messages.size - 1)
                             }
                         }
@@ -195,6 +202,7 @@ class MainActivity : AppCompatActivity() {
     /**
      * Run a benchmark with the model file
      */
+    @Deprecated("This benchmark doesn't accurately indicate GUI performance expected by app developers")
     private suspend fun runBenchmark(modelName: String, modelFile: File) =
         withContext(Dispatchers.Default) {
             Log.i(TAG, "Starts benchmarking $modelName")
@@ -223,6 +231,16 @@ class MainActivity : AppCompatActivity() {
             if (!it.exists()) { it.mkdir() }
         }
 
+    override fun onStop() {
+        generationJob?.cancel()
+        super.onStop()
+    }
+
+    override fun onDestroy() {
+        engine.destroy()
+        super.onDestroy()
+    }
+
     companion object {
         private val TAG = MainActivity::class.java.simpleName
 
diff --git a/examples/llama.android/app/src/main/res/layout/activity_main.xml b/examples/llama.android/app/src/main/res/layout/activity_main.xml
index ad805a674ee44bfc606b0d4f453b9c6e57af0e05..d15772bd375989969cd949f1b810b15fb0a68c36 100644
@@ -24,7 +24,7 @@
                 android:id="@+id/gguf"
                 android:layout_width="match_parent"
                 android:layout_height="wrap_content"
-                android:layout_margin="16dp"
+                android:padding="16dp"
                 android:text="Selected GGUF model's metadata will show here."
                 style="@style/TextAppearance.MaterialComponents.Body2" />
 
@@ -33,8 +33,7 @@
         <com.google.android.material.divider.MaterialDivider
             android:layout_width="match_parent"
             android:layout_height="2dp"
-            android:layout_marginHorizontal="16dp"
-            android:layout_marginVertical="8dp" />
+            android:layout_marginHorizontal="16dp" />
 
         <androidx.recyclerview.widget.RecyclerView
             android:id="@+id/messages"
diff --git a/examples/llama.android/gradle/libs.versions.toml b/examples/llama.android/gradle/libs.versions.toml
index df32a75661d0dd72eb71b6e2b968d8b1f1f7a365..8ff2afd40b750f006041e5f00d9eed4be1169493 100644
@@ -1,15 +1,15 @@
 [versions]
 
 # Plugins
-agp = "8.13.0"
-kotlin = "2.2.20"
+agp = "8.13.2"
+kotlin = "2.3.0"
 
 # AndroidX
-activity = "1.11.0"
+activity = "1.12.2"
 appcompat = "1.7.1"
 core-ktx = "1.17.0"
 constraint-layout = "2.2.1"
-datastore-preferences = "1.1.7"
+datastore-preferences = "1.2.0"
 
 # Material
 material = "1.13.0"
diff --git a/examples/llama.android/lib/src/main/cpp/ai_chat.cpp b/examples/llama.android/lib/src/main/cpp/ai_chat.cpp
index d655a0965ff20d54e2ae3a3696e3b965cf023802..9e460ac198902e6f30d40613af69e544649de55e 100644
@@ -560,6 +560,6 @@ Java_com_arm_aichat_internal_InferenceEngineImpl_unload(JNIEnv * /*unused*/, job
 
 extern "C"
 JNIEXPORT void JNICALL
-Java_com_arm_aichat_internal_InferenceEngineImpl_shutdown(JNIEnv *env, jobject /*unused*/) {
+Java_com_arm_aichat_internal_InferenceEngineImpl_shutdown(JNIEnv *, jobject /*unused*/) {
     llama_backend_free();
 }
diff --git a/examples/llama.android/lib/src/main/java/com/arm/aichat/InferenceEngine.kt b/examples/llama.android/lib/src/main/java/com/arm/aichat/InferenceEngine.kt
index 44852fa828d4ca6b9b99ea16fd89f7427c84fa3f..26c1668724662f9fb57f93f725e36d5678a4aced 100644
@@ -38,7 +38,7 @@ interface InferenceEngine {
     /**
      * Unloads the currently loaded model.
      */
-    suspend fun cleanUp()
+    fun cleanUp()
 
     /**
      * Cleans up resources when the engine is no longer needed.
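
Dropping `suspend` here is what lets lifecycle code call `cleanUp()` directly, since Activity callbacks such as `onDestroy()` are not suspend contexts. A hypothetical host class (not part of this commit):

    import android.app.Activity
    import com.arm.aichat.InferenceEngine

    // Hypothetical host: because cleanUp() is no longer suspend, a plain
    // lifecycle callback can call it without launching a coroutine.
    class HostActivity : Activity() {
        private lateinit var engine: InferenceEngine

        override fun onDestroy() {
            // Blocks briefly on the engine's dispatcher (see the
            // implementation below), then returns
            if (::engine.isInitialized) engine.cleanUp()
            super.onDestroy()
        }
    }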
diff --git a/examples/llama.android/lib/src/main/java/com/arm/aichat/internal/InferenceEngineImpl.kt b/examples/llama.android/lib/src/main/java/com/arm/aichat/internal/InferenceEngineImpl.kt
index b9056ea8194891b5d6848173205d6688433752d8..a293796f9017326531021c9be38bdd33fcb9667e 100644
@@ -15,9 +15,11 @@ import kotlinx.coroutines.cancel
 import kotlinx.coroutines.flow.Flow
 import kotlinx.coroutines.flow.MutableStateFlow
 import kotlinx.coroutines.flow.StateFlow
+import kotlinx.coroutines.flow.asStateFlow
 import kotlinx.coroutines.flow.flow
 import kotlinx.coroutines.flow.flowOn
 import kotlinx.coroutines.launch
+import kotlinx.coroutines.runBlocking
 import kotlinx.coroutines.withContext
 import java.io.File
 import java.io.IOException
@@ -109,9 +111,11 @@ internal class InferenceEngineImpl private constructor(
 
     private val _state =
         MutableStateFlow<InferenceEngine.State>(InferenceEngine.State.Uninitialized)
-    override val state: StateFlow<InferenceEngine.State> = _state
+    override val state: StateFlow<InferenceEngine.State> = _state.asStateFlow()
 
     private var _readyForSystemPrompt = false
+    @Volatile
+    private var _cancelGeneration = false
 
     /**
      * Single-threaded coroutine dispatcher & scope for LLama asynchronous operations
@@ -169,6 +173,8 @@ internal class InferenceEngineImpl private constructor(
                 }
                 Log.i(TAG, "Model loaded!")
                 _readyForSystemPrompt = true
+
+                _cancelGeneration = false
                 _state.value = InferenceEngine.State.ModelReady
             } catch (e: Exception) {
                 Log.e(TAG, (e.message ?: "Error loading model") + "\n" + pathToModel, e)
@@ -231,15 +237,19 @@ internal class InferenceEngineImpl private constructor(
 
             Log.i(TAG, "User prompt processed. Generating assistant prompt...")
             _state.value = InferenceEngine.State.Generating
-            while (true) {
+            while (!_cancelGeneration) {
                 generateNextToken()?.let { utf8token ->
                     if (utf8token.isNotEmpty()) emit(utf8token)
                 } ?: break
             }
-            Log.i(TAG, "Assistant generation complete. Awaiting user prompt...")
+            if (_cancelGeneration) {
+                Log.i(TAG, "Assistant generation aborted as requested.")
+            } else {
+                Log.i(TAG, "Assistant generation complete. Awaiting user prompt...")
+            }
             _state.value = InferenceEngine.State.ModelReady
         } catch (e: CancellationException) {
-            Log.i(TAG, "Generation cancelled by user.")
+            Log.i(TAG, "Assistant generation's flow collection cancelled.")
             _state.value = InferenceEngine.State.ModelReady
             throw e
         } catch (e: Exception) {
@@ -268,8 +278,9 @@ internal class InferenceEngineImpl private constructor(
     /**
      * Unloads the model and frees resources, or reset error states
      */
-    override suspend fun cleanUp() =
-        withContext(llamaDispatcher) {
+    override fun cleanUp() {
+        _cancelGeneration = true
+        runBlocking(llamaDispatcher) {
             when (val state = _state.value) {
                 is InferenceEngine.State.ModelReady -> {
                     Log.i(TAG, "Unloading model and free resources...")
@@ -293,17 +304,21 @@ internal class InferenceEngineImpl private constructor(
                 else -> throw IllegalStateException("Cannot unload model in ${state.javaClass.simpleName}")
             }
         }
+    }
 
     /**
      * Cancel all ongoing coroutines and free GGML backends
      */
     override fun destroy() {
-        _readyForSystemPrompt = false
-        llamaScope.cancel()
-        when(_state.value) {
-            is InferenceEngine.State.Uninitialized -> {}
-            is InferenceEngine.State.Initialized -> shutdown()
-            else -> { unload(); shutdown() }
+        _cancelGeneration = true
+        runBlocking(llamaDispatcher) {
+            _readyForSystemPrompt = false
+            when(_state.value) {
+                is InferenceEngine.State.Uninitialized -> {}
+                is InferenceEngine.State.Initialized -> shutdown()
+                else -> { unload(); shutdown() }
+            }
         }
+        llamaScope.cancel()
     }
 }
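
The reworked `destroy()` above combines two mechanisms: the `@Volatile` `_cancelGeneration` flag makes the token loop exit promptly even though it never suspends, and `runBlocking(llamaDispatcher)` queues the teardown behind any in-flight generation on the engine's single-threaded dispatcher. A condensed sketch of the same pattern, independent of the JNI layer (all names below are illustrative):

    import java.util.concurrent.Executors
    import kotlinx.coroutines.asCoroutineDispatcher
    import kotlinx.coroutines.runBlocking
    import kotlinx.coroutines.withContext

    class ToyEngine {
        private val dispatcher =
            Executors.newSingleThreadExecutor().asCoroutineDispatcher()

        @Volatile
        private var cancelled = false

        // Generation runs entirely on the single-threaded dispatcher and polls
        // the volatile flag every iteration, since the loop never suspends.
        suspend fun generate(emit: (String) -> Unit) = withContext(dispatcher) {
            while (!cancelled) {
                val token = nextToken() ?: break   // stand-in for the native call
                emit(token)
            }
        }

        // Safe to call from a main-thread lifecycle callback: setting the flag
        // first unblocks the loop, then runBlocking queues teardown behind it
        // on the same thread, so native state is never freed mid-generation.
        fun destroy() {
            cancelled = true
            runBlocking(dispatcher) { freeNativeResources() }
            dispatcher.close()
        }

        private fun nextToken(): String? = null          // stand-in for JNI
        private fun freeNativeResources() { /* stand-in for unload + shutdown */ }
    }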