llama-run: add support for downloading models from ModelScope (#13370)
author     R0CKSTAR <redacted>
           Fri, 9 May 2025 09:25:50 +0000 (17:25 +0800)
committer  GitHub <redacted>
           Fri, 9 May 2025 09:25:50 +0000 (10:25 +0100)
Signed-off-by: Xiaodong Ye <redacted>
tools/run/README.md
tools/run/run.cpp

diff --git a/tools/run/README.md b/tools/run/README.md
index 89a55207986619520833729551fbcd7c27ac9a1a..5fd769b44cb9f9e26f138e335f91b2aeaef65936 100644
--- a/tools/run/README.md
+++ b/tools/run/README.md
@@ -42,6 +42,8 @@ Examples:
   llama-run ollama://smollm:135m
   llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf
   llama-run huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf
+  llama-run ms://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf
+  llama-run modelscope://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf
   llama-run https://example.com/some-file1.gguf
   llama-run some-file2.gguf
   llama-run file://some-file3.gguf
diff --git a/tools/run/run.cpp b/tools/run/run.cpp
index e63c2aac3314ed7a9214ad8da184e898a2d62c15..a189ae7faf1023408e2d0a0336b2859ca7e2e545 100644
--- a/tools/run/run.cpp
+++ b/tools/run/run.cpp
@@ -267,7 +267,7 @@ class Opt {
             "Commands:\n"
             "  model\n"
             "      Model is a string with an optional prefix of \n"
-            "      huggingface:// (hf://), ollama://, https:// or file://.\n"
+            "      huggingface:// (hf://), modelscope:// (ms://), ollama://, https:// or file://.\n"
             "      If no protocol is specified and a file exists in the specified\n"
             "      path, file:// is assumed, otherwise if a file does not exist in\n"
             "      the specified path, ollama:// is assumed. Models that are being\n"
@@ -282,6 +282,9 @@ class Opt {
             "  llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
             "  llama-run "
             "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
+            "  llama-run ms://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n"
+            "  llama-run "
+            "modelscope://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n"
             "  llama-run https://example.com/some-file1.gguf\n"
             "  llama-run some-file2.gguf\n"
             "  llama-run file://some-file3.gguf\n"
@@ -689,7 +692,7 @@ class LlamaData {
         return 0;
     }
 
-    int huggingface_dl(std::string & model, const std::string & bn) {
+    int dl_from_endpoint(std::string & model_endpoint, std::string & model, const std::string & bn) {
         // Find the second occurrence of '/' after protocol string
         size_t pos = model.find('/');
         pos        = model.find('/', pos + 1);
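
A note on the parsing above: the second-slash split separates the repo id from the file name. Below is a minimal self-contained sketch, including the URL shape the elided body is assumed to build (the "resolve/main" layout is an assumption; that part of dl_from_endpoint is outside these hunks):

#include <iostream>
#include <string>

int main() {
    std::string model = "QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf";

    // Find the second '/' (the protocol prefix has already been stripped).
    size_t pos = model.find('/');
    pos        = model.find('/', pos + 1);

    const std::string repo = model.substr(0, pos);   // "QuantFactory/SmolLM-135M-GGUF"
    const std::string file = model.substr(pos + 1);  // "SmolLM-135M.Q2_K.gguf"

    // ASSUMPTION: the elided body builds a Hugging Face-style resolve URL,
    // which for the ModelScope endpoint would look like:
    std::cout << "https://modelscope.cn/models/" + repo + "/resolve/main/" + file << '\n';
}
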
@@ -697,8 +700,6 @@ class LlamaData {
         std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" };
         std::string              url;
 
-        std::string model_endpoint = get_model_endpoint();
-
         if (pos == std::string::npos) {
             auto [model_name, manifest_url] = extract_model_and_tag(model, model_endpoint + "v2/");
             hfr                             = model_name;
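
When the spec contains no second '/', the fallback above resolves it through a manifest lookup. extract_model_and_tag is not part of this diff; the following is a sketch inferred from its call site, assuming an Ollama-style name:tag convention with a "latest" default:

#include <string>
#include <utility>

// Sketch only: signature inferred from the call site
// `extract_model_and_tag(model, model_endpoint + "v2/")`.
static std::pair<std::string, std::string> extract_model_and_tag(std::string & model, const std::string & base_url) {
    std::string tag   = "latest";  // ASSUMPTION: default tag when none is given
    const auto  colon = model.find(':');
    if (colon != std::string::npos) {
        tag   = model.substr(colon + 1);
        model = model.substr(0, colon);
    }
    // ASSUMPTION: registry-style manifest path "<endpoint>v2/<name>/manifests/<tag>";
    // the caller passes base_url = model_endpoint + "v2/".
    return { model, base_url + model + "/manifests/" + tag };
}
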
@@ -720,6 +721,16 @@ class LlamaData {
         return download(url, bn, true, headers);
     }
 
+    int modelscope_dl(std::string & model, const std::string & bn) {
+        std::string model_endpoint = "https://modelscope.cn/models/";
+        return dl_from_endpoint(model_endpoint, model, bn);
+    }
+
+    int huggingface_dl(std::string & model, const std::string & bn) {
+        std::string model_endpoint = get_model_endpoint();
+        return dl_from_endpoint(model_endpoint, model, bn);
+    }
+
     int ollama_dl(std::string & model, const std::string & bn) {
         const std::vector<std::string> headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" };
         if (model.find('/') == std::string::npos) {
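
The two wrappers differ only in where the endpoint comes from: modelscope_dl pins https://modelscope.cn/models/, while huggingface_dl still routes through get_model_endpoint(). That helper is outside this diff; below is a sketch of the overridable-endpoint idea, with the environment-variable name being an assumption rather than something this commit confirms:

#include <cstdlib>
#include <string>

// Sketch only: get_model_endpoint() is defined elsewhere in the tree.
// ASSUMPTION: it returns an env-var override when set, else the HF default.
static std::string get_model_endpoint() {
    const char * env = std::getenv("MODEL_ENDPOINT");  // ASSUMPTION: variable name
    return env ? std::string(env) : std::string("https://huggingface.co/");
}

Pinning the ModelScope endpoint rather than reusing the same override keeps an existing Hugging Face mirror configuration from silently redirecting ms:// downloads.
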
@@ -837,6 +848,9 @@ class LlamaData {
             rm_until_substring(model_, "hf.co/");
             rm_until_substring(model_, "://");
             ret = huggingface_dl(model_, bn);
+        } else if (string_starts_with(model_, "ms://") || string_starts_with(model_, "modelscope://")) {
+            rm_until_substring(model_, "://");
+            ret = modelscope_dl(model_, bn);
         } else if ((string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) &&
                    !string_starts_with(model_, "https://ollama.com/library/")) {
             ret = download(model_, bn, true);
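
For completeness, a self-contained sketch of the two dispatch helpers used above (both are defined elsewhere in the tree; these bodies are assumptions that match their use here):

#include <iostream>
#include <string>

// Sketch: true if `s` begins with `prefix`.
static bool string_starts_with(const std::string & s, const std::string & prefix) {
    return s.rfind(prefix, 0) == 0;
}

// Sketch: drop everything up to and including the first occurrence of `substr`.
static void rm_until_substring(std::string & s, const std::string & substr) {
    const auto pos = s.find(substr);
    if (pos != std::string::npos) {
        s = s.substr(pos + substr.size());
    }
}

int main() {
    std::string model_ = "ms://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf";
    if (string_starts_with(model_, "ms://") || string_starts_with(model_, "modelscope://")) {
        rm_until_substring(model_, "://");
        std::cout << model_ << '\n';  // "QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf"
    }
}
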