git.djapps.eu Git - pkg/ggml/sources/whisper.cpp/commitdiff
models : change HF hosting from dataset to model
authorGeorgi Gerganov <redacted>
Wed, 22 Mar 2023 18:44:56 +0000 (20:44 +0200)
committerGeorgi Gerganov <redacted>
Wed, 22 Mar 2023 18:44:56 +0000 (20:44 +0200)
README.md
bindings/go/examples/go-model-download/main.go
examples/talk/README.md
models/README.md
models/download-ggml-model.cmd
models/download-ggml-model.sh
whisper.cpp

index f891332f79d8d87288f5ac3e1667851e97490715..474220a9f2acf3a375482a56b20b9336514a878c 100644 (file)
--- a/README.md
+++ b/README.md
@@ -466,7 +466,7 @@ The original models are converted to a custom binary format. This allows to pack
 You can download the converted models using the [models/download-ggml-model.sh](models/download-ggml-model.sh) script
 or manually from here:
 
-- https://huggingface.co/datasets/ggerganov/whisper.cpp
+- https://huggingface.co/ggerganov/whisper.cpp
 - https://ggml.ggerganov.com
 
 For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or the README
index 91d016acc03e600325d5c840a505a6ee09647803..67462a581d3cc09709df0613683aeebcad2d00de 100644 (file)
@@ -17,9 +17,9 @@ import (
 // CONSTANTS
 
 const (
-       srcUrl  = "https://huggingface.co/datasets/ggerganov/whisper.cpp/resolve/main" // The location of the models
-       srcExt  = ".bin"                                                               // Filename extension
-       bufSize = 1024 * 64                                                            // Size of the buffer used for downloading the model
+       srcUrl  = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main" // The location of the models
+       srcExt  = ".bin"                                                      // Filename extension
+       bufSize = 1024 * 64                                                   // Size of the buffer used for downloading the model
 )
 
 var (
index 160f0ac68360da02f0056d8aab773c2638a1d36b..818a4283f9c54ac93273c4df0340be39d06cc9e1 100644 (file)
@@ -31,7 +31,7 @@ To run this, you will need a ggml GPT-2 model: [instructions](https://github.com
 Alternatively, you can simply download the smallest ggml GPT-2 117M model (240 MB) like this:\r
 \r
 ```\r
-wget --quiet --show-progress -O models/ggml-gpt-2-117M.bin https://huggingface.co/datasets/ggerganov/ggml/raw/main/ggml-model-gpt-2-117M.bin\r
+wget --quiet --show-progress -O models/ggml-gpt-2-117M.bin https://huggingface.co/ggerganov/ggml/raw/main/ggml-model-gpt-2-117M.bin\r
 ```\r
 \r
 ## TTS\r
index 64ce6b3836b89503d451cb3a918afbd24b7ece40..ab0dde7ccc84e22148787a4f9b24ec7a376d8e59 100644 (file)
@@ -6,7 +6,7 @@ using the [convert-pt-to-ggml.py](convert-pt-to-ggml.py) script. You can either
 the `ggml` files yourself using the conversion script, or you can use the [download-ggml-model.sh](download-ggml-model.sh)
 script to download the already converted models. Currently, they are hosted on the following locations:
 
-- https://huggingface.co/datasets/ggerganov/whisper.cpp
+- https://huggingface.co/ggerganov/whisper.cpp
 - https://ggml.ggerganov.com
 
 Sample usage:
@@ -23,7 +23,7 @@ You can now use it like this:
 
 A third option to obtain the model files is to download them from Hugging Face:
 
-https://huggingface.co/datasets/ggerganov/whisper.cpp/tree/main
+https://huggingface.co/ggerganov/whisper.cpp/tree/main
 
 ## Available models
 
index 0def31abe243ec2e1478b1ee2143dd0a43ded31f..f4ce633914c52e232d215f24b0dc977aaa0d4256 100644 (file)
@@ -40,7 +40,7 @@ if exist "ggml-%model%.bin" (
   goto :eof
 )
 
-PowerShell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-WebRequest -Uri https://huggingface.co/datasets/ggerganov/whisper.cpp/resolve/main/ggml-%model%.bin -OutFile ggml-%model%.bin"
+PowerShell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-WebRequest -Uri https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-%model%.bin -OutFile ggml-%model%.bin"
 
 if %ERRORLEVEL% neq 0 (
   echo Failed to download ggml model %model%
index 7075080c97f8591adc074e82260c484d9436da4d..6fda18ea55d5fa12c67d8d49a60a5ac27eac3d7d 100755 (executable)
@@ -6,7 +6,7 @@
 #src="https://ggml.ggerganov.com"
 #pfx="ggml-model-whisper"
 
-src="https://huggingface.co/datasets/ggerganov/whisper.cpp"
+src="https://huggingface.co/ggerganov/whisper.cpp"
 pfx="resolve/main/ggml"
 
 # get the path of this script
index 14b04d7a1a2f28a9bc6332de972519673ba90a2e..bee1c258783b5a45cc7a62cd87f12ed4fa30c5dc 100644 (file)
@@ -631,7 +631,6 @@ struct whisper_context {
     int64_t t_load_us = 0;
     int64_t t_start_us = 0;
 
-
     ggml_type wtype = ggml_type::GGML_TYPE_F16; // weight type (FP32 or FP16)
 
     whisper_model model;