server: ServerProcess
-TIMEOUT_SERVER_START = 15*60
+TIMEOUT_START_SLOW = 15 * 60 # longer startup timeout, needed for tests that run real (non-tiny) models
TIMEOUT_HTTP_REQUEST = 60
@pytest.fixture(autouse=True)
server.jinja = True
server.n_predict = n_predict
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start()
do_test_completion_with_required_tool_tiny(server, tool, argument_key, n_predict, stream=stream == CompletionMode.STREAMED, temperature=0.0, top_k=1, top_p=1.0)
server.jinja = True
server.n_predict = n_predict
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start(timeout_seconds=TIMEOUT_START_SLOW)
do_test_completion_with_required_tool_tiny(server, tool, argument_key, n_predict, stream=stream == CompletionMode.STREAMED)
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start(timeout_seconds=TIMEOUT_START_SLOW)
body = server.make_any_request("POST", "/v1/chat/completions", data={
"max_tokens": n_predict,
"messages": [
server.n_predict = n_predict
server.jinja = True
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start()
do_test_completion_without_tool_call(server, n_predict, tools, tool_choice, stream=stream == CompletionMode.STREAMED)
server.n_predict = n_predict
server.jinja = True
server.chat_template_file = f'../../../models/templates/{template_name}.jinja'
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start(timeout_seconds=TIMEOUT_START_SLOW)
do_test_completion_without_tool_call(server, n_predict, tools, tool_choice, stream=stream == CompletionMode.STREAMED)
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start()
do_test_weather(server, stream=stream == CompletionMode.STREAMED, max_tokens=n_predict)
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start(timeout_seconds=TIMEOUT_START_SLOW)
do_test_calc_result(server, result_override, n_predict, stream=stream == CompletionMode.STREAMED)
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start()
body = server.make_any_request("POST", "/v1/chat/completions", data={
"max_tokens": n_predict,
"messages": [
assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template."
elif isinstance(template_override, str):
server.chat_template = template_override
- server.start(timeout_seconds=TIMEOUT_SERVER_START)
+ server.start(timeout_seconds=TIMEOUT_START_SLOW)
do_test_hello_world(server, stream=stream == CompletionMode.STREAMED, max_tokens=n_predict)
server: ServerProcess
-IMG_URL_0 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/11_truck.png"
-IMG_URL_1 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/91_cat.png"
-
-response = requests.get(IMG_URL_0)
-response.raise_for_status() # Raise an exception for bad status codes
-IMG_BASE64_URI_0 = "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8")
-IMG_BASE64_0 = base64.b64encode(response.content).decode("utf-8")
-
-response = requests.get(IMG_URL_1)
-response.raise_for_status() # Raise an exception for bad status codes
-IMG_BASE64_URI_1 = "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8")
-IMG_BASE64_1 = base64.b64encode(response.content).decode("utf-8")
+def get_img_url(id: str) -> str:
+ """Resolve a symbolic image id used in test parameters into a real URL or base64 payload.
+ This keeps large base64 strings out of parametrize ids and logs, and defers the download
+ until a test actually needs the data; unknown ids (e.g. "malformed") are returned unchanged."""
+ IMG_URL_0 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/11_truck.png"
+ IMG_URL_1 = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/test/91_cat.png"
+ if id == "IMG_URL_0":
+ return IMG_URL_0
+ elif id == "IMG_URL_1":
+ return IMG_URL_1
+ elif id in ("IMG_BASE64_URI_0", "IMG_BASE64_0", "IMG_BASE64_URI_1", "IMG_BASE64_1"):
+ url = IMG_URL_0 if id.endswith("_0") else IMG_URL_1
+ response = requests.get(url)
+ response.raise_for_status() # raise an exception for bad status codes
+ b64 = base64.b64encode(response.content).decode("utf-8")
+ return ("data:image/png;base64," + b64) if "URI" in id else b64
+ else:
+ return id
JSON_MULTIMODAL_KEY = "multimodal_data"
JSON_PROMPT_STRING_KEY = "prompt_string"
def test_models_supports_multimodal_capability():
global server
- server.start() # vision model may take longer to load due to download size
+ server.start()
res = server.make_request("GET", "/models", data={})
assert res.status_code == 200
model_info = res.body["models"][0]
def test_v1_models_supports_multimodal_capability():
global server
- server.start() # vision model may take longer to load due to download size
+ server.start()
res = server.make_request("GET", "/v1/models", data={})
assert res.status_code == 200
model_info = res.body["models"][0]
"prompt, image_url, success, re_content",
[
# test model is trained on CIFAR-10, but it's quite dumb due to small size
- ("What is this:\n", IMG_URL_0, True, "(cat)+"),
- ("What is this:\n", "IMG_BASE64_URI_0", True, "(cat)+"), # exceptional, so that we don't cog up the log
- ("What is this:\n", IMG_URL_1, True, "(frog)+"),
- ("Test test\n", IMG_URL_1, True, "(frog)+"), # test invalidate cache
+ ("What is this:\n", "IMG_URL_0", True, "(cat)+"),
+ ("What is this:\n", "IMG_BASE64_URI_0", True, "(cat)+"),
+ ("What is this:\n", "IMG_URL_1", True, "(frog)+"),
+ ("Test test\n", "IMG_URL_1", True, "(frog)+"), # test invalidate cache
("What is this:\n", "malformed", False, None),
("What is this:\n", "https://google.com/404", False, None), # non-existent image
("What is this:\n", "https://ggml.ai", False, None), # non-image data
)
def test_vision_chat_completion(prompt, image_url, success, re_content):
global server
- server.start(timeout_seconds=60) # vision model may take longer to load due to download size
- if image_url == "IMG_BASE64_URI_0":
- image_url = IMG_BASE64_URI_0
+ server.start()
res = server.make_request("POST", "/chat/completions", data={
"temperature": 0.0,
"top_k": 1,
{"role": "user", "content": [
{"type": "text", "text": prompt},
{"type": "image_url", "image_url": {
- "url": image_url,
+ "url": get_img_url(image_url),
}},
]},
],
"prompt, image_data, success, re_content",
[
# test model is trained on CIFAR-10, but it's quite dumb due to small size
- ("What is this: <__media__>\n", IMG_BASE64_0, True, "(cat)+"),
- ("What is this: <__media__>\n", IMG_BASE64_1, True, "(frog)+"),
+ ("What is this: <__media__>\n", "IMG_BASE64_0", True, "(cat)+"),
+ ("What is this: <__media__>\n", "IMG_BASE64_1", True, "(frog)+"),
("What is this: <__media__>\n", "malformed", False, None), # non-image data
("What is this:\n", "", False, None), # empty string
]
)
def test_vision_completion(prompt, image_data, success, re_content):
global server
- server.start() # vision model may take longer to load due to download size
+ server.start()
res = server.make_request("POST", "/completions", data={
"temperature": 0.0,
"top_k": 1,
- "prompt": { JSON_PROMPT_STRING_KEY: prompt, JSON_MULTIMODAL_KEY: [ image_data ] },
+ "prompt": {
+ JSON_PROMPT_STRING_KEY: prompt,
+ JSON_MULTIMODAL_KEY: [ get_img_url(image_data) ],
+ },
})
if success:
assert res.status_code == 200
"prompt, image_data, success",
[
# test model is trained on CIFAR-10, but it's quite dumb due to small size
- ("What is this: <__media__>\n", IMG_BASE64_0, True), # exceptional, so that we don't cog up the log
- ("What is this: <__media__>\n", IMG_BASE64_1, True),
+ ("What is this: <__media__>\n", "IMG_BASE64_0", True),
+ ("What is this: <__media__>\n", "IMG_BASE64_1", True),
("What is this: <__media__>\n", "malformed", False), # non-image data
("What is this:\n", "base64", False), # non-image data
]
)
def test_vision_embeddings(prompt, image_data, success):
global server
- server.server_embeddings=True
- server.n_batch=512
- server.start() # vision model may take longer to load due to download size
+ server.server_embeddings = True
+ server.n_batch = 512
+ server.start()
+ image_data = get_img_url(image_data)
res = server.make_request("POST", "/embeddings", data={
"content": [
{ JSON_PROMPT_STRING_KEY: prompt, JSON_MULTIMODAL_KEY: [ image_data ] },
import wget
-DEFAULT_HTTP_TIMEOUT = 30
+DEFAULT_HTTP_TIMEOUT = 60
class ServerResponse:
model_alias: str = "tinyllama-2"
temperature: float = 0.8
seed: int = 42
+ offline: bool = False # when True, start the server with --offline so only locally cached model files are used
# custom options
model_alias: str | None = None
"--seed",
self.seed,
]
+ if self.offline:
+ server_args.append("--offline")
if self.model_file:
server_args.extend(["--model", self.model_file])
if self.model_url:
class ServerPreset:
+ @staticmethod
+ def load_all() -> None:
+ """ Load all server presets to ensure model files are cached. """
+ servers: List[ServerProcess] = [
+ method()
+ for name, method in ServerPreset.__dict__.items()
+ if callable(method) and name != "load_all"
+ ]
+ for server in servers:
+ server.offline = False # force online mode so the model file actually gets downloaded and cached
+ server.start()
+ server.stop()
+
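+ # NOTE: several presets below set offline = True, so their model files must already be in
+ # the local cache, e.g. by running load_all() once up front. One plausible hook for that
+ # (an assumption, not part of this diff) is a session-scoped pytest fixture:
+ #
+ #     @pytest.fixture(scope="session", autouse=True)
+ #     def warm_model_cache():
+ #         ServerPreset.load_all()
+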
@staticmethod
def tinyllama2() -> ServerProcess:
server = ServerProcess()
@staticmethod
def bert_bge_small() -> ServerProcess:
server = ServerProcess()
+ server.offline = True # will be downloaded by load_all()
server.model_hf_repo = "ggml-org/models"
server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
server.model_alias = "bert-bge-small"
@staticmethod
def bert_bge_small_with_fa() -> ServerProcess:
server = ServerProcess()
+ server.offline = True # will be downloaded by load_all()
server.model_hf_repo = "ggml-org/models"
server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
server.model_alias = "bert-bge-small"
@staticmethod
def tinyllama_infill() -> ServerProcess:
server = ServerProcess()
+ server.offline = True # will be downloaded by load_all()
server.model_hf_repo = "ggml-org/models"
server.model_hf_file = "tinyllamas/stories260K-infill.gguf"
server.model_alias = "tinyllama-infill"
@staticmethod
def stories15m_moe() -> ServerProcess:
server = ServerProcess()
+ server.offline = True # will be downloaded by load_all()
server.model_hf_repo = "ggml-org/stories15M_MOE"
server.model_hf_file = "stories15M_MOE-F16.gguf"
server.model_alias = "stories15m-moe"
@staticmethod
def jina_reranker_tiny() -> ServerProcess:
server = ServerProcess()
+ server.offline = True # will be downloaded by load_all()
server.model_hf_repo = "ggml-org/models"
server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf"
server.model_alias = "jina-reranker"
@staticmethod
def tinygemma3() -> ServerProcess:
server = ServerProcess()
+ server.offline = True # will be downloaded by load_all()
# mmproj is already provided by HF registry API
server.model_hf_repo = "ggml-org/tinygemma3-GGUF"
server.model_hf_file = "tinygemma3-Q8_0.gguf"