//
export async function* llama(prompt, params = {}, config = {}) {
let controller = config.controller;
- const api_url = config.api_url || "";
+ const api_url = config.api_url?.replace(/\/+$/, '') || "";
if (!controller) {
controller = new AbortController();
// Get the model info from the server. This is useful for getting the context window size and other generation defaults.
export const llamaModelInfo = async (config = {}) => {
if (!generation_settings) {
- const api_url = config.api_url || "";
+ const api_url = config.api_url?.replace(/\/+$/, '') || "";
const props = await fetch(`${api_url}/props`).then(r => r.json());
generation_settings = props.default_generation_settings;
}
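
Both `llama()` and `llamaModelInfo()` now normalize `api_url` the same way: the optional chaining tolerates a missing `config.api_url`, and the regex strips any trailing slashes so that concatenations like `${api_url}/props` and `${api_url}/completion` can never produce a double slash. A minimal sketch of the normalization, with hypothetical sample values:

    const normalize = (api_url) => api_url?.replace(/\/+$/, '') || "";
    normalize("http://localhost:8080/");  // -> "http://localhost:8080"
    normalize("https://host/llama///");   // -> "https://host/llama"
    normalize(undefined);                 // -> ""
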
<script type="module">
import {
html, h, signal, effect, computed, render, useSignal, useEffect, useRef, Component
- } from '/index.js';
+ } from './index.js';
- import { llama } from '/completion.js';
- import { SchemaConverter } from '/json-schema-to-grammar.mjs';
+ import { llama } from './completion.js';
+ import { SchemaConverter } from './json-schema-to-grammar.mjs';
import { promptFormats } from './prompt-formats.js';
import { systemPrompts } from './system-prompts.js'; // multilingual is wip
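
Rewriting the imports from root-absolute paths (`/index.js`) to relative ones (`./index.js`) makes the browser resolve the modules against the page's own URL instead of the server root, so the UI still loads when served from a subpath, e.g. behind a reverse proxy. A sketch of the resolution, assuming a hypothetical deployment under `/llama/`:

    // Page loaded from https://example.com/llama/index.html
    new URL('./completion.js', document.baseURI).href
    // -> "https://example.com/llama/completion.js"  (relative: follows the page)
    new URL('/completion.js', document.baseURI).href
    // -> "https://example.com/completion.js"        (absolute: ignores the subpath)
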
let selected_image = false;
throw new Error("already running");
}
controller.value = new AbortController();
- for await (const chunk of llama(prompt, llamaParams, { controller: controller.value })) {
+ for await (const chunk of llama(prompt, llamaParams, { controller: controller.value, api_url: URL.parse('.', document.baseURI).href })) {
const data = chunk.data;
if (data.stop) {
while (
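
This call site previously passed no `api_url` at all, so `llama()` fell back to `""` and requested `/completion` from the server root, which breaks under a subpath. `URL.parse('.', document.baseURI)` resolves `.` against the page's base URL, yielding the absolute URL of the directory the page was served from; `llama()` then strips the trailing slash it carries. A sketch under the same hypothetical `/llama/` deployment:

    // document.baseURI === "https://example.com/llama/index.html"
    URL.parse('.', document.baseURI).href
    // -> "https://example.com/llama/"
    // after normalization in llama(), requests go to
    // "https://example.com/llama/completion"
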
throw new Error("already running");
}
controller.value = new AbortController();
- for await (const chunk of llama(prompt, llamaParams, { controller: controller.value, api_url: location.pathname.replace(/\/+$/, '') })) {
+ for await (const chunk of llama(prompt, llamaParams, { controller: controller.value, api_url: URL.parse('.', document.baseURI).href })) {
const data = chunk.data;
if (data.stop) {
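
The second call site already tried to account for subpaths with `location.pathname.replace(/\/+$/, '')`, but `pathname` keeps the document's filename when the page is opened as `.../index.html`, yielding a broken base. Resolving `.` against `document.baseURI` always produces the containing directory and works for both ways of loading the page (again with the hypothetical `/llama/` deployment):

    // Page at https://example.com/llama/index.html:
    location.pathname.replace(/\/+$/, '')  // -> "/llama/index.html"          (wrong base)
    URL.parse('.', document.baseURI).href  // -> "https://example.com/llama/" (correct)
    // Page at https://example.com/llama/:
    location.pathname.replace(/\/+$/, '')  // -> "/llama"                     (happened to work)
    URL.parse('.', document.baseURI).href  // -> "https://example.com/llama/"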