set horizon up to use twilight ollama models

Leyla Becker 2025-05-26 17:54:09 -05:00
parent 8c36fe5a72
commit f96f9f7675
3 changed files with 26 additions and 2 deletions

@@ -23,6 +23,29 @@
  hardware = {
    directAccess.enable = true;
  };
  ai = {
    enable = true;
    models = {
      "Llama 3.1 8B" = {
        model = "llama3.1:8b";
        roles = ["chat" "edit" "apply"];
        apiBase = "http://twilight:11434";
      };
      "qwen2.5-coder:1.5b-base" = {
        model = "qwen2.5-coder:1.5b-base";
        roles = ["autocomplete"];
        apiBase = "http://twilight:11434";
      };
      "nomic-embed-text:latest" = {
        model = "nomic-embed-text:latest";
        roles = ["embed"];
        apiBase = "http://twilight:11434";
      };
    };
  };
};
environment.systemPackages = [

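For context, the block above consumes a repo-local host.ai option set (the option path shows up in the TODO comments further down). A minimal sketch of how such an interface might be declared; the types and defaults here are assumptions for illustration, not taken from this commit:

{ lib, ... }:
{
  options.host.ai = {
    # assumed enable flag matching the `ai.enable = true` usage above
    enable = lib.mkEnableOption "per-host AI model configuration";
    models = lib.mkOption {
      default = { };
      # one submodule per model, keyed by a human-readable name
      type = lib.types.attrsOf (lib.types.submodule {
        options = {
          model = lib.mkOption {
            type = lib.types.str;
            description = "Ollama model tag, e.g. llama3.1:8b.";
          };
          roles = lib.mkOption {
            type = lib.types.listOf lib.types.str;
            default = [ ];
            description = "Roles this model serves (chat, edit, apply, autocomplete, embed).";
          };
          apiBase = lib.mkOption {
            type = lib.types.str;
            default = "http://localhost:11434";
            description = "Base URL of the Ollama API that serves this model.";
          };
        };
      });
    };
  };
}
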
@@ -83,6 +83,7 @@
  services = {
    ollama = {
      enable = true;
      exposePort = true;
      loadModels = [
        # conversation models
@@ -40,7 +40,7 @@
  };
  config = {
    # TODO: configure ollama to download any models listed in options.host.ai.models.{name}.model if options.host.ai.models.{name}.apiBase is the default value
    # TODO: if we have any models that have a non-null options.host.ai.models.{name}.apiBase then set services.ollama.enable to a lib.mkAfter true
    # TODO: configure ollama to download any models listed in options.host.ai.models.{name}.model if options.host.ai.models.{name}.apiBase is localhost
    # TODO: if we have any models that have a non-localhost options.host.ai.models.{name}.apiBase then set services.ollama.enable to a lib.mkAfter true
  };
}
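
The first of the new TODOs could be realized by feeding locally served models into services.ollama.loadModels. A sketch, assuming apiBase defaults to a localhost URL and reusing the option names from the comments above; everything else is illustrative:

{ lib, config, ... }:
let
  cfg = config.host.ai;
  # a model counts as locally served when its apiBase points at this machine
  isLocal = m: lib.hasInfix "localhost" m.apiBase;
  localModels = lib.filterAttrs (_: isLocal) cfg.models;
in
{
  config = lib.mkIf cfg.enable {
    # have the local ollama instance pull every model that is served from localhost
    services.ollama.loadModels = lib.mapAttrsToList (_: m: m.model) localModels;
  };
}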