Compare commits
No commits in common. "77ab4781ac005f89a76eef72113f766ed2b6828b" and "c6ec40a3f7a12e2be5eed3fc7419a4c75accd105" have entirely different histories.

77ab4781ac...c6ec40a3f7

8 changed files with 55 additions and 187 deletions
@@ -123,7 +123,7 @@
      "browser.bookmarks.addedImportButton" = true;
      "browser.newtabpage.activity-stream.feeds.section.topstories" = false;

      # Usage Experience
      # Usage Experiance
      "browser.startup.homepage" = "about:home";
      "browser.download.useDownloadDir" = false;
      "browser.uiCustomization.state" = builtins.toJSON {
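(Note on the last context line above: Firefox persists `browser.uiCustomization.state` as a JSON string, so the config builds it from a Nix attrset with `builtins.toJSON`. A minimal sketch of the pattern; the `placements` content below is illustrative, not taken from this diff:)

  "browser.uiCustomization.state" = builtins.toJSON {
    # hypothetical toolbar layout; Firefox rewrites this structure at runtime
    placements = {
      nav-bar = [
        "back-button"
        "forward-button"
        "urlbar-container"
      ];
    };
  };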
@@ -65,6 +65,7 @@ in {
        # builtins.elemAt osConfig.services.ollama.loadModels 0;
      })
    ];
  };

  extensions = (
    with open-vsx;
@@ -118,5 +119,4 @@ in {
        );
      };
    };
  };
}
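(`open-vsx` here is presumably an extension set from a VS Code extensions overlay such as nix-vscode-extensions, where extensions are addressed as `<publisher>.<extension-id>` attributes. A hypothetical sketch of how the parenthesized `with open-vsx;` expression resolves to a package list; the extension named is illustrative:)

  extensions = (
    with open-vsx;
    [
      # attribute path is <publisher>.<extension-id>
      jnoortheen.nix-ide
    ]
  );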
@@ -1,6 +1,4 @@
[
  "leyla"
  "webdav"
  "ollama"
  "optimise"
]
@@ -201,28 +201,13 @@
    };

    ollama = {
      enable = true;
      exposePort = true;
      enable = false;

      loadModels = [
        # conversation models
        "llama3.1:8b"
        "deepseek-coder:6.7b"
        "deepseek-r1:8b"
        "deepseek-r1:32b"
        "deepseek-r1:70b"

        # auto complete models
        "qwen2.5-coder:1.5b-base"
        "qwen2.5-coder:7b"
        "deepseek-coder:6.7b"
        "deepseek-coder:33b"

        # agent models
        "qwen3:8b"
        "qwen3:32b"

        # embedding models
        "nomic-embed-text:latest"
      ];
    };
    tailscale = {
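(For reference, `loadModels` is the stock NixOS `services.ollama.loadModels` option: every model named in the list is pulled automatically when the service starts, so trimming the list only changes what is pre-downloaded. A minimal host sketch, with the model choice illustrative:)

  services.ollama = {
    enable = true;
    # each entry is pulled on service start; anything else can still be
    # fetched on demand with `ollama pull <model>`
    loadModels = [
      "llama3.1:8b"
    ];
  };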
@@ -30,65 +30,8 @@
      graphicsAcceleration.enable = true;
      directAccess.enable = true;
    };
    ai = {
      enable = true;
      # TODO: benchmark twilight against defiant and prune this list of models that are faster on defiant
      models = {
        # conversation models
        "lamma3.1:8b" = {
          model = "lamma3.1:8b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };
        "deepseek-r1:8b" = {
          model = "deepseek-r1:8b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };
        "deepseek-r1:32b" = {
          model = "deepseek-r1:32b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };

        # auto complete models
        "qwen2.5-coder:1.5b-base" = {
          model = "qwen2.5-coder:1.5b-base";
          # TODO: figure out what should be in this array
          # roles = [""];
        };
        "qwen2.5-coder:7b" = {
          model = "qwen2.5-coder:7b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };
        "deepseek-coder:6.7b" = {
          model = "deepseek-coder:6.7b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };
        "deepseek-coder:33b" = {
          model = "deepseek-coder:33b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };

        # agent models
        "qwen3:32b" = {
          model = "qwen3:32b";
          # TODO: figure out what should be in this array
          # roles = [""];
        };

        # embedding models
        "nomic-embed-text:latest" = {
          model = "nomic-embed-text:latest";
          # TODO: figure out what should be in this array
          # roles = [""];
        };
      };
    };
  };
  services = {
    ollama = {
      enable = true;
@@ -1,44 +0,0 @@
{lib, ...}: {
  options.host = {
    ai = {
      enable = lib.mkEnableOption "should we use AI on this machine";
      models = lib.mkOption {
        type = lib.types.attrsOf (lib.types.submodule ({name, ...}: {
          option = {
            name = lib.mkOption {
              type = lib.types.str;
              default = name;
            };
            model = {
              type = lib.types.str;
            };
            provider = {
              type = lib.types.str;
              default = "ollama";
            };
            apiBase = {
              type = lib.types.str;
              default = null;
            };
            roles = {
              type = lib.types.listOf lib.types.enumOf [
                "chat"
                "autocomplete"
                "embed"
                "rerank"
                "edit"
                "apply"
                "summarize"
              ];
            };
          };
        }));
      };
    };
  };

  config = {
    # TODO: configure ollama to download any modules listed in options.host.ai.models.{name}.model if options.host.ai.models.{name}.apiBase is null
    # TODO: if we have any models that have a non null options.host.ai.models.{name}.apiBase then set services.ollama.enable to a lib.mkAfter true
  };
}
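(As written, the removed module would not have evaluated cleanly: the submodule body says `option` where the module system expects `options`, the `model`/`provider`/`apiBase`/`roles` fields are bare attrsets instead of `lib.mkOption` calls, `lib.types.enumOf` does not exist (`lib.types.enum` does), and a `str`-typed `apiBase` cannot default to `null` without `lib.types.nullOr`. A corrected sketch of what the module appears to have been aiming for:)

  {lib, ...}: {
    options.host.ai = {
      enable = lib.mkEnableOption "should we use AI on this machine";
      models = lib.mkOption {
        default = {};
        type = lib.types.attrsOf (lib.types.submodule ({name, ...}: {
          # `options`, not `option`, and every field is a lib.mkOption call
          options = {
            name = lib.mkOption {
              type = lib.types.str;
              default = name;
            };
            model = lib.mkOption {
              type = lib.types.str;
            };
            provider = lib.mkOption {
              type = lib.types.str;
              default = "ollama";
            };
            apiBase = lib.mkOption {
              # nullOr is required for the null default
              type = lib.types.nullOr lib.types.str;
              default = null;
            };
            roles = lib.mkOption {
              # lib.types.enum, not the nonexistent lib.types.enumOf
              type = lib.types.listOf (lib.types.enum [
                "chat"
                "autocomplete"
                "embed"
                "rerank"
                "edit"
                "apply"
                "summarize"
              ]);
              default = [];
            };
          };
        }));
      };
    };
  }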
@@ -12,7 +12,6 @@
    ./impermanence.nix
    ./disko.nix
    ./ollama.nix
    ./continue.nix
    ./tailscale.nix
    ./server
  ];
@@ -3,10 +3,6 @@
  lib,
  ...
}: {
  options = {
    services.ollama.exposePort = lib.mkEnableOption "should we expose ollama on tailscale";
  };

  config = lib.mkMerge [
    {
      services.ollama = {
@@ -26,15 +22,6 @@
        }
      ];
    };
    networking.firewall.interfaces.${config.services.tailscale.interfaceName} = let
      ports = [
        config.services.ollama.port
      ];
    in
      lib.mkIf config.services.ollama.exposePort {
        allowedTCPPorts = ports;
        allowedUDPPorts = ports;
      };
  }))
];
}
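(Design note: the `exposePort` option above gates firewall rules that are attached only to `config.services.tailscale.interfaceName`, so ollama's port stays closed on every other interface even when enabled. A hypothetical host would opt in like this:)

  services.ollama = {
    enable = true;
    # opens config.services.ollama.port on the tailscale interface only
    exposePort = true;
  };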