refactor: moved nixos modules to dendrite pattern

Leyla Becker 2026-04-07 15:39:45 -05:00
parent df8dd110ad
commit 0ea11e0236
219 changed files with 4802 additions and 4820 deletions
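Every hunk below makes the same kind of change: reads of the host-level option osConfig.host.ai.enable are replaced with a literal true, and the host.ai and ollama blocks that backed that option are commented out. For reference, a minimal sketch of how an option like host.ai.enable is typically declared in a NixOS module; the repo's actual declaration is not part of this diff, so treat the shape as an assumption:

  {lib, ...}: {
    # mkEnableOption declares a boolean option that defaults to false.
    options.host.ai.enable = lib.mkEnableOption "AI tooling on this host";
  }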

View file

@@ -77,7 +77,7 @@
   libreoffice.enable = true;
   noita-entangled-worlds.enable = true;
-  opencode.enable = osConfig.host.ai.enable;
+  opencode.enable = true;
   e621-downloader.enable = true;

View file

@@ -32,8 +32,8 @@
   proxmark3.enable = true;
   openrgb.enable = hardware.openRGB.enable;
   via.enable = hardware.viaKeyboard.enable;
-  claude-code.enable = osConfig.host.ai.enable;
-  opencode.enable = osConfig.host.ai.enable;
+  claude-code.enable = true;
+  opencode.enable = true;
   davinci-resolve.enable = hardware.graphicsAcceleration.enable;
   mfoc.enable = true;
 })

View file

@@ -7,7 +7,7 @@
   ...
 }: let
   nix-development-enabled = osConfig.host.nix-development.enable;
-  ai-tooling-enabled = osConfig.host.ai.enable;
+  ai-tooling-enabled = true;
 in {
   config = lib.mkIf config.user.isDesktopUser {
     programs = {
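The hunk above is a home-manager module. When home-manager is run as a NixOS module, each user module receives osConfig, the evaluated NixOS configuration of the host, which is what the old let-binding read from. A minimal sketch of that wiring, with the module body reduced to a placeholder rather than the repo's real contents:

  {
    osConfig,
    config,
    lib,
    ...
  }: let
    # previously derived from the host option; this commit hardcodes it to true
    ai-tooling-enabled = osConfig.host.ai.enable;
  in {
    config = lib.mkIf config.user.isDesktopUser {
      # per-user programs gated on ai-tooling-enabled would be set here
      programs = {};
    };
  }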

View file

@@ -260,40 +260,10 @@
     };
   };
-  ollama = {
-    enable = true;
-    exposePort = true;
-    impermanence.enable = false;
-    environmentVariables = {
-      OLLAMA_KEEP_ALIVE = "24h";
-    };
-    loadModels = [
-      # conversation models
-      "llama3.1:8b"
-      "deepseek-r1:8b"
-      "deepseek-r1:32b"
-      "deepseek-r1:70b"
-      # auto complete models
-      "qwen2.5-coder:1.5b-base"
-      "qwen2.5-coder:7b"
-      "deepseek-coder:6.7b"
-      "deepseek-coder:33b"
-      # agent models
-      "qwen3:8b"
-      "qwen3:32b"
-      "qwen3:235b-a22b"
-      "qwen3-coder:30b"
-      "qwen3-coder:30b-a3b-fp16"
-      # embedding models
-      "nomic-embed-text:latest"
-    ];
-  };
+  # ollama = {
+  #   enable = true;
+  #   exposePort = true;
+  # };
   tailscale = {
     enable = true;
     authKeyFile = config.sops.secrets."vpn-keys/tailscale-authkey/defiant".path;
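exposePort and impermanence.enable in the removed block are not stock nixpkgs options, so this file is presumably going through a repo-local wrapper around services.ollama. Assuming a recent nixpkgs where services.ollama supports host, openFirewall, environmentVariables, and loadModels, a rough equivalent in plain module options would be:

  {
    services.ollama = {
      enable = true;
      # assumption: the wrapper's exposePort = true means listening on all
      # interfaces with the firewall opened, instead of the localhost default
      host = "0.0.0.0";
      openFirewall = true;
      environmentVariables = {
        OLLAMA_KEEP_ALIVE = "24h"; # keep models resident instead of unloading
      };
      # pulled automatically after the service starts
      loadModels = ["llama3.1:8b"];
    };
  }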

View file

@@ -45,7 +45,7 @@
 services.desktopManager.gnome.enable = true;
 host = {
-  ai.enable = true;
+  # ai.enable = true;
   users = {
     eve = {
       isDesktopUser = true;

View file

@@ -39,50 +39,9 @@
     directAccess.enable = true;
   };
-  ai = {
-    enable = true;
-    models = {
-      "Llama 3.1 8B" = {
-        model = "llama3.1:8b";
-        roles = ["chat" "edit" "apply"];
-        apiBase = "http://defiant:11434";
-      };
-      "Deepseek Coder:6.7B" = {
-        model = "deepseek-coder:6.7b";
-        roles = ["chat" "edit" "apply"];
-        apiBase = "http://defiant:11434";
-      };
-      "Deepseek Coder:33B" = {
-        model = "deepseek-coder:33b";
-        roles = ["chat" "edit" "apply"];
-        apiBase = "http://defiant:11434";
-      };
-      "Deepseek r1:8B" = {
-        model = "deepseek-r1:8b";
-        roles = ["chat"];
-        apiBase = "http://defiant:11434";
-      };
-      "Deepseek r1:32B" = {
-        model = "deepseek-r1:32b";
-        roles = ["chat"];
-        apiBase = "http://defiant:11434";
-      };
-      "qwen2.5-coder:1.5b-base" = {
-        model = "qwen2.5-coder:1.5b-base";
-        roles = ["autocomplete"];
-        apiBase = "http://defiant:11434";
-      };
-      "nomic-embed-text:latest" = {
-        model = "nomic-embed-text:latest";
-        roles = ["embed"];
-        apiBase = "http://defiant:11434";
-      };
-    };
-  };
+  # ai = {
+  #   enable = true;
+  # };
 };
 virtualisation.docker.enable = true;
@@ -127,12 +86,12 @@
   syncthing.enable = true;
-  ollama = {
-    enable = true;
-    loadModels = [
-      "llama3.1:8b"
-    ];
-  };
+  # ollama = {
+  #   enable = true;
+  #   loadModels = [
+  #     "llama3.1:8b"
+  #   ];
+  # };
 };
 # Enable network-online.target for better network dependency handling
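The ai.models entries removed above pair each model with roles (chat, edit, apply, autocomplete, embed) and an apiBase pointing at an Ollama endpoint, which is the shape of per-model settings for an editor assistant. A sketch of how such an option could be declared with a submodule type; the names mirror the removed config, but the actual module is not shown in this diff:

  {lib, ...}: {
    options.host.ai.models = lib.mkOption {
      type = lib.types.attrsOf (lib.types.submodule {
        options = {
          model = lib.mkOption {type = lib.types.str;};
          roles = lib.mkOption {type = lib.types.listOf lib.types.str;};
          apiBase = lib.mkOption {
            type = lib.types.str;
            # assumption: a local Ollama on its default port
            default = "http://localhost:11434";
          };
        };
      });
      default = {};
    };
  }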

View file

@@ -36,80 +36,15 @@
     graphicsAcceleration.enable = true;
     directAccess.enable = true;
   };
-  ai = {
-    enable = true;
-    # TODO: benchmark twilight against defiant and prune this list of models that are faster on defiant
-    models = {
-      # conversation models
-      "Llama 3.1 8B" = {
-        model = "lamma3.1:8b";
-        roles = ["chat" "edit" "apply"];
-      };
-      "deepseek-r1:8b" = {
-        model = "deepseek-r1:8b";
-        roles = ["chat" "edit" "apply"];
-      };
-      "deepseek-r1:32b" = {
-        model = "deepseek-r1:32b";
-        roles = ["chat" "edit" "apply"];
-      };
-      # auto complete models
-      "qwen2.5-coder:1.5b-base" = {
-        model = "qwen2.5-coder:1.5b-base";
-        roles = ["autocomplete"];
-      };
-      "qwen2.5-coder:7b" = {
-        model = "qwen2.5-coder:7b";
-        roles = ["autocomplete"];
-      };
-      "deepseek-coder:6.7b" = {
-        model = "deepseek-coder:6.7b";
-        roles = ["autocomplete"];
-      };
-      "deepseek-coder:33b" = {
-        model = "deepseek-coder:33b";
-        roles = ["autocomplete"];
-      };
-      # agent models
-      "qwen3:32b" = {
-        model = "qwen3:32b";
-        roles = ["chat" "edit" "apply"];
-      };
-      # embedding models
-      "nomic-embed-text:latest" = {
-        model = "nomic-embed-text:latest";
-        roles = ["embed"];
-      };
-    };
-  };
+  # ai = {
+  #   enable = true;
+  # };
 };
 services = {
-  ollama = {
-    enable = true;
-    exposePort = true;
-    loadModels = [
-      # conversation models
-      "llama3.1:8b"
-      "deepseek-r1:8b"
-      "deepseek-r1:32b"
-      # auto complete models
-      "qwen2.5-coder:1.5b-base"
-      "qwen2.5-coder:7b"
-      "deepseek-coder:6.7b"
-      "deepseek-coder:33b"
-      # agent models
-      "qwen3:32b"
-      # embedding models
-      "nomic-embed-text:latest"
-    ];
-  };
+  # ollama = {
+  #   enable = true;
+  #   exposePort = true;
+  # };
   tailscale = {
     enable = true;