diff --git a/.planning/PROJECT.md b/.planning/PROJECT.md
new file mode 100644
index 0000000..4199d13
--- /dev/null
+++ b/.planning/PROJECT.md
@@ -0,0 +1,59 @@
+# NixOS Infrastructure with AI Assistant
+
+## What This Is
+
+This project manages a NixOS-based infrastructure with Docker services, integrated with the OpenCode AI assistant for automated management. The system supports:
+
+- Reproducible NixOS infrastructure configuration
+- Docker service management via Docker Compose
+- AI-assisted infrastructure operations
+- Automatic service deployment and lifecycle management
+- Integration with existing Docker stacks (ai, cloudstorage, homeautomation, network, passwordmanager, versioncontrol)
+
+## Core Value
+
+The core value is a **reproducible and evolvable NixOS infrastructure** that can be managed through natural language interactions with the OpenCode AI assistant. The system should automatically detect and integrate new Docker services while maintaining consistency across all deployments.
+
+## Requirements
+
+### Validated
+
+- NixOS configuration management with flakes
+- Docker service integration via docker_manager.nix
+- Traefik reverse proxy with automatic TLS certificates
+- Environment variable management via agenix secrets
+- Standardized service patterns across all Docker stacks
+
+### Active
+
+- [ ] Automatic detection and integration of new Docker Compose files in `assets/compose/`
+- [ ] AI assistant integration for service lifecycle management
+- [ ] Service health monitoring and logging verification
+- [ ] Documentation of integration patterns in SKILL.md
+- [ ] Automated system update workflow (`nh os switch`)
+
+### Out of Scope
+
+- Full n8n integration for automated workflows - deferred to a future milestone
+- Self-healing infrastructure with automatic problem detection - future enhancement
+- Multi-host orchestration - single-host focus for v1
+
+## Key Decisions
+
+| Decision | Rationale | Outcome |
+|----------|-----------|---------|
+| NixOS with Flakes | Reproducible infrastructure, better dependency management | Good |
+| Docker Compose integration | Preserves existing service configurations, flexibility | Good |
+| agenix for secrets | Secure secrets management, Nix-native integration | Good |
+| Traefik reverse proxy | Unified HTTPS entrypoint, automatic certificate management | Good |
+| Standardized service patterns | Consistency across services, easier maintenance | Pending |
+
+## Context
+
+- **Existing Services**: ai (Llama.cpp, Open WebUI, n8n), cloudstorage (Nextcloud), homeautomation (Home Assistant), network (Traefik, DDNS), passwordmanager (Vaultwarden), versioncontrol (Gitea)
+- **Tech Stack**: NixOS unstable, Docker, Docker Compose, Traefik, agenix, OpenCode AI
+- **Hardware**: AMD MI50 GPUs for AI workloads
+- **Network**: Traefik-net bridge network for all services
+- **Storage**: `/mnt/HoardingCow_docker_data/` for persistent data
+
+**Last updated: 2026-01-01 after init**
diff --git a/.planning/config.json b/.planning/config.json
new file mode 100644
index 0000000..3deca74
--- /dev/null
+++ b/.planning/config.json
@@ -0,0 +1,17 @@
+{
+  "mode": "interactive",
+  "gates": {
+    "confirm_project": true,
+    "confirm_phases": true,
+    "confirm_roadmap": true,
+    "confirm_breakdown": true,
+    "confirm_plan": true,
+    "execute_next_plan": true,
+    "issues_review": true,
+    "confirm_transition": true
+  },
+  "safety": {
+    "always_confirm_destructive": true,
+    "always_confirm_external_services": true
+  }
+}
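
The docker_manager.nix module added below exposes a `services.myDockerStacks` option. A minimal usage sketch, assuming the module is imported into the host configuration (the stack name, compose path, and port are hypothetical):

    services.myDockerStacks.ai = {
      path = "/etc/nixos/assets/compose/ai"; # directory holding docker-compose.yml (hypothetical path)
      ports = [ 8080 ];                      # TCP ports to open in the firewall (hypothetical)
    };

This produces a systemd unit named `ai_stack.service`, which is the same unit name the OpenCode module further down declares in its `requires`/`after`.
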
diff --git a/modules/nixos/services/docker_manager.nix b/modules/nixos/services/docker_manager.nix
new file mode 100644
index 0000000..d8fa94e
--- /dev/null
+++ b/modules/nixos/services/docker_manager.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options.services.myDockerStacks = mkOption {
+    type = types.attrsOf (types.submodule {
+      options = {
+        path = mkOption { type = types.path; };
+        ports = mkOption { type = types.listOf types.int; default = []; };
+      };
+    });
+    default = {};
+    description = "Attribute set of docker-compose stacks to run.";
+  };
+
+  config = {
+    # Generate one systemd service per declared stack
+    systemd.services = mapAttrs' (name: value: nameValuePair "${name}_stack" {
+      description = "${name} via Docker Compose";
+      after = [ "network-online.target" "docker.service" ];
+      wants = [ "network-online.target" "docker.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        WorkingDirectory = value.path;
+        ExecStartPre = "${pkgs.docker-compose}/bin/docker-compose down";
+        ExecStart = "${pkgs.docker-compose}/bin/docker-compose up -d";
+        ExecStop = "${pkgs.docker-compose}/bin/docker-compose down";
+      };
+    }) config.services.myDockerStacks;
+
+    # Automatically open the firewall ports declared by each stack
+    networking.firewall.allowedTCPPorts = flatten (mapAttrsToList (n: v: v.ports) config.services.myDockerStacks);
+  };
+}
diff --git a/modules/nixos/services/fancontrol.nix b/modules/nixos/services/fancontrol.nix
new file mode 100644
index 0000000..8b08ec9
--- /dev/null
+++ b/modules/nixos/services/fancontrol.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.systemd-fancon;
+  configFile = pkgs.writeText "systemd-fancon.conf" cfg.config;
+in
+{
+  options.services.systemd-fancon = {
+    enable = mkEnableOption "systemd-fancon service for fan control";
+    config = mkOption {
+      type = types.lines;
+      default = "";
+      description = "Configuration for systemd-fancon.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      systemd-fancon
+      lm_sensors
+    ];
+
+    boot.kernelModules = [ "amdgpu" ];
+
+    systemd.services.systemd-fancon = {
+      description = "systemd-fancon service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        # configFile is rendered from cfg.config in the let-binding above
+        ExecStart = "${pkgs.systemd-fancon}/bin/systemd-fancon -c ${configFile}";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
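
The fancontrol module above renders `cfg.config` to a file via `pkgs.writeText` and hands it to the daemon. A minimal sketch of enabling it from a host configuration, assuming the module is imported (the config body is a placeholder, not real systemd-fancon syntax):

    services.systemd-fancon = {
      enable = true;
      config = ''
        # placeholder: consult the systemd-fancon documentation for the real syntax
      '';
    };
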
diff --git a/modules/nixos/services/ollama_init_custom_models.nix b/modules/nixos/services/ollama_init_custom_models.nix
new file mode 100644
index 0000000..766727b
--- /dev/null
+++ b/modules/nixos/services/ollama_init_custom_models.nix
@@ -0,0 +1,35 @@
+{ pkgs, ... }:
+
+{
+  systemd.services.init-ollama-model = {
+    description = "Initialize nemotron 3 with extra context in Ollama Docker";
+    after = [ "docker-ollama.service" ]; # Ensure it runs after your ollama container
+    wantedBy = [ "multi-user.target" ];
+    script = ''
+      # Wait for Ollama to answer on its API port
+      while ! ${pkgs.curl}/bin/curl -s http://localhost:11434/api/tags > /dev/null; do
+        sleep 2
+      done
+
+      # Check if the model already exists in the persistent volume
+      if ! ${pkgs.docker}/bin/docker exec ollama ollama list | grep -q "nemotron-3-nano:30b-128k"; then
+        echo "nemotron-3-nano:30b-128k not found, creating..."
+
+        ${pkgs.docker}/bin/docker exec ollama sh -c 'cat <<EOF > /root/.ollama/nemotron-3-nano:30b-128k.modelfile
+      FROM nemotron-3-nano:30b
+      PARAMETER num_ctx 131072
+      PARAMETER num_predict 4096
+      PARAMETER repeat_penalty 1.1
+      EOF'
+
+        ${pkgs.docker}/bin/docker exec ollama ollama create nemotron-3-nano:30b-128k -f /root/.ollama/nemotron-3-nano:30b-128k.modelfile
+      else
+        echo "nemotron-3-nano:30b-128k already exists, skipping creation."
+      fi
+    '';
+    serviceConfig = {
+      Type = "oneshot";
+      RemainAfterExit = true;
+    };
+  };
+}
diff --git a/modules/nixos/services/open_code_server.nix b/modules/nixos/services/open_code_server.nix
new file mode 100644
index 0000000..f66c713
--- /dev/null
+++ b/modules/nixos/services/open_code_server.nix
@@ -0,0 +1,67 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.opencode;
+in {
+  options.services.opencode = {
+    enable = lib.mkEnableOption "OpenCode AI Service";
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 4099;
+    };
+    ollamaUrl = lib.mkOption {
+      type = lib.types.str;
+      default = "http://127.0.0.1:11434/v1";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    programs.nix-ld.enable = true;
+
+    environment.etc."opencode/opencode.json".text = builtins.toJSON {
+      "$schema" = "https://opencode.ai/config.json";
+
+      model = "ollama/nemotron-3-nano:30b";
+
+      provider = {
+        ollama = {
+          name = "Ollama (Local)";
+          npm = "@ai-sdk/openai-compatible";
+          options = {
+            baseURL = cfg.ollamaUrl;
+          };
+          models = {
+            # The exact model ID as seen in 'ollama list'
+            "nemotron-3-nano:30b" = {
+              name = "NVIDIA Nemotron 3 Nano (30B)";
+            };
+          };
+        };
+      };
+    };
+
+    systemd.services.opencode = {
+      description = "OpenCode AI Coding Agent Server";
+      after = [ "network.target" "ai_stack.service" ];
+      requires = [ "ai_stack.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = "gortium";
+        ExecStart = "${pkgs.nodejs}/bin/npx -y opencode-ai serve --hostname 0.0.0.0 --port ${toString cfg.port}";
+        Restart = "on-failure";
+        # Loads your ANTHROPIC_API_KEY etc. from your single agenix file
+        # EnvironmentFile = config.age.secrets.opencode-secrets.path;
+      };
+
+      environment = {
+        OLLAMA_BASE_URL = "http://127.0.0.1:11434";
+        OPENCODE_CONFIG = "/etc/opencode/opencode.json";
+        HOME = "/home/gortium";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ cfg.port ];
+  };
+}
diff --git a/secrets/n8n_ssh_key.age b/secrets/n8n_ssh_key.age
new file mode 100644
index 0000000..32ef61f
Binary files /dev/null and b/secrets/n8n_ssh_key.age differ
diff --git a/users/n8n-worker.nix b/users/n8n-worker.nix
new file mode 100644
index 0000000..fff14f1
--- /dev/null
+++ b/users/n8n-worker.nix
@@ -0,0 +1,12 @@
+{ pkgs, inputs, config, keys, ... }: {
+  users.users.n8n-worker = {
+    isSystemUser = true;
+    group = "n8n-worker";
+    extraGroups = [ "docker" ];
+    shell = pkgs.bashInteractive;
+    openssh.authorizedKeys.keys = [
+      keys.users.n8n-worker.main
+    ];
+  };
+  users.groups.n8n-worker = {};
+}
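
Taken together, a host configuration only needs to import these modules and flip the options. A minimal wiring sketch, assuming the modules live at the paths shown and the "ai" stack from the earlier sketch is declared (all paths hypothetical):

    {
      imports = [
        ./modules/nixos/services/docker_manager.nix
        ./modules/nixos/services/open_code_server.nix
      ];

      # docker_manager.nix turns this into ai_stack.service, which the
      # opencode unit lists in its requires/after.
      services.myDockerStacks.ai.path = "/etc/nixos/assets/compose/ai";

      services.opencode.enable = true; # serves on cfg.port (4099) by default
    }
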