infra/modules/nixos/services/ollama_init_custom_models.nix

{ pkgs, ... }: {
  systemd.services.init-ollama-model = {
    description = "Initialize LLM models with extra context in Ollama Docker";
    after = [ "docker-ollama.service" ];
    wantedBy = [ "multi-user.target" ];
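    # `after` only orders startup relative to docker-ollama; the polling
    # loop below handles the case where the API is not yet ready.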
    script = ''
      # Wait for the Ollama API inside the container to respond
      while ! ${pkgs.curl}/bin/curl -s http://localhost:11434/api/tags > /dev/null; do
        sleep 2
      done
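
      # Helper: write a Modelfile inside the container and register the
      # derived model, unless `ollama list` already shows its name.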
      create_model_if_missing() {
        local model_name=$1
        local base_model=$2
        if ! ${pkgs.docker}/bin/docker exec ollama ollama list | grep -q "$model_name"; then
          echo "$model_name not found, creating from $base_model..."
          ${pkgs.docker}/bin/docker exec ollama sh -c "cat <<EOF > /root/.ollama/$model_name.modelfile
FROM $base_model
PARAMETER num_ctx 131072
PARAMETER num_predict 4096
PARAMETER num_keep 1024
PARAMETER repeat_penalty 1.1
PARAMETER top_k 40
PARAMETER stop \"[INST]\"
PARAMETER stop \"[/INST]\"
PARAMETER stop \"</s>\"
EOF"
          ${pkgs.docker}/bin/docker exec ollama ollama create "$model_name" -f "/root/.ollama/$model_name.modelfile"
        else
          echo "$model_name already exists, skipping."
        fi
      }

      # Create Nemotron variant with a 128k context window
      create_model_if_missing "nemotron-3-nano:30b-128k" "nemotron-3-nano:30b"
      # Create Devstral variant with a 128k context window
      create_model_if_missing "devstral-small-2:24b-128k" "devstral-small-2:24b"
    '';
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
  };
}
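
For reference, a minimal sketch of how this module might be wired into a
host configuration. The relative import path and the docker-ollama
container definition are assumptions, not part of this file; the
container is shown only because the unit above orders itself after
docker-ollama.service and execs into a container named "ollama".

# Hypothetical host configuration (illustrative paths and image tag)
{ ... }: {
  imports = [
    ./modules/nixos/services/ollama_init_custom_models.nix
  ];

  virtualisation.docker.enable = true;

  # oci-containers generates the docker-ollama.service unit this
  # module waits on; the image tag here is an assumption.
  virtualisation.oci-containers = {
    backend = "docker";
    containers.ollama = {
      image = "ollama/ollama:latest";
      ports = [ "11434:11434" ];
      volumes = [ "/var/lib/ollama:/root/.ollama" ];
    };
  };
}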