#!/bin/bash
# Ollama Setup — ThinkCentre 1
#
# Installs Ollama via the official installer, enables its systemd service,
# and pulls a default model so the box is ready to serve immediately.
#
# Usage: bash setup.sh [model]
# Default model: qwen2.5:7b
#
# NOTE(review): the original file was collapsed onto one line, which made
# everything after the shebang part of a comment — the script did nothing.
set -euo pipefail

# Print an error to stderr and abort.
die() { printf '%s\n' "$*" >&2; exit 1; }

MODEL="${1:-qwen2.5:7b}"
readonly MODEL

# Preflight: fail early with a clear message instead of mid-install.
command -v curl >/dev/null || die "curl is required but not installed"
command -v sudo >/dev/null || die "sudo is required but not installed"

echo "=== Ollama Setup ==="

echo "[1/3] Installing Ollama..."
# Piping the installer to sh is upstream's documented install method;
# -f makes curl fail (non-zero) on HTTP errors so set -e catches them.
curl -fsSL https://ollama.ai/install.sh | sh

echo "[2/3] Enabling service..."
sudo systemctl enable ollama
sudo systemctl start ollama
# Give the daemon a moment to open its API port before pulling.
sleep 5

echo "[3/3] Pulling default model: $MODEL (~5 GB, dauert je nach Netz 5-15 min)..."
ollama pull "$MODEL"

echo ""
echo "=== Done ==="
ollama list
echo ""
echo "Test: curl http://localhost:11434/api/generate -d '{\"model\":\"$MODEL\",\"prompt\":\"Hello\",\"stream\":false}'"