Mirror of https://github.com/basnijholt/compose-farm.git, synced 2026-02-03 14:13:26 +00:00

Compare commits: 152 commits
88 .github/check_readme_commands.py vendored Executable file
@@ -0,0 +1,88 @@
#!/usr/bin/env python3
"""Check that all CLI commands are documented in the README."""

from __future__ import annotations

import re
import sys
from pathlib import Path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import typer

from compose_farm.cli import app


def get_all_commands(typer_app: typer.Typer, prefix: str = "cf") -> set[str]:
    """Extract all command names from a Typer app, including nested subcommands."""
    commands = set()

    # Get registered commands (skip hidden ones like aliases)
    for command in typer_app.registered_commands:
        if command.hidden:
            continue
        name = command.name
        if not name and command.callback:
            name = command.callback.__name__
        if name:
            commands.add(f"{prefix} {name}")

    # Get registered sub-apps (like 'config')
    for group in typer_app.registered_groups:
        sub_app = group.typer_instance
        sub_name = group.name
        if sub_app and sub_name:
            commands.add(f"{prefix} {sub_name}")
        # Don't recurse into subcommands - we only document the top-level subcommand

    return commands


def get_documented_commands(readme_path: Path) -> set[str]:
    """Extract commands documented in README from help output sections."""
    content = readme_path.read_text()

    # Match patterns like: <code>cf command --help</code>
    pattern = r"<code>(cf\s+[\w-]+)\s+--help</code>"
    matches = re.findall(pattern, content)

    return set(matches)


def main() -> int:
    """Check that all CLI commands are documented in the README."""
    readme_path = Path(__file__).parent.parent / "README.md"

    if not readme_path.exists():
        print(f"ERROR: README.md not found at {readme_path}")
        return 1

    cli_commands = get_all_commands(app)
    documented_commands = get_documented_commands(readme_path)

    # Also check for the main 'cf' help
    if "<code>cf --help</code>" in readme_path.read_text():
        documented_commands.add("cf")
        cli_commands.add("cf")

    missing = cli_commands - documented_commands
    extra = documented_commands - cli_commands

    if missing or extra:
        if missing:
            print("ERROR: Commands missing from README --help documentation:")
            for cmd in sorted(missing):
                print(f"  - {cmd}")
        if extra:
            print("WARNING: Commands documented but not in CLI:")
            for cmd in sorted(extra):
                print(f"  - {cmd}")
        return 1

    print(f"✓ All {len(cli_commands)} commands documented in README")
    return 0


if __name__ == "__main__":
    sys.exit(main())
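For orientation, here is a hedged sketch (hypothetical commands, not compose-farm's real CLI) of the Typer structures the script walks: `registered_commands` holds plain commands, and `registered_groups` holds sub-apps such as `config`.

```python
import typer

demo = typer.Typer()
config_app = typer.Typer()

@demo.command()
def up() -> None:
    """A visible command: the checker derives the name 'up' from the callback."""

@demo.command(hidden=True)
def u() -> None:
    """A hidden alias: skipped by get_all_commands."""

demo.add_typer(config_app, name="config")

# With the script above importable, get_all_commands(demo) would return
# {"cf up", "cf config"}; sub-app internals are deliberately not recursed into.
```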
18 .github/workflows/ci.yml vendored
@@ -16,10 +16,10 @@ jobs:
        python-version: ["3.11", "3.12", "3.13"]

    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6

      - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7

      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}
@@ -39,10 +39,10 @@ jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6

      - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7

      - name: Set up Python
        run: uv python install 3.12
@@ -50,11 +50,5 @@ jobs:
      - name: Install dependencies
        run: uv sync --all-extras --dev

-      - name: Run ruff check
-        run: uv run ruff check .
-
-      - name: Run ruff format check
-        run: uv run ruff format --check .
-
-      - name: Run mypy
-        run: uv run mypy src/compose_farm
+      - name: Run pre-commit (via prek)
+        uses: j178/prek-action@v1
92 .github/workflows/docker.yml vendored Normal file
@@ -0,0 +1,92 @@
name: Build and Push Docker Image

on:
  workflow_run:
    workflows: ["Upload Python Package"]
    types: [completed]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to build (leave empty for latest)'
        required: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    # Only run if PyPI upload succeeded (or manual dispatch)
    if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }}
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "workflow_run" ]; then
            # Get version from the tag that triggered the release
            VERSION="${{ github.event.workflow_run.head_branch }}"
            # Strip 'v' prefix if present
            VERSION="${VERSION#v}"
          elif [ -n "${{ github.event.inputs.version }}" ]; then
            VERSION="${{ github.event.inputs.version }}"
          else
            VERSION=""
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Wait for PyPI
        if: steps.version.outputs.version != ''
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          echo "Waiting for compose-farm==$VERSION on PyPI..."
          for i in {1..30}; do
            if curl -sf "https://pypi.org/pypi/compose-farm/$VERSION/json" > /dev/null; then
              echo "✓ Version $VERSION available on PyPI"
              exit 0
            fi
            echo "Attempt $i: not yet available, waiting 10s..."
            sleep 10
          done
          echo "✗ Timeout waiting for PyPI"
          exit 1

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}},value=v${{ steps.version.outputs.version }}
            type=semver,pattern={{major}}.{{minor}},value=v${{ steps.version.outputs.version }}
            type=semver,pattern={{major}},value=v${{ steps.version.outputs.version }}
            type=raw,value=latest

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            VERSION=${{ steps.version.outputs.version }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
4 .github/workflows/release.yml vendored
@@ -13,9 +13,9 @@ jobs:
    permissions:
      id-token: write
    steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
      - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
      - name: Build
        run: uv build
      - name: Publish package distributions to PyPI
6 .github/workflows/update-readme.yml vendored
@@ -11,16 +11,16 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          persist-credentials: false
          fetch-depth: 0

      - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6

      - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7

      - name: Run markdown-code-runner
        env:
2 .gitignore vendored
@@ -42,3 +42,5 @@ htmlcov/
compose-farm.yaml
!examples/compose-farm.yaml
coverage.xml
+.env
+homepage/
.pre-commit-config.yaml
@@ -1,4 +1,13 @@
repos:
+  - repo: local
+    hooks:
+      - id: check-readme-commands
+        name: Check README documents all CLI commands
+        entry: uv run python .github/check_readme_commands.py
+        language: system
+        files: ^(README\.md|src/compose_farm/cli/.*)$
+        pass_filenames: false
+
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
@@ -10,7 +19,7 @@ repos:
      - id: debug-statements

  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.4
+    rev: v0.14.9
    hooks:
      - id: ruff
        args: [--fix]
37 CLAUDE.md
@@ -10,19 +10,31 @@
```
compose_farm/
-├── cli.py            # Typer commands (thin layer, delegates to operations)
+├── cli/              # CLI subpackage
+│   ├── __init__.py   # Imports modules to trigger command registration
+│   ├── app.py        # Shared Typer app instance, version callback
+│   ├── common.py     # Shared helpers, options, progress bar utilities
+│   ├── config.py     # Config subcommand (init, show, path, validate, edit)
+│   ├── lifecycle.py  # up, down, pull, restart, update, apply commands
+│   ├── management.py # refresh, check, init-network, traefik-file commands
+│   └── monitoring.py # logs, ps, stats commands
├── config.py         # Pydantic models, YAML loading
├── compose.py        # Compose file parsing (.env, ports, volumes, networks)
+├── console.py        # Shared Rich console instances
├── executor.py       # SSH/local command execution, streaming output
├── operations.py     # Business logic (up, migrate, discover, preflight checks)
├── state.py          # Deployment state tracking (which service on which host)
├── logs.py           # Image digest snapshots (dockerfarm-log.toml)
└── traefik.py        # Traefik file-provider config generation from labels
```

## Web UI Icons

Icons use [Lucide](https://lucide.dev/). Add new icons as macros in `web/templates/partials/icons.html` by copying SVG paths from their site. The `action_btn`, `stat_card`, and `collapse` macros in `components.html` accept an optional `icon` parameter.

## Key Design Decisions

-1. **asyncssh over Paramiko/Fabric**: Native async support, built-in streaming
+1. **Hybrid SSH approach**: asyncssh for parallel streaming with prefixes; native `ssh -t` for raw mode (progress bars)
2. **Parallel by default**: Multiple services run concurrently via `asyncio.gather`
3. **Streaming output**: Real-time stdout/stderr with `[service]` prefix using Rich
4. **SSH key auth only**: Uses ssh-agent, no password handling (YAGNI)
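To illustrate decisions 1-3, a hedged sketch of the asyncssh side of the hybrid pattern (hosts, paths, and commands are invented; this is not compose-farm's actual executor):

```python
import asyncio
import asyncssh  # pip install asyncssh; authenticates via ssh-agent by default

async def run(host: str, service: str, command: str) -> None:
    """Run a command over SSH and prefix its output with [service]."""
    async with asyncssh.connect(host) as conn:
        result = await conn.run(command, check=False)
        for line in (result.stdout or "").splitlines():
            print(f"[{service}] {line}")

async def main() -> None:
    # Parallel by default: both services are handled concurrently.
    await asyncio.gather(
        run("server-1", "plex", "docker compose -f /opt/compose/plex/docker-compose.yml up -d"),
        run("server-2", "jellyfin", "docker compose -f /opt/compose/jellyfin/docker-compose.yml up -d"),
    )

asyncio.run(main())
```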
@@ -48,13 +60,16 @@ CLI available as `cf` or `compose-farm`.
| Command | Description |
|---------|-------------|
| `up` | Start services (`docker compose up -d`), auto-migrates if host changed |
-| `down` | Stop services (`docker compose down`) |
+| `down` | Stop services (`docker compose down`). Use `--orphaned` to stop services removed from config |
| `pull` | Pull latest images |
| `restart` | `down` + `up -d` |
| `update` | `pull` + `down` + `up -d` |
+| `apply` | Make reality match config: migrate services + stop orphans. Use `--dry-run` to preview |
| `logs` | Show service logs |
| `ps` | Show status of all services |
-| `sync` | Discover running services, update state, capture image digests |
| `stats` | Show overview (hosts, services, pending migrations; `--live` for container counts) |
+| `refresh` | Update state from reality: discover running services, capture image digests |
| `check` | Validate config, traefik labels, mounts, networks; show host compatibility |
| `init-network` | Create Docker network on hosts with consistent subnet/gateway |
| `traefik-file` | Generate Traefik file-provider config from compose labels |
| `config` | Manage config files (init, show, path, validate, edit) |
20 Dockerfile Normal file
@@ -0,0 +1,20 @@
# syntax=docker/dockerfile:1

# Build stage - install with uv
FROM ghcr.io/astral-sh/uv:python3.14-alpine AS builder

ARG VERSION
RUN uv tool install --compile-bytecode "compose-farm[web]${VERSION:+==$VERSION}"

# Runtime stage - minimal image without uv
FROM python:3.14-alpine

# Install only runtime requirements
RUN apk add --no-cache openssh-client

# Copy installed tool virtualenv and bin symlinks from builder
COPY --from=builder /root/.local/share/uv/tools/compose-farm /root/.local/share/uv/tools/compose-farm
COPY --from=builder /usr/local/bin/cf /usr/local/bin/compose-farm /usr/local/bin/

ENTRYPOINT ["cf"]
CMD ["--help"]
21 LICENSE Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Bas Nijholt

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
35 PLAN.md
@@ -1,35 +0,0 @@
# Compose Farm – Traefik Multihost Ingress Plan

## Goal
Generate a Traefik file-provider fragment from existing docker-compose Traefik labels (no config duplication) so a single front-door Traefik on 192.168.1.66 with wildcard `*.lab.mydomain.org` can route to services running on other hosts. Keep the current simplicity (SSH + docker compose); no Swarm/K8s.

## Requirements
- Traefik stays on main host; keep current `dynamic.yml` and Docker provider for local containers.
- Add a watched directory provider (any path works) and load a generated fragment (e.g., `compose-farm.generated.yml`).
- No edits to compose files: reuse existing `traefik.*` labels as the single source of truth; Compose Farm only reads them.
- Generator infers routing from labels and reachability from `ports:` mappings; prefer host-published ports so Traefik can reach services across hosts. Upstreams point to `<host address>:<published host port>`; warn if no published port is found.
- Only minimal data in `compose-farm.yaml`: hosts map and service→host mapping (already present).
- No new orchestration/discovery layers; respect KISS/YAGNI/DRY.

## Non-Goals
- No Swarm/Kubernetes adoption.
- No global Docker provider across hosts.
- No health checks/service discovery layer.

## Current State (Dec 2025)
- Compose Farm: Typer CLI wrapping `docker compose` over SSH; config in `compose-farm.yaml`; parallel by default; snapshot/log tooling present.
- Traefik: single instance on 192.168.1.66, wildcard `*.lab.mydomain.org`, Docker provider for local services, file provider via `dynamic.yml` already in use.

## Proposed Implementation Steps
1) Add generator command: `compose-farm traefik-file --output <path>`.
2) Resolve per-service host from `compose-farm.yaml`; read compose file at `{compose_dir}/{service}/docker-compose.yml`.
3) Parse `traefik.*` labels to build routers/services/middlewares as in compose; map container port to published host port (from `ports:`) to form upstream URLs with host address (see the sketch after this plan).
4) Emit file-provider YAML to the watched directory (recommended default: `/mnt/data/traefik/dynamic.d/compose-farm.generated.yml`, but user chooses via `--output`).
5) Warnings: if no published port is found, warn that cross-host reachability requires L3 reachability to container IPs.
6) Tests: label parsing, port mapping, YAML render; scenario with published port; scenario without published port.
7) Docs: update README/CLAUDE to describe directory provider flags and the generator workflow; note that compose files remain unchanged.

## Open Questions
- How to derive target host address: use `hosts.<name>.address` verbatim, or allow override per service? (Default: use host address.)
- Should we support multiple hosts/backends per service for LB/HA? (Start with single server.)
- Where to store generated file by default? (Default to user-specified `--output`; maybe fallback to `./compose-farm-traefik.yml`.)
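A hedged sketch of the port-mapping rule in step 3 (simplified; real compose `ports:` entries also come in IP-prefixed and single-port forms that this ignores):

```python
def published_port(container_port: str, ports: list[str]) -> str | None:
    """Find the host-published port for a container port in compose 'ports:' entries."""
    for entry in ports:
        host_part, sep, rest = entry.partition(":")
        if not sep:
            continue  # a bare "8096" publishes a random host port; nothing usable here
        if rest.split("/")[0] == container_port:
            return host_part
    return None

# e.g. for a hypothetical service with ports: ["8096:8096", "32400:32400/tcp"]
assert published_port("32400", ["8096:8096", "32400:32400/tcp"]) == "32400"
assert published_port("9999", ["8096:8096"]) is None  # would trigger the warning in step 5
```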
790 README.md
@@ -1,11 +1,16 @@
# Compose Farm

[](https://pypi.org/project/compose-farm/)
[](https://pypi.org/project/compose-farm/)
[](LICENSE)
[](https://github.com/basnijholt/compose-farm/stargazers)

<img src="http://files.nijho.lt/compose-farm.png" align="right" style="width: 300px;" />

A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.

> [!NOTE]
-> Run `docker compose` commands across multiple hosts via SSH. One YAML maps services to hosts. Change the mapping, run `up`, and it auto-migrates. No Kubernetes, no Swarm, no magic.
+> Run `docker compose` commands across multiple hosts via SSH. One YAML maps services to hosts. Run `cf apply` and reality matches your config—services start, migrate, or stop as needed. No Kubernetes, no Swarm, no magic.

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
@@ -19,29 +24,59 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
- [What Compose Farm doesn't do](#what-compose-farm-doesnt-do)
- [Installation](#installation)
- [Configuration](#configuration)
  - [Multi-Host Services](#multi-host-services)
  - [Config Command](#config-command)
- [Usage](#usage)
  - [CLI `--help` Output](#cli---help-output)
- [Auto-Migration](#auto-migration)
- [Traefik Multihost Ingress (File Provider)](#traefik-multihost-ingress-file-provider)
- [Comparison with Alternatives](#comparison-with-alternatives)
- [License](#license)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Why Compose Farm?

-I run 100+ Docker Compose stacks on an LXC container that frequently runs out of memory. I needed a way to distribute services across multiple machines without the complexity of:
+I used to run 100+ Docker Compose stacks on a single machine that kept running out of memory. I needed a way to distribute services across multiple machines without the complexity of:

- **Kubernetes**: Overkill for my use case. I don't need pods, services, ingress controllers, or YAML manifests 10x the size of my compose files.
- **Docker Swarm**: Effectively in maintenance mode—no longer being invested in by Docker.

-**Compose Farm is intentionally simple**: one YAML config mapping services to hosts, and a CLI that runs `docker compose` commands over SSH. That's it.
+Both require changes to your compose files. **Compose Farm requires zero changes**—your existing `docker-compose.yml` files work as-is.

I also wanted a declarative setup—one config file that defines where everything runs. Change the config, run `cf apply`, and everything reconciles—services start, migrate, or stop as needed. See [Comparison with Alternatives](#comparison-with-alternatives) for how this compares to other approaches.

<p align="center">
  <a href="https://xkcd.com/927/">
    <img src="https://imgs.xkcd.com/comics/standards.png" alt="xkcd: Standards" width="400" />
  </a>
</p>

Before you say it—no, this is not a new standard. I changed nothing about my existing setup. When I added more hosts, I just mounted my drives at the same paths, and everything worked. You can do all of this manually today—SSH into a host and run `docker compose up`.

Compose Farm just automates what you'd do by hand:
- Runs `docker compose` commands over SSH
- Tracks which service runs on which host
- **One command (`cf apply`) to reconcile everything**—start missing services, migrate moved ones, stop removed ones
- Generates Traefik file-provider config for cross-host routing

**It's a convenience wrapper, not a new paradigm.**

## How It Works

-1. You run `cf up plex`
-2. Compose Farm looks up which host runs `plex` (e.g., `server-1`)
-3. It SSHs to `server-1` (or runs locally if `localhost`)
-4. It executes `docker compose -f /opt/compose/plex/docker-compose.yml up -d`
-5. Output is streamed back with `[plex]` prefix
+**The declarative way** — run `cf apply` and reality matches your config:

1. Compose Farm compares your config to what's actually running
2. Services in config but not running? **Starts them**
3. Services on the wrong host? **Migrates them** (stops on old host, starts on new)
4. Services running but removed from config? **Stops them**

**Under the hood** — each service operation is just SSH + docker compose:

1. Look up which host runs the service (e.g., `plex` → `server-1`)
2. SSH to `server-1` (or run locally if `localhost`)
3. Execute `docker compose -f /opt/compose/plex/docker-compose.yml up -d`
4. Stream output back with `[plex]` prefix

That's it. No orchestration, no service discovery, no magic.
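The reconcile described above boils down to set differences between the desired mapping and the tracked state. A minimal sketch with made-up services (not the actual compose-farm implementation):

```python
desired = {"plex": "server-2", "jellyfin": "server-2"}  # from compose-farm.yaml
actual = {"plex": "server-1", "sonarr": "server-1"}     # from the tracked state

start = [s for s in desired if s not in actual]                          # in config, not running
migrate = [s for s in desired if s in actual and actual[s] != desired[s]]  # wrong host
stop = [s for s in actual if s not in desired]                           # orphaned

print(start, migrate, stop)  # ['jellyfin'] ['plex'] ['sonarr']
```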
@@ -107,6 +142,23 @@ uv tool install compose-farm
pip install compose-farm
```

<details><summary>🐳 Docker</summary>

Using the provided `docker-compose.yml`:
```bash
docker compose run --rm cf up --all
```

Or directly:
```bash
docker run --rm \
  -v $SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent \
  -v ./compose-farm.yaml:/root/.config/compose-farm/compose-farm.yaml:ro \
  ghcr.io/basnijholt/compose-farm up --all
```

</details>

## Configuration

Create `~/.config/compose-farm/compose-farm.yaml` (or `./compose-farm.yaml` in your working directory):
@@ -128,21 +180,93 @@ services:
  jellyfin: server-2
  sonarr: server-1
  radarr: local # Runs on the machine where you invoke compose-farm

  # Multi-host services (run on multiple/all hosts)
  autokuma: all # Runs on ALL configured hosts
  dozzle: [server-1, server-2] # Explicit list of hosts
```

Compose files are expected at `{compose_dir}/{service}/compose.yaml` (also supports `compose.yml`, `docker-compose.yml`, `docker-compose.yaml`).

### Multi-Host Services

Some services need to run on every host. This is typically required for tools that access **host-local resources** like the Docker socket (`/var/run/docker.sock`), which cannot be accessed remotely without security risks.

Common use cases:
- **AutoKuma** - auto-creates Uptime Kuma monitors from container labels (needs local Docker socket)
- **Dozzle** - real-time log viewer (needs local Docker socket)
- **Promtail/Alloy** - log shipping agents (needs local Docker socket and log files)
- **node-exporter** - Prometheus host metrics (needs access to host /proc, /sys)

This is the same pattern as Docker Swarm's `deploy.mode: global`.

Use the `all` keyword or an explicit list:

```yaml
services:
  # Run on all configured hosts
  autokuma: all
  dozzle: all

  # Run on specific hosts
  node-exporter: [server-1, server-2, server-3]
```

When you run `cf up autokuma`, it starts the service on all hosts in parallel. Multi-host services:
- Are excluded from migration logic (they always run everywhere)
- Show output with `[service@host]` prefix for each host
- Track all running hosts in state

### Config Command

Compose Farm includes a `config` subcommand to help manage configuration files:

```bash
cf config init     # Create a new config file with documented example
cf config show     # Display current config with syntax highlighting
cf config path     # Print the config file path (useful for scripting)
cf config validate # Validate config syntax and schema
cf config edit     # Open config in $EDITOR
```

Use `cf config init` to get started with a fully documented template.

## Usage

The CLI is available as both `compose-farm` and the shorter `cf` alias.

-```bash
-# Start services (auto-migrates if host changed in config)
-cf up plex jellyfin
-cf up --all
| Command | Description |
|---------|-------------|
| **`cf apply`** | **Make reality match config (start + migrate + stop orphans)** |
| `cf up <svc>` | Start service (auto-migrates if host changed) |
| `cf down <svc>` | Stop service |
| `cf restart <svc>` | down + up |
| `cf update <svc>` | pull + down + up |
| `cf pull <svc>` | Pull latest images |
| `cf logs -f <svc>` | Follow logs |
| `cf ps` | Show status of all services |
| `cf refresh` | Update state from running services |
| `cf check` | Validate config, mounts, networks |
| `cf init-network` | Create Docker network on hosts |
| `cf traefik-file` | Generate Traefik file-provider config |
| `cf config <cmd>` | Manage config files (init, show, path, validate, edit) |

-# Stop services
-cf down plex
All commands support `--all` to operate on all services.

Each command replaces: look up host → SSH → find compose file → run `ssh host "cd /opt/compose/plex && docker compose up -d"`.

```bash
# The main command: make reality match your config
cf apply               # start missing + migrate + stop orphans
cf apply --dry-run     # preview what would change
cf apply --no-orphans  # skip stopping orphaned services
cf apply --full        # also refresh all services (picks up config changes)

# Or operate on individual services
cf up plex jellyfin    # start services (auto-migrates if host changed)
cf up --all
cf down plex           # stop services
cf down --orphaned     # stop services removed from config

# Pull latest images
cf pull --all
@@ -153,9 +277,9 @@ cf restart plex
# Update (pull + down + up) - the end-to-end update command
cf update --all

-# Sync state with reality (discovers running services + captures image digests)
-cf sync            # updates state.yaml and dockerfarm-log.toml
-cf sync --dry-run  # preview without writing
+# Update state from reality (discovers running services + captures digests)
+cf refresh            # updates state.yaml and dockerfarm-log.toml
+cf refresh --dry-run  # preview without writing

# Validate config, traefik labels, mounts, and networks
cf check           # full validation (includes SSH checks)
@@ -174,6 +298,603 @@ cf logs -f plex # follow
cf ps
```

### CLI `--help` Output

Full `--help` output for each command. See the [Usage](#usage) table above for a quick overview.

<details>
<summary>See the output of <code>cf --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf [OPTIONS] COMMAND [ARGS]...

Compose Farm - run docker compose commands across multiple hosts

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --version -v Show version and exit │
│ --install-completion Install completion for the current shell. │
│ --show-completion Show completion for the current shell, to │
│ copy it or customize the installation. │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Lifecycle ──────────────────────────────────────────────────────────────────╮
│ up Start services (docker compose up -d). Auto-migrates if host │
│ changed. │
│ down Stop services (docker compose down). │
│ pull Pull latest images (docker compose pull). │
│ restart Restart services (down + up). │
│ update Update services (pull + build + down + up). │
│ apply Make reality match config (start, migrate, stop as needed). │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Configuration ──────────────────────────────────────────────────────────────╮
│ traefik-file Generate a Traefik file-provider fragment from compose │
│ Traefik labels. │
│ refresh Update local state from running services. │
│ check Validate configuration, traefik labels, mounts, and networks. │
│ init-network Create Docker network on hosts with consistent settings. │
│ config Manage compose-farm configuration files. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Monitoring ─────────────────────────────────────────────────────────────────╮
│ logs Show service logs. │
│ ps Show status of all services. │
│ stats Show overview statistics for hosts and services. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Server ─────────────────────────────────────────────────────────────────────╮
│ web Start the web UI server. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

**Lifecycle**

<details>
<summary>See the output of <code>cf up --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf up --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf up [OPTIONS] [SERVICES]...

Start services (docker compose up -d). Auto-migrates if host changed.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --host -H TEXT Filter to services on this host │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf down --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf down --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf down [OPTIONS] [SERVICES]...

Stop services (docker compose down).

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --orphaned Stop orphaned services (in state but removed from │
│ config) │
│ --host -H TEXT Filter to services on this host │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf pull --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf pull --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf pull [OPTIONS] [SERVICES]...

Pull latest images (docker compose pull).

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf restart --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf restart --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf restart [OPTIONS] [SERVICES]...

Restart services (down + up).

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf update --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf update --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf update [OPTIONS] [SERVICES]...

Update services (pull + build + down + up).

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf apply --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf apply --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf apply [OPTIONS]

Make reality match config (start, migrate, stop as needed).

This is the "reconcile" command that ensures running services match your
config file. It will:
1. Stop orphaned services (in state but removed from config) 2. Migrate
services on wrong host (host in state ≠ host in config) 3. Start missing
services (in config but not in state)
Use --dry-run to preview changes before applying. Use --no-orphans to only
migrate/start without stopping orphaned services. Use --full to also run 'up'
on all services (picks up compose/env changes).

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --dry-run -n Show what would change without executing │
│ --no-orphans Only migrate, don't stop orphaned services │
│ --full -f Also run up on all services to apply config │
│ changes │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

**Configuration**

<details>
<summary>See the output of <code>cf traefik-file --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf traefik-file --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf traefik-file [OPTIONS] [SERVICES]...

Generate a Traefik file-provider fragment from compose Traefik labels.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --output -o PATH Write Traefik file-provider YAML to this path │
│ (stdout if omitted) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf refresh --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf refresh --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf refresh [OPTIONS]

Update local state from running services.

Discovers which services are running on which hosts, updates the state file,
and captures image digests. This is a read operation - it updates your local
state to match reality, not the other way around.
Use 'cf apply' to make reality match your config (stop orphans, migrate).

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --config -c PATH Path to config file │
│ --log-path -l PATH Path to Dockerfarm TOML log │
│ --dry-run -n Show what would change without writing │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf check --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf check --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf check [OPTIONS] [SERVICES]...

Validate configuration, traefik labels, mounts, and networks.

Without arguments: validates all services against configured hosts. With
service arguments: validates specific services and shows host compatibility.
Use --local to skip SSH-based checks for faster validation.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --local Skip SSH-based checks (faster) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf init-network --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf init-network --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf init-network [OPTIONS] [HOSTS]...

Create Docker network on hosts with consistent settings.

Creates an external Docker network that services can use for cross-host
communication. Uses the same subnet/gateway on all hosts to ensure consistent
networking.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ hosts [HOSTS]... Hosts to create network on (default: all) │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --network -n TEXT Network name [default: mynetwork] │
│ --subnet -s TEXT Network subnet [default: 172.20.0.0/16] │
│ --gateway -g TEXT Network gateway [default: 172.20.0.1] │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf config --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf config --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf config [OPTIONS] COMMAND [ARGS]...

Manage compose-farm configuration files.

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Commands ───────────────────────────────────────────────────────────────────╮
│ init Create a new config file with documented example. │
│ edit Open the config file in your default editor. │
│ show Display the config file location and contents. │
│ path Print the config file path (useful for scripting). │
│ validate Validate the config file syntax and schema. │
│ symlink Create a symlink from the default config location to a config │
│ file. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

**Monitoring**

<details>
<summary>See the output of <code>cf logs --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf logs --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf logs [OPTIONS] [SERVICES]...

Show service logs.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ services [SERVICES]... Services to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all services │
│ --host -H TEXT Filter to services on this host │
│ --follow -f Follow logs │
│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 │
│ otherwise) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf ps --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf ps --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf ps [OPTIONS]

Show status of all services.

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf stats --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf stats --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

Usage: cf stats [OPTIONS]

Show overview statistics for hosts and services.

Without --live: Shows config/state info (hosts, services, pending migrations).
With --live: Also queries Docker on each host for container counts.

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --live -l Query Docker for live container stats │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

**Server**

<details>
<summary>See the output of <code>cf web --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf web --help -->
<!-- echo '```' -->
<!-- CODE:END -->

</details>

### Auto-Migration

When you change a service's host assignment in config and run `up`, Compose Farm automatically:
@@ -182,6 +903,8 @@ When you change a service's host assignment in config and run `up`, Compose Farm
3. Runs `up -d` on the new host
4. Updates state tracking

Use `cf apply` to automatically reconcile all services—it finds and migrates services on wrong hosts, stops orphaned services, and starts missing services.

```yaml
# Before: plex runs on server-1
services:
@@ -192,6 +915,14 @@ services:
  plex: server-2 # Compose Farm will migrate automatically
```

**Orphaned services**: When you remove (or comment out) a service from config, it becomes "orphaned"—tracked in state but no longer in config. Use these commands to handle orphans:

- `cf apply` — Migrate services AND stop orphans (the full reconcile)
- `cf down --orphaned` — Only stop orphaned services
- `cf apply --dry-run` — Preview what would change before applying

This makes the config truly declarative: comment out a service, run `cf apply`, and it stops.
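A hedged sketch of the down-then-up migration behind `cf apply` and auto-migrating `up`, shelling out to plain `ssh` (illustrative hosts and paths; compose-farm's real executor works differently):

```python
import subprocess

def compose(host: str, service: str, action: str) -> None:
    """Run a docker compose action for one service on one host over SSH."""
    cmd = f"docker compose -f /opt/compose/{service}/docker-compose.yml {action}"
    subprocess.run(["ssh", host, cmd], check=True)

def migrate(service: str, old_host: str, new_host: str, state: dict[str, str]) -> None:
    compose(old_host, service, "down")   # 1-2. stop on the old host
    compose(new_host, service, "up -d")  # 3. start on the new host
    state[service] = new_host            # 4. update state tracking

state = {"plex": "server-1"}
migrate("plex", "server-1", "server-2", state)
```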
## Traefik Multihost Ingress (File Provider)

If you run a single Traefik instance on one "front‑door" host and want it to route to
@@ -292,6 +1023,31 @@ Update your Traefik config to use directory watching instead of a single file:
      - --providers.file.watch=true
```
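To make the label-to-file-provider translation concrete, a hedged sketch of the data shape involved (illustrative labels, address, and port; the real `cf traefik-file` output may differ):

```python
labels = {
    "traefik.http.routers.plex.rule": "Host(`plex.lab.mydomain.org`)",
    "traefik.http.services.plex.loadbalancer.server.port": "32400",
}
host_address = "192.168.1.67"  # from the hosts map in compose-farm.yaml
published_port = "32400"       # matched against the service's ports: mapping

fragment = {
    "http": {
        "routers": {
            "plex": {"rule": labels["traefik.http.routers.plex.rule"], "service": "plex"},
        },
        "services": {
            "plex": {"loadBalancer": {"servers": [{"url": f"http://{host_address}:{published_port}"}]}},
        },
    }
}
# Rendered as YAML into the watched directory, the front-door Traefik can now
# route plex.lab.mydomain.org to 192.168.1.67:32400 on the other host.
```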
## Comparison with Alternatives

There are many ways to run containers on multiple hosts. Here is where Compose Farm sits:

| | Compose Farm | Docker Contexts | K8s / Swarm | Ansible / Terraform | Portainer / Coolify |
|---|:---:|:---:|:---:|:---:|:---:|
| No compose rewrites | ✅ | ✅ | ❌ | ✅ | ✅ |
| Version controlled | ✅ | ✅ | ✅ | ✅ | ❌ |
| State tracking | ✅ | ❌ | ✅ | ✅ | ✅ |
| Auto-migration | ✅ | ❌ | ✅ | ❌ | ❌ |
| Interactive CLI | ✅ | ❌ | ❌ | ❌ | ❌ |
| Parallel execution | ✅ | ❌ | ✅ | ✅ | ✅ |
| Agentless | ✅ | ✅ | ❌ | ✅ | ❌ |
| High availability | ❌ | ❌ | ✅ | ❌ | ❌ |

**Docker Contexts** — You can use `docker context create remote ssh://...` and `docker compose --context remote up`. But it's manual: you must remember which host runs which service, there's no global view, no parallel execution, and no auto-migration.

**Kubernetes / Docker Swarm** — Full orchestration that abstracts away the hardware. But they require cluster initialization, separate control planes, and often rewriting compose files. They introduce complexity (consensus, overlay networks) unnecessary for static "pet" servers.

**Ansible / Terraform** — Infrastructure-as-Code tools that can SSH in and deploy containers. But they're push-based configuration management, not interactive CLIs. Great for setting up state, clumsy for day-to-day operations like `cf logs -f` or quickly restarting a service.

**Portainer / Coolify** — Web-based management UIs. But they're UI-first and often require agents on your servers. Compose Farm is CLI-first and agentless.

**Compose Farm is the middle ground:** a robust CLI that productizes the manual SSH pattern. You get the "cluster feel" (unified commands, state tracking) without the "cluster cost" (complexity, agents, control planes).

## License

MIT
docker-compose.yml (Normal file, 34 lines)
@@ -0,0 +1,34 @@
services:
  cf:
    image: ghcr.io/basnijholt/compose-farm:latest
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      # Compose directory (contains compose files AND compose-farm.yaml config)
      - ${CF_COMPOSE_DIR:-/opt/stacks}:${CF_COMPOSE_DIR:-/opt/stacks}
    environment:
      - SSH_AUTH_SOCK=/ssh-agent
      # Config file path (state stored alongside it)
      - CF_CONFIG=${CF_COMPOSE_DIR:-/opt/stacks}/compose-farm.yaml

  web:
    image: ghcr.io/basnijholt/compose-farm:latest
    command: web --host 0.0.0.0 --port 9000
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      - ${CF_COMPOSE_DIR:-/opt/stacks}:${CF_COMPOSE_DIR:-/opt/stacks}
    environment:
      - SSH_AUTH_SOCK=/ssh-agent
      - CF_CONFIG=${CF_COMPOSE_DIR:-/opt/stacks}/compose-farm.yaml
    labels:
      - traefik.enable=true
      - traefik.http.routers.compose-farm.rule=Host(`compose-farm.${DOMAIN}`)
      - traefik.http.routers.compose-farm.entrypoints=websecure
      - traefik.http.routers.compose-farm-local.rule=Host(`compose-farm.local`)
      - traefik.http.routers.compose-farm-local.entrypoints=web
      - traefik.http.services.compose-farm.loadbalancer.server.port=9000
    networks:
      - mynetwork

networks:
  mynetwork:
    external: true
docs/reddit-post.md (Normal file, 79 lines)
@@ -0,0 +1,79 @@
# Title options

- Multi-host Docker Compose without Kubernetes or file changes
- I built a CLI to run Docker Compose across hosts. Zero changes to your files.
- I made a CLI to run Docker Compose across multiple hosts without Kubernetes or Swarm
---

I've been running 100+ Docker Compose stacks on a single machine, and it kept running out of memory. I needed to spread services across multiple hosts, but:

- **Kubernetes** felt like overkill. I don't need pods, ingress controllers, or 10x more YAML.
- **Docker Swarm** is basically in maintenance mode.
- Both require rewriting my compose files.

So I built **Compose Farm**, a simple CLI that runs `docker compose` commands over SSH. No agents, no cluster setup, no changes to your existing compose files.

## How it works

One YAML file maps services to hosts:

```yaml
compose_dir: /opt/stacks

hosts:
  nuc: 192.168.1.10
  hp: 192.168.1.11

services:
  plex: nuc
  jellyfin: hp
  sonarr: nuc
  radarr: nuc
```

Then just:

```bash
cf up plex       # runs on nuc via SSH
cf apply         # makes the running state match the config on all hosts (like Terraform apply)
cf up --all      # starts everything on their assigned hosts
cf logs -f plex  # streams logs
cf ps            # shows status across all hosts
```

## Auto-migration

Change a service's host in the config and run `cf up`. It stops the service on the old host and starts it on the new one. No manual SSH needed.

```yaml
# Before
plex: nuc

# After (just change this)
plex: hp
```

```bash
cf up plex  # migrates automatically
```

## Requirements

- SSH key auth to your hosts
- Same paths on all hosts (I use NFS from my NAS)
- That's it. No agents, no daemons.

## What it doesn't do

- No high availability (if a host goes down, services don't auto-migrate)
- No overlay networking (containers on different hosts can't talk via Docker DNS)
- No health checks or automatic restarts

It's a convenience wrapper around `docker compose` + SSH. If you need failover or cross-host container networking, you probably do need Swarm or Kubernetes.

## Links

- GitHub: https://github.com/basnijholt/compose-farm
- Install: `uv tool install compose-farm` or `pip install compose-farm`

Happy to answer questions or take feedback!
docs/truenas-nfs-root-squash.md (Normal file, 65 lines)
@@ -0,0 +1,65 @@
# TrueNAS NFS: Disabling Root Squash

When running Docker containers on NFS-mounted storage, containers that run as root will fail to write files unless root squash is disabled. This document explains the problem and solution.

## The Problem

By default, NFS uses "root squash", which maps the root user (UID 0) on clients to `nobody` on the server. This is a security feature to prevent remote root users from having root access to the NFS server's files.

However, many Docker containers run as root internally. When these containers try to write to NFS-mounted volumes, the writes fail with "Permission denied" because the NFS server sees them as `nobody`, not `root`.

Example error in container logs:
```
System.UnauthorizedAccessException: Access to the path '/data' is denied.
Error: EACCES: permission denied, mkdir '/app/data'
```

## The Solution

In TrueNAS, configure the NFS share to map remote root to local root:

### TrueNAS SCALE UI

1. Go to **Shares → NFS**
2. Edit your share
3. Under **Advanced Options**:
   - **Maproot User**: `root`
   - **Maproot Group**: `wheel`
4. Save

### Result in /etc/exports

```
"/mnt/pool/data"\
    192.168.1.25(sec=sys,rw,no_root_squash,no_subtree_check)\
    192.168.1.26(sec=sys,rw,no_root_squash,no_subtree_check)
```

The `no_root_squash` option means remote root is treated as root on the server.
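
To sanity-check the change from a client, a quick root write test on the mounted share should now succeed (the mount point here is illustrative):

```bash
sudo touch /mnt/data/.root-write-test   # fails with EACCES while root squash is on
ls -l /mnt/data/.root-write-test        # should be owned by root, not nobody
sudo rm /mnt/data/.root-write-test
```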

## Why `wheel`?

On FreeBSD/TrueNAS, the root user's primary group is `wheel` (GID 0), not `root` like on Linux. So `root:wheel` = `0:0`.

## Security Considerations

Disabling root squash means any machine that can mount the NFS share has full root access to those files. This is acceptable when:

- The NFS clients are on a trusted private network
- Only known hosts (by IP) are allowed to mount the share
- The data isn't security-critical

For home lab setups with Docker containers, this is typically fine.

## Alternative: Run Containers as Non-Root

If you prefer to keep root squash enabled, you can run containers as a non-root user:

1. **LinuxServer.io images**: Set `PUID=1000` and `PGID=1000` environment variables
2. **Other images**: Add `user: "1000:1000"` to the compose service

However, not all containers support running as non-root (they may need to bind to privileged ports, create system directories, etc.).
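
A compose sketch of both options (the images are just examples, not taken from this repo's stacks):

```yaml
services:
  # LinuxServer.io-style image: starts as root, then drops to PUID/PGID
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    environment:
      PUID: 1000
      PGID: 1000

  # Generic image: run the whole container as this UID:GID
  whoami:
    image: traefik/whoami
    user: "1000:1000"
```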

## Tested On

- TrueNAS SCALE 24.10
@@ -1,82 +1,171 @@
# Compose Farm Examples

This folder contains example Docker Compose services for testing Compose Farm locally.
Real-world examples demonstrating compose-farm patterns for multi-host Docker deployments.

## Services

| Service | Type | Demonstrates |
|---------|------|--------------|
| [traefik](traefik/) | Infrastructure | Reverse proxy, Let's Encrypt, file-provider |
| [mealie](mealie/) | Single container | Traefik labels, resource limits, environment vars |
| [uptime-kuma](uptime-kuma/) | Single container | Docker socket, user mapping, custom DNS |
| [paperless-ngx](paperless-ngx/) | Multi-container | Redis + App stack (SQLite) |
| [autokuma](autokuma/) | Multi-host | Demonstrates `all` keyword (runs on every host) |

## Key Patterns

### External Network

All services connect to a shared external network for inter-service communication:

```yaml
networks:
  mynetwork:
    external: true
```

Create it on each host with consistent settings:

```bash
compose-farm init-network --network mynetwork --subnet 172.20.0.0/16
```

### Traefik Labels (Dual Routes)

Services expose two routes for different access patterns:

1. **HTTPS route** (`websecure` entrypoint): For your custom domain with Let's Encrypt TLS
2. **HTTP route** (`web` entrypoint): For `.local` domains on your LAN (no TLS needed)

This pattern allows accessing services via:
- `https://mealie.example.com` - from anywhere, with TLS
- `http://mealie.local` - from your local network, no TLS overhead

```yaml
labels:
  # HTTPS route for custom domain (e.g., mealie.example.com)
  - traefik.enable=true
  - traefik.http.routers.myapp.rule=Host(`myapp.${DOMAIN}`)
  - traefik.http.routers.myapp.entrypoints=websecure
  - traefik.http.services.myapp.loadbalancer.server.port=8080
  # HTTP route for .local domain (e.g., myapp.local)
  - traefik.http.routers.myapp-local.rule=Host(`myapp.local`)
  - traefik.http.routers.myapp-local.entrypoints=web
```

> **Note:** `.local` domains require local DNS (e.g., Pi-hole, Technitium) to resolve to your Traefik host.
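
For example, with a dnsmasq-based resolver (Pi-hole uses dnsmasq under the hood), one entry per service pointing at the Traefik host works; the file path, names, and address below are illustrative:

```
# /etc/dnsmasq.d/compose-farm.conf
address=/mealie.local/192.168.1.10
address=/uptime.local/192.168.1.10
```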

### Environment Variables

Each service has a `.env` file for secrets and domain configuration.
Edit these files to set your domain and credentials:

```bash
# Example: set your domain
echo "DOMAIN=example.com" > mealie/.env
```

Variables like `${DOMAIN}` are substituted at runtime by Docker Compose.

### NFS Volume Mounts

All data is stored on shared NFS storage at `/mnt/data/`:

```yaml
volumes:
  - /mnt/data/myapp:/app/data
```

This allows services to migrate between hosts without data loss.
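
For that to work, every host needs the same mount at the same path. A sketch of the matching `/etc/fstab` line (the NAS address and export path are assumptions):

```
# /etc/fstab on every Docker host
192.168.1.5:/mnt/pool/data  /mnt/data  nfs  rw,hard,noatime  0  0
```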

### Multi-Host Services

Services that need to run on every host (e.g., monitoring agents):

```yaml
# In compose-farm.yaml
services:
  autokuma: all  # Runs on every configured host
```

### Multi-Container Stacks

Database-backed apps with multiple services:

```yaml
services:
  redis:
    image: redis:7
  app:
    depends_on:
      - redis
```

> **NFS + PostgreSQL Warning:** PostgreSQL should NOT run on NFS storage due to
> fsync and file locking issues. Use SQLite (safe for single-writer on NFS) or
> keep PostgreSQL data on local volumes (non-migratable).
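
If a stack really does need PostgreSQL, a named local volume keeps the database off NFS at the cost of pinning the service to one host (a sketch, not one of this folder's examples):

```yaml
services:
  db:
    image: postgres:16
    volumes:
      - pgdata:/var/lib/postgresql/data  # local named volume, not /mnt/data

volumes:
  pgdata: {}
```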

### AutoKuma Labels (Optional)

The autokuma example demonstrates compose-farm's **multi-host feature** - running the same service on all hosts using the `all` keyword. AutoKuma itself is not part of compose-farm; it's just a good example because it needs to run on every host to monitor local Docker containers.

[AutoKuma](https://github.com/BigBoot/AutoKuma) automatically creates Uptime Kuma monitors from Docker labels:

```yaml
labels:
  - kuma.myapp.http.name=My App
  - kuma.myapp.http.url=https://myapp.${DOMAIN}
```

## Quick Start

```bash
cd examples

# Check status of all services
compose-farm ps
# 1. Create the shared network on all hosts
compose-farm init-network

# Pull images
compose-farm pull --all

# Start hello-world (runs and exits)
compose-farm up hello

# Start nginx (stays running)
compose-farm up nginx

# Check nginx is running
curl localhost:8080

# View logs
compose-farm logs nginx

# Stop nginx
compose-farm down nginx

# Update all (pull + restart)
compose-farm update --all
```

## Traefik Example

Start Traefik and a sample service with Traefik labels:

```bash
cd examples

# Start Traefik (reverse proxy with dashboard)
# 2. Start Traefik first (the reverse proxy)
compose-farm up traefik

# Start whoami (test service with Traefik labels)
compose-farm up whoami
# 3. Start other services
compose-farm up mealie uptime-kuma

# Access the services
curl -H "Host: whoami.localhost" http://localhost  # whoami via Traefik
curl http://localhost:8081   # Traefik dashboard
curl http://localhost:18082  # whoami direct
# 4. Check status
compose-farm ps

# Generate Traefik file-provider config (for multi-host setups)
# 5. Generate Traefik file-provider config for cross-host routing
compose-farm traefik-file --all

# Stop everything
# 6. View logs
compose-farm logs mealie

# 7. Stop everything
compose-farm down --all
```

The `whoami/docker-compose.yml` shows the standard Traefik label pattern:
## Configuration

```yaml
labels:
  - traefik.enable=true
  - traefik.http.routers.whoami.rule=Host(`whoami.localhost`)
  - traefik.http.routers.whoami.entrypoints=web
  - traefik.http.services.whoami.loadbalancer.server.port=80
```
The `compose-farm.yaml` shows a multi-host setup:

- **primary** (192.168.1.10): Runs Traefik and heavy services
- **secondary** (192.168.1.11): Runs lighter services
- **autokuma**: Runs on ALL hosts to monitor local containers

When Traefik runs on `primary` and a service runs on `secondary`, compose-farm
automatically generates file-provider config so Traefik can route to it.

## Traefik File-Provider

When services run on different hosts than Traefik, use `traefik-file` to generate routing config:

```bash
# Generate config for all services
compose-farm traefik-file --all -o traefik/dynamic.d/compose-farm.yml

# Or configure auto-generation in compose-farm.yaml:
traefik_file: /opt/stacks/traefik/dynamic.d/compose-farm.yml
traefik_service: traefik
```

## Services

| Service | Description | Ports |
|---------|-------------|-------|
| hello | Hello-world container (exits immediately) | - |
| nginx | Nginx web server | 8080 |
| traefik | Traefik reverse proxy with dashboard | 80, 8081 |
| whoami | Test service with Traefik labels | 18082 |

## Config

The `compose-farm.yaml` in this directory configures all services to run locally (no SSH).
It also demonstrates the `traefik_file` option for auto-regenerating Traefik file-provider config.
With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, `restart`, and `update` commands.
examples/autokuma/.env (Normal file, 4 lines)
@@ -0,0 +1,4 @@
# Copy to .env and fill in your values
DOMAIN=example.com
UPTIME_KUMA_USERNAME=admin
UPTIME_KUMA_PASSWORD=your-uptime-kuma-password

examples/autokuma/compose.yaml (Normal file, 31 lines)
@@ -0,0 +1,31 @@
# AutoKuma - Automatic Uptime Kuma monitor creation from Docker labels
# Demonstrates: Multi-host service (runs on ALL hosts)
#
# This service monitors Docker containers on each host and automatically
# creates Uptime Kuma monitors based on container labels.
#
# In compose-farm.yaml, configure as:
#   autokuma: all
#
# This runs the same container on every host, so each host's local
# Docker socket is monitored.
name: autokuma
services:
  autokuma:
    image: ghcr.io/bigboot/autokuma:latest
    container_name: autokuma
    restart: unless-stopped
    environment:
      # Connect to your Uptime Kuma instance
      AUTOKUMA__KUMA__URL: https://uptime.${DOMAIN}
      AUTOKUMA__KUMA__USERNAME: ${UPTIME_KUMA_USERNAME}
      AUTOKUMA__KUMA__PASSWORD: ${UPTIME_KUMA_PASSWORD}
      # Tag for auto-created monitors
      AUTOKUMA__TAG__NAME: autokuma
      AUTOKUMA__TAG__COLOR: "#10B981"
    volumes:
      # Access local Docker socket to discover containers
      - /var/run/docker.sock:/var/run/docker.sock:ro
    # Custom DNS for resolving internal domains
    dns:
      - 192.168.1.1  # Your local DNS server

@@ -1 +1,9 @@
deployed: {}
deployed:
  autokuma:
    - primary
    - secondary
    - local
  mealie: secondary
  paperless-ngx: primary
  traefik: primary
  uptime-kuma: secondary

@@ -1,17 +1,40 @@
# Example Compose Farm config for local testing
# Run from the examples directory: cd examples && compose-farm ps
# Example Compose Farm configuration
# Demonstrates a multi-host setup with NFS shared storage
#
# To test locally: Update the host addresses and run from the examples directory

compose_dir: .
compose_dir: /opt/stacks/compose-farm/examples

# Auto-regenerate Traefik file-provider config after up/down/restart/update
traefik_file: ./traefik/dynamic.d/compose-farm.yml
traefik_service: traefik  # Skip services on same host (docker provider handles them)
traefik_file: /opt/stacks/compose-farm/examples/traefik/dynamic.d/compose-farm.yml
traefik_service: traefik  # Skip Traefik's host in file-provider (docker provider handles it)

hosts:
  # Primary server - runs Traefik and most services
  # Full form with all options
  primary:
    address: 192.168.1.10
    user: deploy
    port: 22

  # Secondary server - runs some services for load distribution
  # Short form (user defaults to current user, port defaults to 22)
  secondary: 192.168.1.11

  # Local execution (no SSH) - for testing or when running on the host itself
  local: localhost

services:
  hello: local
  nginx: local
  traefik: local
  whoami: local
  # Infrastructure (runs on primary where Traefik is)
  traefik: primary

  # Multi-host services (runs on ALL hosts)
  # AutoKuma monitors Docker containers on each host
  autokuma: all

  # Primary server services
  paperless-ngx: primary

  # Secondary server services (distributed for performance)
  mealie: secondary
  uptime-kuma: secondary

@@ -1,4 +0,0 @@
services:
  hello:
    image: hello-world
    container_name: sdc-hello

examples/mealie/.env (Normal file, 2 lines)
@@ -0,0 +1,2 @@
# Copy to .env and fill in your values
DOMAIN=example.com
examples/mealie/compose.yaml (Normal file, 47 lines)
@@ -0,0 +1,47 @@
# Mealie - Recipe manager
# Simple single-container service with Traefik labels
#
# Demonstrates:
# - HTTPS route: mealie.${DOMAIN} (e.g., mealie.example.com) with Let's Encrypt
# - HTTP route: mealie.local for LAN access without TLS
# - External network, resource limits, environment variables
name: mealie
services:
  mealie:
    image: ghcr.io/mealie-recipes/mealie:latest
    container_name: mealie
    restart: unless-stopped
    networks:
      - mynetwork
    ports:
      - "9925:9000"
    deploy:
      resources:
        limits:
          memory: 1000M
    volumes:
      - /mnt/data/mealie:/app/data
    environment:
      ALLOW_SIGNUP: "false"
      PUID: 1000
      PGID: 1000
      TZ: America/Los_Angeles
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
      BASE_URL: https://mealie.${DOMAIN}
    labels:
      # HTTPS route: mealie.example.com (requires DOMAIN in .env)
      - traefik.enable=true
      - traefik.http.routers.mealie.rule=Host(`mealie.${DOMAIN}`)
      - traefik.http.routers.mealie.entrypoints=websecure
      - traefik.http.services.mealie.loadbalancer.server.port=9000
      # HTTP route: mealie.local (for LAN access, no TLS)
      - traefik.http.routers.mealie-local.rule=Host(`mealie.local`)
      - traefik.http.routers.mealie-local.entrypoints=web
      # AutoKuma: automatically create Uptime Kuma monitor
      - kuma.mealie.http.name=Mealie
      - kuma.mealie.http.url=https://mealie.${DOMAIN}

networks:
  mynetwork:
    external: true

@@ -1,6 +0,0 @@
services:
  nginx:
    image: nginx:alpine
    container_name: sdc-nginx
    ports:
      - "8080:80"
examples/paperless-ngx/.env (Normal file, 3 lines)
@@ -0,0 +1,3 @@
# Copy to .env and fill in your values
DOMAIN=example.com
PAPERLESS_SECRET_KEY=change-me-to-a-random-string

examples/paperless-ngx/compose.yaml (Normal file, 60 lines)
@@ -0,0 +1,60 @@
# Paperless-ngx - Document management system
#
# Demonstrates:
# - HTTPS route: paperless.${DOMAIN} (e.g., paperless.example.com) with Let's Encrypt
# - HTTP route: paperless.local for LAN access without TLS
# - Multi-container stack (Redis + App with SQLite)
#
# NOTE: This example uses SQLite (the default) instead of PostgreSQL.
# PostgreSQL should NOT be used with NFS storage due to fsync/locking issues.
# If you need PostgreSQL, use local volumes for the database.
name: paperless-ngx
services:
  redis:
    image: redis:8
    container_name: paperless-redis
    restart: unless-stopped
    networks:
      - mynetwork
    volumes:
      - /mnt/data/paperless/redis:/data

  paperless:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    container_name: paperless
    restart: unless-stopped
    depends_on:
      - redis
    networks:
      - mynetwork
    ports:
      - "8000:8000"
    volumes:
      # SQLite database stored here (safe on NFS for single-writer)
      - /mnt/data/paperless/data:/usr/src/paperless/data
      - /mnt/data/paperless/media:/usr/src/paperless/media
      - /mnt/data/paperless/export:/usr/src/paperless/export
      - /mnt/data/paperless/consume:/usr/src/paperless/consume
    environment:
      PAPERLESS_REDIS: redis://redis:6379
      PAPERLESS_URL: https://paperless.${DOMAIN}
      PAPERLESS_SECRET_KEY: ${PAPERLESS_SECRET_KEY}
      USERMAP_UID: 1000
      USERMAP_GID: 1000
    labels:
      # HTTPS route: paperless.example.com (requires DOMAIN in .env)
      - traefik.enable=true
      - traefik.http.routers.paperless.rule=Host(`paperless.${DOMAIN}`)
      - traefik.http.routers.paperless.entrypoints=websecure
      - traefik.http.services.paperless.loadbalancer.server.port=8000
      - traefik.docker.network=mynetwork
      # HTTP route: paperless.local (for LAN access, no TLS)
      - traefik.http.routers.paperless-local.rule=Host(`paperless.local`)
      - traefik.http.routers.paperless-local.entrypoints=web
      # AutoKuma: automatically create Uptime Kuma monitor
      - kuma.paperless.http.name=Paperless
      - kuma.paperless.http.url=https://paperless.${DOMAIN}

networks:
  mynetwork:
    external: true

examples/traefik/.env (Normal file, 5 lines)
@@ -0,0 +1,5 @@
# Copy to .env and fill in your values
DOMAIN=example.com
ACME_EMAIL=you@example.com
CF_API_EMAIL=you@example.com
CF_API_KEY=your-cloudflare-api-key
examples/traefik/compose.yaml (Normal file, 58 lines)
@@ -0,0 +1,58 @@
# Traefik reverse proxy with Let's Encrypt and file-provider support
# This is the foundation service - other services route through it
#
# Entrypoints:
# - web (port 80): HTTP for .local domains (no TLS needed on LAN)
# - websecure (port 443): HTTPS with Let's Encrypt for custom domains
name: traefik
services:
  traefik:
    image: traefik:v3.2
    container_name: traefik
    command:
      - --api.dashboard=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --providers.docker.network=mynetwork
      # File provider for routing to services on other hosts
      - --providers.file.directory=/dynamic.d
      - --providers.file.watch=true
      # HTTP entrypoint for .local domains (LAN access, no TLS)
      - --entrypoints.web.address=:80
      # HTTPS entrypoint for custom domains (with Let's Encrypt TLS)
      - --entrypoints.websecure.address=:443
      - --entrypoints.websecure.asDefault=true
      - --entrypoints.websecure.http.tls.certresolver=letsencrypt
      # Let's Encrypt DNS challenge (using Cloudflare as example)
      - --certificatesresolvers.letsencrypt.acme.email=${ACME_EMAIL}
      - --certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json
      - --certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare
      - --certificatesresolvers.letsencrypt.acme.dnschallenge.resolvers=1.1.1.1:53
    environment:
      # Cloudflare API token for DNS challenge
      CF_API_EMAIL: ${CF_API_EMAIL}
      CF_API_KEY: ${CF_API_KEY}
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
      - "8080:8080"  # Dashboard
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /mnt/data/traefik/letsencrypt:/letsencrypt
      - ./dynamic.d:/dynamic.d:ro
    networks:
      - mynetwork
    labels:
      - traefik.enable=true
      # Dashboard accessible at traefik.yourdomain.com
      - traefik.http.routers.traefik.rule=Host(`traefik.${DOMAIN}`)
      - traefik.http.routers.traefik.entrypoints=websecure
      - traefik.http.routers.traefik.service=api@internal
      # AutoKuma: automatically create Uptime Kuma monitor
      - kuma.traefik.http.name=Traefik
      - kuma.traefik.http.url=https://traefik.${DOMAIN}

networks:
  mynetwork:
    external: true
@@ -1,17 +0,0 @@
services:
  traefik:
    image: traefik:v3.2
    container_name: traefik
    command:
      - --api.insecure=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --providers.file.directory=/dynamic.d
      - --providers.file.watch=true
      - --entrypoints.web.address=:80
    ports:
      - "80:80"
      - "8081:8080"  # Traefik dashboard
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./dynamic.d:/dynamic.d
@@ -1 +1,40 @@
{}
# Auto-generated by compose-farm
# https://github.com/basnijholt/compose-farm
#
# This file routes traffic to services running on hosts other than Traefik's host.
# Services on Traefik's host use the Docker provider directly.
#
# Regenerate with: compose-farm traefik-file --all -o <this-file>
# Or configure traefik_file in compose-farm.yaml for automatic updates.

http:
  routers:
    mealie:
      rule: Host(`mealie.example.com`)
      entrypoints:
        - websecure
      service: mealie
    mealie-local:
      rule: Host(`mealie.local`)
      entrypoints:
        - web
      service: mealie
    uptime:
      rule: Host(`uptime.example.com`)
      entrypoints:
        - websecure
      service: uptime
    uptime-local:
      rule: Host(`uptime.local`)
      entrypoints:
        - web
      service: uptime
  services:
    mealie:
      loadbalancer:
        servers:
          - url: http://192.168.1.11:9925
    uptime:
      loadbalancer:
        servers:
          - url: http://192.168.1.11:3001
examples/uptime-kuma/.env (Normal file, 2 lines)
@@ -0,0 +1,2 @@
# Copy to .env and fill in your values
DOMAIN=example.com

examples/uptime-kuma/compose.yaml (Normal file, 43 lines)
@@ -0,0 +1,43 @@
# Uptime Kuma - Monitoring dashboard
#
# Demonstrates:
# - HTTPS route: uptime.${DOMAIN} (e.g., uptime.example.com) with Let's Encrypt
# - HTTP route: uptime.local for LAN access without TLS
# - Docker socket access, user mapping for NFS, custom DNS
name: uptime-kuma
services:
  uptime-kuma:
    image: louislam/uptime-kuma:2
    container_name: uptime-kuma
    restart: unless-stopped
    # Run as non-root user (important for NFS volumes)
    user: "1000:1000"
    networks:
      - mynetwork
    ports:
      - "3001:3001"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /mnt/data/uptime-kuma:/app/data
    environment:
      PUID: 1000
      PGID: 1000
    # Custom DNS for internal domain resolution
    dns:
      - 192.168.1.1  # Your local DNS server
    labels:
      # HTTPS route: uptime.example.com (requires DOMAIN in .env)
      - traefik.enable=true
      - traefik.http.routers.uptime.rule=Host(`uptime.${DOMAIN}`)
      - traefik.http.routers.uptime.entrypoints=websecure
      - traefik.http.services.uptime.loadbalancer.server.port=3001
      # HTTP route: uptime.local (for LAN access, no TLS)
      - traefik.http.routers.uptime-local.rule=Host(`uptime.local`)
      - traefik.http.routers.uptime-local.entrypoints=web
      # AutoKuma: automatically create Uptime Kuma monitor
      - kuma.uptime.http.name=Uptime Kuma
      - kuma.uptime.http.url=https://uptime.${DOMAIN}

networks:
  mynetwork:
    external: true
@@ -1,11 +0,0 @@
services:
  whoami:
    image: traefik/whoami
    container_name: whoami
    ports:
      - "18082:80"
    labels:
      - traefik.enable=true
      - traefik.http.routers.whoami.rule=Host(`whoami.localhost`)
      - traefik.http.routers.whoami.entrypoints=web
      - traefik.http.services.whoami.loadbalancer.server.port=80
hatch_build.py (Normal file, 170 lines)
@@ -0,0 +1,170 @@
"""Hatch build hook to vendor CDN assets for offline use.

During wheel builds, this hook:
1. Parses base.html to find elements with data-vendor attributes
2. Downloads each CDN asset to a temporary vendor directory
3. Rewrites base.html to use local /static/vendor/ paths
4. Fetches and bundles license information
5. Includes everything in the wheel via force_include

The source base.html keeps CDN links for development; only the
distributed wheel has vendored assets.
"""

from __future__ import annotations

import re
import shutil
import subprocess
import tempfile
from pathlib import Path
from typing import Any
from urllib.request import Request, urlopen

from hatchling.builders.hooks.plugin.interface import BuildHookInterface

# Matches elements with data-vendor attribute: extracts URL and target filename
# Example: <script src="https://..." data-vendor="htmx.js">
# Captures: (1) src/href, (2) URL, (3) attributes between, (4) vendor filename
VENDOR_PATTERN = re.compile(r'(src|href)="(https://[^"]+)"([^>]*?)data-vendor="([^"]+)"')

# License URLs for each package (GitHub raw URLs)
LICENSE_URLS: dict[str, tuple[str, str]] = {
    "htmx": ("MIT", "https://raw.githubusercontent.com/bigskysoftware/htmx/master/LICENSE"),
    "xterm": ("MIT", "https://raw.githubusercontent.com/xtermjs/xterm.js/master/LICENSE"),
    "daisyui": ("MIT", "https://raw.githubusercontent.com/saadeghi/daisyui/master/LICENSE"),
    "tailwindcss": (
        "MIT",
        "https://raw.githubusercontent.com/tailwindlabs/tailwindcss/master/LICENSE",
    ),
}


def _download(url: str) -> bytes:
    """Download a URL, trying urllib first then curl as fallback."""
    # Try urllib first
    try:
        req = Request(  # noqa: S310
            url, headers={"User-Agent": "Mozilla/5.0 (compatible; compose-farm build)"}
        )
        with urlopen(req, timeout=30) as resp:  # noqa: S310
            return resp.read()  # type: ignore[no-any-return]
    except Exception:  # noqa: S110
        pass  # Fall through to curl

    # Fallback to curl (handles SSL proxies better)
    result = subprocess.run(
        ["curl", "-fsSL", "--max-time", "30", url],  # noqa: S607
        capture_output=True,
        check=True,
    )
    return bytes(result.stdout)


def _generate_licenses_file(temp_dir: Path) -> None:
    """Download and combine license files into LICENSES.txt."""
    lines = [
        "# Vendored Dependencies - License Information",
        "",
        "This file contains license information for JavaScript/CSS libraries",
        "bundled with compose-farm for offline use.",
        "",
        "=" * 70,
        "",
    ]

    for pkg_name, (license_type, license_url) in LICENSE_URLS.items():
        lines.append(f"## {pkg_name} ({license_type})")
        lines.append(f"Source: {license_url}")
        lines.append("")
        lines.append(_download(license_url).decode("utf-8"))
        lines.append("")
        lines.append("=" * 70)
        lines.append("")

    (temp_dir / "LICENSES.txt").write_text("\n".join(lines))


class VendorAssetsHook(BuildHookInterface):  # type: ignore[misc]
    """Hatch build hook that vendors CDN assets into the wheel."""

    PLUGIN_NAME = "vendor-assets"

    def initialize(
        self,
        _version: str,
        build_data: dict[str, Any],
    ) -> None:
        """Download CDN assets and prepare them for inclusion in the wheel."""
        # Only run for wheel builds
        if self.target_name != "wheel":
            return

        # Paths
        src_dir = Path(self.root) / "src" / "compose_farm"
        base_html_path = src_dir / "web" / "templates" / "base.html"

        if not base_html_path.exists():
            return

        # Create temp directory for vendored assets
        temp_dir = Path(tempfile.mkdtemp(prefix="compose_farm_vendor_"))
        vendor_dir = temp_dir / "vendor"
        vendor_dir.mkdir()

        # Read and parse base.html
        html_content = base_html_path.read_text()
        url_to_filename: dict[str, str] = {}

        # Find all elements with data-vendor attribute and download them
        for match in VENDOR_PATTERN.finditer(html_content):
            url = match.group(2)
            filename = match.group(4)

            if url in url_to_filename:
                continue

            url_to_filename[url] = filename
            content = _download(url)
            (vendor_dir / filename).write_bytes(content)

        if not url_to_filename:
            return

        # Generate LICENSES.txt
        _generate_licenses_file(vendor_dir)

        # Rewrite HTML to use local paths (remove data-vendor, update URL)
        def replace_vendor_tag(match: re.Match[str]) -> str:
            attr = match.group(1)  # src or href
            url = match.group(2)
            between = match.group(3)  # attributes between URL and data-vendor
            filename = match.group(4)
            if url in url_to_filename:
                return f'{attr}="/static/vendor/{filename}"{between}'
            return match.group(0)

        modified_html = VENDOR_PATTERN.sub(replace_vendor_tag, html_content)

        # Write modified base.html to temp
        templates_dir = temp_dir / "templates"
        templates_dir.mkdir()
        (templates_dir / "base.html").write_text(modified_html)

        # Add to force_include to override files in the wheel
        force_include = build_data.setdefault("force_include", {})
        force_include[str(vendor_dir)] = "compose_farm/web/static/vendor"
        force_include[str(templates_dir / "base.html")] = "compose_farm/web/templates/base.html"

        # Store temp_dir path for cleanup
        self._temp_dir = temp_dir

    def finalize(
        self,
        _version: str,
        _build_data: dict[str, Any],
        _artifact_path: str,
    ) -> None:
        """Clean up temporary directory after build."""
        if hasattr(self, "_temp_dir") and self._temp_dir.exists():
            shutil.rmtree(self._temp_dir, ignore_errors=True)
@@ -3,10 +3,43 @@ name = "compose-farm"
dynamic = ["version"]
description = "Compose Farm - run docker compose commands across multiple hosts"
readme = "README.md"
license = "MIT"
license-files = ["LICENSE"]
authors = [
    { name = "Bas Nijholt", email = "bas@nijho.lt" }
]
maintainers = [
    { name = "Bas Nijholt", email = "bas@nijho.lt" }
]
requires-python = ">=3.11"
keywords = [
    "docker",
    "docker-compose",
    "ssh",
    "devops",
    "deployment",
    "container",
    "orchestration",
    "multi-host",
    "homelab",
    "self-hosted",
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "Intended Audience :: System Administrators",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Programming Language :: Python :: 3.14",
    "Topic :: System :: Systems Administration",
    "Topic :: Utilities",
    "Typing :: Typed",
]
dependencies = [
    "typer>=0.9.0",
    "pydantic>=2.0.0",
@@ -15,6 +48,20 @@ dependencies = [
    "rich>=13.0.0",
]

[project.optional-dependencies]
web = [
    "fastapi[standard]>=0.109.0",
    "jinja2>=3.1.0",
    "websockets>=12.0",
]

[project.urls]
Homepage = "https://github.com/basnijholt/compose-farm"
Repository = "https://github.com/basnijholt/compose-farm"
Documentation = "https://github.com/basnijholt/compose-farm#readme"
Issues = "https://github.com/basnijholt/compose-farm/issues"
Changelog = "https://github.com/basnijholt/compose-farm/releases"

[project.scripts]
compose-farm = "compose_farm.cli:app"
cf = "compose_farm.cli:app"
@@ -32,6 +79,9 @@ version-file = "src/compose_farm/_version.py"
[tool.hatch.build.targets.wheel]
packages = ["src/compose_farm"]

[tool.hatch.build.hooks.custom]
# Vendors CDN assets (JS/CSS) into the wheel for offline use

[tool.ruff]
target-version = "py311"
line-length = 100
@@ -61,7 +111,7 @@ ignore = [
]

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["S101", "PLR2004", "S108", "D102", "D103"]  # relaxed docstrings + asserts in tests
"tests/*" = ["S101", "PLR2004", "S108", "D102", "D103", "PLC0415", "ARG001", "ARG002", "TC003"]  # relaxed for tests

[tool.ruff.lint.mccabe]
max-complexity = 18
@@ -79,6 +129,10 @@ ignore_missing_imports = true
module = "tests.*"
disallow_untyped_decorators = false

[[tool.mypy.overrides]]
module = "compose_farm.web.*"
disallow_untyped_decorators = false

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
@@ -113,4 +167,11 @@ dev = [
    "ruff>=0.14.8",
    "types-pyyaml>=6.0.12.20250915",
    "markdown-code-runner>=0.7.0",
    # Web deps for type checking (these ship with inline types)
    "fastapi>=0.109.0",
    "uvicorn[standard]>=0.27.0",
    "jinja2>=3.1.0",
    "websockets>=12.0",
    # For FastAPI TestClient
    "httpx>=0.28.0",
]
@@ -1,618 +0,0 @@
"""CLI interface using Typer."""

from __future__ import annotations

import asyncio
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, TypeVar

import typer
import yaml
from rich.console import Console

from . import __version__
from .config import Config, load_config
from .executor import CommandResult, run_command, run_on_services, run_sequential_on_services
from .logs import snapshot_services
from .operations import (
    check_host_compatibility,
    check_mounts_on_configured_hosts,
    check_networks_on_configured_hosts,
    discover_running_services,
    up_services,
)
from .state import get_services_needing_migration, load_state, remove_service, save_state
from .traefik import generate_traefik_config

if TYPE_CHECKING:
    from collections.abc import Coroutine

T = TypeVar("T")

console = Console(highlight=False)
err_console = Console(stderr=True, highlight=False)


def _load_config_or_exit(config_path: Path | None) -> Config:
    """Load config or exit with a friendly error message."""
    try:
        return load_config(config_path)
    except FileNotFoundError as e:
        err_console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1) from e


def _maybe_regenerate_traefik(cfg: Config) -> None:
    """Regenerate traefik config if traefik_file is configured."""
    if cfg.traefik_file is None:
        return

    try:
        dynamic, warnings = generate_traefik_config(cfg, list(cfg.services.keys()))
        new_content = yaml.safe_dump(dynamic, sort_keys=False)

        # Check if content changed
        old_content = ""
        if cfg.traefik_file.exists():
            old_content = cfg.traefik_file.read_text()

        if new_content != old_content:
            cfg.traefik_file.parent.mkdir(parents=True, exist_ok=True)
            cfg.traefik_file.write_text(new_content)
            console.print()  # Ensure we're on a new line after streaming output
            console.print(f"[green]✓[/] Traefik config updated: {cfg.traefik_file}")

        for warning in warnings:
            err_console.print(f"[yellow]![/] {warning}")
    except (FileNotFoundError, ValueError) as exc:
        err_console.print(f"[yellow]![/] Failed to update traefik config: {exc}")


def _version_callback(value: bool) -> None:
    """Print version and exit."""
    if value:
        typer.echo(f"compose-farm {__version__}")
        raise typer.Exit


app = typer.Typer(
    name="compose-farm",
    help="Compose Farm - run docker compose commands across multiple hosts",
    no_args_is_help=True,
    context_settings={"help_option_names": ["-h", "--help"]},
)


@app.callback()
def main(
    version: Annotated[
        bool,
        typer.Option(
            "--version",
            "-v",
            help="Show version and exit",
            callback=_version_callback,
            is_eager=True,
        ),
    ] = False,
) -> None:
    """Compose Farm - run docker compose commands across multiple hosts."""


def _get_services(
    services: list[str],
    all_services: bool,
    config_path: Path | None,
) -> tuple[list[str], Config]:
    """Resolve service list and load config."""
    config = _load_config_or_exit(config_path)

    if all_services:
        return list(config.services.keys()), config
    if not services:
        err_console.print("[red]✗[/] Specify services or use --all")
        raise typer.Exit(1)
    return list(services), config


def _run_async(coro: Coroutine[None, None, T]) -> T:
    """Run async coroutine."""
    return asyncio.run(coro)


def _report_results(results: list[CommandResult]) -> None:
    """Report command results and exit with appropriate code."""
    failed = [r for r in results if not r.success]
    if failed:
        for r in failed:
            err_console.print(
                f"[cyan]\\[{r.service}][/] [red]Failed with exit code {r.exit_code}[/]"
            )
        raise typer.Exit(1)


ServicesArg = Annotated[
    list[str] | None,
    typer.Argument(help="Services to operate on"),
]
AllOption = Annotated[
    bool,
    typer.Option("--all", "-a", help="Run on all services"),
]
ConfigOption = Annotated[
    Path | None,
    typer.Option("--config", "-c", help="Path to config file"),
]
LogPathOption = Annotated[
    Path | None,
    typer.Option("--log-path", "-l", help="Path to Dockerfarm TOML log"),
]

MISSING_PATH_PREVIEW_LIMIT = 2
@app.command(rich_help_panel="Lifecycle")
def up(
    services: ServicesArg = None,
    all_services: AllOption = False,
    migrate: Annotated[
        bool, typer.Option("--migrate", "-m", help="Only services needing migration")
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Start services (docker compose up -d). Auto-migrates if host changed."""
    if migrate:
        cfg = _load_config_or_exit(config)
        svc_list = get_services_needing_migration(cfg)
        if not svc_list:
            console.print("[green]✓[/] No services need migration")
            return
        console.print(f"[cyan]Migrating {len(svc_list)} service(s):[/] {', '.join(svc_list)}")
    else:
        svc_list, cfg = _get_services(services or [], all_services, config)
    # Always use raw output - migrations are sequential anyway
    results = _run_async(up_services(cfg, svc_list, raw=True))
    _maybe_regenerate_traefik(cfg)
    _report_results(results)


@app.command(rich_help_panel="Lifecycle")
def down(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Stop services (docker compose down)."""
    svc_list, cfg = _get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = _run_async(run_on_services(cfg, svc_list, "down", raw=raw))

    # Remove from state on success
    for result in results:
        if result.success:
            remove_service(cfg, result.service)

    _maybe_regenerate_traefik(cfg)
    _report_results(results)


@app.command(rich_help_panel="Lifecycle")
def pull(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Pull latest images (docker compose pull)."""
    svc_list, cfg = _get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = _run_async(run_on_services(cfg, svc_list, "pull", raw=raw))
    _report_results(results)


@app.command(rich_help_panel="Lifecycle")
def restart(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Restart services (down + up)."""
    svc_list, cfg = _get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = _run_async(run_sequential_on_services(cfg, svc_list, ["down", "up -d"], raw=raw))
    _maybe_regenerate_traefik(cfg)
    _report_results(results)


@app.command(rich_help_panel="Lifecycle")
def update(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Update services (pull + down + up)."""
    svc_list, cfg = _get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = _run_async(
        run_sequential_on_services(cfg, svc_list, ["pull", "down", "up -d"], raw=raw)
    )
    _maybe_regenerate_traefik(cfg)
    _report_results(results)


@app.command(rich_help_panel="Monitoring")
def logs(
    services: ServicesArg = None,
    all_services: AllOption = False,
    follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False,
    tail: Annotated[int, typer.Option("--tail", "-n", help="Number of lines")] = 100,
    config: ConfigOption = None,
) -> None:
    """Show service logs."""
    svc_list, cfg = _get_services(services or [], all_services, config)
    cmd = f"logs --tail {tail}"
    if follow:
        cmd += " -f"
    results = _run_async(run_on_services(cfg, svc_list, cmd))
    _report_results(results)


@app.command(rich_help_panel="Monitoring")
def ps(
    config: ConfigOption = None,
) -> None:
    """Show status of all services."""
    cfg = _load_config_or_exit(config)
    results = _run_async(run_on_services(cfg, list(cfg.services.keys()), "ps"))
    _report_results(results)


@app.command("traefik-file", rich_help_panel="Configuration")
def traefik_file(
    services: ServicesArg = None,
    all_services: AllOption = False,
    output: Annotated[
        Path | None,
        typer.Option(
            "--output",
            "-o",
            help="Write Traefik file-provider YAML to this path (stdout if omitted)",
        ),
    ] = None,
    config: ConfigOption = None,
) -> None:
    """Generate a Traefik file-provider fragment from compose Traefik labels."""
    svc_list, cfg = _get_services(services or [], all_services, config)
    try:
        dynamic, warnings = generate_traefik_config(cfg, svc_list)
    except (FileNotFoundError, ValueError) as exc:
        err_console.print(f"[red]✗[/] {exc}")
        raise typer.Exit(1) from exc

    rendered = yaml.safe_dump(dynamic, sort_keys=False)

    if output:
        output.parent.mkdir(parents=True, exist_ok=True)
        output.write_text(rendered)
        console.print(f"[green]✓[/] Traefik config written to {output}")
    else:
        console.print(rendered)

    for warning in warnings:
        err_console.print(f"[yellow]![/] {warning}")


def _report_sync_changes(
    added: list[str],
    removed: list[str],
    changed: list[tuple[str, str, str]],
    discovered: dict[str, str],
    current_state: dict[str, str],
) -> None:
    """Report sync changes to the user."""
    if added:
        console.print(f"\nNew services found ({len(added)}):")
        for service in sorted(added):
            console.print(f" [green]+[/] [cyan]{service}[/] on [magenta]{discovered[service]}[/]")

    if changed:
        console.print(f"\nServices on different hosts ({len(changed)}):")
        for service, old_host, new_host in sorted(changed):
            console.print(
                f" [yellow]~[/] [cyan]{service}[/]: "
                f"[magenta]{old_host}[/] → [magenta]{new_host}[/]"
            )

    if removed:
        console.print(f"\nServices no longer running ({len(removed)}):")
        for service in sorted(removed):
            console.print(
                f" [red]-[/] [cyan]{service}[/] (was on [magenta]{current_state[service]}[/])"
            )
@app.command(rich_help_panel="Configuration")
|
||||
def sync(
|
||||
config: ConfigOption = None,
|
||||
log_path: LogPathOption = None,
|
||||
dry_run: Annotated[
|
||||
bool,
|
||||
typer.Option("--dry-run", "-n", help="Show what would be synced without writing"),
|
||||
] = False,
|
||||
) -> None:
|
||||
"""Sync local state with running services.
|
||||
|
||||
Discovers which services are running on which hosts, updates the state
|
||||
file, and captures image digests. Combines service discovery with
|
||||
image snapshot into a single command.
|
||||
"""
|
||||
cfg = _load_config_or_exit(config)
|
||||
current_state = load_state(cfg)
|
||||
|
||||
console.print("Discovering running services...")
|
||||
discovered = _run_async(discover_running_services(cfg))
|
||||
|
||||
# Calculate changes
|
||||
added = [s for s in discovered if s not in current_state]
|
||||
removed = [s for s in current_state if s not in discovered]
|
||||
changed = [
|
||||
(s, current_state[s], discovered[s])
|
||||
for s in discovered
|
||||
if s in current_state and current_state[s] != discovered[s]
|
||||
]
|
||||
|
||||
# Report state changes
|
||||
state_changed = bool(added or removed or changed)
|
||||
if state_changed:
|
||||
_report_sync_changes(added, removed, changed, discovered, current_state)
|
||||
else:
|
||||
console.print("[green]✓[/] State is already in sync.")
|
||||
|
||||
if dry_run:
|
||||
console.print("\n[dim](dry-run: no changes made)[/]")
|
||||
return
|
||||
|
||||
# Update state file
|
||||
if state_changed:
|
||||
save_state(cfg, discovered)
|
||||
console.print(f"\n[green]✓[/] State updated: {len(discovered)} services tracked.")
|
||||
|
||||
# Capture image digests for running services
|
||||
if discovered:
|
||||
console.print("\nCapturing image digests...")
|
||||
try:
|
||||
path = _run_async(snapshot_services(cfg, list(discovered.keys()), log_path=log_path))
|
||||
console.print(f"[green]✓[/] Digests written to {path}")
|
||||
except RuntimeError as exc:
|
||||
err_console.print(f"[yellow]![/] {exc}")
|
||||
|
||||
|
||||
def _report_config_status(cfg: Config) -> bool:
    """Check and report config vs disk status. Returns True if errors found."""
    configured = set(cfg.services.keys())
    on_disk = cfg.discover_compose_dirs()
    missing_from_config = sorted(on_disk - configured)
    missing_from_disk = sorted(configured - on_disk)

    if missing_from_config:
        console.print(f"\n[yellow]On disk but not in config[/] ({len(missing_from_config)}):")
        for name in missing_from_config:
            console.print(f"  [yellow]+[/] [cyan]{name}[/]")

    if missing_from_disk:
        console.print(f"\n[red]In config but no compose file[/] ({len(missing_from_disk)}):")
        for name in missing_from_disk:
            console.print(f"  [red]-[/] [cyan]{name}[/]")

    if not missing_from_config and not missing_from_disk:
        console.print("[green]✓[/] Config matches disk")

    return bool(missing_from_disk)


def _report_traefik_status(cfg: Config, services: list[str]) -> None:
    """Check and report traefik label status."""
    try:
        _, warnings = generate_traefik_config(cfg, services, check_all=True)
    except (FileNotFoundError, ValueError):
        return

    if warnings:
        console.print(f"\n[yellow]Traefik issues[/] ({len(warnings)}):")
        for warning in warnings:
            console.print(f"  [yellow]![/] {warning}")
    else:
        console.print("[green]✓[/] Traefik labels valid")


def _report_mount_errors(mount_errors: list[tuple[str, str, str]]) -> None:
    """Report mount errors grouped by service."""
    by_service: dict[str, list[tuple[str, str]]] = {}
    for svc, host, path in mount_errors:
        by_service.setdefault(svc, []).append((host, path))

    console.print(f"\n[red]Missing mounts[/] ({len(mount_errors)}):")
    for svc, items in sorted(by_service.items()):
        host = items[0][0]
        paths = [p for _, p in items]
        console.print(f"  [cyan]{svc}[/] on [magenta]{host}[/]:")
        for path in paths:
            console.print(f"    [red]✗[/] {path}")


def _report_network_errors(network_errors: list[tuple[str, str, str]]) -> None:
    """Report network errors grouped by service."""
    by_service: dict[str, list[tuple[str, str]]] = {}
    for svc, host, net in network_errors:
        by_service.setdefault(svc, []).append((host, net))

    console.print(f"\n[red]Missing networks[/] ({len(network_errors)}):")
    for svc, items in sorted(by_service.items()):
        host = items[0][0]
        networks = [n for _, n in items]
        console.print(f"  [cyan]{svc}[/] on [magenta]{host}[/]:")
        for net in networks:
            console.print(f"    [red]✗[/] {net}")


def _report_host_compatibility(
    compat: dict[str, tuple[int, int, list[str]]],
    current_host: str,
) -> None:
    """Report host compatibility for a service."""
    for host_name, (found, total, missing) in sorted(compat.items()):
        is_current = host_name == current_host
        marker = " [dim](assigned)[/]" if is_current else ""

        if found == total:
            console.print(f"  [green]✓[/] [magenta]{host_name}[/] {found}/{total}{marker}")
        else:
            preview = ", ".join(missing[:MISSING_PATH_PREVIEW_LIMIT])
            if len(missing) > MISSING_PATH_PREVIEW_LIMIT:
                preview += f", +{len(missing) - MISSING_PATH_PREVIEW_LIMIT} more"
            console.print(
                f"  [red]✗[/] [magenta]{host_name}[/] {found}/{total} "
                f"[dim](missing: {preview})[/]{marker}"
            )


@app.command(rich_help_panel="Configuration")
def check(
    services: ServicesArg = None,
    local: Annotated[
        bool,
        typer.Option("--local", help="Skip SSH-based checks (faster)"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Validate configuration, traefik labels, mounts, and networks.

    Without arguments: validates all services against configured hosts.
    With service arguments: validates specific services and shows host compatibility.

    Use --local to skip SSH-based checks for faster validation.
    """
    cfg = _load_config_or_exit(config)

    # Determine which services to check and whether to show host compatibility
    if services:
        svc_list = list(services)
        invalid = [s for s in svc_list if s not in cfg.services]
        if invalid:
            for svc in invalid:
                err_console.print(f"[red]✗[/] Service '{svc}' not found in config")
            raise typer.Exit(1)
        show_host_compat = True
    else:
        svc_list = list(cfg.services.keys())
        show_host_compat = False

    # Run checks
    has_errors = _report_config_status(cfg)
    _report_traefik_status(cfg, svc_list)

    if not local:
        console.print("\nChecking mounts and networks...")
        mount_errors = _run_async(check_mounts_on_configured_hosts(cfg, svc_list))
        network_errors = _run_async(check_networks_on_configured_hosts(cfg, svc_list))

        if mount_errors:
            _report_mount_errors(mount_errors)
            has_errors = True
        if network_errors:
            _report_network_errors(network_errors)
            has_errors = True
        if not mount_errors and not network_errors:
            console.print("[green]✓[/] All mounts and networks exist")

        if show_host_compat:
            for service in svc_list:
                console.print(f"\n[bold]Host compatibility for[/] [cyan]{service}[/]:")
                compat = _run_async(check_host_compatibility(cfg, service))
                _report_host_compatibility(compat, cfg.services[service])

    if has_errors:
        raise typer.Exit(1)


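# Editor's note: `check` exits non-zero on any failure, so it can gate CI.
# Sketch (assumes the `cf` entry point is installed):
#
#   import subprocess
#
#   if subprocess.run(["cf", "check", "--local"]).returncode != 0:
#       raise SystemExit("config drift detected")

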
# Default network settings for cross-host Docker networking
DEFAULT_NETWORK_NAME = "mynetwork"
DEFAULT_NETWORK_SUBNET = "172.20.0.0/16"
DEFAULT_NETWORK_GATEWAY = "172.20.0.1"


@app.command("init-network", rich_help_panel="Configuration")
def init_network(
    hosts: Annotated[
        list[str] | None,
        typer.Argument(help="Hosts to create network on (default: all)"),
    ] = None,
    network: Annotated[
        str,
        typer.Option("--network", "-n", help="Network name"),
    ] = DEFAULT_NETWORK_NAME,
    subnet: Annotated[
        str,
        typer.Option("--subnet", "-s", help="Network subnet"),
    ] = DEFAULT_NETWORK_SUBNET,
    gateway: Annotated[
        str,
        typer.Option("--gateway", "-g", help="Network gateway"),
    ] = DEFAULT_NETWORK_GATEWAY,
    config: ConfigOption = None,
) -> None:
    """Create Docker network on hosts with consistent settings.

    Creates an external Docker network that services can use for cross-host
    communication. Uses the same subnet/gateway on all hosts to ensure
    consistent networking.
    """
    cfg = _load_config_or_exit(config)

    target_hosts = list(hosts) if hosts else list(cfg.hosts.keys())
    invalid = [h for h in target_hosts if h not in cfg.hosts]
    if invalid:
        for h in invalid:
            err_console.print(f"[red]✗[/] Host '{h}' not found in config")
        raise typer.Exit(1)

    async def create_network_on_host(host_name: str) -> CommandResult:
        host = cfg.hosts[host_name]
        # Check if network already exists
        check_cmd = f"docker network inspect '{network}' >/dev/null 2>&1"
        check_result = await run_command(host, check_cmd, host_name, stream=False)

        if check_result.success:
            console.print(f"[cyan]\\[{host_name}][/] Network '{network}' already exists")
            return CommandResult(service=host_name, exit_code=0, success=True)

        # Create the network
        create_cmd = (
            f"docker network create "
            f"--driver bridge "
            f"--subnet '{subnet}' "
            f"--gateway '{gateway}' "
            f"'{network}'"
        )
        result = await run_command(host, create_cmd, host_name, stream=False)

        if result.success:
            console.print(f"[cyan]\\[{host_name}][/] [green]✓[/] Created network '{network}'")
        else:
            err_console.print(
                f"[cyan]\\[{host_name}][/] [red]✗[/] Failed to create network: "
                f"{result.stderr.strip()}"
            )

        return result

    async def run_all() -> list[CommandResult]:
        return await asyncio.gather(*[create_network_on_host(h) for h in target_hosts])

    results = _run_async(run_all())
    failed = [r for r in results if not r.success]
    if failed:
        raise typer.Exit(1)


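# Editor's note: per host, the logic above is equivalent to this shell
# one-liner (shown with the defaults above):
#
#   docker network inspect 'mynetwork' >/dev/null 2>&1 \
#     || docker network create --driver bridge \
#          --subnet '172.20.0.0/16' --gateway '172.20.0.1' 'mynetwork'

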
if __name__ == "__main__":
    app()

src/compose_farm/cli/__init__.py (new file, 20 lines)
@@ -0,0 +1,20 @@
"""CLI interface using Typer."""

from __future__ import annotations

# Import command modules to trigger registration via @app.command() decorators
from compose_farm.cli import (
    config,  # noqa: F401
    lifecycle,  # noqa: F401
    management,  # noqa: F401
    monitoring,  # noqa: F401
    web,  # noqa: F401
)

# Import the shared app instance
from compose_farm.cli.app import app

__all__ = ["app"]

if __name__ == "__main__":
    app()

src/compose_farm/cli/app.py (new file, 42 lines)
@@ -0,0 +1,42 @@
"""Shared Typer app instance."""

from __future__ import annotations

from typing import Annotated

import typer

from compose_farm import __version__

__all__ = ["app"]


def _version_callback(value: bool) -> None:
    """Print version and exit."""
    if value:
        typer.echo(f"compose-farm {__version__}")
        raise typer.Exit


app = typer.Typer(
    name="compose-farm",
    help="Compose Farm - run docker compose commands across multiple hosts",
    no_args_is_help=True,
    context_settings={"help_option_names": ["-h", "--help"]},
)


@app.callback()
def main(
    version: Annotated[
        bool,
        typer.Option(
            "--version",
            "-v",
            help="Show version and exit",
            callback=_version_callback,
            is_eager=True,
        ),
    ] = False,
) -> None:
    """Compose Farm - run docker compose commands across multiple hosts."""
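
# Editor's sketch (not in the diff): the eager callback makes `--version`
# short-circuit before any command runs.
#
#   from typer.testing import CliRunner
#   from compose_farm.cli.app import app
#
#   result = CliRunner().invoke(app, ["--version"])
#   assert result.exit_code == 0 and "compose-farm" in result.output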

src/compose_farm/cli/common.py (new file, 243 lines)
@@ -0,0 +1,243 @@
"""Shared CLI helpers, options, and utilities."""

from __future__ import annotations

import asyncio
import contextlib
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, TypeVar

import typer
from rich.progress import (
    BarColumn,
    MofNCompleteColumn,
    Progress,
    SpinnerColumn,
    TaskID,
    TextColumn,
    TimeElapsedColumn,
)

from compose_farm.console import console, err_console

if TYPE_CHECKING:
    from collections.abc import Callable, Coroutine, Generator

    from compose_farm.config import Config
    from compose_farm.executor import CommandResult

_T = TypeVar("_T")


# --- Shared CLI Options ---
ServicesArg = Annotated[
    list[str] | None,
    typer.Argument(help="Services to operate on"),
]
AllOption = Annotated[
    bool,
    typer.Option("--all", "-a", help="Run on all services"),
]
ConfigOption = Annotated[
    Path | None,
    typer.Option("--config", "-c", help="Path to config file"),
]
LogPathOption = Annotated[
    Path | None,
    typer.Option("--log-path", "-l", help="Path to Dockerfarm TOML log"),
]
HostOption = Annotated[
    str | None,
    typer.Option("--host", "-H", help="Filter to services on this host"),
]

# --- Constants (internal) ---
_MISSING_PATH_PREVIEW_LIMIT = 2
_STATS_PREVIEW_LIMIT = 3  # Max number of pending migrations to show by name


@contextlib.contextmanager
def progress_bar(
    label: str, total: int, *, initial_description: str = "[dim]connecting...[/]"
) -> Generator[tuple[Progress, TaskID], None, None]:
    """Create a standardized progress bar with consistent styling.

    Yields (progress, task_id). Use progress.update(task_id, advance=1, description=...)
    to advance.
    """
    with Progress(
        SpinnerColumn(),
        TextColumn(f"[bold blue]{label}[/]"),
        BarColumn(),
        MofNCompleteColumn(),
        TextColumn("•"),
        TimeElapsedColumn(),
        TextColumn("•"),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task(initial_description, total=total)
        yield progress, task_id


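# Usage sketch (editor's illustration, mirroring the management commands):
# advance the bar as async tasks complete. `check(name)` here is a
# hypothetical coroutine returning its name; `names` is any list of strings.
#
#   async def run_all(progress, task_id, names):
#       tasks = [asyncio.create_task(check(n)) for n in names]
#       for coro in asyncio.as_completed(tasks):
#           name = await coro
#           progress.update(task_id, advance=1, description=f"[cyan]{name}[/]")
#
#   with progress_bar("Checking", len(names)) as (progress, task_id):
#       asyncio.run(run_all(progress, task_id, names))

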
def load_config_or_exit(config_path: Path | None) -> Config:
    """Load config or exit with a friendly error message."""
    # Lazy import: pydantic adds ~50ms to startup, only load when actually needed
    from compose_farm.config import load_config  # noqa: PLC0415

    try:
        return load_config(config_path)
    except FileNotFoundError as e:
        err_console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1) from e


def get_services(
    services: list[str],
    all_services: bool,
    config_path: Path | None,
) -> tuple[list[str], Config]:
    """Resolve service list and load config.

    Supports "." as shorthand for the current directory name.
    """
    config = load_config_or_exit(config_path)

    if all_services:
        return list(config.services.keys()), config
    if not services:
        err_console.print("[red]✗[/] Specify services or use --all")
        raise typer.Exit(1)

    # Resolve "." to current directory name
    resolved = [Path.cwd().name if svc == "." else svc for svc in services]

    # Validate all services exist in config
    unknown = [svc for svc in resolved if svc not in config.services]
    if unknown:
        for svc in unknown:
            err_console.print(f"[red]✗[/] Unknown service: [cyan]{svc}[/]")
        err_console.print("[dim]Hint: Add the service to compose-farm.yaml or use --all[/]")
        raise typer.Exit(1)

    return resolved, config


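# Example (editor's illustration): "." resolves to the current directory name,
# so running from a directory named "plex" targets the service "plex",
# assuming that name exists in the config:
#
#   svc_list, cfg = get_services(["."], all_services=False, config_path=None)
#   # svc_list == ["plex"] when Path.cwd().name == "plex"

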
def run_async(coro: Coroutine[None, None, _T]) -> _T:
    """Run async coroutine."""
    try:
        return asyncio.run(coro)
    except KeyboardInterrupt:
        console.print("\n[yellow]Interrupted[/]")
        raise typer.Exit(130) from None  # Standard exit code for SIGINT


def report_results(results: list[CommandResult]) -> None:
    """Report command results and exit with appropriate code."""
    succeeded = [r for r in results if r.success]
    failed = [r for r in results if not r.success]

    # Always print summary when there are multiple results
    if len(results) > 1:
        console.print()  # Blank line before summary
        if failed:
            for r in failed:
                err_console.print(
                    f"[red]✗[/] [cyan]{r.service}[/] failed with exit code {r.exit_code}"
                )
            console.print()
            console.print(
                f"[green]✓[/] {len(succeeded)}/{len(results)} services succeeded, "
                f"[red]✗[/] {len(failed)} failed"
            )
        else:
            console.print(f"[green]✓[/] All {len(results)} services succeeded")

    elif failed:
        # Single service failed
        r = failed[0]
        err_console.print(f"[red]✗[/] [cyan]{r.service}[/] failed with exit code {r.exit_code}")

    if failed:
        raise typer.Exit(1)


def maybe_regenerate_traefik(
    cfg: Config,
    results: list[CommandResult] | None = None,
) -> None:
    """Regenerate traefik config if traefik_file is configured.

    If results are provided, skips regeneration if all services failed.
    """
    if cfg.traefik_file is None:
        return

    # Skip if all services failed
    if results and not any(r.success for r in results):
        return

    # Lazy import: traefik/yaml adds startup time, only load when traefik_file is configured
    from compose_farm.traefik import (  # noqa: PLC0415
        generate_traefik_config,
        render_traefik_config,
    )

    try:
        dynamic, warnings = generate_traefik_config(cfg, list(cfg.services.keys()))
        new_content = render_traefik_config(dynamic)

        # Check if content changed
        old_content = ""
        if cfg.traefik_file.exists():
            old_content = cfg.traefik_file.read_text()

        if new_content != old_content:
            cfg.traefik_file.parent.mkdir(parents=True, exist_ok=True)
            cfg.traefik_file.write_text(new_content)
            console.print()  # Ensure we're on a new line after streaming output
            console.print(f"[green]✓[/] Traefik config updated: {cfg.traefik_file}")

        for warning in warnings:
            err_console.print(f"[yellow]![/] {warning}")
    except (FileNotFoundError, ValueError) as exc:
        err_console.print(f"[yellow]![/] Failed to update traefik config: {exc}")


def validate_host_for_service(cfg: Config, service: str, host: str) -> None:
    """Validate that a host is valid for a service."""
    if host not in cfg.hosts:
        err_console.print(f"[red]✗[/] Host '{host}' not found in config")
        raise typer.Exit(1)
    allowed_hosts = cfg.get_hosts(service)
    if host not in allowed_hosts:
        err_console.print(
            f"[red]✗[/] Service '{service}' is not configured for host '{host}' "
            f"(configured: {', '.join(allowed_hosts)})"
        )
        raise typer.Exit(1)


def run_host_operation(
    cfg: Config,
    svc_list: list[str],
    host: str,
    command: str,
    action_verb: str,
    state_callback: Callable[[Config, str, str], None],
) -> None:
    """Run an operation on a specific host for multiple services."""
    from compose_farm.executor import run_compose_on_host  # noqa: PLC0415

    results: list[CommandResult] = []
    for service in svc_list:
        validate_host_for_service(cfg, service, host)
        console.print(f"[cyan]\\[{service}][/] {action_verb} on [magenta]{host}[/]...")
        result = run_async(run_compose_on_host(cfg, service, host, command, raw=True))
        print()  # Newline after raw output
        results.append(result)
        if result.success:
            state_callback(cfg, service, host)
    maybe_regenerate_traefik(cfg, results)
    report_results(results)

src/compose_farm/cli/config.py (new file, 313 lines)
@@ -0,0 +1,313 @@
"""Configuration management commands for compose-farm."""

from __future__ import annotations

import os
import platform
import shlex
import shutil
import subprocess
from importlib import resources
from pathlib import Path
from typing import Annotated

import typer

from compose_farm.cli.app import app
from compose_farm.console import console, err_console
from compose_farm.paths import config_search_paths, default_config_path, find_config_path

config_app = typer.Typer(
    name="config",
    help="Manage compose-farm configuration files.",
    no_args_is_help=True,
)


# --- CLI Options (same pattern as cli.py) ---
_PathOption = Annotated[
    Path | None,
    typer.Option("--path", "-p", help="Path to config file. Uses auto-detection if not specified."),
]
_ForceOption = Annotated[
    bool,
    typer.Option("--force", "-f", help="Overwrite existing config without confirmation."),
]
_RawOption = Annotated[
    bool,
    typer.Option("--raw", "-r", help="Output raw file contents (for copy-paste)."),
]


def _get_editor() -> str:
    """Get the user's preferred editor.

    Checks $EDITOR, then $VISUAL, then falls back to platform defaults.
    """
    for env_var in ("EDITOR", "VISUAL"):
        editor = os.environ.get(env_var)
        if editor:
            return editor

    if platform.system() == "Windows":
        return "notepad"

    # Try common editors on Unix-like systems
    for editor in ("nano", "vim", "vi"):
        if shutil.which(editor):
            return editor

    return "vi"


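# Editor's note: EDITOR takes precedence over VISUAL here, and multi-word
# values work because config_edit below splits them with shlex, e.g.
# EDITOR="code --wait" becomes ["code", "--wait", "<config file>"].

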
def _generate_template() -> str:
    """Generate a config template with documented schema."""
    try:
        template_file = resources.files("compose_farm") / "example-config.yaml"
        return template_file.read_text(encoding="utf-8")
    except FileNotFoundError as e:
        err_console.print("[red]Example config template is missing from the package.[/red]")
        err_console.print("Reinstall compose-farm or report this issue.")
        raise typer.Exit(1) from e


def _get_config_file(path: Path | None) -> Path | None:
    """Resolve config path, or auto-detect from standard locations."""
    if path:
        return path.expanduser().resolve()

    config_path = find_config_path()
    return config_path.resolve() if config_path else None


@config_app.command("init")
def config_init(
    path: _PathOption = None,
    force: _ForceOption = False,
) -> None:
    """Create a new config file with documented example.

    The generated config file serves as a template showing all available
    options with explanatory comments.
    """
    target_path = (path.expanduser().resolve() if path else None) or default_config_path()

    if target_path.exists() and not force:
        console.print(
            f"[bold yellow]Config file already exists at:[/bold yellow] [cyan]{target_path}[/cyan]",
        )
        if not typer.confirm("Overwrite existing config file?"):
            console.print("[dim]Aborted.[/dim]")
            raise typer.Exit(0)

    # Create parent directories
    target_path.parent.mkdir(parents=True, exist_ok=True)

    # Generate and write template
    template_content = _generate_template()
    target_path.write_text(template_content, encoding="utf-8")

    console.print(f"[green]✓[/] Config file created at: {target_path}")
    console.print("\n[dim]Edit the file to customize your settings:[/dim]")
    console.print("  [cyan]cf config edit[/cyan]")


@config_app.command("edit")
def config_edit(
    path: _PathOption = None,
) -> None:
    """Open the config file in your default editor.

    The editor is determined by: $EDITOR > $VISUAL > platform default.
    """
    config_file = _get_config_file(path)

    if config_file is None:
        console.print("[yellow]No config file found.[/yellow]")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        console.print("\nSearched locations:")
        for p in config_search_paths():
            console.print(f"  - {p}")
        raise typer.Exit(1)

    if not config_file.exists():
        console.print("[yellow]Config file not found.[/yellow]")
        console.print(f"\nProvided path does not exist: [cyan]{config_file}[/cyan]")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        raise typer.Exit(1)

    editor = _get_editor()
    console.print(f"[dim]Opening {config_file} with {editor}...[/dim]")

    try:
        editor_cmd = shlex.split(editor, posix=os.name != "nt")
    except ValueError as e:
        err_console.print("[red]Invalid editor command. Check $EDITOR/$VISUAL.[/red]")
        raise typer.Exit(1) from e

    if not editor_cmd:
        err_console.print("[red]Editor command is empty.[/red]")
        raise typer.Exit(1)

    try:
        subprocess.run([*editor_cmd, str(config_file)], check=True)
    except FileNotFoundError:
        err_console.print(f"[red]Editor '{editor_cmd[0]}' not found.[/red]")
        err_console.print("Set $EDITOR environment variable to your preferred editor.")
        raise typer.Exit(1) from None
    except subprocess.CalledProcessError as e:
        err_console.print(f"[red]Editor exited with error code {e.returncode}[/red]")
        raise typer.Exit(e.returncode) from None


@config_app.command("show")
def config_show(
    path: _PathOption = None,
    raw: _RawOption = False,
) -> None:
    """Display the config file location and contents."""
    config_file = _get_config_file(path)

    if config_file is None:
        console.print("[yellow]No config file found.[/yellow]")
        console.print("\nSearched locations:")
        for p in config_search_paths():
            status = "[green]exists[/green]" if p.exists() else "[dim]not found[/dim]"
            console.print(f"  - {p} ({status})")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        raise typer.Exit(0)

    if not config_file.exists():
        console.print("[yellow]Config file not found.[/yellow]")
        console.print(f"\nProvided path does not exist: [cyan]{config_file}[/cyan]")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        raise typer.Exit(1)

    content = config_file.read_text(encoding="utf-8")

    if raw:
        print(content, end="")
        return

    from rich.syntax import Syntax  # noqa: PLC0415

    console.print(f"[bold green]Config file:[/bold green] [cyan]{config_file}[/cyan]")
    console.print()
    syntax = Syntax(content, "yaml", theme="monokai", line_numbers=True, word_wrap=True)
    console.print(syntax)
    console.print()
    console.print("[dim]Tip: Use -r for copy-paste friendly output[/dim]")


@config_app.command("path")
def config_path(
    path: _PathOption = None,
) -> None:
    """Print the config file path (useful for scripting)."""
    config_file = _get_config_file(path)

    if config_file is None:
        console.print("[yellow]No config file found.[/yellow]")
        console.print("\nSearched locations:")
        for p in config_search_paths():
            status = "[green]exists[/green]" if p.exists() else "[dim]not found[/dim]"
            console.print(f"  - {p} ({status})")
        raise typer.Exit(1)

    # Just print the path for easy piping
    print(config_file)


@config_app.command("validate")
def config_validate(
    path: _PathOption = None,
) -> None:
    """Validate the config file syntax and schema."""
    config_file = _get_config_file(path)

    if config_file is None:
        err_console.print("[red]✗[/] No config file found")
        raise typer.Exit(1)

    # Lazy import: pydantic adds ~50ms to startup, only load when actually needed
    from compose_farm.config import load_config  # noqa: PLC0415

    try:
        cfg = load_config(config_file)
    except FileNotFoundError as e:
        err_console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1) from e
    except Exception as e:
        err_console.print(f"[red]✗[/] Invalid config: {e}")
        raise typer.Exit(1) from e

    console.print(f"[green]✓[/] Valid config: {config_file}")
    console.print(f"  Hosts: {len(cfg.hosts)}")
    console.print(f"  Services: {len(cfg.services)}")


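# Scripting sketch (editor's illustration): `config path` prints a bare path,
# so it composes with shell tools:
#
#   cf config validate && "$EDITOR" "$(cf config path)"

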
@config_app.command("symlink")
|
||||
def config_symlink(
|
||||
target: Annotated[
|
||||
Path | None,
|
||||
typer.Argument(help="Config file to link to. Defaults to ./compose-farm.yaml"),
|
||||
] = None,
|
||||
force: _ForceOption = False,
|
||||
) -> None:
|
||||
"""Create a symlink from the default config location to a config file.
|
||||
|
||||
This makes a local config file discoverable globally without copying.
|
||||
Always uses absolute paths to avoid broken symlinks.
|
||||
|
||||
Examples:
|
||||
cf config symlink # Link to ./compose-farm.yaml
|
||||
cf config symlink /opt/compose/config.yaml # Link to specific file
|
||||
|
||||
"""
|
||||
# Default to compose-farm.yaml in current directory
|
||||
target_path = (target or Path("compose-farm.yaml")).expanduser().resolve()
|
||||
|
||||
if not target_path.exists():
|
||||
err_console.print(f"[red]✗[/] Target config file not found: {target_path}")
|
||||
raise typer.Exit(1)
|
||||
|
||||
if not target_path.is_file():
|
||||
err_console.print(f"[red]✗[/] Target is not a file: {target_path}")
|
||||
raise typer.Exit(1)
|
||||
|
||||
symlink_path = default_config_path()
|
||||
|
||||
# Check if symlink location already exists
|
||||
if symlink_path.exists() or symlink_path.is_symlink():
|
||||
if symlink_path.is_symlink():
|
||||
current_target = symlink_path.resolve() if symlink_path.exists() else None
|
||||
if current_target == target_path:
|
||||
console.print(f"[green]✓[/] Symlink already points to: {target_path}")
|
||||
return
|
||||
# Update existing symlink
|
||||
if not force:
|
||||
existing = symlink_path.readlink()
|
||||
console.print(f"[yellow]Symlink exists:[/] {symlink_path} -> {existing}")
|
||||
if not typer.confirm(f"Update to point to {target_path}?"):
|
||||
console.print("[dim]Aborted.[/dim]")
|
||||
raise typer.Exit(0)
|
||||
symlink_path.unlink()
|
||||
else:
|
||||
# Regular file exists
|
||||
err_console.print(f"[red]✗[/] A regular file exists at: {symlink_path}")
|
||||
err_console.print(" Back it up or remove it first, then retry.")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Create parent directories
|
||||
symlink_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Create symlink with absolute path
|
||||
symlink_path.symlink_to(target_path)
|
||||
|
||||
console.print("[green]✓[/] Created symlink:")
|
||||
console.print(f" {symlink_path}")
|
||||
console.print(f" -> {target_path}")
|
||||
|
||||
|
||||
# Register config subcommand on the shared app
|
||||
app.add_typer(config_app, name="config", rich_help_panel="Configuration")
|
||||

src/compose_farm/cli/lifecycle.py (new file, 299 lines)
@@ -0,0 +1,299 @@
"""Lifecycle commands: up, down, pull, restart, update, apply."""

from __future__ import annotations

from typing import TYPE_CHECKING, Annotated

import typer

if TYPE_CHECKING:
    from compose_farm.config import Config

from compose_farm.cli.app import app
from compose_farm.cli.common import (
    AllOption,
    ConfigOption,
    HostOption,
    ServicesArg,
    get_services,
    load_config_or_exit,
    maybe_regenerate_traefik,
    report_results,
    run_async,
    run_host_operation,
)
from compose_farm.console import console, err_console
from compose_farm.executor import run_on_services, run_sequential_on_services
from compose_farm.operations import stop_orphaned_services, up_services
from compose_farm.state import (
    add_service_to_host,
    get_orphaned_services,
    get_service_host,
    get_services_needing_migration,
    get_services_not_in_state,
    remove_service,
    remove_service_from_host,
)


@app.command(rich_help_panel="Lifecycle")
def up(
    services: ServicesArg = None,
    all_services: AllOption = False,
    host: HostOption = None,
    config: ConfigOption = None,
) -> None:
    """Start services (docker compose up -d). Auto-migrates if host changed."""
    svc_list, cfg = get_services(services or [], all_services, config)

    # Per-host operation: run on specific host only
    if host:
        run_host_operation(cfg, svc_list, host, "up -d", "Starting", add_service_to_host)
        return

    # Normal operation: use up_services with migration logic
    results = run_async(up_services(cfg, svc_list, raw=True))
    maybe_regenerate_traefik(cfg, results)
    report_results(results)


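# Examples (editor's illustration):
#
#   cf up plex             # start on its configured host (migrating if moved)
#   cf up plex --host nas  # pin this run to host "nas" (must be configured)
#   cf up --all            # start every service in the config

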
@app.command(rich_help_panel="Lifecycle")
|
||||
def down(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
orphaned: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
"--orphaned", help="Stop orphaned services (in state but removed from config)"
|
||||
),
|
||||
] = False,
|
||||
host: HostOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Stop services (docker compose down)."""
|
||||
# Handle --orphaned flag
|
||||
if orphaned:
|
||||
if services or all_services or host:
|
||||
err_console.print("[red]✗[/] Cannot use --orphaned with services, --all, or --host")
|
||||
raise typer.Exit(1)
|
||||
|
||||
cfg = load_config_or_exit(config)
|
||||
orphaned_services = get_orphaned_services(cfg)
|
||||
|
||||
if not orphaned_services:
|
||||
console.print("[green]✓[/] No orphaned services to stop")
|
||||
return
|
||||
|
||||
console.print(
|
||||
f"[yellow]Stopping {len(orphaned_services)} orphaned service(s):[/] "
|
||||
f"{', '.join(orphaned_services.keys())}"
|
||||
)
|
||||
results = run_async(stop_orphaned_services(cfg))
|
||||
report_results(results)
|
||||
return
|
||||
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
|
||||
# Per-host operation: run on specific host only
|
||||
if host:
|
||||
run_host_operation(cfg, svc_list, host, "down", "Stopping", remove_service_from_host)
|
||||
return
|
||||
|
||||
# Normal operation
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(run_on_services(cfg, svc_list, "down", raw=raw))
|
||||
|
||||
# Remove from state on success
|
||||
# For multi-host services, result.service is "svc@host", extract base name
|
||||
removed_services: set[str] = set()
|
||||
for result in results:
|
||||
if result.success:
|
||||
base_service = result.service.split("@")[0]
|
||||
if base_service not in removed_services:
|
||||
remove_service(cfg, base_service)
|
||||
removed_services.add(base_service)
|
||||
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
|
||||
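# Editor's note: for a service configured on several hosts the executor
# returns one result per host, e.g. "whoami@nas" and "whoami@vm"; the
# split("@")[0] above maps both back to the base service "whoami".

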
@app.command(rich_help_panel="Lifecycle")
|
||||
def pull(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Pull latest images (docker compose pull)."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(run_on_services(cfg, svc_list, "pull", raw=raw))
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def restart(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Restart services (down + up)."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(run_sequential_on_services(cfg, svc_list, ["down", "up -d"], raw=raw))
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def update(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Update services (pull + build + down + up)."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(
|
||||
run_sequential_on_services(
|
||||
cfg, svc_list, ["pull --ignore-buildable", "build", "down", "up -d"], raw=raw
|
||||
)
|
||||
)
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
|
||||
def _format_host(host: str | list[str]) -> str:
|
||||
"""Format a host value for display."""
|
||||
if isinstance(host, list):
|
||||
return ", ".join(host)
|
||||
return host
|
||||
|
||||
|
||||
def _report_pending_migrations(cfg: Config, migrations: list[str]) -> None:
|
||||
"""Report services that need migration."""
|
||||
console.print(f"[cyan]Services to migrate ({len(migrations)}):[/]")
|
||||
for svc in migrations:
|
||||
current = get_service_host(cfg, svc)
|
||||
target = cfg.get_hosts(svc)[0]
|
||||
console.print(f" [cyan]{svc}[/]: [magenta]{current}[/] → [magenta]{target}[/]")
|
||||
|
||||
|
||||
def _report_pending_orphans(orphaned: dict[str, str | list[str]]) -> None:
|
||||
"""Report orphaned services that will be stopped."""
|
||||
console.print(f"[yellow]Orphaned services to stop ({len(orphaned)}):[/]")
|
||||
for svc, hosts in orphaned.items():
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{_format_host(hosts)}[/]")
|
||||
|
||||
|
||||
def _report_pending_starts(cfg: Config, missing: list[str]) -> None:
|
||||
"""Report services that will be started."""
|
||||
console.print(f"[green]Services to start ({len(missing)}):[/]")
|
||||
for svc in missing:
|
||||
target = _format_host(cfg.get_hosts(svc))
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{target}[/]")
|
||||
|
||||
|
||||
def _report_pending_refresh(cfg: Config, to_refresh: list[str]) -> None:
|
||||
"""Report services that will be refreshed."""
|
||||
console.print(f"[blue]Services to refresh ({len(to_refresh)}):[/]")
|
||||
for svc in to_refresh:
|
||||
target = _format_host(cfg.get_hosts(svc))
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{target}[/]")
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def apply(
|
||||
dry_run: Annotated[
|
||||
bool,
|
||||
typer.Option("--dry-run", "-n", help="Show what would change without executing"),
|
||||
] = False,
|
||||
no_orphans: Annotated[
|
||||
bool,
|
||||
typer.Option("--no-orphans", help="Only migrate, don't stop orphaned services"),
|
||||
] = False,
|
||||
full: Annotated[
|
||||
bool,
|
||||
typer.Option("--full", "-f", help="Also run up on all services to apply config changes"),
|
||||
] = False,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Make reality match config (start, migrate, stop as needed).
|
||||
|
||||
This is the "reconcile" command that ensures running services match your
|
||||
config file. It will:
|
||||
|
||||
1. Stop orphaned services (in state but removed from config)
|
||||
2. Migrate services on wrong host (host in state ≠ host in config)
|
||||
3. Start missing services (in config but not in state)
|
||||
|
||||
Use --dry-run to preview changes before applying.
|
||||
Use --no-orphans to only migrate/start without stopping orphaned services.
|
||||
Use --full to also run 'up' on all services (picks up compose/env changes).
|
||||
"""
|
||||
cfg = load_config_or_exit(config)
|
||||
orphaned = get_orphaned_services(cfg)
|
||||
migrations = get_services_needing_migration(cfg)
|
||||
missing = get_services_not_in_state(cfg)
|
||||
|
||||
# For --full: refresh all services not already being started/migrated
|
||||
handled = set(migrations) | set(missing)
|
||||
to_refresh = [svc for svc in cfg.services if svc not in handled] if full else []
|
||||
|
||||
has_orphans = bool(orphaned) and not no_orphans
|
||||
has_migrations = bool(migrations)
|
||||
has_missing = bool(missing)
|
||||
has_refresh = bool(to_refresh)
|
||||
|
||||
if not has_orphans and not has_migrations and not has_missing and not has_refresh:
|
||||
console.print("[green]✓[/] Nothing to apply - reality matches config")
|
||||
return
|
||||
|
||||
# Report what will be done
|
||||
if has_orphans:
|
||||
_report_pending_orphans(orphaned)
|
||||
if has_migrations:
|
||||
_report_pending_migrations(cfg, migrations)
|
||||
if has_missing:
|
||||
_report_pending_starts(cfg, missing)
|
||||
if has_refresh:
|
||||
_report_pending_refresh(cfg, to_refresh)
|
||||
|
||||
if dry_run:
|
||||
console.print("\n[dim](dry-run: no changes made)[/]")
|
||||
return
|
||||
|
||||
# Execute changes
|
||||
console.print()
|
||||
all_results = []
|
||||
|
||||
# 1. Stop orphaned services first
|
||||
if has_orphans:
|
||||
console.print("[yellow]Stopping orphaned services...[/]")
|
||||
all_results.extend(run_async(stop_orphaned_services(cfg)))
|
||||
|
||||
# 2. Migrate services on wrong host
|
||||
if has_migrations:
|
||||
console.print("[cyan]Migrating services...[/]")
|
||||
migrate_results = run_async(up_services(cfg, migrations, raw=True))
|
||||
all_results.extend(migrate_results)
|
||||
maybe_regenerate_traefik(cfg, migrate_results)
|
||||
|
||||
# 3. Start missing services (reuse up_services which handles state updates)
|
||||
if has_missing:
|
||||
console.print("[green]Starting missing services...[/]")
|
||||
start_results = run_async(up_services(cfg, missing, raw=True))
|
||||
all_results.extend(start_results)
|
||||
maybe_regenerate_traefik(cfg, start_results)
|
||||
|
||||
# 4. Refresh remaining services (--full: run up to apply config changes)
|
||||
if has_refresh:
|
||||
console.print("[blue]Refreshing services...[/]")
|
||||
refresh_results = run_async(up_services(cfg, to_refresh, raw=True))
|
||||
all_results.extend(refresh_results)
|
||||
maybe_regenerate_traefik(cfg, refresh_results)
|
||||
|
||||
report_results(all_results)
|
||||
|
||||
|
||||
# Alias: cf a = cf apply
|
||||
app.command("a", hidden=True)(apply)
|
||||
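
# Typical reconcile loop (editor's sketch):
#
#   cf apply --dry-run   # preview: orphans to stop, migrations, starts
#   cf apply             # execute the plan
#   cf apply --full      # also re-run `up` everywhere to pick up compose/env edits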

src/compose_farm/cli/management.py (new file, 632 lines)
@@ -0,0 +1,632 @@
"""Management commands: sync, check, init-network, traefik-file."""

from __future__ import annotations

import asyncio
from datetime import UTC, datetime
from pathlib import Path  # noqa: TC003
from typing import TYPE_CHECKING, Annotated

import typer
from rich.progress import Progress, TaskID  # noqa: TC002

from compose_farm.cli.app import app
from compose_farm.cli.common import (
    _MISSING_PATH_PREVIEW_LIMIT,
    AllOption,
    ConfigOption,
    LogPathOption,
    ServicesArg,
    get_services,
    load_config_or_exit,
    progress_bar,
    run_async,
)

if TYPE_CHECKING:
    from compose_farm.config import Config

from compose_farm.console import console, err_console
from compose_farm.executor import (
    CommandResult,
    is_local,
    run_command,
)
from compose_farm.logs import (
    DEFAULT_LOG_PATH,
    SnapshotEntry,
    collect_service_entries,
    isoformat,
    load_existing_entries,
    merge_entries,
    write_toml,
)
from compose_farm.operations import (
    check_host_compatibility,
    check_service_requirements,
    discover_service_host,
)
from compose_farm.state import get_orphaned_services, load_state, save_state
from compose_farm.traefik import generate_traefik_config, render_traefik_config

# --- Sync helpers ---


def _discover_services(cfg: Config) -> dict[str, str | list[str]]:
    """Discover running services with a progress bar."""

    async def gather_with_progress(
        progress: Progress, task_id: TaskID
    ) -> dict[str, str | list[str]]:
        tasks = [asyncio.create_task(discover_service_host(cfg, s)) for s in cfg.services]
        discovered: dict[str, str | list[str]] = {}
        for coro in asyncio.as_completed(tasks):
            service, host = await coro
            if host is not None:
                discovered[service] = host
            progress.update(task_id, advance=1, description=f"[cyan]{service}[/]")
        return discovered

    with progress_bar("Discovering", len(cfg.services)) as (progress, task_id):
        return asyncio.run(gather_with_progress(progress, task_id))


def _snapshot_services(
    cfg: Config,
    services: list[str],
    log_path: Path | None,
) -> Path:
    """Capture image digests with a progress bar."""

    async def collect_service(service: str, now: datetime) -> list[SnapshotEntry]:
        try:
            return await collect_service_entries(cfg, service, now=now)
        except RuntimeError:
            return []

    async def gather_with_progress(
        progress: Progress, task_id: TaskID, now: datetime, svc_list: list[str]
    ) -> list[SnapshotEntry]:
        # Map tasks to service names so we can update description
        task_to_service = {asyncio.create_task(collect_service(s, now)): s for s in svc_list}
        all_entries: list[SnapshotEntry] = []
        for coro in asyncio.as_completed(list(task_to_service.keys())):
            entries = await coro
            all_entries.extend(entries)
            # Find which service just completed (by checking done tasks)
            for t, svc in task_to_service.items():
                if t.done() and not hasattr(t, "_reported"):
                    t._reported = True  # type: ignore[attr-defined]
                    progress.update(task_id, advance=1, description=f"[cyan]{svc}[/]")
                    break
        return all_entries

    effective_log_path = log_path or DEFAULT_LOG_PATH
    now_dt = datetime.now(UTC)
    now_iso = isoformat(now_dt)

    with progress_bar("Capturing", len(services)) as (progress, task_id):
        snapshot_entries = asyncio.run(gather_with_progress(progress, task_id, now_dt, services))

    if not snapshot_entries:
        msg = "No image digests were captured"
        raise RuntimeError(msg)

    existing_entries = load_existing_entries(effective_log_path)
    merged_entries = merge_entries(existing_entries, snapshot_entries, now_iso=now_iso)
    meta = {"generated_at": now_iso, "compose_dir": str(cfg.compose_dir)}
    write_toml(effective_log_path, meta=meta, entries=merged_entries)
    return effective_log_path


def _format_host(host: str | list[str]) -> str:
    """Format a host value for display."""
    if isinstance(host, list):
        return ", ".join(host)
    return host


def _report_sync_changes(
    added: list[str],
    removed: list[str],
    changed: list[tuple[str, str | list[str], str | list[str]]],
    discovered: dict[str, str | list[str]],
    current_state: dict[str, str | list[str]],
) -> None:
    """Report sync changes to the user."""
    if added:
        console.print(f"\nNew services found ({len(added)}):")
        for service in sorted(added):
            host_str = _format_host(discovered[service])
            console.print(f"  [green]+[/] [cyan]{service}[/] on [magenta]{host_str}[/]")

    if changed:
        console.print(f"\nServices on different hosts ({len(changed)}):")
        for service, old_host, new_host in sorted(changed):
            old_str = _format_host(old_host)
            new_str = _format_host(new_host)
            console.print(
                f"  [yellow]~[/] [cyan]{service}[/]: [magenta]{old_str}[/] → [magenta]{new_str}[/]"
            )

    if removed:
        console.print(f"\nServices no longer running ({len(removed)}):")
        for service in sorted(removed):
            host_str = _format_host(current_state[service])
            console.print(f"  [red]-[/] [cyan]{service}[/] (was on [magenta]{host_str}[/])")


# --- Check helpers ---


def _check_ssh_connectivity(cfg: Config) -> list[str]:
    """Check SSH connectivity to all hosts. Returns list of unreachable hosts."""
    # Filter out local hosts - no SSH needed
    remote_hosts = [h for h in cfg.hosts if not is_local(cfg.hosts[h])]

    if not remote_hosts:
        return []

    console.print()  # Spacing before progress bar

    async def check_host(host_name: str) -> tuple[str, bool]:
        host = cfg.hosts[host_name]
        result = await run_command(host, "echo ok", host_name, stream=False)
        return host_name, result.success

    async def gather_with_progress(progress: Progress, task_id: TaskID) -> list[str]:
        tasks = [asyncio.create_task(check_host(h)) for h in remote_hosts]
        unreachable: list[str] = []
        for coro in asyncio.as_completed(tasks):
            host_name, success = await coro
            if not success:
                unreachable.append(host_name)
            progress.update(task_id, advance=1, description=f"[cyan]{host_name}[/]")
        return unreachable

    with progress_bar("Checking SSH connectivity", len(remote_hosts)) as (progress, task_id):
        return asyncio.run(gather_with_progress(progress, task_id))


def _check_service_requirements(
|
||||
cfg: Config,
|
||||
services: list[str],
|
||||
) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]], list[tuple[str, str, str]]]:
|
||||
"""Check mounts, networks, and devices for all services with a progress bar.
|
||||
|
||||
Returns (mount_errors, network_errors, device_errors) where each is a list of
|
||||
(service, host, missing_item) tuples.
|
||||
"""
|
||||
|
||||
async def check_service(
|
||||
service: str,
|
||||
) -> tuple[
|
||||
str,
|
||||
list[tuple[str, str, str]],
|
||||
list[tuple[str, str, str]],
|
||||
list[tuple[str, str, str]],
|
||||
]:
|
||||
"""Check requirements for a single service on all its hosts."""
|
||||
host_names = cfg.get_hosts(service)
|
||||
mount_errors: list[tuple[str, str, str]] = []
|
||||
network_errors: list[tuple[str, str, str]] = []
|
||||
device_errors: list[tuple[str, str, str]] = []
|
||||
|
||||
for host_name in host_names:
|
||||
missing_paths, missing_nets, missing_devs = await check_service_requirements(
|
||||
cfg, service, host_name
|
||||
)
|
||||
mount_errors.extend((service, host_name, p) for p in missing_paths)
|
||||
network_errors.extend((service, host_name, n) for n in missing_nets)
|
||||
device_errors.extend((service, host_name, d) for d in missing_devs)
|
||||
|
||||
return service, mount_errors, network_errors, device_errors
|
||||
|
||||
async def gather_with_progress(
|
||||
progress: Progress, task_id: TaskID
|
||||
) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]], list[tuple[str, str, str]]]:
|
||||
tasks = [asyncio.create_task(check_service(s)) for s in services]
|
||||
all_mount_errors: list[tuple[str, str, str]] = []
|
||||
all_network_errors: list[tuple[str, str, str]] = []
|
||||
all_device_errors: list[tuple[str, str, str]] = []
|
||||
|
||||
for coro in asyncio.as_completed(tasks):
|
||||
service, mount_errs, net_errs, dev_errs = await coro
|
||||
all_mount_errors.extend(mount_errs)
|
||||
all_network_errors.extend(net_errs)
|
||||
all_device_errors.extend(dev_errs)
|
||||
progress.update(task_id, advance=1, description=f"[cyan]{service}[/]")
|
||||
|
||||
return all_mount_errors, all_network_errors, all_device_errors
|
||||
|
||||
with progress_bar(
|
||||
"Checking requirements", len(services), initial_description="[dim]checking...[/]"
|
||||
) as (progress, task_id):
|
||||
return asyncio.run(gather_with_progress(progress, task_id))
|
||||
|
||||
|
||||
def _report_config_status(cfg: Config) -> bool:
|
||||
"""Check and report config vs disk status. Returns True if errors found."""
|
||||
configured = set(cfg.services.keys())
|
||||
on_disk = cfg.discover_compose_dirs()
|
||||
unmanaged = sorted(on_disk - configured)
|
||||
missing_from_disk = sorted(configured - on_disk)
|
||||
|
||||
if unmanaged:
|
||||
console.print(f"\n[yellow]Unmanaged[/] (on disk but not in config, {len(unmanaged)}):")
|
||||
for name in unmanaged:
|
||||
console.print(f" [yellow]+[/] [cyan]{name}[/]")
|
||||
|
||||
if missing_from_disk:
|
||||
console.print(f"\n[red]In config but no compose file[/] ({len(missing_from_disk)}):")
|
||||
for name in missing_from_disk:
|
||||
console.print(f" [red]-[/] [cyan]{name}[/]")
|
||||
|
||||
if not unmanaged and not missing_from_disk:
|
||||
console.print("[green]✓[/] Config matches disk")
|
||||
|
||||
return bool(missing_from_disk)
|
||||
|
||||
|
||||
def _report_orphaned_services(cfg: Config) -> bool:
|
||||
"""Check for services in state but not in config. Returns True if orphans found."""
|
||||
orphaned = get_orphaned_services(cfg)
|
||||
|
||||
if orphaned:
|
||||
console.print("\n[yellow]Orphaned services[/] (in state but not in config):")
|
||||
console.print(
|
||||
"[dim]Run 'cf apply' to stop them, or 'cf down --orphaned' for just orphans.[/]"
|
||||
)
|
||||
for name, hosts in sorted(orphaned.items()):
|
||||
host_str = ", ".join(hosts) if isinstance(hosts, list) else hosts
|
||||
console.print(f" [yellow]![/] [cyan]{name}[/] on [magenta]{host_str}[/]")
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _report_traefik_status(cfg: Config, services: list[str]) -> None:
|
||||
"""Check and report traefik label status."""
|
||||
try:
|
||||
_, warnings = generate_traefik_config(cfg, services, check_all=True)
|
||||
except (FileNotFoundError, ValueError):
|
||||
return
|
||||
|
||||
if warnings:
|
||||
console.print(f"\n[yellow]Traefik issues[/] ({len(warnings)}):")
|
||||
for warning in warnings:
|
||||
console.print(f" [yellow]![/] {warning}")
|
||||
else:
|
||||
console.print("[green]✓[/] Traefik labels valid")
|
||||
|
||||
|
||||
def _report_mount_errors(mount_errors: list[tuple[str, str, str]]) -> None:
|
||||
"""Report mount errors grouped by service."""
|
||||
by_service: dict[str, list[tuple[str, str]]] = {}
|
||||
for svc, host, path in mount_errors:
|
||||
by_service.setdefault(svc, []).append((host, path))
|
||||
|
||||
console.print(f"[red]Missing mounts[/] ({len(mount_errors)}):")
|
||||
for svc, items in sorted(by_service.items()):
|
||||
host = items[0][0]
|
||||
paths = [p for _, p in items]
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{host}[/]:")
|
||||
for path in paths:
|
||||
console.print(f" [red]✗[/] {path}")
|
||||
|
||||
|
||||
def _report_network_errors(network_errors: list[tuple[str, str, str]]) -> None:
|
||||
"""Report network errors grouped by service."""
|
||||
by_service: dict[str, list[tuple[str, str]]] = {}
|
||||
for svc, host, net in network_errors:
|
||||
by_service.setdefault(svc, []).append((host, net))
|
||||
|
||||
console.print(f"[red]Missing networks[/] ({len(network_errors)}):")
|
||||
for svc, items in sorted(by_service.items()):
|
||||
host = items[0][0]
|
||||
networks = [n for _, n in items]
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{host}[/]:")
|
||||
for net in networks:
|
||||
console.print(f" [red]✗[/] {net}")
|
||||
|
||||
|
||||
def _report_device_errors(device_errors: list[tuple[str, str, str]]) -> None:
|
||||
"""Report device errors grouped by service."""
|
||||
by_service: dict[str, list[tuple[str, str]]] = {}
|
||||
for svc, host, dev in device_errors:
|
||||
by_service.setdefault(svc, []).append((host, dev))
|
||||
|
||||
console.print(f"[red]Missing devices[/] ({len(device_errors)}):")
|
||||
for svc, items in sorted(by_service.items()):
|
||||
host = items[0][0]
|
||||
devices = [d for _, d in items]
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{host}[/]:")
|
||||
for dev in devices:
|
||||
console.print(f" [red]✗[/] {dev}")
|
||||
|
||||
|
||||
def _report_ssh_status(unreachable_hosts: list[str]) -> bool:
|
||||
"""Report SSH connectivity status. Returns True if there are errors."""
|
||||
if unreachable_hosts:
|
||||
console.print(f"[red]Unreachable hosts[/] ({len(unreachable_hosts)}):")
|
||||
for host in sorted(unreachable_hosts):
|
||||
console.print(f" [red]✗[/] [magenta]{host}[/]")
|
||||
return True
|
||||
console.print("[green]✓[/] All hosts reachable")
|
||||
return False
|
||||
|
||||
|
||||
def _report_host_compatibility(
|
||||
compat: dict[str, tuple[int, int, list[str]]],
|
||||
assigned_hosts: list[str],
|
||||
) -> None:
|
||||
"""Report host compatibility for a service."""
|
||||
for host_name, (found, total, missing) in sorted(compat.items()):
|
||||
is_assigned = host_name in assigned_hosts
|
||||
marker = " [dim](assigned)[/]" if is_assigned else ""
|
||||
|
||||
if found == total:
|
||||
console.print(f" [green]✓[/] [magenta]{host_name}[/] {found}/{total}{marker}")
|
||||
else:
|
||||
preview = ", ".join(missing[:_MISSING_PATH_PREVIEW_LIMIT])
|
||||
if len(missing) > _MISSING_PATH_PREVIEW_LIMIT:
|
||||
preview += f", +{len(missing) - _MISSING_PATH_PREVIEW_LIMIT} more"
|
||||
console.print(
|
||||
f" [red]✗[/] [magenta]{host_name}[/] {found}/{total} "
|
||||
f"[dim](missing: {preview})[/]{marker}"
|
||||
)
|
||||
|
||||
|
||||
def _run_remote_checks(cfg: Config, svc_list: list[str], *, show_host_compat: bool) -> bool:
|
||||
"""Run SSH-based checks for mounts, networks, and host compatibility.
|
||||
|
||||
Returns True if any errors were found.
|
||||
"""
|
||||
has_errors = False
|
||||
|
||||
# Check SSH connectivity first
|
||||
if _report_ssh_status(_check_ssh_connectivity(cfg)):
|
||||
has_errors = True
|
||||
|
||||
console.print() # Spacing before mounts/networks check
|
||||
|
||||
# Check mounts, networks, and devices
|
||||
mount_errors, network_errors, device_errors = _check_service_requirements(cfg, svc_list)
|
||||
|
||||
if mount_errors:
|
||||
_report_mount_errors(mount_errors)
|
||||
has_errors = True
|
||||
if network_errors:
|
||||
_report_network_errors(network_errors)
|
||||
has_errors = True
|
||||
if device_errors:
|
||||
_report_device_errors(device_errors)
|
||||
has_errors = True
|
||||
if not mount_errors and not network_errors and not device_errors:
|
||||
console.print("[green]✓[/] All mounts, networks, and devices exist")
|
||||
|
||||
if show_host_compat:
|
||||
for service in svc_list:
|
||||
console.print(f"\n[bold]Host compatibility for[/] [cyan]{service}[/]:")
|
||||
compat = run_async(check_host_compatibility(cfg, service))
|
||||
assigned_hosts = cfg.get_hosts(service)
|
||||
_report_host_compatibility(compat, assigned_hosts)
|
||||
|
||||
return has_errors
|
||||
|
||||
|
||||
# Default network settings for cross-host Docker networking
_DEFAULT_NETWORK_NAME = "mynetwork"
_DEFAULT_NETWORK_SUBNET = "172.20.0.0/16"
_DEFAULT_NETWORK_GATEWAY = "172.20.0.1"


@app.command("traefik-file", rich_help_panel="Configuration")
def traefik_file(
    services: ServicesArg = None,
    all_services: AllOption = False,
    output: Annotated[
        Path | None,
        typer.Option(
            "--output",
            "-o",
            help="Write Traefik file-provider YAML to this path (stdout if omitted)",
        ),
    ] = None,
    config: ConfigOption = None,
) -> None:
    """Generate a Traefik file-provider fragment from compose Traefik labels."""
    svc_list, cfg = get_services(services or [], all_services, config)
    try:
        dynamic, warnings = generate_traefik_config(cfg, svc_list)
    except (FileNotFoundError, ValueError) as exc:
        err_console.print(f"[red]✗[/] {exc}")
        raise typer.Exit(1) from exc

    rendered = render_traefik_config(dynamic)

    if output:
        output.parent.mkdir(parents=True, exist_ok=True)
        output.write_text(rendered)
        console.print(f"[green]✓[/] Traefik config written to {output}")
    else:
        console.print(rendered)

    for warning in warnings:
        err_console.print(f"[yellow]![/] {warning}")


@app.command(rich_help_panel="Configuration")
def refresh(
    config: ConfigOption = None,
    log_path: LogPathOption = None,
    dry_run: Annotated[
        bool,
        typer.Option("--dry-run", "-n", help="Show what would change without writing"),
    ] = False,
) -> None:
    """Update local state from running services.

    Discovers which services are running on which hosts, updates the state
    file, and captures image digests. This is a read operation - it updates
    your local state to match reality, not the other way around.

    Use 'cf apply' to make reality match your config (stop orphans, migrate).
    """
    cfg = load_config_or_exit(config)
    current_state = load_state(cfg)

    discovered = _discover_services(cfg)

    # Calculate changes
    added = [s for s in discovered if s not in current_state]
    removed = [s for s in current_state if s not in discovered]
    changed = [
        (s, current_state[s], discovered[s])
        for s in discovered
        if s in current_state and current_state[s] != discovered[s]
    ]

    # Report state changes
    state_changed = bool(added or removed or changed)
    if state_changed:
        _report_sync_changes(added, removed, changed, discovered, current_state)
    else:
        console.print("[green]✓[/] State is already in sync.")

    if dry_run:
        console.print("\n[dim](dry-run: no changes made)[/]")
        return

    # Update state file
    if state_changed:
        save_state(cfg, discovered)
        console.print(f"\n[green]✓[/] State updated: {len(discovered)} services tracked.")

    # Capture image digests for running services
    if discovered:
        try:
            path = _snapshot_services(cfg, list(discovered.keys()), log_path)
            console.print(f"[green]✓[/] Digests written to {path}")
        except RuntimeError as exc:
            err_console.print(f"[yellow]![/] {exc}")
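Illustrative sketch (not part of the diff) of the add/remove/change computation inside `refresh`, assuming plain dicts mapping service names to hosts:

    current = {"nginx": "server1", "postgres": "server2"}
    discovered = {"nginx": "server2", "redis": "server1"}
    added = [s for s in discovered if s not in current]    # ["redis"]
    removed = [s for s in current if s not in discovered]  # ["postgres"]
    changed = [
        (s, current[s], discovered[s])
        for s in discovered
        if s in current and current[s] != discovered[s]
    ]  # [("nginx", "server1", "server2")]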
@app.command(rich_help_panel="Configuration")
def check(
    services: ServicesArg = None,
    local: Annotated[
        bool,
        typer.Option("--local", help="Skip SSH-based checks (faster)"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Validate configuration, traefik labels, mounts, and networks.

    Without arguments: validates all services against configured hosts.
    With service arguments: validates specific services and shows host compatibility.

    Use --local to skip SSH-based checks for faster validation.
    """
    cfg = load_config_or_exit(config)

    # Determine which services to check and whether to show host compatibility
    if services:
        svc_list = list(services)
        invalid = [s for s in svc_list if s not in cfg.services]
        if invalid:
            for svc in invalid:
                err_console.print(f"[red]✗[/] Service '{svc}' not found in config")
            raise typer.Exit(1)
        show_host_compat = True
    else:
        svc_list = list(cfg.services.keys())
        show_host_compat = False

    # Run checks
    has_errors = _report_config_status(cfg)
    _report_traefik_status(cfg, svc_list)

    if not local and _run_remote_checks(cfg, svc_list, show_host_compat=show_host_compat):
        has_errors = True

    # Check for orphaned services (in state but removed from config)
    if _report_orphaned_services(cfg):
        has_errors = True

    if has_errors:
        raise typer.Exit(1)


@app.command("init-network", rich_help_panel="Configuration")
def init_network(
    hosts: Annotated[
        list[str] | None,
        typer.Argument(help="Hosts to create network on (default: all)"),
    ] = None,
    network: Annotated[
        str,
        typer.Option("--network", "-n", help="Network name"),
    ] = _DEFAULT_NETWORK_NAME,
    subnet: Annotated[
        str,
        typer.Option("--subnet", "-s", help="Network subnet"),
    ] = _DEFAULT_NETWORK_SUBNET,
    gateway: Annotated[
        str,
        typer.Option("--gateway", "-g", help="Network gateway"),
    ] = _DEFAULT_NETWORK_GATEWAY,
    config: ConfigOption = None,
) -> None:
    """Create Docker network on hosts with consistent settings.

    Creates an external Docker network that services can use for cross-host
    communication. Uses the same subnet/gateway on all hosts to ensure
    consistent networking.
    """
    cfg = load_config_or_exit(config)

    target_hosts = list(hosts) if hosts else list(cfg.hosts.keys())
    invalid = [h for h in target_hosts if h not in cfg.hosts]
    if invalid:
        for h in invalid:
            err_console.print(f"[red]✗[/] Host '{h}' not found in config")
        raise typer.Exit(1)

    async def create_network_on_host(host_name: str) -> CommandResult:
        host = cfg.hosts[host_name]
        # Check if network already exists
        check_cmd = f"docker network inspect '{network}' >/dev/null 2>&1"
        check_result = await run_command(host, check_cmd, host_name, stream=False)

        if check_result.success:
            console.print(f"[cyan]\\[{host_name}][/] Network '{network}' already exists")
            return CommandResult(service=host_name, exit_code=0, success=True)

        # Create the network
        create_cmd = (
            f"docker network create "
            f"--driver bridge "
            f"--subnet '{subnet}' "
            f"--gateway '{gateway}' "
            f"'{network}'"
        )
        result = await run_command(host, create_cmd, host_name, stream=False)

        if result.success:
            console.print(f"[cyan]\\[{host_name}][/] [green]✓[/] Created network '{network}'")
        else:
            err_console.print(
                f"[cyan]\\[{host_name}][/] [red]✗[/] Failed to create network: "
                f"{result.stderr.strip()}"
            )

        return result

    async def run_all() -> list[CommandResult]:
        return await asyncio.gather(*[create_network_on_host(h) for h in target_hosts])

    results = run_async(run_all())
    failed = [r for r in results if not r.success]
    if failed:
        raise typer.Exit(1)
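Illustrative sketch (not part of the diff): with the defaults above, `init_network` effectively runs this docker command on each target host:

    network, subnet, gateway = "mynetwork", "172.20.0.0/16", "172.20.0.1"
    create_cmd = (
        f"docker network create --driver bridge "
        f"--subnet '{subnet}' --gateway '{gateway}' '{network}'"
    )
    # docker network create --driver bridge --subnet '172.20.0.0/16' \
    #     --gateway '172.20.0.1' 'mynetwork'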
src/compose_farm/cli/monitoring.py (new file, 236 lines)
@@ -0,0 +1,236 @@
"""Monitoring commands: logs, ps, stats."""

from __future__ import annotations

import asyncio
import contextlib
from typing import TYPE_CHECKING, Annotated

import typer
from rich.progress import Progress, TaskID  # noqa: TC002
from rich.table import Table

from compose_farm.cli.app import app
from compose_farm.cli.common import (
    _STATS_PREVIEW_LIMIT,
    AllOption,
    ConfigOption,
    HostOption,
    ServicesArg,
    get_services,
    load_config_or_exit,
    progress_bar,
    report_results,
    run_async,
)
from compose_farm.console import console, err_console
from compose_farm.executor import run_command, run_on_services
from compose_farm.state import get_services_needing_migration, load_state

if TYPE_CHECKING:
    from collections.abc import Mapping

    from compose_farm.config import Config


def _group_services_by_host(
    services: dict[str, str | list[str]],
    hosts: Mapping[str, object],
    all_hosts: list[str] | None = None,
) -> dict[str, list[str]]:
    """Group services by their assigned host(s).

    For multi-host services (list or "all"), the service appears in multiple host lists.
    """
    by_host: dict[str, list[str]] = {h: [] for h in hosts}
    for service, host_value in services.items():
        if isinstance(host_value, list):
            # Explicit list of hosts
            for host_name in host_value:
                if host_name in by_host:
                    by_host[host_name].append(service)
        elif host_value == "all" and all_hosts:
            # "all" keyword - add to all hosts
            for host_name in all_hosts:
                if host_name in by_host:
                    by_host[host_name].append(service)
        elif host_value in by_host:
            # Single host
            by_host[host_value].append(service)
    return by_host
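Illustrative example (not part of the diff) of `_group_services_by_host` with invented names:

    services = {"nginx": "server1", "prometheus": ["server1", "server2"], "agent": "all"}
    _group_services_by_host(services, {"server1": ..., "server2": ...},
                            all_hosts=["server1", "server2"])
    # {"server1": ["nginx", "prometheus", "agent"], "server2": ["prometheus", "agent"]}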
def _get_container_counts(cfg: Config) -> dict[str, int]:
    """Get container counts from all hosts with a progress bar."""

    async def get_count(host_name: str) -> tuple[str, int]:
        host = cfg.hosts[host_name]
        result = await run_command(host, "docker ps -q | wc -l", host_name, stream=False)
        count = 0
        if result.success:
            with contextlib.suppress(ValueError):
                count = int(result.stdout.strip())
        return host_name, count

    async def gather_with_progress(progress: Progress, task_id: TaskID) -> dict[str, int]:
        hosts = list(cfg.hosts.keys())
        tasks = [asyncio.create_task(get_count(h)) for h in hosts]
        results: dict[str, int] = {}
        for coro in asyncio.as_completed(tasks):
            host_name, count = await coro
            results[host_name] = count
            progress.update(task_id, advance=1, description=f"[cyan]{host_name}[/]")
        return results

    with progress_bar("Querying hosts", len(cfg.hosts)) as (progress, task_id):
        return asyncio.run(gather_with_progress(progress, task_id))


def _build_host_table(
    cfg: Config,
    services_by_host: dict[str, list[str]],
    running_by_host: dict[str, list[str]],
    container_counts: dict[str, int],
    *,
    show_containers: bool,
) -> Table:
    """Build the hosts table."""
    table = Table(title="Hosts", show_header=True, header_style="bold cyan")
    table.add_column("Host", style="magenta")
    table.add_column("Address")
    table.add_column("Configured", justify="right")
    table.add_column("Running", justify="right")
    if show_containers:
        table.add_column("Containers", justify="right")

    for host_name in sorted(cfg.hosts.keys()):
        host = cfg.hosts[host_name]
        configured = len(services_by_host[host_name])
        running = len(running_by_host[host_name])

        row = [
            host_name,
            host.address,
            str(configured),
            str(running) if running > 0 else "[dim]0[/]",
        ]
        if show_containers:
            count = container_counts.get(host_name, 0)
            row.append(str(count) if count > 0 else "[dim]0[/]")

        table.add_row(*row)
    return table


def _build_summary_table(
    cfg: Config, state: dict[str, str | list[str]], pending: list[str]
) -> Table:
    """Build the summary table."""
    on_disk = cfg.discover_compose_dirs()

    table = Table(title="Summary", show_header=False)
    table.add_column("Label", style="dim")
    table.add_column("Value", style="bold")

    table.add_row("Total hosts", str(len(cfg.hosts)))
    table.add_row("Services (configured)", str(len(cfg.services)))
    table.add_row("Services (tracked)", str(len(state)))
    table.add_row("Compose files on disk", str(len(on_disk)))

    if pending:
        preview = ", ".join(pending[:_STATS_PREVIEW_LIMIT])
        suffix = "..." if len(pending) > _STATS_PREVIEW_LIMIT else ""
        table.add_row("Pending migrations", f"[yellow]{len(pending)}[/] ({preview}{suffix})")
    else:
        table.add_row("Pending migrations", "[green]0[/]")

    return table


# --- Command functions ---


@app.command(rich_help_panel="Monitoring")
def logs(
    services: ServicesArg = None,
    all_services: AllOption = False,
    host: HostOption = None,
    follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False,
    tail: Annotated[
        int | None,
        typer.Option("--tail", "-n", help="Number of lines (default: 20 for --all, 100 otherwise)"),
    ] = None,
    config: ConfigOption = None,
) -> None:
    """Show service logs."""
    if all_services and host is not None:
        err_console.print("[red]✗[/] Cannot use --all and --host together")
        raise typer.Exit(1)

    cfg = load_config_or_exit(config)

    # Determine service list based on options
    if host is not None:
        if host not in cfg.hosts:
            err_console.print(f"[red]✗[/] Host '{host}' not found in config")
            raise typer.Exit(1)
        # Include services where host is in the list of configured hosts
        svc_list = [s for s in cfg.services if host in cfg.get_hosts(s)]
        if not svc_list:
            err_console.print(f"[yellow]![/] No services configured for host '{host}'")
            return
    else:
        svc_list, cfg = get_services(services or [], all_services, config)

    # Default to fewer lines when showing multiple services
    many_services = all_services or host is not None or len(svc_list) > 1
    effective_tail = tail if tail is not None else (20 if many_services else 100)
    cmd = f"logs --tail {effective_tail}"
    if follow:
        cmd += " -f"
    results = run_async(run_on_services(cfg, svc_list, cmd))
    report_results(results)
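Illustrative sketch (not part of the diff) of the tail default above — an explicit --tail always wins, one service gets 100 lines, multi-service views get 20:

    def effective_tail(tail: int | None, *, many_services: bool) -> int:
        return tail if tail is not None else (20 if many_services else 100)

    assert effective_tail(None, many_services=False) == 100  # cf logs nginx
    assert effective_tail(None, many_services=True) == 20    # cf logs --all
    assert effective_tail(50, many_services=True) == 50      # cf logs --all -n 50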
@app.command(rich_help_panel="Monitoring")
def ps(
    config: ConfigOption = None,
) -> None:
    """Show status of all services."""
    cfg = load_config_or_exit(config)
    results = run_async(run_on_services(cfg, list(cfg.services.keys()), "ps"))
    report_results(results)


@app.command(rich_help_panel="Monitoring")
def stats(
    live: Annotated[
        bool,
        typer.Option("--live", "-l", help="Query Docker for live container stats"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Show overview statistics for hosts and services.

    Without --live: Shows config/state info (hosts, services, pending migrations).
    With --live: Also queries Docker on each host for container counts.
    """
    cfg = load_config_or_exit(config)
    state = load_state(cfg)
    pending = get_services_needing_migration(cfg)

    all_hosts = list(cfg.hosts.keys())
    services_by_host = _group_services_by_host(cfg.services, cfg.hosts, all_hosts)
    running_by_host = _group_services_by_host(state, cfg.hosts, all_hosts)

    container_counts: dict[str, int] = {}
    if live:
        container_counts = _get_container_counts(cfg)

    host_table = _build_host_table(
        cfg, services_by_host, running_by_host, container_counts, show_containers=live
    )
    console.print(host_table)

    console.print()
    console.print(_build_summary_table(cfg, state, pending))
src/compose_farm/cli/web.py (new file, 48 lines)
@@ -0,0 +1,48 @@
"""Web server command."""

from __future__ import annotations

from typing import Annotated

import typer

from compose_farm.cli.app import app
from compose_farm.console import console


@app.command(rich_help_panel="Server")
def web(
    host: Annotated[
        str,
        typer.Option("--host", "-H", help="Host to bind to"),
    ] = "0.0.0.0",  # noqa: S104
    port: Annotated[
        int,
        typer.Option("--port", "-p", help="Port to listen on"),
    ] = 8000,
    reload: Annotated[
        bool,
        typer.Option("--reload", "-r", help="Enable auto-reload for development"),
    ] = False,
) -> None:
    """Start the web UI server."""
    try:
        import uvicorn  # noqa: PLC0415
    except ImportError:
        console.print(
            "[red]Error:[/] Web dependencies not installed. "
            "Install with: [cyan]pip install compose-farm[web][/]"
        )
        raise typer.Exit(1) from None

    console.print(f"[green]Starting Compose Farm Web UI[/] at http://{host}:{port}")
    console.print("[dim]Press Ctrl+C to stop[/]")

    uvicorn.run(
        "compose_farm.web:create_app",
        factory=True,
        host=host,
        port=port,
        reload=reload,
        log_level="info",
    )
@@ -18,10 +18,10 @@ if TYPE_CHECKING:
     from .config import Config

 # Port parsing constants
-SINGLE_PART = 1
-PUBLISHED_TARGET_PARTS = 2
-HOST_PUBLISHED_PARTS = 3
-MIN_VOLUME_PARTS = 2
+_SINGLE_PART = 1
+_PUBLISHED_TARGET_PARTS = 2
+_HOST_PUBLISHED_PARTS = 3
+_MIN_VOLUME_PARTS = 2

 _VAR_PATTERN = re.compile(r"\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-(.*?))?\}")
@@ -34,7 +34,7 @@ class PortMapping:
     published: int | None


-def load_env(compose_path: Path) -> dict[str, str]:
+def _load_env(compose_path: Path) -> dict[str, str]:
     """Load environment variables for compose interpolation.

     Reads from .env file in the same directory as compose file,
@@ -59,7 +59,7 @@ def load_env(compose_path: Path) -> dict[str, str]:
     return env


-def interpolate(value: str, env: dict[str, str]) -> str:
+def _interpolate(value: str, env: dict[str, str]) -> str:
     """Perform ${VAR} and ${VAR:-default} interpolation."""

     def replace(match: re.Match[str]) -> str:
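Illustrative sketch of the ${VAR} / ${VAR:-default} semantics (not part of the diff; the replace callback below is a guess, since this hunk does not show its body):

    import re

    _VAR_PATTERN = re.compile(r"\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-(.*?))?\}")

    def interpolate(value: str, env: dict[str, str]) -> str:
        # Use the env value when set, else the ":-" default, else empty string.
        return _VAR_PATTERN.sub(lambda m: env.get(m.group(1), m.group(2) or ""), value)

    interpolate("${HOST}:${PORT:-8080}", {"HOST": "0.0.0.0"})  # "0.0.0.0:8080"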
@@ -73,7 +73,7 @@ def interpolate(value: str, env: dict[str, str]) -> str:
     return _VAR_PATTERN.sub(replace, value)


-def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
+def _parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
     """Parse port specifications from compose file.

     Handles string formats like "8080", "8080:80", "0.0.0.0:8080:80",
@@ -87,18 +87,22 @@ def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PL

     for item in items:
         if isinstance(item, str):
-            interpolated = interpolate(item, env)
+            interpolated = _interpolate(item, env)
             port_spec, _, _ = interpolated.partition("/")
             parts = port_spec.split(":")
             published: int | None = None
             target: int | None = None

-            if len(parts) == SINGLE_PART and parts[0].isdigit():
+            if len(parts) == _SINGLE_PART and parts[0].isdigit():
                 target = int(parts[0])
-            elif len(parts) == PUBLISHED_TARGET_PARTS and parts[0].isdigit() and parts[1].isdigit():
+            elif (
+                len(parts) == _PUBLISHED_TARGET_PARTS and parts[0].isdigit() and parts[1].isdigit()
+            ):
                 published = int(parts[0])
                 target = int(parts[1])
-            elif len(parts) == HOST_PUBLISHED_PARTS and parts[-2].isdigit() and parts[-1].isdigit():
+            elif (
+                len(parts) == _HOST_PUBLISHED_PARTS and parts[-2].isdigit() and parts[-1].isdigit()
+            ):
                 published = int(parts[-2])
                 target = int(parts[-1])
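The three accepted string forms, summarized (illustrative, not part of the diff):

    # spec -> (published, target) as parsed above
    examples = {
        "8080": (None, 8080),           # _SINGLE_PART: container port only
        "8080:80": (8080, 80),          # _PUBLISHED_TARGET_PARTS
        "0.0.0.0:8080:80": (8080, 80),  # _HOST_PUBLISHED_PARTS: host IP ignored
    }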
@@ -107,7 +111,7 @@ def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PL
         elif isinstance(item, dict):
             target_raw = item.get("target")
             if isinstance(target_raw, str):
-                target_raw = interpolate(target_raw, env)
+                target_raw = _interpolate(target_raw, env)
             if target_raw is None:
                 continue
             try:
@@ -117,7 +121,7 @@ def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PL

             published_raw = item.get("published")
             if isinstance(published_raw, str):
-                published_raw = interpolate(published_raw, env)
+                published_raw = _interpolate(published_raw, env)
             published_val: int | None
             try:
                 published_val = int(str(published_raw)) if published_raw is not None else None
@@ -144,14 +148,14 @@ def _parse_volume_item(
 ) -> str | None:
     """Parse a single volume item and return host path if it's a bind mount."""
     if isinstance(item, str):
-        interpolated = interpolate(item, env)
+        interpolated = _interpolate(item, env)
         parts = interpolated.split(":")
-        if len(parts) >= MIN_VOLUME_PARTS:
+        if len(parts) >= _MIN_VOLUME_PARTS:
             return _resolve_host_path(parts[0], compose_dir)
     elif isinstance(item, dict) and item.get("type") == "bind":
         source = item.get("source")
         if source:
-            interpolated = interpolate(str(source), env)
+            interpolated = _interpolate(str(source), env)
             return _resolve_host_path(interpolated, compose_dir)
     return None
@@ -166,7 +170,7 @@ def parse_host_volumes(config: Config, service: str) -> list[str]:
     if not compose_path.exists():
         return []

-    env = load_env(compose_path)
+    env = _load_env(compose_path)
     compose_data = yaml.safe_load(compose_path.read_text()) or {}
     raw_services = compose_data.get("services", {})
     if not isinstance(raw_services, dict):
@@ -199,6 +203,51 @@ def parse_host_volumes(config: Config, service: str) -> list[str]:
     return unique


+def parse_devices(config: Config, service: str) -> list[str]:
+    """Extract host device paths from a service's compose file.
+
+    Returns a list of host device paths (e.g., /dev/dri, /dev/dri/renderD128).
+    """
+    compose_path = config.get_compose_path(service)
+    if not compose_path.exists():
+        return []
+
+    env = _load_env(compose_path)
+    compose_data = yaml.safe_load(compose_path.read_text()) or {}
+    raw_services = compose_data.get("services", {})
+    if not isinstance(raw_services, dict):
+        return []
+
+    devices: list[str] = []
+    for definition in raw_services.values():
+        if not isinstance(definition, dict):
+            continue
+
+        device_list = definition.get("devices")
+        if not device_list or not isinstance(device_list, list):
+            continue
+
+        for item in device_list:
+            if not isinstance(item, str):
+                continue
+            interpolated = _interpolate(item, env)
+            # Format: host_path:container_path[:options]
+            parts = interpolated.split(":")
+            if parts:
+                host_path = parts[0]
+                if host_path.startswith("/dev/"):
+                    devices.append(host_path)
+
+    # Return unique devices, preserving order
+    seen: set[str] = set()
+    unique: list[str] = []
+    for d in devices:
+        if d not in seen:
+            seen.add(d)
+            unique.append(d)
+    return unique
+
+
 def parse_external_networks(config: Config, service: str) -> list[str]:
     """Extract external network names from a service's compose file.
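Illustrative compose input for `parse_devices` (not part of the diff); only host paths under /dev/ survive:

    # devices:
    #   - /dev/dri:/dev/dri                 -> "/dev/dri"
    #   - ${DEV:-/dev/dri/renderD128}:/dev/dri/renderD128
    #                                       -> "/dev/dri/renderD128" (after interpolation)
    #   - ./fake:/container/path            -> ignored (host side not under /dev/)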
@@ -234,7 +283,7 @@ def load_compose_services(
         message = f"[{stack}] Compose file not found: {compose_path}"
         raise FileNotFoundError(message)

-    env = load_env(compose_path)
+    env = _load_env(compose_path)
     compose_data = yaml.safe_load(compose_path.read_text()) or {}
     raw_services = compose_data.get("services", {})
     if not isinstance(raw_services, dict):
@@ -248,7 +297,7 @@ def normalize_labels(raw: Any, env: dict[str, str]) -> dict[str, str]:
         return {}
     if isinstance(raw, dict):
         return {
-            interpolate(str(k), env): interpolate(str(v), env)
+            _interpolate(str(k), env): _interpolate(str(v), env)
             for k, v in raw.items()
             if k is not None
         }
@@ -258,8 +307,8 @@ def normalize_labels(raw: Any, env: dict[str, str]) -> dict[str, str]:
             if not isinstance(item, str) or "=" not in item:
                 continue
             key_raw, value_raw = item.split("=", 1)
-            key = interpolate(key_raw.strip(), env)
-            value = interpolate(value_raw.strip(), env)
+            key = _interpolate(key_raw.strip(), env)
+            value = _interpolate(value_raw.strip(), env)
             labels[key] = value
         return labels
     return {}
@@ -278,5 +327,5 @@ def get_ports_for_service(
         if ref_service in all_services:
             ref_def = all_services[ref_service]
             if isinstance(ref_def, dict):
-                return parse_ports(ref_def.get("ports"), env)
-    return parse_ports(definition.get("ports"), env)
+                return _parse_ports(ref_def.get("ports"), env)
+    return _parse_ports(definition.get("ports"), env)
@@ -8,6 +8,8 @@ from pathlib import Path
 import yaml
 from pydantic import BaseModel, Field, model_validator

+from .paths import config_search_paths, find_config_path
+

 class Host(BaseModel):
     """SSH host configuration."""
@@ -22,7 +24,7 @@ class Config(BaseModel):

     compose_dir: Path = Path("/opt/compose")
     hosts: dict[str, Host]
-    services: dict[str, str]  # service_name -> host_name
+    services: dict[str, str | list[str]]  # service_name -> host_name or list of hosts
     traefik_file: Path | None = None  # Auto-regenerate traefik config after up/down
     traefik_service: str | None = None  # Service name for Traefik (skip its host in file-provider)
     config_path: Path = Path()  # Set by load_config()
@@ -32,20 +34,60 @@
         return self.config_path.parent / "compose-farm-state.yaml"

     @model_validator(mode="after")
-    def validate_service_hosts(self) -> Config:
-        """Ensure all services reference valid hosts."""
-        for service, host_name in self.services.items():
-            if host_name not in self.hosts:
-                msg = f"Service '{service}' references unknown host '{host_name}'"
-                raise ValueError(msg)
+    def validate_hosts_and_services(self) -> Config:
+        """Validate host names and service configurations."""
+        # "all" is reserved keyword, cannot be used as host name
+        if "all" in self.hosts:
+            msg = "'all' is a reserved keyword and cannot be used as a host name"
+            raise ValueError(msg)
+
+        for service, host_value in self.services.items():
+            # Validate list configurations
+            if isinstance(host_value, list):
+                if not host_value:
+                    msg = f"Service '{service}' has empty host list"
+                    raise ValueError(msg)
+                if len(host_value) != len(set(host_value)):
+                    msg = f"Service '{service}' has duplicate hosts in list"
+                    raise ValueError(msg)
+
+            # Validate all referenced hosts exist
+            host_names = self.get_hosts(service)
+            for host_name in host_names:
+                if host_name not in self.hosts:
+                    msg = f"Service '{service}' references unknown host '{host_name}'"
+                    raise ValueError(msg)
         return self

-    def get_host(self, service: str) -> Host:
-        """Get host config for a service."""
+    def get_hosts(self, service: str) -> list[str]:
+        """Get list of host names for a service.
+
+        Supports:
+        - Single host: "truenas-debian" -> ["truenas-debian"]
+        - All hosts: "all" -> list of all configured hosts
+        - Explicit list: ["host1", "host2"] -> ["host1", "host2"]
+        """
         if service not in self.services:
             msg = f"Unknown service: {service}"
             raise ValueError(msg)
-        return self.hosts[self.services[service]]
+        host_value = self.services[service]
+        if isinstance(host_value, list):
+            return host_value
+        if host_value == "all":
+            return list(self.hosts.keys())
+        return [host_value]
+
+    def is_multi_host(self, service: str) -> bool:
+        """Check if a service runs on multiple hosts."""
+        return len(self.get_hosts(service)) > 1
+
+    def get_host(self, service: str) -> Host:
+        """Get host config for a service (first host if multi-host)."""
+        if service not in self.services:
+            msg = f"Unknown service: {service}"
+            raise ValueError(msg)
+        host_names = self.get_hosts(service)
+        return self.hosts[host_names[0]]

     def get_compose_path(self, service: str) -> Path:
         """Get compose file path for a service.
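Illustrative resolution table for the new accessors (hypothetical config values, not part of the diff):

    # services:                      cfg.get_hosts(...) ->
    #   nginx: server1               ["server1"]
    #   prometheus: [s1, s2]         ["s1", "s2"]
    #   node-exporter: all           list(cfg.hosts)  # every configured host
    # cfg.get_host("prometheus") returns the Host for "s1" (first entry);
    # cfg.is_multi_host("prometheus") is True.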
@@ -102,26 +144,22 @@ def load_config(path: Path | None = None) -> Config:
     """Load configuration from YAML file.

     Search order:
-    1. Explicit path if provided
-    2. ./compose-farm.yaml
-    3. ~/.config/compose-farm/compose-farm.yaml
+    1. Explicit path if provided via --config
+    2. CF_CONFIG environment variable
+    3. ./compose-farm.yaml
+    4. $XDG_CONFIG_HOME/compose-farm/compose-farm.yaml (defaults to ~/.config)
     """
-    search_paths = [
-        Path("compose-farm.yaml"),
-        Path.home() / ".config" / "compose-farm" / "compose-farm.yaml",
-    ]
-
-    if path:
-        config_path = path
-    else:
-        config_path = None
-        for p in search_paths:
-            if p.exists():
-                config_path = p
-                break
+    config_path = path or find_config_path()

     if config_path is None or not config_path.exists():
-        msg = f"Config file not found. Searched: {', '.join(str(p) for p in search_paths)}"
+        msg = f"Config file not found. Searched: {', '.join(str(p) for p in config_search_paths())}"
         raise FileNotFoundError(msg)

     if config_path.is_dir():
         msg = (
             f"Config path is a directory, not a file: {config_path}\n"
             "This often happens when Docker creates an empty directory for a missing mount."
         )
         raise FileNotFoundError(msg)

     with config_path.open() as f:
src/compose_farm/console.py (new file, 6 lines)
@@ -0,0 +1,6 @@
"""Shared console instances for consistent output styling."""

from rich.console import Console

console = Console(highlight=False)
err_console = Console(stderr=True, highlight=False)
src/compose_farm/example-config.yaml (new file, 89 lines)
@@ -0,0 +1,89 @@
# Compose Farm configuration
# Documentation: https://github.com/basnijholt/compose-farm
#
# This file configures compose-farm to manage Docker Compose services
# across multiple hosts via SSH.
#
# Place this file at:
#   - ./compose-farm.yaml (current directory)
#   - ~/.config/compose-farm/compose-farm.yaml
#   - Or specify with: cf --config /path/to/config.yaml
#   - Or set CF_CONFIG environment variable

# ------------------------------------------------------------------------------
# compose_dir: Directory containing service subdirectories with compose files
# ------------------------------------------------------------------------------
# Each subdirectory should contain a compose.yaml (or docker-compose.yml).
# This path must be the same on all hosts (NFS mount recommended).
#
compose_dir: /opt/compose

# ------------------------------------------------------------------------------
# hosts: SSH connection details for each host
# ------------------------------------------------------------------------------
# Simple form:
#   hostname: ip-or-fqdn
#
# Full form:
#   hostname:
#     address: ip-or-fqdn
#     user: ssh-username  # default: current user
#     port: 22            # default: 22
#
# Note: "all" is a reserved keyword and cannot be used as a host name.
#
hosts:
  # Example: simple form (uses current user, port 22)
  server1: 192.168.1.10

  # Example: full form with explicit user
  server2:
    address: 192.168.1.20
    user: admin

  # Example: full form with custom port
  server3:
    address: 192.168.1.30
    user: root
    port: 2222

# ------------------------------------------------------------------------------
# services: Map service names to their target host(s)
# ------------------------------------------------------------------------------
# Each service name must match a subdirectory in compose_dir.
#
# Single host:
#   service-name: hostname
#
# Multiple hosts (explicit list):
#   service-name: [host1, host2]
#
# All hosts:
#   service-name: all
#
services:
  # Example: service runs on a single host
  nginx: server1
  postgres: server2

  # Example: service runs on multiple specific hosts
  # prometheus: [server1, server2]

  # Example: service runs on ALL hosts (e.g., monitoring agents)
  # node-exporter: all

# ------------------------------------------------------------------------------
# traefik_file: (optional) Auto-generate Traefik file-provider config
# ------------------------------------------------------------------------------
# When set, compose-farm automatically regenerates this file after
# up/down/restart/update commands. Traefik watches this file for changes.
#
# traefik_file: /opt/compose/traefik/dynamic.d/compose-farm.yml

# ------------------------------------------------------------------------------
# traefik_service: (optional) Service name running Traefik
# ------------------------------------------------------------------------------
# When generating traefik_file, services on the same host as Traefik are
# skipped (they're handled by Traefik's Docker provider directly).
#
# traefik_service: traefik
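Minimal loading sketch for this example config (assumes it is saved as ./compose-farm.yaml; not part of the diff):

    from compose_farm.config import load_config

    cfg = load_config()               # walks the documented search order
    cfg.get_hosts("nginx")            # ["server1"]
    cfg.get_host("postgres").address  # "192.168.1.20"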
@@ -9,15 +9,14 @@ from dataclasses import dataclass
 from functools import lru_cache
 from typing import TYPE_CHECKING, Any

-import asyncssh
-from rich.console import Console
 from rich.markup import escape

-if TYPE_CHECKING:
-    from .config import Config, Host
+from .console import console, err_console

-_console = Console(highlight=False)
-_err_console = Console(stderr=True, highlight=False)
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+    from .config import Config, Host

 LOCAL_ADDRESSES = frozenset({"local", "localhost", "127.0.0.1", "::1"})
 _DEFAULT_SSH_PORT = 22
@@ -53,8 +52,17 @@ class CommandResult:
     stdout: str = ""
     stderr: str = ""

+    # SSH returns 255 when connection is closed unexpectedly (e.g., Ctrl+C)
+    _SSH_CONNECTION_CLOSED = 255

-def _is_local(host: Host) -> bool:
+    @property
+    def interrupted(self) -> bool:
+        """Check if command was killed by SIGINT (Ctrl+C)."""
+        # Negative exit codes indicate signal termination; -2 = SIGINT
+        return self.exit_code < 0 or self.exit_code == self._SSH_CONNECTION_CLOSED
+
+
+def is_local(host: Host) -> bool:
     """Check if host should run locally (no SSH)."""
     addr = host.address.lower()
     if addr in LOCAL_ADDRESSES:
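Illustrative sketch (not part of the diff) of the exit-code convention behind `interrupted`:

    CommandResult(service="nginx", exit_code=-2, success=False).interrupted   # True: SIGINT
    CommandResult(service="nginx", exit_code=255, success=False).interrupted  # True: SSH closed
    CommandResult(service="nginx", exit_code=1, success=False).interrupted    # False: plain failure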
@@ -100,14 +108,14 @@ async def _run_local_command(
         *,
         is_stderr: bool = False,
     ) -> None:
-        console = _err_console if is_stderr else _console
+        out = err_console if is_stderr else console
         while True:
             line = await reader.readline()
             if not line:
                 break
             text = line.decode()
             if text.strip():  # Skip empty lines
-                console.print(f"[cyan]\\[{prefix}][/] {escape(text)}", end="")
+                out.print(f"[cyan]\\[{prefix}][/] {escape(text)}", end="")

     await asyncio.gather(
         read_stream(proc.stdout, service),
@@ -129,7 +137,7 @@ async def _run_local_command(
             stderr=stderr_data.decode() if stderr_data else "",
         )
     except OSError as e:
-        _err_console.print(f"[cyan]\\[{service}][/] [red]Local error:[/] {e}")
+        err_console.print(f"[cyan]\\[{service}][/] [red]Local error:[/] {e}")
         return CommandResult(service=service, exit_code=1, success=False)
@@ -156,6 +164,8 @@ async def _run_ssh_command(
             success=result.returncode == 0,
         )

+    import asyncssh  # noqa: PLC0415 - lazy import for faster CLI startup
+
     proc: asyncssh.SSHClientProcess[Any]
     try:
         async with asyncssh.connect(  # noqa: SIM117 - conn needed before create_process
@@ -173,10 +183,10 @@ async def _run_ssh_command(
         *,
         is_stderr: bool = False,
     ) -> None:
-        console = _err_console if is_stderr else _console
+        out = err_console if is_stderr else console
         async for line in reader:
             if line.strip():  # Skip empty lines
-                console.print(f"[cyan]\\[{prefix}][/] {escape(line)}", end="")
+                out.print(f"[cyan]\\[{prefix}][/] {escape(line)}", end="")

     await asyncio.gather(
         read_stream(proc.stdout, service),
@@ -198,7 +208,7 @@ async def _run_ssh_command(
             stderr=stderr_data,
         )
     except (OSError, asyncssh.Error) as e:
-        _err_console.print(f"[cyan]\\[{service}][/] [red]SSH error:[/] {e}")
+        err_console.print(f"[cyan]\\[{service}][/] [red]SSH error:[/] {e}")
         return CommandResult(service=service, exit_code=1, success=False)
@@ -210,8 +220,17 @@ async def run_command(
     stream: bool = True,
     raw: bool = False,
 ) -> CommandResult:
-    """Run a command on a host (locally or via SSH)."""
-    if _is_local(host):
+    """Run a command on a host (locally or via SSH).
+
+    Args:
+        host: Host configuration
+        command: Command to run
+        service: Service name (used as prefix in output)
+        stream: Whether to stream output (default True)
+        raw: Whether to use raw mode with TTY (default False)
+
+    """
+    if is_local(host):
         return await _run_local_command(command, service, stream=stream, raw=raw)
     return await _run_ssh_command(host, command, service, stream=stream, raw=raw)
@@ -262,15 +281,13 @@ async def run_on_services(
 ) -> list[CommandResult]:
     """Run a docker compose command on multiple services in parallel.

+    For multi-host services, runs on all configured hosts.
     Note: raw=True only makes sense for single-service operations.
     """
-    tasks = [
-        run_compose(config, service, compose_cmd, stream=stream, raw=raw) for service in services
-    ]
-    return await asyncio.gather(*tasks)
+    return await run_sequential_on_services(config, services, [compose_cmd], stream=stream, raw=raw)


-async def run_sequential_commands(
+async def _run_sequential_commands(
     config: Config,
     service: str,
     commands: list[str],
@@ -286,6 +303,40 @@ async def run_sequential_commands(
     return CommandResult(service=service, exit_code=0, success=True)


+async def _run_sequential_commands_multi_host(
+    config: Config,
+    service: str,
+    commands: list[str],
+    *,
+    stream: bool = True,
+    raw: bool = False,
+) -> list[CommandResult]:
+    """Run multiple compose commands sequentially for a multi-host service.
+
+    Commands are run sequentially, but each command runs on all hosts in parallel.
+    """
+    host_names = config.get_hosts(service)
+    compose_path = config.get_compose_path(service)
+    final_results: list[CommandResult] = []
+
+    for cmd in commands:
+        command = f"docker compose -f {compose_path} {cmd}"
+        tasks = []
+        for host_name in host_names:
+            host = config.hosts[host_name]
+            label = f"{service}@{host_name}" if len(host_names) > 1 else service
+            tasks.append(run_command(host, command, label, stream=stream, raw=raw))
+
+        results = await asyncio.gather(*tasks)
+        final_results = list(results)
+
+        # Check if any failed
+        if any(not r.success for r in results):
+            return final_results
+
+    return final_results
+
+
 async def run_sequential_on_services(
     config: Config,
     services: list[str],
@@ -296,13 +347,38 @@ async def run_sequential_on_services(
 ) -> list[CommandResult]:
     """Run sequential commands on multiple services in parallel.

+    For multi-host services, runs on all configured hosts.
     Note: raw=True only makes sense for single-service operations.
     """
-    tasks = [
-        run_sequential_commands(config, service, commands, stream=stream, raw=raw)
-        for service in services
-    ]
-    return await asyncio.gather(*tasks)
+    # Separate multi-host and single-host services for type-safe gathering
+    multi_host_tasks = []
+    single_host_tasks = []
+
+    for service in services:
+        if config.is_multi_host(service):
+            multi_host_tasks.append(
+                _run_sequential_commands_multi_host(
+                    config, service, commands, stream=stream, raw=raw
+                )
+            )
+        else:
+            single_host_tasks.append(
+                _run_sequential_commands(config, service, commands, stream=stream, raw=raw)
+            )
+
+    # Gather results separately to maintain type safety
+    flat_results: list[CommandResult] = []
+
+    if multi_host_tasks:
+        multi_results = await asyncio.gather(*multi_host_tasks)
+        for result_list in multi_results:
+            flat_results.extend(result_list)
+
+    if single_host_tasks:
+        single_results = await asyncio.gather(*single_host_tasks)
+        flat_results.extend(single_results)
+
+    return flat_results


 async def check_service_running(
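Illustrative fan-out for a mixed service set (hypothetical names, not part of the diff):

    # services = ["whoami", "node-exporter"]   # single-host + "all" (say, 2 hosts)
    # commands = ["pull", "up -d"]
    # whoami:        pull, then up -d, on its one host      -> 1 CommandResult
    # node-exporter: each command on both hosts in parallel -> 2 CommandResults
    # The flattened return lists multi-host results first, then single-host.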
@@ -322,32 +398,27 @@ async def check_service_running(
     return result.success and bool(result.stdout.strip())


-async def check_paths_exist(
+async def _batch_check_existence(
     config: Config,
     host_name: str,
-    paths: list[str],
+    items: list[str],
+    cmd_template: Callable[[str], str],
+    context: str,
 ) -> dict[str, bool]:
-    """Check if multiple paths exist on a specific host.
-
-    Returns a dict mapping path -> exists.
-    """
-    if not paths:
+    """Check existence of multiple items on a host using a command template."""
+    if not items:
         return {}

     host = config.hosts[host_name]

-    # Build a command that checks all paths efficiently
-    # Using a subshell to check each path and report Y/N
     checks = []
-    for p in paths:
-        # Escape single quotes in path
-        escaped = p.replace("'", "'\\''")
-        checks.append(f"test -e '{escaped}' && echo 'Y:{escaped}' || echo 'N:{escaped}'")
+    for item in items:
+        escaped = item.replace("'", "'\\''")
+        checks.append(cmd_template(escaped))

     command = "; ".join(checks)
-    result = await run_command(host, command, "mount-check", stream=False)
+    result = await run_command(host, command, context, stream=False)

-    exists: dict[str, bool] = dict.fromkeys(paths, False)
+    exists: dict[str, bool] = dict.fromkeys(items, False)
     for raw_line in result.stdout.splitlines():
         line = raw_line.strip()
         if line.startswith("Y:"):
@@ -358,6 +429,27 @@
     return exists


+async def check_paths_exist(
+    config: Config,
+    host_name: str,
+    paths: list[str],
+) -> dict[str, bool]:
+    """Check if multiple paths exist on a specific host.
+
+    Returns a dict mapping path -> exists.
+    Handles permission denied as "exists" (path is there, just not accessible).
+    """
+    # Only report missing if stat says "No such file", otherwise assume exists
+    # (handles permission denied correctly - path exists, just not accessible)
+    return await _batch_check_existence(
+        config,
+        host_name,
+        paths,
+        lambda esc: f"stat '{esc}' 2>&1 | grep -q 'No such file' && echo 'N:{esc}' || echo 'Y:{esc}'",
+        "mount-check",
+    )
+
+
 async def check_networks_exist(
     config: Config,
     host_name: str,
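Illustrative batched probe for two paths (not part of the diff); each check echoes a Y:/N: line that is parsed back into the result dict:

    # stat '/mnt/media' 2>&1 | grep -q 'No such file' && echo 'N:/mnt/media' || echo 'Y:/mnt/media'; \
    # stat '/opt/data'  2>&1 | grep -q 'No such file' && echo 'N:/opt/data'  || echo 'Y:/opt/data'
    #
    # stdout "Y:/mnt/media\nN:/opt/data" parses to {"/mnt/media": True, "/opt/data": False}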
@@ -367,29 +459,12 @@ async def check_networks_exist(

     Returns a dict mapping network_name -> exists.
     """
-    if not networks:
-        return {}
-
-    host = config.hosts[host_name]
-
-    # Check each network via docker network inspect
-    checks = []
-    for net in networks:
-        escaped = net.replace("'", "'\\''")
-        checks.append(
-            f"docker network inspect '{escaped}' >/dev/null 2>&1 "
-            f"&& echo 'Y:{escaped}' || echo 'N:{escaped}'"
-        )
-
-    command = "; ".join(checks)
-    result = await run_command(host, command, "network-check", stream=False)
-
-    exists: dict[str, bool] = dict.fromkeys(networks, False)
-    for raw_line in result.stdout.splitlines():
-        line = raw_line.strip()
-        if line.startswith("Y:"):
-            exists[line[2:]] = True
-        elif line.startswith("N:"):
-            exists[line[2:]] = False
-
-    return exists
+    return await _batch_check_existence(
+        config,
+        host_name,
+        networks,
+        lambda esc: (
+            f"docker network inspect '{esc}' >/dev/null 2>&1 && echo 'Y:{esc}' || echo 'N:{esc}'"
+        ),
+        "network-check",
+    )
@@ -6,20 +6,21 @@ import json
 import tomllib
 from dataclasses import dataclass
 from datetime import UTC, datetime
-from pathlib import Path
 from typing import TYPE_CHECKING, Any

 from .executor import run_compose
+from .paths import xdg_config_home

 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable, Iterable
+    from pathlib import Path

     from .config import Config
     from .executor import CommandResult


-DEFAULT_LOG_PATH = Path.home() / ".config" / "compose-farm" / "dockerfarm-log.toml"
-DIGEST_HEX_LENGTH = 64
+DEFAULT_LOG_PATH = xdg_config_home() / "compose-farm" / "dockerfarm-log.toml"
+_DIGEST_HEX_LENGTH = 64


 @dataclass(frozen=True)
@@ -46,7 +47,8 @@ class SnapshotEntry:
     }


-def _isoformat(dt: datetime) -> str:
+def isoformat(dt: datetime) -> str:
+    """Format a datetime as an ISO 8601 string with Z suffix for UTC."""
     return dt.astimezone(UTC).replace(microsecond=0).isoformat().replace("+00:00", "Z")
@@ -95,13 +97,13 @@ def _extract_image_fields(record: dict[str, Any]) -> tuple[str, str]:
         or ""
     )

-    if digest and not digest.startswith("sha256:") and len(digest) == DIGEST_HEX_LENGTH:
+    if digest and not digest.startswith("sha256:") and len(digest) == _DIGEST_HEX_LENGTH:
         digest = f"sha256:{digest}"

     return image, digest


-async def _collect_service_entries(
+async def collect_service_entries(
     config: Config,
     service: str,
     *,
@@ -116,7 +118,8 @@
         raise RuntimeError(error)

     records = _parse_images_output(result.stdout)
-    host_name = config.services[service]
+    # Use first host for snapshots (multi-host services use same images on all hosts)
+    host_name = config.get_hosts(service)[0]
     compose_path = config.get_compose_path(service)

     entries: list[SnapshotEntry] = []
@@ -137,19 +140,21 @@
     return entries


-def _load_existing_entries(log_path: Path) -> list[dict[str, str]]:
+def load_existing_entries(log_path: Path) -> list[dict[str, str]]:
+    """Load existing snapshot entries from a TOML log file."""
     if not log_path.exists():
         return []
     data = tomllib.loads(log_path.read_text())
     return list(data.get("entries", []))


-def _merge_entries(
+def merge_entries(
     existing: Iterable[dict[str, str]],
     new_entries: Iterable[SnapshotEntry],
     *,
     now_iso: str,
 ) -> list[dict[str, str]]:
+    """Merge new snapshot entries with existing ones, preserving first_seen timestamps."""
     merged: dict[tuple[str, str, str], dict[str, str]] = {
         (e["service"], e["host"], e["digest"]): dict(e) for e in existing
     }
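Illustrative merge behavior (hypothetical digest, not part of the diff): entries are keyed by (service, host, digest); a re-observed key keeps its first_seen and gets last_seen = now_iso, while a brand-new key gets both set to now_iso.

    existing = [{"service": "nginx", "host": "server1", "digest": "sha256:aaa",
                 "first_seen": "2024-01-01T00:00:00Z", "last_seen": "2024-01-01T00:00:00Z"}]
    # Merging a new entry with the same key at now_iso="2024-02-01T00:00:00Z"
    # yields first_seen "2024-01-01...", last_seen "2024-02-01...".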
@@ -162,7 +167,8 @@
     return list(merged.values())


-def _write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str, str]]) -> None:
+def write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str, str]]) -> None:
+    """Write snapshot entries to a TOML log file."""
     lines: list[str] = ["[meta]"]
     lines.extend(f'{key} = "{_escape(meta[key])}"' for key in sorted(meta))
@@ -187,45 +193,3 @@ def _write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str,
     content = "\n".join(lines).rstrip() + "\n"
     log_path.parent.mkdir(parents=True, exist_ok=True)
     log_path.write_text(content)
-
-
-async def snapshot_services(
-    config: Config,
-    services: list[str],
-    *,
-    log_path: Path | None = None,
-    now: datetime | None = None,
-    run_compose_fn: Callable[..., Awaitable[CommandResult]] = run_compose,
-) -> Path:
-    """Capture current image digests for services and write them to a TOML log.
-
-    - Preserves the earliest `first_seen` per (service, host, digest)
-    - Updates `last_seen` for digests observed in this snapshot
-    - Leaves untouched digests that were not part of this run (history is kept)
-    """
-    if not services:
-        error = "No services specified for snapshot"
-        raise RuntimeError(error)
-
-    log_path = log_path or DEFAULT_LOG_PATH
-    now_dt = now or datetime.now(UTC)
-    now_iso = _isoformat(now_dt)
-
-    existing_entries = _load_existing_entries(log_path)
-
-    snapshot_entries: list[SnapshotEntry] = []
-    for service in services:
-        snapshot_entries.extend(
-            await _collect_service_entries(
-                config, service, now=now_dt, run_compose_fn=run_compose_fn
-            )
-        )
-
-    if not snapshot_entries:
-        error = "No image digests were captured"
-        raise RuntimeError(error)
-
-    merged_entries = _merge_entries(existing_entries, snapshot_entries, now_iso=now_iso)
-    meta = {"generated_at": now_iso, "compose_dir": str(config.compose_dir)}
-    _write_toml(log_path, meta=meta, entries=merged_entries)
-    return log_path
@@ -6,26 +6,67 @@ CLI commands are thin wrappers around these functions.

 from __future__ import annotations

-from typing import TYPE_CHECKING
+import asyncio
+from typing import TYPE_CHECKING, NamedTuple

-from rich.console import Console
-
-from .compose import parse_external_networks, parse_host_volumes
+from .compose import parse_devices, parse_external_networks, parse_host_volumes
+from .console import console, err_console
 from .executor import (
     CommandResult,
     check_networks_exist,
     check_paths_exist,
     check_service_running,
     run_command,
     run_compose,
     run_compose_on_host,
 )
-from .state import get_service_host, set_service_host
+from .state import (
+    get_orphaned_services,
+    get_service_host,
+    remove_service,
+    set_multi_host_service,
+    set_service_host,
+)

 if TYPE_CHECKING:
     from .config import Config

-console = Console(highlight=False)
-err_console = Console(stderr=True, highlight=False)
+
+class OperationInterruptedError(Exception):
+    """Raised when a command is interrupted by Ctrl+C."""
+
+
+class PreflightResult(NamedTuple):
+    """Result of pre-flight checks for a service on a host."""
+
+    missing_paths: list[str]
+    missing_networks: list[str]
+    missing_devices: list[str]
+
+    @property
+    def ok(self) -> bool:
+        """Return True if all checks passed."""
+        return not (self.missing_paths or self.missing_networks or self.missing_devices)
+
+
+async def _run_compose_step(
+    cfg: Config,
+    service: str,
+    command: str,
+    *,
+    raw: bool,
+    host: str | None = None,
+) -> CommandResult:
+    """Run a compose command, handle raw output newline, and check for interrupts."""
+    if host:
+        result = await run_compose_on_host(cfg, service, host, command, raw=raw)
+    else:
+        result = await run_compose(cfg, service, command, raw=raw)
+    if raw:
+        print()  # Ensure newline after raw output
+    if result.interrupted:
+        raise OperationInterruptedError
+    return result


 def get_service_paths(cfg: Config, service: str) -> list[str]:
@@ -35,58 +76,247 @@ def get_service_paths(cfg: Config, service: str) -> list[str]:
|
||||
return paths
|
||||
|
||||
|
||||
async def check_mounts_for_migration(
    cfg: Config,
    service: str,
    target_host: str,
) -> list[str]:
    """Check if mount paths exist on target host. Returns list of missing paths."""
    paths = get_service_paths(cfg, service)
    exists = await check_paths_exist(cfg, target_host, paths)
    return [p for p, found in exists.items() if not found]


async def check_networks_for_migration(
    cfg: Config,
    service: str,
    target_host: str,
) -> list[str]:
    """Check if Docker networks exist on target host. Returns list of missing networks."""
    networks = parse_external_networks(cfg, service)
    if not networks:
        return []
    exists = await check_networks_exist(cfg, target_host, networks)
    return [n for n, found in exists.items() if not found]


async def preflight_check(
    cfg: Config,
    service: str,
    target_host: str,
) -> tuple[list[str], list[str]]:
    """Run pre-flight checks for a service on target host.

    Returns (missing_paths, missing_networks).
    """
    missing_paths = await check_mounts_for_migration(cfg, service, target_host)
    missing_networks = await check_networks_for_migration(cfg, service, target_host)
    return missing_paths, missing_networks


async def discover_service_host(cfg: Config, service: str) -> tuple[str, str | list[str] | None]:
    """Discover where a service is running.

    For multi-host services, checks all assigned hosts in parallel.
    For single-host, checks assigned host first, then others.

    Returns (service_name, host_or_hosts_or_none).
    """
    assigned_hosts = cfg.get_hosts(service)

    if cfg.is_multi_host(service):
        # Check all assigned hosts in parallel
        checks = await asyncio.gather(
            *[check_service_running(cfg, service, h) for h in assigned_hosts]
        )
        running = [h for h, is_running in zip(assigned_hosts, checks, strict=True) if is_running]
        return service, running if running else None

    # Single-host: check assigned host first, then others
    if await check_service_running(cfg, service, assigned_hosts[0]):
        return service, assigned_hosts[0]
    for host in cfg.hosts:
        if host != assigned_hosts[0] and await check_service_running(cfg, service, host):
            return service, host
    return service, None

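For orientation, a minimal sketch of driving discover_service_host across every configured service in parallel; report_locations and the printed format are hypothetical, not part of this diff:

import asyncio

async def report_locations(cfg) -> None:  # hypothetical helper
    # Fan out discovery across all configured services concurrently.
    pairs = await asyncio.gather(
        *[discover_service_host(cfg, svc) for svc in cfg.services]
    )
    for service, hosts in pairs:
        print(service, "->", hosts or "not running")
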
def report_preflight_failures(
    service: str,
    target_host: str,
    missing_paths: list[str],
    missing_networks: list[str],
) -> None:
    """Report pre-flight check failures."""
    err_console.print(
        f"[cyan]\\[{service}][/] [red]✗[/] Cannot start on [magenta]{target_host}[/]:"
    )
    for path in missing_paths:
        err_console.print(f"  [red]✗[/] missing path: {path}")
    for net in missing_networks:
        err_console.print(f"  [red]✗[/] missing network: {net}")


async def check_service_requirements(
    cfg: Config,
    service: str,
    host_name: str,
) -> PreflightResult:
    """Check if a service can run on a specific host.

    Verifies that all required paths (volumes), networks, and devices exist.
    """
    # Check mount paths
    paths = get_service_paths(cfg, service)
    path_exists = await check_paths_exist(cfg, host_name, paths)
    missing_paths = [p for p, found in path_exists.items() if not found]

    # Check external networks
    networks = parse_external_networks(cfg, service)
    missing_networks: list[str] = []
    if networks:
        net_exists = await check_networks_exist(cfg, host_name, networks)
        missing_networks = [n for n, found in net_exists.items() if not found]

    # Check devices
    devices = parse_devices(cfg, service)
    missing_devices: list[str] = []
    if devices:
        dev_exists = await check_paths_exist(cfg, host_name, devices)
        missing_devices = [d for d, found in dev_exists.items() if not found]

    return PreflightResult(missing_paths, missing_networks, missing_devices)

async def _cleanup_and_rollback(
    cfg: Config,
    service: str,
    target_host: str,
    current_host: str,
    prefix: str,
    *,
    was_running: bool,
    raw: bool = False,
) -> None:
    """Clean up failed start and attempt rollback to old host if it was running."""
    err_console.print(
        f"{prefix} [yellow]![/] Cleaning up failed start on [magenta]{target_host}[/]"
    )
    await run_compose(cfg, service, "down", raw=raw)

    if not was_running:
        err_console.print(
            f"{prefix} [dim]Service was not running on [magenta]{current_host}[/], skipping rollback[/]"
        )
        return

    err_console.print(f"{prefix} [yellow]![/] Rolling back to [magenta]{current_host}[/]...")
    rollback_result = await run_compose_on_host(cfg, service, current_host, "up -d", raw=raw)
    if rollback_result.success:
        console.print(f"{prefix} [green]✓[/] Rollback succeeded on [magenta]{current_host}[/]")
    else:
        err_console.print(f"{prefix} [red]✗[/] Rollback failed - service is down")

def _report_preflight_failures(
    service: str,
    target_host: str,
    preflight: PreflightResult,
) -> None:
    """Report pre-flight check failures."""
    err_console.print(
        f"[cyan]\\[{service}][/] [red]✗[/] Cannot start on [magenta]{target_host}[/]:"
    )
    for path in preflight.missing_paths:
        err_console.print(f"  [red]✗[/] missing path: {path}")
    for net in preflight.missing_networks:
        err_console.print(f"  [red]✗[/] missing network: {net}")
    if preflight.missing_networks:
        err_console.print(f"  [dim]hint: cf init-network {target_host}[/]")
    for dev in preflight.missing_devices:
        err_console.print(f"  [red]✗[/] missing device: {dev}")

async def _up_multi_host_service(
    cfg: Config,
    service: str,
    prefix: str,
    *,
    raw: bool = False,
) -> list[CommandResult]:
    """Start a multi-host service on all configured hosts."""
    host_names = cfg.get_hosts(service)
    results: list[CommandResult] = []
    compose_path = cfg.get_compose_path(service)
    command = f"docker compose -f {compose_path} up -d"

    # Pre-flight checks on all hosts
    for host_name in host_names:
        preflight = await check_service_requirements(cfg, service, host_name)
        if not preflight.ok:
            _report_preflight_failures(service, host_name, preflight)
            results.append(
                CommandResult(service=f"{service}@{host_name}", exit_code=1, success=False)
            )
            return results

    # Start on all hosts
    hosts_str = ", ".join(f"[magenta]{h}[/]" for h in host_names)
    console.print(f"{prefix} Starting on {hosts_str}...")

    succeeded_hosts: list[str] = []
    for host_name in host_names:
        host = cfg.hosts[host_name]
        label = f"{service}@{host_name}"
        result = await run_command(host, command, label, stream=not raw, raw=raw)
        if raw:
            print()  # Ensure newline after raw output
        results.append(result)
        if result.success:
            succeeded_hosts.append(host_name)

    # Update state with hosts that succeeded (partial success is tracked)
    if succeeded_hosts:
        set_multi_host_service(cfg, service, succeeded_hosts)

    return results

async def _migrate_service(
    cfg: Config,
    service: str,
    current_host: str,
    target_host: str,
    prefix: str,
    *,
    raw: bool = False,
) -> CommandResult | None:
    """Migrate a service from current_host to target_host.

    Pre-pulls/builds images on target, then stops service on current host.
    Returns failure result if migration prep fails, None on success.
    """
    console.print(
        f"{prefix} Migrating from [magenta]{current_host}[/] → [magenta]{target_host}[/]..."
    )

    # Prepare images on target host before stopping old service to minimize downtime.
    # Pull handles image-based services; build handles Dockerfile-based services.
    # --ignore-buildable makes pull skip images that have build: defined.
    for cmd, label in [("pull --ignore-buildable", "Pull"), ("build", "Build")]:
        result = await _run_compose_step(cfg, service, cmd, raw=raw)
        if not result.success:
            err_console.print(
                f"{prefix} [red]✗[/] {label} failed on [magenta]{target_host}[/], "
                "leaving service on current host"
            )
            return result

    # Stop on current host
    down_result = await _run_compose_step(cfg, service, "down", raw=raw, host=current_host)
    return down_result if not down_result.success else None

async def _up_single_service(
    cfg: Config,
    service: str,
    prefix: str,
    *,
    raw: bool,
) -> CommandResult:
    """Start a single-host service with migration support."""
    target_host = cfg.get_hosts(service)[0]
    current_host = get_service_host(cfg, service)

    # Pre-flight check: verify paths, networks, and devices exist on target
    preflight = await check_service_requirements(cfg, service, target_host)
    if not preflight.ok:
        _report_preflight_failures(service, target_host, preflight)
        return CommandResult(service=service, exit_code=1, success=False)

    # If service is deployed elsewhere, migrate it
    did_migration = False
    was_running = False
    if current_host and current_host != target_host:
        if current_host in cfg.hosts:
            was_running = await check_service_running(cfg, service, current_host)
            failure = await _migrate_service(
                cfg, service, current_host, target_host, prefix, raw=raw
            )
            if failure:
                return failure
            did_migration = True
        else:
            err_console.print(
                f"{prefix} [yellow]![/] was on "
                f"[magenta]{current_host}[/] (not in config), skipping down"
            )

    # Start on target host
    console.print(f"{prefix} Starting on [magenta]{target_host}[/]...")
    up_result = await _run_compose_step(cfg, service, "up -d", raw=raw)

    # Update state on success, or rollback on failure
    if up_result.success:
        set_service_host(cfg, service, target_host)
    elif did_migration and current_host:
        await _cleanup_and_rollback(
            cfg,
            service,
            target_host,
            current_host,
            prefix,
            was_running=was_running,
            raw=raw,
        )

    return up_result

async def up_services(
@@ -99,136 +329,113 @@ async def up_services(
    results: list[CommandResult] = []
    total = len(services)

    for idx, service in enumerate(services, 1):
        prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{service}][/]"
        target_host = cfg.services[service]
        current_host = get_service_host(cfg, service)

        # Pre-flight check: verify paths and networks exist on target
        missing_paths, missing_networks = await preflight_check(cfg, service, target_host)
        if missing_paths or missing_networks:
            report_preflight_failures(service, target_host, missing_paths, missing_networks)
            results.append(CommandResult(service=service, exit_code=1, success=False))
            continue

        # If service is deployed elsewhere, migrate it
        if current_host and current_host != target_host:
            if current_host in cfg.hosts:
                console.print(
                    f"{prefix} Migrating from "
                    f"[magenta]{current_host}[/] → [magenta]{target_host}[/]..."
                )
                down_result = await run_compose_on_host(cfg, service, current_host, "down", raw=raw)
                if raw:
                    print()  # Ensure newline after raw output
                if not down_result.success:
                    results.append(down_result)
                    continue
            else:
                err_console.print(
                    f"{prefix} [yellow]![/] was on "
                    f"[magenta]{current_host}[/] (not in config), skipping down"
                )

        # Start on target host
        console.print(f"{prefix} Starting on [magenta]{target_host}[/]...")
        up_result = await run_compose(cfg, service, "up -d", raw=raw)
        if raw:
            print()  # Ensure newline after raw output (progress bars end with \r)
        results.append(up_result)

        # Update state on success
        if up_result.success:
            set_service_host(cfg, service, target_host)

    try:
        for idx, service in enumerate(services, 1):
            prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{service}][/]"
            if cfg.is_multi_host(service):
                results.extend(await _up_multi_host_service(cfg, service, prefix, raw=raw))
            else:
                results.append(await _up_single_service(cfg, service, prefix, raw=raw))
    except OperationInterruptedError:
        raise KeyboardInterrupt from None

    return results

async def discover_running_services(cfg: Config) -> dict[str, str]:
    """Discover which services are running on which hosts.

    Returns a dict mapping service names to host names for running services.
    """
    discovered: dict[str, str] = {}

    for service, assigned_host in cfg.services.items():
        # Check assigned host first (most common case)
        if await check_service_running(cfg, service, assigned_host):
            discovered[service] = assigned_host
            continue

        # Check other hosts in case service was migrated but state is stale
        for host_name in cfg.hosts:
            if host_name == assigned_host:
                continue
            if await check_service_running(cfg, service, host_name):
                discovered[service] = host_name
                break

    return discovered

async def check_host_compatibility(
    cfg: Config,
    service: str,
) -> dict[str, tuple[int, int, list[str]]]:
    """Check which hosts can run a service based on mount paths.
    """Check which hosts can run a service based on paths, networks, and devices.

    Returns dict of host_name -> (found_count, total_count, missing_paths).
    Returns dict of host_name -> (found_count, total_count, missing_items).
    """
    # Get total requirements count
    paths = get_service_paths(cfg, service)
    networks = parse_external_networks(cfg, service)
    devices = parse_devices(cfg, service)
    total = len(paths) + len(networks) + len(devices)

    results: dict[str, tuple[int, int, list[str]]] = {}

    for host_name in cfg.hosts:
        exists = await check_paths_exist(cfg, host_name, paths)
        found = sum(1 for v in exists.values() if v)
        missing = [p for p, v in exists.items() if not v]
        results[host_name] = (found, len(paths), missing)
        preflight = await check_service_requirements(cfg, service, host_name)
        all_missing = (
            preflight.missing_paths + preflight.missing_networks + preflight.missing_devices
        )
        found = total - len(all_missing)
        results[host_name] = (found, total, all_missing)

    return results

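A short hedged sketch of consuming the compatibility mapping; print_compatibility is a made-up helper, not part of the diff:

async def print_compatibility(cfg, service: str) -> None:  # hypothetical helper
    results = await check_host_compatibility(cfg, service)
    for host, (found, total, missing) in results.items():
        status = "ok" if found == total else "missing: " + ", ".join(missing)
        print(f"{host}: {found}/{total} ({status})")
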
async def check_mounts_on_configured_hosts(
    cfg: Config,
    services: list[str],
) -> list[tuple[str, str, str]]:
    """Check mount paths exist on configured hosts.

    Returns list of (service, host, missing_path) tuples.
    """
    missing: list[tuple[str, str, str]] = []

    for service in services:
        host_name = cfg.services[service]
        paths = get_service_paths(cfg, service)
        exists = await check_paths_exist(cfg, host_name, paths)

        for path, found in exists.items():
            if not found:
                missing.append((service, host_name, path))

    return missing


async def check_networks_on_configured_hosts(
    cfg: Config,
    services: list[str],
) -> list[tuple[str, str, str]]:
    """Check Docker networks exist on configured hosts.

    Returns list of (service, host, missing_network) tuples.
    """
    missing: list[tuple[str, str, str]] = []

    for service in services:
        host_name = cfg.services[service]
        networks = parse_external_networks(cfg, service)
        if not networks:
            continue
        exists = await check_networks_exist(cfg, host_name, networks)

        for net, found in exists.items():
            if not found:
                missing.append((service, host_name, net))

    return missing


async def stop_orphaned_services(cfg: Config) -> list[CommandResult]:
    """Stop orphaned services (in state but not in config).

    Runs docker compose down on each service on its tracked host(s).
    Only removes from state on successful stop.

    Returns list of CommandResults for each service@host.
    """
    orphaned = get_orphaned_services(cfg)
    if not orphaned:
        return []

    results: list[CommandResult] = []
    tasks: list[tuple[str, str, asyncio.Task[CommandResult]]] = []

    # Build list of (service, host, task) for all orphaned services
    for service, hosts in orphaned.items():
        host_list = hosts if isinstance(hosts, list) else [hosts]
        for host in host_list:
            # Skip hosts no longer in config
            if host not in cfg.hosts:
                console.print(
                    f"  [yellow]![/] {service}@{host}: host no longer in config, skipping"
                )
                results.append(
                    CommandResult(
                        service=f"{service}@{host}",
                        exit_code=1,
                        success=False,
                        stderr="host no longer in config",
                    )
                )
                continue
            coro = run_compose_on_host(cfg, service, host, "down")
            tasks.append((service, host, asyncio.create_task(coro)))

    # Run all down commands in parallel
    if tasks:
        for service, host, task in tasks:
            try:
                result = await task
                results.append(result)
                if result.success:
                    console.print(f"  [green]✓[/] {service}@{host}: stopped")
                else:
                    console.print(f"  [red]✗[/] {service}@{host}: {result.stderr or 'failed'}")
            except Exception as e:
                console.print(f"  [red]✗[/] {service}@{host}: {e}")
                results.append(
                    CommandResult(
                        service=f"{service}@{host}",
                        exit_code=1,
                        success=False,
                        stderr=str(e),
                    )
                )

    # Remove from state only for services where ALL hosts succeeded
    for service, hosts in orphaned.items():
        host_list = hosts if isinstance(hosts, list) else [hosts]
        all_succeeded = all(
            r.success
            for r in results
            if r.service.startswith(f"{service}@") or r.service == service
        )
        if all_succeeded:
            remove_service(cfg, service)

    return results

src/compose_farm/paths.py (Normal file, 33 lines)
@@ -0,0 +1,33 @@
"""Path utilities - lightweight module with no heavy dependencies."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def xdg_config_home() -> Path:
|
||||
"""Get XDG config directory, respecting XDG_CONFIG_HOME env var."""
|
||||
return Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config"))
|
||||
|
||||
|
||||
def default_config_path() -> Path:
|
||||
"""Get the default user config path."""
|
||||
return xdg_config_home() / "compose-farm" / "compose-farm.yaml"
|
||||
|
||||
|
||||
def config_search_paths() -> list[Path]:
|
||||
"""Get search paths for config files."""
|
||||
return [Path("compose-farm.yaml"), default_config_path()]
|
||||
|
||||
|
||||
def find_config_path() -> Path | None:
|
||||
"""Find the config file path, checking CF_CONFIG env var and search paths."""
|
||||
if env_path := os.environ.get("CF_CONFIG"):
|
||||
p = Path(env_path)
|
||||
if p.exists() and p.is_file():
|
||||
return p
|
||||
for p in config_search_paths():
|
||||
if p.exists() and p.is_file():
|
||||
return p
|
||||
return None
|
||||
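For reference, a small sketch of the resolution order these helpers implement; the /tmp path is invented for the example:

import os
from pathlib import Path

# CF_CONFIG wins when it points at an existing file.
Path("/tmp/my-farm.yaml").write_text("services: {}\n")  # hypothetical file
os.environ["CF_CONFIG"] = "/tmp/my-farm.yaml"
assert find_config_path() == Path("/tmp/my-farm.yaml")
# Without CF_CONFIG, ./compose-farm.yaml is tried first, then the XDG default.
del os.environ["CF_CONFIG"]
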
@@ -2,18 +2,22 @@

from __future__ import annotations

import contextlib
from typing import TYPE_CHECKING, Any

import yaml

if TYPE_CHECKING:
    from collections.abc import Generator

    from .config import Config


def load_state(config: Config) -> dict[str, str]:
def load_state(config: Config) -> dict[str, str | list[str]]:
    """Load the current deployment state.

    Returns a dict mapping service names to host names.
    Returns a dict mapping service names to host name(s).
    Multi-host services store a list of hosts.
    """
    state_path = config.get_state_path()
    if not state_path.exists():
@@ -22,43 +26,141 @@ def load_state(config: Config) -> dict[str, str]:
    with state_path.open() as f:
        data: dict[str, Any] = yaml.safe_load(f) or {}

    deployed: dict[str, str] = data.get("deployed", {})
    deployed: dict[str, str | list[str]] = data.get("deployed", {})
    return deployed


def save_state(config: Config, deployed: dict[str, str]) -> None:
def _sorted_dict(d: dict[str, str | list[str]]) -> dict[str, str | list[str]]:
    """Return a dictionary sorted by keys."""
    return dict(sorted(d.items(), key=lambda item: item[0]))


def save_state(config: Config, deployed: dict[str, str | list[str]]) -> None:
    """Save the deployment state."""
    state_path = config.get_state_path()
    with state_path.open("w") as f:
        yaml.safe_dump({"deployed": deployed}, f, sort_keys=False)
        yaml.safe_dump({"deployed": _sorted_dict(deployed)}, f, sort_keys=False)


@contextlib.contextmanager
def _modify_state(config: Config) -> Generator[dict[str, str | list[str]], None, None]:
    """Context manager to load, modify, and save state."""
    state = load_state(config)
    yield state
    save_state(config, state)

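A minimal sketch of the read-modify-write pattern _modify_state packages up; cfg and the service name are placeholders:

# Equivalent to set_service_host(cfg, "whoami", "nas"), spelled out:
with _modify_state(cfg) as state:  # load_state() on entry
    state["whoami"] = "nas"        # mutate the mapping in place
# exiting the block calls save_state(), which writes the sorted mapping back
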
def get_service_host(config: Config, service: str) -> str | None:
    """Get the host where a service is currently deployed."""
    """Get the host where a service is currently deployed.

    For multi-host services, returns the first host or None.
    """
    state = load_state(config)
    return state.get(service)
    value = state.get(service)
    if value is None:
        return None
    if isinstance(value, list):
        return value[0] if value else None
    return value


def set_service_host(config: Config, service: str, host: str) -> None:
    """Record that a service is deployed on a host."""
    state = load_state(config)
    state[service] = host
    save_state(config, state)
    with _modify_state(config) as state:
        state[service] = host


def set_multi_host_service(config: Config, service: str, hosts: list[str]) -> None:
    """Record that a multi-host service is deployed on multiple hosts."""
    with _modify_state(config) as state:
        state[service] = hosts

def remove_service(config: Config, service: str) -> None:
    """Remove a service from the state (after down)."""
    state = load_state(config)
    state.pop(service, None)
    save_state(config, state)
    with _modify_state(config) as state:
        state.pop(service, None)


def add_service_to_host(config: Config, service: str, host: str) -> None:
    """Add a specific host to a service's state.

    For multi-host services, adds the host to the list if not present.
    For single-host services, sets the host.
    """
    with _modify_state(config) as state:
        current = state.get(service)

        if config.is_multi_host(service):
            # Multi-host: add to list if not present
            if isinstance(current, list):
                if host not in current:
                    state[service] = [*current, host]
            else:
                state[service] = [host]
        else:
            # Single-host: just set it
            state[service] = host

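To make the list handling concrete, a hedged sketch of the transitions (together with remove_service_from_host, defined just below); svc is assumed to be declared multi-host in the config:

# state before: {"svc": ["h1"]}
add_service_to_host(cfg, "svc", "h2")        # -> {"svc": ["h1", "h2"]}
add_service_to_host(cfg, "svc", "h2")        # no change; host already present
remove_service_from_host(cfg, "svc", "h1")   # -> {"svc": ["h2"]}
remove_service_from_host(cfg, "svc", "h2")   # last host removed -> entry dropped
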
def remove_service_from_host(config: Config, service: str, host: str) -> None:
    """Remove a specific host from a service's state.

    For multi-host services, removes just that host from the list.
    For single-host services, removes the service entirely if host matches.
    """
    with _modify_state(config) as state:
        current = state.get(service)
        if current is None:
            return

        if isinstance(current, list):
            # Multi-host: remove this host from list
            remaining = [h for h in current if h != host]
            if remaining:
                state[service] = remaining
            else:
                state.pop(service, None)
        elif current == host:
            # Single-host: remove if matches
            state.pop(service, None)

def get_services_needing_migration(config: Config) -> list[str]:
    """Get services where current host differs from configured host."""
    state = load_state(config)
    """Get services where current host differs from configured host.

    Multi-host services are never considered for migration.
    """
    needs_migration = []
    for service, configured_host in config.services.items():
        current_host = state.get(service)
    for service in config.services:
        # Skip multi-host services
        if config.is_multi_host(service):
            continue

        configured_host = config.get_hosts(service)[0]
        current_host = get_service_host(config, service)
        if current_host and current_host != configured_host:
            needs_migration.append(service)
    return needs_migration


def get_orphaned_services(config: Config) -> dict[str, str | list[str]]:
    """Get services that are in state but not in config.

    These are services that were previously deployed but have been
    removed from the config file (e.g., commented out).

    Returns a dict mapping service name to host(s) where it's deployed.
    """
    state = load_state(config)
    return {service: hosts for service, hosts in state.items() if service not in config.services}


def get_services_not_in_state(config: Config) -> list[str]:
    """Get services that are in config but not in state.

    These are services that should be running but aren't tracked
    (e.g., newly added to config, or previously stopped as orphans).
    """
    state = load_state(config)
    return [service for service in config.services if service not in state]

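A toy example of how the two set differences line up; the service and host names are invented:

# config.services = {"traefik": ..., "whoami": ...}
# state on disk    = {"whoami": "h1", "legacy": "h2"}
#
# get_orphaned_services(cfg)     -> {"legacy": "h2"}  (in state, gone from config)
# get_services_not_in_state(cfg) -> ["traefik"]       (in config, never started)
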
@@ -11,6 +11,8 @@ from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any

import yaml

from .compose import (
    PortMapping,
    get_ports_for_service,
@@ -24,7 +26,7 @@ if TYPE_CHECKING:


@dataclass
class TraefikServiceSource:
class _TraefikServiceSource:
    """Source information to build an upstream for a Traefik service."""

    traefik_service: str
@@ -36,9 +38,9 @@ class TraefikServiceSource:
    scheme: str | None = None


LIST_VALUE_KEYS = {"entrypoints", "middlewares"}
MIN_ROUTER_PARTS = 3
MIN_SERVICE_LABEL_PARTS = 6
_LIST_VALUE_KEYS = {"entrypoints", "middlewares"}
_MIN_ROUTER_PARTS = 3
_MIN_SERVICE_LABEL_PARTS = 6

def _parse_value(key: str, raw_value: str) -> Any:
@@ -49,7 +51,7 @@ def _parse_value(key: str, raw_value: str) -> Any:
    if value.isdigit():
        return int(value)
    last_segment = key.rsplit(".", 1)[-1]
    if last_segment in LIST_VALUE_KEYS:
    if last_segment in _LIST_VALUE_KEYS:
        parts = [v.strip() for v in value.split(",")] if "," in value else [value]
        return [part for part in parts if part]
    return value
@@ -100,7 +102,7 @@ def _insert(root: dict[str, Any], key_path: list[str], value: Any) -> None:  # n
    current = container_list[list_index]


def _resolve_published_port(source: TraefikServiceSource) -> tuple[int | None, str | None]:
def _resolve_published_port(source: _TraefikServiceSource) -> tuple[int | None, str | None]:
    """Resolve host-published port for a Traefik service.

    Returns (published_port, warning_message).
@@ -138,7 +140,7 @@ def _resolve_published_port(source: TraefikServiceSource) -> tuple[int | None, s

def _finalize_http_services(
    dynamic: dict[str, Any],
    sources: dict[str, TraefikServiceSource],
    sources: dict[str, _TraefikServiceSource],
    warnings: list[str],
) -> None:
    for traefik_service, source in sources.items():
@@ -209,7 +211,7 @@ def _process_router_label(
    if not key_without_prefix.startswith("http.routers."):
        return
    router_parts = key_without_prefix.split(".")
    if len(router_parts) < MIN_ROUTER_PARTS:
    if len(router_parts) < _MIN_ROUTER_PARTS:
        return
    router_name = router_parts[2]
    router_remainder = router_parts[3:]
@@ -227,12 +229,12 @@ def _process_service_label(
    host_address: str,
    ports: list[PortMapping],
    service_names: set[str],
    sources: dict[str, TraefikServiceSource],
    sources: dict[str, _TraefikServiceSource],
) -> None:
    if not key_without_prefix.startswith("http.services."):
        return
    parts = key_without_prefix.split(".")
    if len(parts) < MIN_SERVICE_LABEL_PARTS:
    if len(parts) < _MIN_SERVICE_LABEL_PARTS:
        return
    traefik_service = parts[2]
    service_names.add(traefik_service)
@@ -240,7 +242,7 @@ def _process_service_label(

    source = sources.get(traefik_service)
    if source is None:
        source = TraefikServiceSource(
        source = _TraefikServiceSource(
            traefik_service=traefik_service,
            stack=stack,
            compose_service=compose_service,
@@ -265,7 +267,7 @@ def _process_service_labels(
    host_address: str,
    env: dict[str, str],
    dynamic: dict[str, Any],
    sources: dict[str, TraefikServiceSource],
    sources: dict[str, _TraefikServiceSource],
    warnings: list[str],
) -> None:
    labels = normalize_labels(definition.get("labels"), env)
@@ -326,7 +328,7 @@ def generate_traefik_config(
    """
    dynamic: dict[str, Any] = {}
    warnings: list[str] = []
    sources: dict[str, TraefikServiceSource] = {}
    sources: dict[str, _TraefikServiceSource] = {}

    # Determine Traefik's host from service assignment
    traefik_host = None
@@ -362,3 +364,22 @@ def generate_traefik_config(

    _finalize_http_services(dynamic, sources, warnings)
    return dynamic, warnings


_TRAEFIK_CONFIG_HEADER = """\
# Auto-generated by compose-farm
# https://github.com/basnijholt/compose-farm
#
# This file routes traffic to services running on hosts other than Traefik's host.
# Services on Traefik's host use the Docker provider directly.
#
# Regenerate with: compose-farm traefik-file --all -o <this-file>
# Or configure traefik_file in compose-farm.yaml for automatic updates.

"""


def render_traefik_config(dynamic: dict[str, Any]) -> str:
    """Render Traefik dynamic config as YAML with a header comment."""
    body = yaml.safe_dump(dynamic, sort_keys=False)
    return _TRAEFIK_CONFIG_HEADER + body

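To see what render_traefik_config emits, a tiny hedged example with a made-up dynamic config:

dynamic = {
    "http": {
        "routers": {"whoami": {"rule": "Host(`whoami.example.com`)", "service": "whoami"}},
        "services": {"whoami": {"loadBalancer": {"servers": [{"url": "http://10.0.0.2:8080"}]}}},
    }
}
print(render_traefik_config(dynamic))  # header comment followed by the YAML body
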
src/compose_farm/web/__init__.py (Normal file, 7 lines)
@@ -0,0 +1,7 @@
"""Compose Farm Web UI."""

from __future__ import annotations

from compose_farm.web.app import create_app

__all__ = ["create_app"]

src/compose_farm/web/app.py (Normal file, 51 lines)
@@ -0,0 +1,51 @@
"""FastAPI application setup."""

from __future__ import annotations

import sys
from contextlib import asynccontextmanager, suppress
from typing import TYPE_CHECKING

from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from pydantic import ValidationError

from compose_farm.web.deps import STATIC_DIR, get_config
from compose_farm.web.routes import actions, api, pages

if TYPE_CHECKING:
    from collections.abc import AsyncGenerator


@asynccontextmanager
async def lifespan(_app: FastAPI) -> AsyncGenerator[None, None]:
    """Application lifespan handler."""
    # Startup: pre-load config (ignore errors - handled per-request)
    with suppress(ValidationError, FileNotFoundError):
        get_config()
    yield
    # Shutdown: nothing to clean up


def create_app() -> FastAPI:
    """Create and configure the FastAPI application."""
    app = FastAPI(
        title="Compose Farm",
        description="Web UI for managing Docker Compose services across multiple hosts",
        lifespan=lifespan,
    )

    # Mount static files
    app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")

    app.include_router(pages.router)
    app.include_router(api.router, prefix="/api")
    app.include_router(actions.router, prefix="/api")

    # WebSocket routes use Unix-only modules (fcntl, pty)
    if sys.platform != "win32":
        from compose_farm.web.ws import router as ws_router  # noqa: PLC0415

        app.include_router(ws_router)

    return app

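The create_app factory pairs naturally with any ASGI server; a minimal sketch (host and port are arbitrary choices, not repo defaults):

import uvicorn

from compose_farm.web import create_app

if __name__ == "__main__":
    uvicorn.run(create_app(), host="127.0.0.1", port=8000)
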
src/compose_farm/web/deps.py (Normal file, 32 lines)
@@ -0,0 +1,32 @@
"""Shared dependencies for web modules.

This module contains shared config and template accessors to avoid circular imports
between app.py and route modules.
"""

from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

from fastapi.templating import Jinja2Templates

if TYPE_CHECKING:
    from compose_farm.config import Config

# Paths
WEB_DIR = Path(__file__).parent
TEMPLATES_DIR = WEB_DIR / "templates"
STATIC_DIR = WEB_DIR / "static"


def get_config() -> Config:
    """Load config from disk (always fresh)."""
    from compose_farm.config import load_config  # noqa: PLC0415

    return load_config()


def get_templates() -> Jinja2Templates:
    """Get Jinja2 templates instance."""
    return Jinja2Templates(directory=str(TEMPLATES_DIR))

src/compose_farm/web/routes/__init__.py (Normal file, 5 lines)
@@ -0,0 +1,5 @@
"""Web routes."""

from compose_farm.web.routes import actions, api, pages

__all__ = ["actions", "api", "pages"]

src/compose_farm/web/routes/actions.py (Normal file, 95 lines)
@@ -0,0 +1,95 @@
"""Action routes for service operations."""

from __future__ import annotations

import asyncio
import uuid
from typing import TYPE_CHECKING, Any

from fastapi import APIRouter, HTTPException

if TYPE_CHECKING:
    from collections.abc import Callable, Coroutine

from compose_farm.web.deps import get_config
from compose_farm.web.streaming import run_cli_streaming, run_compose_streaming, tasks

router = APIRouter(tags=["actions"])

# Store task references to prevent garbage collection
_background_tasks: set[asyncio.Task[None]] = set()


def _start_task(coro_factory: Callable[[str], Coroutine[Any, Any, None]]) -> str:
    """Create a task, register it, and return the task_id."""
    task_id = str(uuid.uuid4())
    tasks[task_id] = {"status": "running", "output": []}

    task: asyncio.Task[None] = asyncio.create_task(coro_factory(task_id))
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)

    return task_id


async def _run_service_action(name: str, command: str) -> dict[str, Any]:
    """Run a compose command for a service."""
    config = get_config()

    if name not in config.services:
        raise HTTPException(status_code=404, detail=f"Service '{name}' not found")

    task_id = _start_task(lambda tid: run_compose_streaming(config, name, command, tid))
    return {"task_id": task_id, "service": name, "command": command}


@router.post("/service/{name}/up")
async def up_service(name: str) -> dict[str, Any]:
    """Start a service."""
    return await _run_service_action(name, "up")


@router.post("/service/{name}/down")
async def down_service(name: str) -> dict[str, Any]:
    """Stop a service."""
    return await _run_service_action(name, "down")


@router.post("/service/{name}/restart")
async def restart_service(name: str) -> dict[str, Any]:
    """Restart a service (down + up)."""
    return await _run_service_action(name, "restart")


@router.post("/service/{name}/pull")
async def pull_service(name: str) -> dict[str, Any]:
    """Pull latest images for a service."""
    return await _run_service_action(name, "pull")


@router.post("/service/{name}/update")
async def update_service(name: str) -> dict[str, Any]:
    """Update a service (pull + build + down + up)."""
    return await _run_service_action(name, "update")


@router.post("/service/{name}/logs")
async def logs_service(name: str) -> dict[str, Any]:
    """Show logs for a service."""
    return await _run_service_action(name, "logs")


@router.post("/apply")
async def apply_all() -> dict[str, Any]:
    """Run cf apply to reconcile all services."""
    config = get_config()
    task_id = _start_task(lambda tid: run_cli_streaming(config, ["apply"], tid))
    return {"task_id": task_id, "command": "apply"}


@router.post("/refresh")
async def refresh_state() -> dict[str, Any]:
    """Refresh state from running services."""
    config = get_config()
    task_id = _start_task(lambda tid: run_cli_streaming(config, ["refresh"], tid))
    return {"task_id": task_id, "command": "refresh"}

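Each action endpoint returns a task id instead of blocking; a hedged client-side sketch (base URL and service name are placeholders):

import requests

resp = requests.post("http://localhost:8000/api/service/whoami/up")
task_id = resp.json()["task_id"]
# Feed task_id to the /ws/terminal/{task_id} WebSocket to stream the output.
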
src/compose_farm/web/routes/api.py (Normal file, 212 lines)
@@ -0,0 +1,212 @@
"""JSON API routes."""

from __future__ import annotations

import contextlib
import json
from typing import TYPE_CHECKING, Annotated, Any

import yaml

if TYPE_CHECKING:
    from pathlib import Path

from fastapi import APIRouter, Body, HTTPException
from fastapi.responses import HTMLResponse

from compose_farm.executor import run_compose_on_host
from compose_farm.paths import find_config_path
from compose_farm.state import load_state
from compose_farm.web.deps import get_config, get_templates

router = APIRouter(tags=["api"])


def _validate_yaml(content: str) -> None:
    """Validate YAML content, raise HTTPException on error."""
    try:
        yaml.safe_load(content)
    except yaml.YAMLError as e:
        raise HTTPException(status_code=400, detail=f"Invalid YAML: {e}") from e


def _get_service_compose_path(name: str) -> Path:
    """Get compose path for service, raising HTTPException if not found."""
    config = get_config()

    if name not in config.services:
        raise HTTPException(status_code=404, detail=f"Service '{name}' not found")

    compose_path = config.get_compose_path(name)
    if not compose_path:
        raise HTTPException(status_code=404, detail="Compose file not found")

    return compose_path


def _get_compose_services(config: Any, service: str, hosts: list[str]) -> list[dict[str, Any]]:
    """Get container info from compose file (fast, local read).

    Returns one entry per container per host for multi-host services.
    """
    compose_path = config.get_compose_path(service)
    if not compose_path or not compose_path.exists():
        return []

    compose_data = yaml.safe_load(compose_path.read_text()) or {}
    raw_services = compose_data.get("services", {})
    if not isinstance(raw_services, dict):
        return []

    # Project name is the directory name (docker compose default)
    project_name = compose_path.parent.name

    containers = []
    for host in hosts:
        for svc_name, svc_def in raw_services.items():
            # Use container_name if set, otherwise default to {project}-{service}-1
            if isinstance(svc_def, dict) and svc_def.get("container_name"):
                container_name = svc_def["container_name"]
            else:
                container_name = f"{project_name}-{svc_name}-1"
            containers.append(
                {
                    "Name": container_name,
                    "Service": svc_name,
                    "Host": host,
                    "State": "unknown",  # Status requires Docker query
                }
            )
    return containers

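A quick sketch of the naming rule _get_compose_services relies on (docker compose's default {project}-{service}-1 unless container_name overrides it); the stack layout is invented:

# compose file at /opt/stacks/media/docker-compose.yml -> project name "media"
#   services:
#     plex: {}                          -> container "media-plex-1"
#     db:   {container_name: mediadb}   -> container "mediadb"
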
async def _get_container_states(
    config: Any, service: str, containers: list[dict[str, Any]]
) -> list[dict[str, Any]]:
    """Query Docker for actual container states on a single host."""
    if not containers:
        return containers

    # All containers should be on the same host
    host_name = containers[0]["Host"]

    result = await run_compose_on_host(config, service, host_name, "ps --format json", stream=False)
    if not result.success:
        return containers

    # Build state map
    state_map: dict[str, str] = {}
    for line in result.stdout.strip().split("\n"):
        if line.strip():
            with contextlib.suppress(json.JSONDecodeError):
                data = json.loads(line)
                state_map[data.get("Name", "")] = data.get("State", "unknown")

    # Update container states
    for c in containers:
        if c["Name"] in state_map:
            c["State"] = state_map[c["Name"]]

    return containers


def _render_containers(
    service: str, host: str, containers: list[dict[str, Any]], *, show_header: bool = False
) -> str:
    """Render containers HTML using Jinja template."""
    templates = get_templates()
    template = templates.env.get_template("partials/containers.html")
    module = template.make_module()
    result: str = module.host_containers(service, host, containers, show_header=show_header)
    return result


@router.get("/service/{name}/containers", response_class=HTMLResponse)
async def get_containers(name: str, host: str | None = None) -> HTMLResponse:
    """Get containers for a service as HTML buttons.

    If host is specified, queries Docker for that host's status.
    Otherwise returns all hosts with loading spinners that auto-fetch.
    """
    config = get_config()

    if name not in config.services:
        raise HTTPException(status_code=404, detail=f"Service '{name}' not found")

    # Get hosts where service is running from state
    state = load_state(config)
    current_hosts = state.get(name)
    if not current_hosts:
        return HTMLResponse('<span class="text-base-content/60">Service not running</span>')

    all_hosts = current_hosts if isinstance(current_hosts, list) else [current_hosts]

    # If host specified, return just that host's containers with status
    if host:
        if host not in all_hosts:
            return HTMLResponse(f'<span class="text-error">Host {host} not found</span>')

        containers = _get_compose_services(config, name, [host])
        containers = await _get_container_states(config, name, containers)
        return HTMLResponse(_render_containers(name, host, containers))

    # Initial load: return all hosts with loading spinners, each fetches its own status
    html_parts = []
    is_multi_host = len(all_hosts) > 1

    for h in all_hosts:
        host_id = f"containers-{name}-{h}".replace(".", "-")
        containers = _get_compose_services(config, name, [h])

        if is_multi_host:
            html_parts.append(f'<div class="font-semibold text-sm mt-3 mb-1">{h}</div>')

        # Container for this host that auto-fetches its own status
        html_parts.append(f"""
            <div id="{host_id}"
                 hx-get="/api/service/{name}/containers?host={h}"
                 hx-trigger="load"
                 hx-target="this"
                 hx-select="unset"
                 hx-swap="innerHTML">
                {_render_containers(name, h, containers)}
            </div>
        """)

    return HTMLResponse("".join(html_parts))


@router.put("/service/{name}/compose")
async def save_compose(
    name: str, content: Annotated[str, Body(media_type="text/plain")]
) -> dict[str, Any]:
    """Save compose file content."""
    compose_path = _get_service_compose_path(name)
    _validate_yaml(content)
    compose_path.write_text(content)
    return {"success": True, "message": "Compose file saved"}


@router.put("/service/{name}/env")
async def save_env(
    name: str, content: Annotated[str, Body(media_type="text/plain")]
) -> dict[str, Any]:
    """Save .env file content."""
    env_path = _get_service_compose_path(name).parent / ".env"
    env_path.write_text(content)
    return {"success": True, "message": ".env file saved"}


@router.put("/config")
async def save_config(
    content: Annotated[str, Body(media_type="text/plain")],
) -> dict[str, Any]:
    """Save compose-farm.yaml config file."""
    config_path = find_config_path()
    if not config_path:
        raise HTTPException(status_code=404, detail="Config file not found")

    _validate_yaml(content)
    config_path.write_text(content)

    return {"success": True, "message": "Config saved"}

src/compose_farm/web/routes/pages.py (Normal file, 267 lines)
@@ -0,0 +1,267 @@
"""HTML page routes."""

from __future__ import annotations

import yaml
from fastapi import APIRouter, Request
from fastapi.responses import HTMLResponse
from pydantic import ValidationError

from compose_farm.paths import find_config_path
from compose_farm.state import (
    get_orphaned_services,
    get_service_host,
    get_services_needing_migration,
    get_services_not_in_state,
    load_state,
)
from compose_farm.web.deps import get_config, get_templates

router = APIRouter()


@router.get("/", response_class=HTMLResponse)
async def index(request: Request) -> HTMLResponse:
    """Dashboard page - combined view of all cluster info."""
    templates = get_templates()

    # Try to load config, handle errors gracefully
    config_error = None
    try:
        config = get_config()
    except (ValidationError, FileNotFoundError) as e:
        # Extract error message
        if isinstance(e, ValidationError):
            config_error = "; ".join(err.get("msg", str(err)) for err in e.errors())
        else:
            config_error = str(e)

        # Read raw config content for the editor
        config_path = find_config_path()
        config_content = config_path.read_text() if config_path else ""

        return templates.TemplateResponse(
            "index.html",
            {
                "request": request,
                "config_error": config_error,
                "hosts": {},
                "services": {},
                "config_content": config_content,
                "state_content": "",
                "running_count": 0,
                "stopped_count": 0,
                "orphaned": [],
                "migrations": [],
                "not_started": [],
                "services_by_host": {},
            },
        )

    # Get state
    deployed = load_state(config)

    # Stats
    running_count = len(deployed)
    stopped_count = len(config.services) - running_count

    # Pending operations
    orphaned = get_orphaned_services(config)
    migrations = get_services_needing_migration(config)
    not_started = get_services_not_in_state(config)

    # Group services by host
    services_by_host: dict[str, list[str]] = {}
    for svc, host in deployed.items():
        if isinstance(host, list):
            for h in host:
                services_by_host.setdefault(h, []).append(svc)
        else:
            services_by_host.setdefault(host, []).append(svc)

    # Config file content
    config_content = ""
    if config.config_path and config.config_path.exists():
        config_content = config.config_path.read_text()

    # State file content
    state_content = yaml.dump({"deployed": deployed}, default_flow_style=False, sort_keys=False)

    return templates.TemplateResponse(
        "index.html",
        {
            "request": request,
            "config_error": None,
            # Config data
            "hosts": config.hosts,
            "services": config.services,
            "config_content": config_content,
            # State data
            "state_content": state_content,
            # Stats
            "running_count": running_count,
            "stopped_count": stopped_count,
            # Pending operations
            "orphaned": orphaned,
            "migrations": migrations,
            "not_started": not_started,
            # Services by host
            "services_by_host": services_by_host,
        },
    )

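The grouping loop above inverts the state mapping; a small hedged example with invented data:

deployed = {"whoami": "h1", "dns": ["h1", "h2"]}
services_by_host: dict[str, list[str]] = {}
for svc, host in deployed.items():
    for h in (host if isinstance(host, list) else [host]):
        services_by_host.setdefault(h, []).append(svc)
# -> {"h1": ["whoami", "dns"], "h2": ["dns"]}
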
@router.get("/service/{name}", response_class=HTMLResponse)
|
||||
async def service_detail(request: Request, name: str) -> HTMLResponse:
|
||||
"""Service detail page."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
# Get compose file content
|
||||
compose_path = config.get_compose_path(name)
|
||||
compose_content = ""
|
||||
if compose_path and compose_path.exists():
|
||||
compose_content = compose_path.read_text()
|
||||
|
||||
# Get .env file content
|
||||
env_content = ""
|
||||
env_path = None
|
||||
if compose_path:
|
||||
env_path = compose_path.parent / ".env"
|
||||
if env_path.exists():
|
||||
env_content = env_path.read_text()
|
||||
|
||||
# Get host info
|
||||
hosts = config.get_hosts(name)
|
||||
|
||||
# Get state
|
||||
current_host = get_service_host(config, name)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"service.html",
|
||||
{
|
||||
"request": request,
|
||||
"name": name,
|
||||
"hosts": hosts,
|
||||
"current_host": current_host,
|
||||
"compose_content": compose_content,
|
||||
"compose_path": str(compose_path) if compose_path else None,
|
||||
"env_content": env_content,
|
||||
"env_path": str(env_path) if env_path else None,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/sidebar", response_class=HTMLResponse)
|
||||
async def sidebar_partial(request: Request) -> HTMLResponse:
|
||||
"""Sidebar service list partial."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
state = load_state(config)
|
||||
|
||||
# Build service -> host mapping (empty string for multi-host services)
|
||||
service_hosts = {
|
||||
svc: "" if host_val == "all" or isinstance(host_val, list) else host_val
|
||||
for svc, host_val in config.services.items()
|
||||
}
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/sidebar.html",
|
||||
{
|
||||
"request": request,
|
||||
"services": sorted(config.services.keys()),
|
||||
"service_hosts": service_hosts,
|
||||
"hosts": sorted(config.hosts.keys()),
|
||||
"state": state,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/config-error", response_class=HTMLResponse)
|
||||
async def config_error_partial(request: Request) -> HTMLResponse:
|
||||
"""Config error banner partial."""
|
||||
templates = get_templates()
|
||||
try:
|
||||
get_config()
|
||||
return HTMLResponse("") # No error
|
||||
except (ValidationError, FileNotFoundError) as e:
|
||||
if isinstance(e, ValidationError):
|
||||
error = "; ".join(err.get("msg", str(err)) for err in e.errors())
|
||||
else:
|
||||
error = str(e)
|
||||
return templates.TemplateResponse(
|
||||
"partials/config_error.html", {"request": request, "config_error": error}
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/stats", response_class=HTMLResponse)
|
||||
async def stats_partial(request: Request) -> HTMLResponse:
|
||||
"""Stats cards partial."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
deployed = load_state(config)
|
||||
running_count = len(deployed)
|
||||
stopped_count = len(config.services) - running_count
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/stats.html",
|
||||
{
|
||||
"request": request,
|
||||
"hosts": config.hosts,
|
||||
"services": config.services,
|
||||
"running_count": running_count,
|
||||
"stopped_count": stopped_count,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/pending", response_class=HTMLResponse)
|
||||
async def pending_partial(request: Request, expanded: bool = True) -> HTMLResponse:
|
||||
"""Pending operations partial."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
orphaned = get_orphaned_services(config)
|
||||
migrations = get_services_needing_migration(config)
|
||||
not_started = get_services_not_in_state(config)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/pending.html",
|
||||
{
|
||||
"request": request,
|
||||
"orphaned": orphaned,
|
||||
"migrations": migrations,
|
||||
"not_started": not_started,
|
||||
"expanded": expanded,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/services-by-host", response_class=HTMLResponse)
|
||||
async def services_by_host_partial(request: Request, expanded: bool = True) -> HTMLResponse:
|
||||
"""Services by host partial."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
deployed = load_state(config)
|
||||
|
||||
# Group services by host
|
||||
services_by_host: dict[str, list[str]] = {}
|
||||
for svc, host in deployed.items():
|
||||
if isinstance(host, list):
|
||||
for h in host:
|
||||
services_by_host.setdefault(h, []).append(svc)
|
||||
else:
|
||||
services_by_host.setdefault(host, []).append(svc)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/services_by_host.html",
|
||||
{
|
||||
"request": request,
|
||||
"hosts": config.hosts,
|
||||
"services_by_host": services_by_host,
|
||||
"expanded": expanded,
|
||||
},
|
||||
)
|
||||
src/compose_farm/web/static/app.css (Normal file, 55 lines)
@@ -0,0 +1,55 @@
/* Editors (Monaco) - wrapper makes it resizable */
.editor-wrapper {
    resize: vertical;
    overflow: hidden;
    min-height: 150px;
}

.editor-wrapper .yaml-editor,
.editor-wrapper .env-editor,
.editor-wrapper .yaml-viewer {
    height: 100%;
    border: 1px solid oklch(var(--bc) / 0.2);
    border-radius: 0.5rem;
}

.editor-wrapper.yaml-wrapper { height: 400px; }
.editor-wrapper.env-wrapper { height: 250px; }
.editor-wrapper.viewer-wrapper { height: 300px; }

/* Terminal - no custom CSS needed, using h-full class in HTML */

/* Prevent save button resize when text changes */
#save-btn, #save-config-btn {
    min-width: 5rem;
}

/* Rainbow hover effect for headers */
.rainbow-hover {
    transition: color 0.3s;
}

.rainbow-hover:hover {
    background: linear-gradient(
        90deg,
        #e07070,
        #e0a070,
        #d0d070,
        #70c080,
        #7090d0,
        #9080b0,
        #b080a0,
        #e07070
    );
    background-size: 16em 100%;
    background-clip: text;
    -webkit-background-clip: text;
    color: transparent;
    animation: rainbow 4s linear infinite;
}

@keyframes rainbow {
    to {
        background-position: 16em center;
    }
}

546
src/compose_farm/web/static/app.js
Normal file
546
src/compose_farm/web/static/app.js
Normal file
@@ -0,0 +1,546 @@
/**
 * Compose Farm Web UI JavaScript
 */

// ANSI escape codes for terminal output
const ANSI = {
    RED: '\x1b[31m',
    GREEN: '\x1b[32m',
    DIM: '\x1b[2m',
    RESET: '\x1b[0m',
    CRLF: '\r\n'
};

// Store active terminals and editors
const terminals = {};
const editors = {};
let monacoLoaded = false;
let monacoLoading = false;

// Terminal color theme (dark mode matching PicoCSS)
const TERMINAL_THEME = {
    background: '#1a1a2e',
    foreground: '#e4e4e7',
    cursor: '#e4e4e7',
    cursorAccent: '#1a1a2e',
    black: '#18181b',
    red: '#ef4444',
    green: '#22c55e',
    yellow: '#eab308',
    blue: '#3b82f6',
    magenta: '#a855f7',
    cyan: '#06b6d4',
    white: '#e4e4e7',
    brightBlack: '#52525b',
    brightRed: '#f87171',
    brightGreen: '#4ade80',
    brightYellow: '#facc15',
    brightBlue: '#60a5fa',
    brightMagenta: '#c084fc',
    brightCyan: '#22d3ee',
    brightWhite: '#fafafa'
};

/**
 * Create a terminal with fit addon and resize observer
 * @param {HTMLElement} container - Container element
 * @param {object} extraOptions - Additional terminal options
 * @param {function} onResize - Optional callback called with (cols, rows) after resize
 * @returns {{term: Terminal, fitAddon: FitAddon}}
 */
function createTerminal(container, extraOptions = {}, onResize = null) {
    container.innerHTML = '';

    const term = new Terminal({
        convertEol: true,
        theme: TERMINAL_THEME,
        fontSize: 13,
        fontFamily: 'Monaco, Menlo, "Ubuntu Mono", monospace',
        scrollback: 5000,
        ...extraOptions
    });

    const fitAddon = new FitAddon.FitAddon();
    term.loadAddon(fitAddon);
    term.open(container);
    fitAddon.fit();

    const handleResize = () => {
        fitAddon.fit();
        if (onResize) {
            onResize(term.cols, term.rows);
        }
    };

    window.addEventListener('resize', handleResize);
    new ResizeObserver(handleResize).observe(container);

    return { term, fitAddon };
}

/**
 * Create WebSocket connection with standard handlers
 * @param {string} path - WebSocket path
 * @returns {WebSocket}
 */
function createWebSocket(path) {
    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
    return new WebSocket(`${protocol}//${window.location.host}${path}`);
}

/**
 * Initialize a terminal and connect to WebSocket for streaming
 */
function initTerminal(elementId, taskId) {
    const container = document.getElementById(elementId);
    if (!container) {
        console.error('Terminal container not found:', elementId);
        return;
    }

    const { term, fitAddon } = createTerminal(container);
    const ws = createWebSocket(`/ws/terminal/${taskId}`);

    ws.onopen = () => {
        term.write(`${ANSI.DIM}[Connected]${ANSI.RESET}${ANSI.CRLF}`);
        setTerminalLoading(true);
    };
    ws.onmessage = (event) => term.write(event.data);
    ws.onclose = () => setTerminalLoading(false);
    ws.onerror = (error) => {
        term.write(`${ANSI.RED}[WebSocket Error]${ANSI.RESET}${ANSI.CRLF}`);
        console.error('WebSocket error:', error);
        setTerminalLoading(false);
    };

    terminals[taskId] = { term, ws, fitAddon };
    return { term, ws };
}

window.initTerminal = initTerminal;

/**
 * Initialize an interactive exec terminal
 */
let execTerminal = null;
let execWs = null;

function initExecTerminal(service, container, host) {
    const containerEl = document.getElementById('exec-terminal-container');
    const terminalEl = document.getElementById('exec-terminal');

    if (!containerEl || !terminalEl) {
        console.error('Exec terminal elements not found');
        return;
    }

    containerEl.classList.remove('hidden');

    // Clean up existing
    if (execWs) { execWs.close(); execWs = null; }
    if (execTerminal) { execTerminal.dispose(); execTerminal = null; }

    // Create WebSocket first so resize callback can use it
    execWs = createWebSocket(`/ws/exec/${service}/${container}/${host}`);

    // Resize callback sends size to WebSocket
    const sendSize = (cols, rows) => {
        if (execWs && execWs.readyState === WebSocket.OPEN) {
            execWs.send(JSON.stringify({ type: 'resize', cols, rows }));
        }
    };

    const { term } = createTerminal(terminalEl, { cursorBlink: true }, sendSize);
    execTerminal = term;

    execWs.onopen = () => { sendSize(term.cols, term.rows); term.focus(); };
    execWs.onmessage = (event) => term.write(event.data);
    execWs.onclose = () => term.write(`${ANSI.CRLF}${ANSI.DIM}[Connection closed]${ANSI.RESET}${ANSI.CRLF}`);
    execWs.onerror = (error) => {
        term.write(`${ANSI.RED}[WebSocket Error]${ANSI.RESET}${ANSI.CRLF}`);
        console.error('Exec WebSocket error:', error);
    };

    term.onData((data) => {
        if (execWs && execWs.readyState === WebSocket.OPEN) {
            execWs.send(data);
        }
    });
}

window.initExecTerminal = initExecTerminal;

/**
 * Refresh dashboard partials while preserving collapse states
 */
function refreshDashboard() {
    const isExpanded = (id) => document.getElementById(id)?.checked ?? true;
    htmx.ajax('GET', '/partials/sidebar', {target: '#sidebar nav', swap: 'innerHTML'});
    htmx.ajax('GET', '/partials/stats', {target: '#stats-cards', swap: 'outerHTML'});
    htmx.ajax('GET', `/partials/pending?expanded=${isExpanded('pending-collapse')}`, {target: '#pending-operations', swap: 'outerHTML'});
    htmx.ajax('GET', `/partials/services-by-host?expanded=${isExpanded('services-by-host-collapse')}`, {target: '#services-by-host', swap: 'outerHTML'});
    htmx.ajax('GET', '/partials/config-error', {target: '#config-error', swap: 'innerHTML'});
}

/**
 * Load Monaco editor dynamically (only once)
 */
function loadMonaco(callback) {
    if (monacoLoaded) {
        callback();
        return;
    }

    if (monacoLoading) {
        // Wait for it to load
        const checkInterval = setInterval(() => {
            if (monacoLoaded) {
                clearInterval(checkInterval);
                callback();
            }
        }, 100);
        return;
    }

    monacoLoading = true;

    // Load the Monaco loader script
    const script = document.createElement('script');
    script.src = 'https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/loader.js';
    script.onload = function() {
        require.config({ paths: { vs: 'https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs' }});
        require(['vs/editor/editor.main'], function() {
            monacoLoaded = true;
            monacoLoading = false;
            callback();
        });
    };
    document.head.appendChild(script);
}

/**
 * Create a Monaco editor instance
 * @param {HTMLElement} container - Container element
 * @param {string} content - Initial content
 * @param {string} language - Editor language (yaml, plaintext, etc.)
 * @param {boolean} readonly - Whether editor is read-only
 * @returns {object} Monaco editor instance
 */
function createEditor(container, content, language, readonly = false) {
    const options = {
        value: content,
        language: language,
        theme: 'vs-dark',
        minimap: { enabled: false },
        automaticLayout: true,
        scrollBeyondLastLine: false,
        fontSize: 14,
        lineNumbers: 'on',
        wordWrap: 'on'
    };

    if (readonly) {
        options.readOnly = true;
        options.domReadOnly = true;
    }

    const editor = monaco.editor.create(container, options);

    // Add Command+S / Ctrl+S handler for editable editors
    if (!readonly) {
        editor.addCommand(monaco.KeyMod.CtrlCmd | monaco.KeyCode.KeyS, function() {
            saveAllEditors();
        });
    }

    return editor;
}

/**
 * Initialize all Monaco editors on the page
 */
function initMonacoEditors() {
    // Dispose existing editors
    Object.values(editors).forEach(ed => {
        if (ed && ed.dispose) ed.dispose();
    });
    Object.keys(editors).forEach(key => delete editors[key]);

    const editorConfigs = [
        { id: 'compose-editor', language: 'yaml', readonly: false },
        { id: 'env-editor', language: 'plaintext', readonly: false },
        { id: 'config-editor', language: 'yaml', readonly: false },
        { id: 'state-viewer', language: 'yaml', readonly: true }
    ];

    // Check if any editor elements exist
    const hasEditors = editorConfigs.some(({ id }) => document.getElementById(id));
    if (!hasEditors) return;

    // Load Monaco and create editors
    loadMonaco(() => {
        editorConfigs.forEach(({ id, language, readonly }) => {
            const el = document.getElementById(id);
            if (!el) return;

            const content = el.dataset.content || '';
            editors[id] = createEditor(el, content, language, readonly);
            if (!readonly) {
                editors[id].saveUrl = el.dataset.saveUrl;
            }
        });
    });
}

/**
 * Save all editors
 */
async function saveAllEditors() {
    const saveBtn = document.getElementById('save-btn') || document.getElementById('save-config-btn');
    const results = [];

    for (const [id, editor] of Object.entries(editors)) {
        if (!editor || !editor.saveUrl) continue;

        const content = editor.getValue();
        try {
            const response = await fetch(editor.saveUrl, {
                method: 'PUT',
                headers: { 'Content-Type': 'text/plain' },
                body: content
            });
            const data = await response.json();
            if (!response.ok || !data.success) {
                results.push({ id, success: false, error: data.detail || 'Unknown error' });
            } else {
                results.push({ id, success: true });
            }
        } catch (e) {
            results.push({ id, success: false, error: e.message });
        }
    }

    // Show result
    if (saveBtn && results.length > 0) {
        saveBtn.textContent = 'Saved!';
        setTimeout(() => saveBtn.textContent = saveBtn.id === 'save-config-btn' ? 'Save Config' : 'Save All', 2000);
        refreshDashboard();
    }
}

/**
 * Initialize save button handler
 */
function initSaveButton() {
    const saveBtn = document.getElementById('save-btn') || document.getElementById('save-config-btn');
    if (!saveBtn) return;

    saveBtn.onclick = saveAllEditors;
}

/**
 * Global keyboard shortcut handler
 */
function initKeyboardShortcuts() {
    document.addEventListener('keydown', function(e) {
        // Command+S (Mac) or Ctrl+S (Windows/Linux)
        if ((e.metaKey || e.ctrlKey) && e.key === 's') {
            // Only handle if we have editors and no Monaco editor is focused
            if (Object.keys(editors).length > 0) {
                // Check if any Monaco editor is focused
                const focusedEditor = Object.values(editors).find(ed => ed && ed.hasTextFocus && ed.hasTextFocus());
                if (!focusedEditor) {
                    e.preventDefault();
                    saveAllEditors();
                }
            }
        }
    });
}

/**
 * Initialize page components
 */
function initPage() {
    initMonacoEditors();
    initSaveButton();
}

// Initialize on page load
document.addEventListener('DOMContentLoaded', function() {
    initPage();
    initKeyboardShortcuts();
});

// Re-initialize after HTMX swaps main content
document.body.addEventListener('htmx:afterSwap', function(evt) {
    if (evt.detail.target.id === 'main-content') {
        initPage();
    }
});

/**
 * Expand terminal collapse and scroll to it
 */
function expandTerminal() {
    const toggle = document.getElementById('terminal-toggle');
    if (toggle) toggle.checked = true;

    const collapse = document.getElementById('terminal-collapse');
    if (collapse) {
        collapse.scrollIntoView({ behavior: 'smooth', block: 'start' });
    }
}

/**
 * Show/hide terminal loading spinner
 */
function setTerminalLoading(loading) {
    const spinner = document.getElementById('terminal-spinner');
    if (spinner) {
        spinner.classList.toggle('hidden', !loading);
    }
}

// Handle action responses (terminal streaming)
document.body.addEventListener('htmx:afterRequest', function(evt) {
    if (!evt.detail.successful || !evt.detail.xhr) return;

    const text = evt.detail.xhr.responseText;
    // Only try to parse if it looks like JSON (starts with {)
    if (!text || !text.trim().startsWith('{')) return;

    try {
        const response = JSON.parse(text);
        if (response.task_id) {
            // Expand terminal and scroll to it
            expandTerminal();

            // Wait for xterm to be loaded if needed
            const tryInit = (attempts) => {
                if (typeof Terminal !== 'undefined' && typeof FitAddon !== 'undefined') {
                    initTerminal('terminal-output', response.task_id);
                } else if (attempts > 0) {
                    setTimeout(() => tryInit(attempts - 1), 100);
                } else {
                    console.error('xterm.js failed to load');
                }
            };
            tryInit(20); // Try for up to 2 seconds
        }
    } catch (e) {
        // Not valid JSON, ignore
    }
});

// Command Palette
(function() {
    const dialog = document.getElementById('cmd-palette');
    const input = document.getElementById('cmd-input');
    const list = document.getElementById('cmd-list');
    const fab = document.getElementById('cmd-fab');
    if (!dialog || !input || !list) return;

    const colors = { service: '#22c55e', action: '#eab308', nav: '#3b82f6' };
    let commands = [];
    let filtered = [];
    let selected = 0;

    const post = (url) => () => htmx.ajax('POST', url, {swap: 'none'});
    const nav = (url) => () => window.location.href = url;
    const cmd = (type, name, desc, action) => ({ type, name, desc, action });

    function buildCommands() {
        const actions = [
            cmd('action', 'Apply', 'Make reality match config', post('/api/apply')),
            cmd('action', 'Refresh', 'Update state from reality', post('/api/refresh')),
            cmd('nav', 'Dashboard', 'Go to dashboard', nav('/')),
        ];

        // Add service-specific actions if on a service page
        const match = window.location.pathname.match(/^\/service\/(.+)$/);
        if (match) {
            const svc = decodeURIComponent(match[1]);
            const svcCmd = (name, desc, endpoint) => cmd('service', name, `${desc} ${svc}`, post(`/api/service/${svc}/${endpoint}`));
            actions.unshift(
                svcCmd('Up', 'Start', 'up'),
                svcCmd('Down', 'Stop', 'down'),
                svcCmd('Restart', 'Restart', 'restart'),
                svcCmd('Pull', 'Pull', 'pull'),
                svcCmd('Update', 'Pull + restart', 'update'),
                svcCmd('Logs', 'View logs for', 'logs'),
            );
        }

        // Add nav commands for all services from sidebar
        const services = [...document.querySelectorAll('#sidebar-services li[data-svc] a[href]')].map(a => {
            const name = a.getAttribute('href').replace('/service/', '');
            return cmd('nav', name, 'Go to service', nav(`/service/${name}`));
        });

        commands = [...actions, ...services];
    }

    function filter() {
        const q = input.value.toLowerCase();
        filtered = commands.filter(c => c.name.toLowerCase().includes(q));
        selected = Math.max(0, Math.min(selected, filtered.length - 1));
    }

    function render() {
        list.innerHTML = filtered.map((c, i) => `
            <a class="flex justify-between items-center px-3 py-2 rounded-r cursor-pointer hover:bg-base-200 border-l-4 ${i === selected ? 'bg-base-300' : ''}" style="border-left-color: ${colors[c.type] || '#666'}" data-idx="${i}">
                <span><span class="opacity-50 text-xs mr-2">${c.type}</span>${c.name}</span>
                <span class="opacity-40 text-xs">${c.desc}</span>
            </a>
        `).join('') || '<div class="opacity-50 p-2">No matches</div>';
    }

    function open() {
        buildCommands();
        selected = 0;
        input.value = '';
        filter();
        render();
        dialog.showModal();
        input.focus();
    }

    function exec() {
        if (filtered[selected]) {
            dialog.close();
            filtered[selected].action();
        }
    }

    // Keyboard: Cmd+K to open
    document.addEventListener('keydown', e => {
        if ((e.metaKey || e.ctrlKey) && e.key === 'k') {
            e.preventDefault();
            open();
        }
    });

    // Input filtering
    input.addEventListener('input', () => { filter(); render(); });

    // Keyboard nav inside palette
    dialog.addEventListener('keydown', e => {
        if (!dialog.open) return;
        if (e.key === 'ArrowDown') { e.preventDefault(); selected = Math.min(selected + 1, filtered.length - 1); render(); }
        else if (e.key === 'ArrowUp') { e.preventDefault(); selected = Math.max(selected - 1, 0); render(); }
        else if (e.key === 'Enter') { e.preventDefault(); exec(); }
    });

    // Click to execute
    list.addEventListener('click', e => {
        const a = e.target.closest('a[data-idx]');
        if (a) {
            selected = parseInt(a.dataset.idx, 10);
            exec();
        }
    });

    // FAB click to open
    if (fab) fab.addEventListener('click', open);
})();
114 src/compose_farm/web/streaming.py Normal file
@@ -0,0 +1,114 @@
"""Streaming executor adapter for web UI."""

from __future__ import annotations

import asyncio
import os
from pathlib import Path
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from compose_farm.config import Config

# ANSI escape codes for terminal output
RED = "\x1b[31m"
GREEN = "\x1b[32m"
DIM = "\x1b[2m"
RESET = "\x1b[0m"
CRLF = "\r\n"


def _get_ssh_auth_sock() -> str | None:
    """Get SSH_AUTH_SOCK, auto-detecting forwarded agent if needed."""
    sock = os.environ.get("SSH_AUTH_SOCK")
    if sock and Path(sock).is_socket():
        return sock

    # Try to find a forwarded SSH agent socket
    agent_dir = Path.home() / ".ssh" / "agent"
    if agent_dir.is_dir():
        sockets = sorted(
            agent_dir.glob("s.*.sshd.*"), key=lambda p: p.stat().st_mtime, reverse=True
        )
        for s in sockets:
            if s.is_socket():
                return str(s)
    return None


# In-memory task registry
tasks: dict[str, dict[str, Any]] = {}


async def stream_to_task(task_id: str, message: str) -> None:
    """Send a message to a task's output buffer."""
    if task_id in tasks:
        tasks[task_id]["output"].append(message)


async def run_cli_streaming(
    config: Config,
    args: list[str],
    task_id: str,
) -> None:
    """Run a cf CLI command as subprocess and stream output to task buffer.

    This reuses all CLI logic including Rich formatting, progress bars, etc.
    The subprocess gets a pseudo-TTY via FORCE_COLOR so Rich outputs ANSI codes.
    """
    try:
        # Build command - config option goes after the subcommand
        cmd = ["cf", *args, f"--config={config.config_path}"]

        # Show command being executed
        cmd_display = " ".join(["cf", *args])
        await stream_to_task(task_id, f"{DIM}$ {cmd_display}{RESET}{CRLF}")

        # Force color output even though there's no real TTY
        # Set COLUMNS for Rich/Typer to format output correctly
        env = {"FORCE_COLOR": "1", "TERM": "xterm-256color", "COLUMNS": "120"}

        # Ensure SSH agent is available (auto-detect if needed)
        ssh_sock = _get_ssh_auth_sock()
        if ssh_sock:
            env["SSH_AUTH_SOCK"] = ssh_sock

        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,
            env={**os.environ, **env},
        )

        # Stream output line by line
        if process.stdout:
            async for line in process.stdout:
                text = line.decode("utf-8", errors="replace")
                # Convert \n to \r\n for xterm.js
                if text.endswith("\n") and not text.endswith("\r\n"):
                    text = text[:-1] + "\r\n"
                await stream_to_task(task_id, text)

        exit_code = await process.wait()
        tasks[task_id]["status"] = "completed" if exit_code == 0 else "failed"

    except Exception as e:
        await stream_to_task(task_id, f"{RED}Error: {e}{RESET}{CRLF}")
        tasks[task_id]["status"] = "failed"


async def run_compose_streaming(
    config: Config,
    service: str,
    command: str,
    task_id: str,
) -> None:
    """Run a compose command (up/down/pull/restart) via CLI subprocess."""
    # Split command into args (e.g., "up -d" -> ["up", "-d"])
    args = command.split()
    cli_cmd = args[0]  # up, down, pull, restart
    extra_args = args[1:]  # -d, etc.

    # Build CLI args
    cli_args = [cli_cmd, service, *extra_args]
    await run_cli_streaming(config, cli_args, task_id)
67 src/compose_farm/web/templates/base.html Normal file
@@ -0,0 +1,67 @@
{% from "partials/icons.html" import github, hamburger %}
<!DOCTYPE html>
<html lang="en" data-theme="dark">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>{% block title %}Compose Farm{% endblock %}</title>

    <!-- daisyUI + Tailwind -->
    <link href="https://cdn.jsdelivr.net/npm/daisyui@5" data-vendor="daisyui.css" rel="stylesheet" type="text/css" />
    <script src="https://cdn.jsdelivr.net/npm/@tailwindcss/browser@4" data-vendor="tailwind.js"></script>

    <!-- xterm.js -->
    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/css/xterm.css" data-vendor="xterm.css">

    <!-- Custom styles -->
    <link rel="stylesheet" href="/static/app.css">
</head>
<body class="min-h-screen bg-base-200">
<div class="drawer lg:drawer-open">
    <input id="drawer-toggle" type="checkbox" class="drawer-toggle" />

    <!-- Main content -->
    <div class="drawer-content flex flex-col">
        <!-- Mobile navbar with hamburger -->
        <header class="navbar bg-base-100 border-b border-base-300 lg:hidden">
            <label for="drawer-toggle" class="btn btn-ghost btn-square">
                {{ hamburger() }}
            </label>
            <span class="font-semibold rainbow-hover">Compose Farm</span>
        </header>

        <main id="main-content" class="flex-1 p-6 overflow-y-auto" hx-boost="true" hx-target="#main-content" hx-select="#main-content" hx-swap="outerHTML">
            {% block content %}{% endblock %}
        </main>
    </div>

    <!-- Sidebar -->
    <div class="drawer-side">
        <label for="drawer-toggle" class="drawer-overlay" aria-label="close sidebar"></label>
        <aside id="sidebar" class="w-64 bg-base-100 border-r border-base-300 flex flex-col min-h-screen">
            <header class="p-4 border-b border-base-300">
                <h2 class="text-lg font-semibold flex items-center gap-2">
                    <span class="rainbow-hover">Compose Farm</span>
                    <a href="https://github.com/basnijholt/compose-farm" target="_blank" title="GitHub" class="opacity-50 hover:opacity-100 transition-opacity">
                        {{ github() }}
                    </a>
                </h2>
            </header>
            <nav class="flex-1 overflow-y-auto p-2" hx-get="/partials/sidebar" hx-trigger="load" hx-swap="innerHTML">
                <span class="loading loading-spinner loading-sm"></span> Loading...
            </nav>
        </aside>
    </div>
</div>

<!-- Command Palette -->
{% include "partials/command_palette.html" %}

<!-- Scripts - HTMX first -->
<script src="https://unpkg.com/htmx.org@2.0.4" data-vendor="htmx.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/lib/xterm.js" data-vendor="xterm.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@xterm/addon-fit@0.10.0/lib/addon-fit.js" data-vendor="xterm-fit.js"></script>
<script src="/static/app.js"></script>
{% block scripts %}{% endblock %}
</body>
</html>
73 src/compose_farm/web/templates/index.html Normal file
@@ -0,0 +1,73 @@
{% extends "base.html" %}
{% from "partials/components.html" import page_header, collapse, stat_card, table, action_btn %}
{% from "partials/icons.html" import check, refresh_cw, save, settings, server, database %}
{% block title %}Dashboard - Compose Farm{% endblock %}

{% block content %}
<div class="max-w-5xl">
    {{ page_header("Compose Farm", "Cluster overview and management") }}

    <!-- Stats Cards -->
    {% include "partials/stats.html" %}

    <!-- Global Actions -->
    <div class="flex flex-wrap gap-2 mb-6">
        {{ action_btn("Apply", "/api/apply", "primary", "Make reality match config", check()) }}
        {{ action_btn("Refresh", "/api/refresh", "outline", "Update state from reality", refresh_cw()) }}
        <button id="save-config-btn" class="btn btn-outline">{{ save() }} Save Config</button>
    </div>

    {% include "partials/terminal.html" %}

    <!-- Config Error Banner -->
    <div id="config-error">
        {% if config_error %}
        {% include "partials/config_error.html" %}
        {% endif %}
    </div>

    <!-- Config Editor -->
    {% call collapse("Edit Config", badge="compose-farm.yaml", icon=settings(), checked=config_error) %}
    <div class="editor-wrapper yaml-wrapper">
        <div id="config-editor" class="yaml-editor" data-content="{{ config_content | e }}" data-save-url="/api/config"></div>
    </div>
    {% endcall %}

    <!-- Pending Operations -->
    {% include "partials/pending.html" %}

    <!-- Services by Host -->
    {% include "partials/services_by_host.html" %}

    <!-- Hosts Configuration -->
    {% call collapse("Hosts (" ~ (hosts | length) ~ ")", icon=server()) %}
    {% call table() %}
    <thead>
        <tr>
            <th>Name</th>
            <th>Address</th>
            <th>User</th>
            <th>Port</th>
        </tr>
    </thead>
    <tbody>
        {% for name, host in hosts.items() %}
        <tr class="hover:bg-base-300">
            <td class="font-semibold">{{ name }}</td>
            <td><code class="text-sm">{{ host.address }}</code></td>
            <td><code class="text-sm">{{ host.user }}</code></td>
            <td><code class="text-sm">{{ host.port }}</code></td>
        </tr>
        {% endfor %}
    </tbody>
    {% endcall %}
    {% endcall %}

    <!-- State Viewer -->
    {% call collapse("Raw State", badge="compose-farm-state.yaml", icon=database()) %}
    <div class="editor-wrapper viewer-wrapper">
        <div id="state-viewer" class="yaml-viewer" data-content="{{ state_content | e }}"></div>
    </div>
    {% endcall %}
</div>
{% endblock %}
19 src/compose_farm/web/templates/partials/command_palette.html Normal file
@@ -0,0 +1,19 @@
{% from "partials/icons.html" import search, command %}
<dialog id="cmd-palette" class="modal">
    <div class="modal-box max-w-lg p-0">
        <label class="input input-lg bg-base-100 border-0 border-b border-base-300 w-full rounded-none rounded-t-box sticky top-0 z-10 focus-within:outline-none">
            {{ search(20) }}
            <input type="text" id="cmd-input" class="grow" placeholder="Type a command..." autocomplete="off" />
            <kbd class="kbd kbd-sm opacity-50">esc</kbd>
        </label>
        <div id="cmd-list" class="flex flex-col p-2 max-h-80 overflow-y-auto">
            <!-- Populated by JS -->
        </div>
    </div>
    <form method="dialog" class="modal-backdrop"><button>close</button></form>
</dialog>

<!-- Floating button to open command palette -->
<button id="cmd-fab" class="btn btn-circle glass shadow-lg fixed bottom-6 right-6 z-50 hover:ring hover:ring-base-content/50" title="Command Palette (⌘K)">
    {{ command(24) }}
</button>
52 src/compose_farm/web/templates/partials/components.html Normal file
@@ -0,0 +1,52 @@
{# Page header with title and optional subtitle (supports HTML) #}
{% macro page_header(title, subtitle=None) %}
<div class="mb-6">
    <h1 class="text-3xl font-bold rainbow-hover">{{ title }}</h1>
    {% if subtitle %}
    <p class="text-base-content/60 text-lg">{{ subtitle | safe }}</p>
    {% endif %}
</div>
{% endmacro %}

{# Collapsible section #}
{% macro collapse(title, id=None, checked=False, badge=None, icon=None) %}
<div class="collapse collapse-arrow bg-base-100 shadow mb-4">
    <input type="checkbox" {% if id %}id="{{ id }}"{% endif %} {% if checked %}checked{% endif %} />
    <div class="collapse-title font-medium flex items-center gap-2">
        {% if icon %}{{ icon }}{% endif %}{{ title }}
        {% if badge %}<code class="text-xs ml-2 opacity-60">{{ badge }}</code>{% endif %}
    </div>
    <div class="collapse-content">
        {{ caller() }}
    </div>
</div>
{% endmacro %}

{# Action button with htmx #}
{% macro action_btn(label, url, style="outline", title=None, icon=None) %}
<button hx-post="{{ url }}"
        hx-swap="none"
        class="btn btn-{{ style }}"
        {% if title %}title="{{ title }}"{% endif %}>
    {% if icon %}{{ icon }}{% endif %}{{ label }}
</button>
{% endmacro %}

{# Stat card for dashboard #}
{% macro stat_card(label, value, color=None, icon=None) %}
<div class="card bg-base-100 shadow">
    <div class="card-body items-center text-center">
        <h2 class="card-title text-base-content/60 text-sm gap-1">{% if icon %}{{ icon }}{% endif %}{{ label }}</h2>
        <p class="text-4xl font-bold {% if color %}text-{{ color }}{% endif %}">{{ value }}</p>
    </div>
</div>
{% endmacro %}

{# Data table wrapper #}
{% macro table() %}
<div class="overflow-x-auto">
    <table class="table table-zebra">
        {{ caller() }}
    </table>
</div>
{% endmacro %}
8 src/compose_farm/web/templates/partials/config_error.html Normal file
@@ -0,0 +1,8 @@
{% from "partials/icons.html" import alert_triangle %}
<div class="alert alert-error mb-4">
    {{ alert_triangle(size=20) }}
    <div>
        <h3 class="font-bold">Configuration Error</h3>
        <div class="text-sm">{{ config_error }}</div>
    </div>
</div>
27 src/compose_farm/web/templates/partials/containers.html Normal file
@@ -0,0 +1,27 @@
{# Container list for a service on a single host #}
{% from "partials/icons.html" import terminal %}
{% macro container_row(service, container, host) %}
<div class="flex items-center gap-2 mb-2">
    {% if container.State == "running" %}
    <span class="badge badge-success">running</span>
    {% elif container.State == "unknown" %}
    <span class="badge badge-ghost"><span class="loading loading-spinner loading-xs"></span></span>
    {% else %}
    <span class="badge badge-warning">{{ container.State }}</span>
    {% endif %}
    <code class="text-sm flex-1">{{ container.Name }}</code>
    <button class="btn btn-sm btn-outline"
            onclick="initExecTerminal('{{ service }}', '{{ container.Name }}', '{{ host }}')">
        {{ terminal() }} Shell
    </button>
</div>
{% endmacro %}

{% macro host_containers(service, host, containers, show_header=False) %}
{% if show_header %}
<div class="font-semibold text-sm mt-3 mb-1">{{ host }}</div>
{% endif %}
{% for container in containers %}
{{ container_row(service, container, host) }}
{% endfor %}
{% endmacro %}
148 src/compose_farm/web/templates/partials/icons.html Normal file
@@ -0,0 +1,148 @@
{# Lucide-style icons (https://lucide.dev) - 24x24 viewBox, 2px stroke, round caps #}

{# Brand icons #}
{% macro github(size=16) %}
<svg height="{{ size }}" width="{{ size }}" viewBox="0 0 16 16" fill="currentColor"><path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"/></svg>
{% endmacro %}

{# UI icons #}
{% macro hamburger(size=20) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <line x1="4" x2="20" y1="6" y2="6"/><line x1="4" x2="20" y1="12" y2="12"/><line x1="4" x2="20" y1="18" y2="18"/>
</svg>
{% endmacro %}

{% macro command(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M15 6v12a3 3 0 1 0 3-3H6a3 3 0 1 0 3 3V6a3 3 0 1 0-3 3h12a3 3 0 1 0-3-3"/>
</svg>
{% endmacro %}

{# Action icons #}
{% macro play(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <polygon points="6 3 20 12 6 21 6 3"/>
</svg>
{% endmacro %}

{% macro square(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <rect width="14" height="14" x="5" y="5" rx="2"/>
</svg>
{% endmacro %}

{% macro rotate_cw(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M21 12a9 9 0 1 1-9-9c2.52 0 4.93 1 6.74 2.74L21 8"/><path d="M21 3v5h-5"/>
</svg>
{% endmacro %}

{% macro download(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"/><polyline points="7 10 12 15 17 10"/><line x1="12" x2="12" y1="15" y2="3"/>
</svg>
{% endmacro %}

{% macro cloud_download(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M12 13v8l-4-4"/><path d="m12 21 4-4"/><path d="M4.393 15.269A7 7 0 1 1 15.71 8h1.79a4.5 4.5 0 0 1 2.436 8.284"/>
</svg>
{% endmacro %}

{% macro file_text(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M15 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V7Z"/><path d="M14 2v4a2 2 0 0 0 2 2h4"/><path d="M10 9H8"/><path d="M16 13H8"/><path d="M16 17H8"/>
</svg>
{% endmacro %}

{% macro save(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M15.2 3a2 2 0 0 1 1.4.6l3.8 3.8a2 2 0 0 1 .6 1.4V19a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V5a2 2 0 0 1 2-2z"/><path d="M17 21v-7a1 1 0 0 0-1-1H8a1 1 0 0 0-1 1v7"/><path d="M7 3v4a1 1 0 0 0 1 1h7"/>
</svg>
{% endmacro %}

{% macro check(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M20 6 9 17l-5-5"/>
</svg>
{% endmacro %}

{% macro refresh_cw(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M3 12a9 9 0 0 1 9-9 9.75 9.75 0 0 1 6.74 2.74L21 8"/><path d="M21 3v5h-5"/><path d="M21 12a9 9 0 0 1-9 9 9.75 9.75 0 0 1-6.74-2.74L3 16"/><path d="M8 16H3v5"/>
</svg>
{% endmacro %}

{% macro terminal(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <polyline points="4 17 10 11 4 5"/><line x1="12" x2="20" y1="19" y2="19"/>
</svg>
{% endmacro %}

{# Stats/navigation icons #}
{% macro server(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <rect width="20" height="8" x="2" y="2" rx="2" ry="2"/><rect width="20" height="8" x="2" y="14" rx="2" ry="2"/><line x1="6" x2="6.01" y1="6" y2="6"/><line x1="6" x2="6.01" y1="18" y2="18"/>
</svg>
{% endmacro %}

{% macro layers(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="m12.83 2.18a2 2 0 0 0-1.66 0L2.6 6.08a1 1 0 0 0 0 1.83l8.58 3.91a2 2 0 0 0 1.66 0l8.58-3.9a1 1 0 0 0 0-1.83Z"/><path d="m22 17.65-9.17 4.16a2 2 0 0 1-1.66 0L2 17.65"/><path d="m22 12.65-9.17 4.16a2 2 0 0 1-1.66 0L2 12.65"/>
</svg>
{% endmacro %}

{% macro circle_check(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <circle cx="12" cy="12" r="10"/><path d="m9 12 2 2 4-4"/>
</svg>
{% endmacro %}

{% macro circle_x(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <circle cx="12" cy="12" r="10"/><path d="m15 9-6 6"/><path d="m9 9 6 6"/>
</svg>
{% endmacro %}

{% macro home(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M15 21v-8a1 1 0 0 0-1-1h-4a1 1 0 0 0-1 1v8"/><path d="M3 10a2 2 0 0 1 .709-1.528l7-5.999a2 2 0 0 1 2.582 0l7 5.999A2 2 0 0 1 21 10v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2z"/>
</svg>
{% endmacro %}

{% macro box(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M21 8a2 2 0 0 0-1-1.73l-7-4a2 2 0 0 0-2 0l-7 4A2 2 0 0 0 3 8v8a2 2 0 0 0 1 1.73l7 4a2 2 0 0 0 2 0l7-4A2 2 0 0 0 21 16Z"/><path d="m3.3 7 8.7 5 8.7-5"/><path d="M12 22V12"/>
</svg>
{% endmacro %}

{# Section icons #}
{% macro settings(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M12.22 2h-.44a2 2 0 0 0-2 2v.18a2 2 0 0 1-1 1.73l-.43.25a2 2 0 0 1-2 0l-.15-.08a2 2 0 0 0-2.73.73l-.22.38a2 2 0 0 0 .73 2.73l.15.1a2 2 0 0 1 1 1.72v.51a2 2 0 0 1-1 1.74l-.15.09a2 2 0 0 0-.73 2.73l.22.38a2 2 0 0 0 2.73.73l.15-.08a2 2 0 0 1 2 0l.43.25a2 2 0 0 1 1 1.73V20a2 2 0 0 0 2 2h.44a2 2 0 0 0 2-2v-.18a2 2 0 0 1 1-1.73l.43-.25a2 2 0 0 1 2 0l.15.08a2 2 0 0 0 2.73-.73l.22-.39a2 2 0 0 0-.73-2.73l-.15-.08a2 2 0 0 1-1-1.74v-.5a2 2 0 0 1 1-1.74l.15-.09a2 2 0 0 0 .73-2.73l-.22-.38a2 2 0 0 0-2.73-.73l-.15.08a2 2 0 0 1-2 0l-.43-.25a2 2 0 0 1-1-1.73V4a2 2 0 0 0-2-2z"/><circle cx="12" cy="12" r="3"/>
</svg>
{% endmacro %}

{% macro file_code(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="M10 12.5 8 15l2 2.5"/><path d="m14 12.5 2 2.5-2 2.5"/><path d="M14 2v4a2 2 0 0 0 2 2h4"/><path d="M15 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V7z"/>
</svg>
{% endmacro %}

{% macro database(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <ellipse cx="12" cy="5" rx="9" ry="3"/><path d="M3 5V19A9 3 0 0 0 21 19V5"/><path d="M3 12A9 3 0 0 0 21 12"/>
</svg>
{% endmacro %}

{% macro search(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <circle cx="11" cy="11" r="8"/><path d="m21 21-4.3-4.3"/>
</svg>
{% endmacro %}

{% macro alert_triangle(size=16) %}
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
    <path d="m21.73 18-8-14a2 2 0 0 0-3.48 0l-8 14A2 2 0 0 0 4 21h16a2 2 0 0 0 1.73-3"/><path d="M12 9v4"/><path d="M12 17h.01"/>
</svg>
{% endmacro %}
38 src/compose_farm/web/templates/partials/pending.html Normal file
@@ -0,0 +1,38 @@
{% from "partials/components.html" import collapse %}
<div id="pending-operations">
    {% if orphaned or migrations or not_started %}
    {% call collapse("Pending Operations", id="pending-collapse", checked=expanded|default(true)) %}
    {% if orphaned %}
    <h4 class="font-semibold mt-2 mb-1">Orphaned Services (will be stopped)</h4>
    <ul class="list-disc list-inside mb-4">
        {% for svc, host in orphaned.items() %}
        <li><a href="/service/{{ svc }}" class="badge badge-warning hover:badge-primary">{{ svc }}</a> on {{ host }}</li>
        {% endfor %}
    </ul>
    {% endif %}

    {% if migrations %}
    <h4 class="font-semibold mt-2 mb-1">Services Needing Migration</h4>
    <ul class="list-disc list-inside mb-4">
        {% for svc in migrations %}
        <li><a href="/service/{{ svc }}" class="badge badge-info hover:badge-primary">{{ svc }}</a></li>
        {% endfor %}
    </ul>
    {% endif %}

    {% if not_started %}
    <h4 class="font-semibold mt-2 mb-1">Services Not Started</h4>
    <ul class="menu menu-horizontal bg-base-200 rounded-box mb-2">
        {% for svc in not_started | sort %}
        <li><a href="/service/{{ svc }}">{{ svc }}</a></li>
        {% endfor %}
    </ul>
    {% endif %}
    {% endcall %}
    {% else %}
    <div role="alert" class="alert alert-success mb-4">
        <svg xmlns="http://www.w3.org/2000/svg" class="stroke-current shrink-0 h-6 w-6" fill="none" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z" /></svg>
        <span>All services are in sync with configuration.</span>
    </div>
    {% endif %}
</div>
41 src/compose_farm/web/templates/partials/services_by_host.html Normal file
@@ -0,0 +1,41 @@
{% from "partials/components.html" import collapse %}
{% from "partials/icons.html" import layers, search %}
<div id="services-by-host">
    {% call collapse("Services by Host", id="services-by-host-collapse", checked=expanded|default(true), icon=layers()) %}
    <div class="flex flex-wrap gap-2 mb-4 items-center">
        <label class="input input-sm input-bordered flex items-center gap-2 bg-base-200">
            {{ search() }}<input type="text" id="sbh-filter" class="w-32" placeholder="Filter..." onkeyup="sbhFilter()" />
        </label>
        <select id="sbh-host-select" class="select select-sm select-bordered bg-base-200" onchange="sbhFilter()">
            <option value="">All hosts</option>
            {% for h in services_by_host.keys() | sort %}<option value="{{ h }}">{{ h }}</option>{% endfor %}
        </select>
    </div>
    {% for host_name, host_services in services_by_host.items() | sort %}
    <div class="sbh-group" data-h="{{ host_name }}">
        <h4 class="font-semibold mt-3 mb-1">{{ host_name }}{% if host_name in hosts %}<code class="text-xs ml-2 opacity-60">{{ hosts[host_name].address }}</code>{% endif %}</h4>
        <ul class="menu menu-horizontal bg-base-200 rounded-box mb-2 flex-wrap">
            {% for svc in host_services | sort %}<li data-s="{{ svc | lower }}"><a href="/service/{{ svc }}">{{ svc }}</a></li>{% endfor %}
        </ul>
    </div>
    {% else %}
    <p class="text-base-content/60 italic">No services currently running.</p>
    {% endfor %}
    <script>
    function sbhFilter() {
        const q = (document.getElementById('sbh-filter')?.value || '').toLowerCase();
        const h = document.getElementById('sbh-host-select')?.value || '';
        document.querySelectorAll('.sbh-group').forEach(g => {
            if (h && g.dataset.h !== h) { g.hidden = true; return; }
            let n = 0;
            g.querySelectorAll('li[data-s]').forEach(li => {
                const show = !q || li.dataset.s.includes(q);
                li.hidden = !show;
                if (show) n++;
            });
            g.hidden = !n;
        });
    }
    </script>
    {% endcall %}
</div>
45 src/compose_farm/web/templates/partials/sidebar.html Normal file
@@ -0,0 +1,45 @@
{% from "partials/icons.html" import home, search %}
<!-- Dashboard Link -->
<div class="mb-4">
    <ul class="menu" hx-boost="true" hx-target="#main-content" hx-select="#main-content" hx-swap="outerHTML">
        <li><a href="/" class="font-semibold">{{ home() }} Dashboard</a></li>
    </ul>
</div>

<!-- Services Section -->
<div class="mb-4">
    <h4 class="text-xs uppercase tracking-wide text-base-content/60 px-3 py-1">Services <span class="opacity-50" id="sidebar-count">({{ services | length }})</span></h4>
    <div class="px-2 mb-2 flex flex-col gap-1">
        <label class="input input-xs input-bordered flex items-center gap-2 bg-base-200">
            {{ search(14) }}<input type="text" id="sidebar-filter" placeholder="Filter..." onkeyup="sidebarFilter()" />
        </label>
        <select id="sidebar-host-select" class="select select-xs select-bordered bg-base-200 w-full" onchange="sidebarFilter()">
            <option value="">All hosts</option>
            {% for h in hosts %}<option value="{{ h }}">{{ h }}</option>{% endfor %}
        </select>
    </div>
    <ul class="menu menu-sm" id="sidebar-services" hx-boost="true" hx-target="#main-content" hx-select="#main-content" hx-swap="outerHTML">
        {% for service in services %}
        <li data-svc="{{ service | lower }}" data-h="{{ service_hosts.get(service, '') }}">
            <a href="/service/{{ service }}" class="flex items-center gap-2">
                {% if service in state %}<span class="status status-success" title="In state file"></span>
                {% else %}<span class="status status-neutral" title="Not in state file"></span>{% endif %}
                {{ service }}
            </a>
        </li>
        {% endfor %}
    </ul>
</div>
<script>
function sidebarFilter() {
    const q = (document.getElementById('sidebar-filter')?.value || '').toLowerCase();
    const h = document.getElementById('sidebar-host-select')?.value || '';
    let n = 0;
    document.querySelectorAll('#sidebar-services li').forEach(li => {
        const show = (!q || li.dataset.svc.includes(q)) && (!h || !li.dataset.h || li.dataset.h === h);
        li.hidden = !show;
        if (show) n++;
    });
    document.getElementById('sidebar-count').textContent = '(' + n + ')';
}
</script>
8 src/compose_farm/web/templates/partials/stats.html Normal file
@@ -0,0 +1,8 @@
{% from "partials/components.html" import stat_card %}
{% from "partials/icons.html" import server, layers, circle_check, circle_x %}
<div id="stats-cards" class="grid grid-cols-2 md:grid-cols-4 gap-4 mb-6">
    {{ stat_card("Hosts", hosts | length, icon=server()) }}
    {{ stat_card("Services", services | length, icon=layers()) }}
    {{ stat_card("Running", running_count, "success", circle_check()) }}
    {{ stat_card("Stopped", stopped_count, icon=circle_x()) }}
</div>
14 src/compose_farm/web/templates/partials/terminal.html Normal file
@@ -0,0 +1,14 @@
{% from "partials/icons.html" import terminal %}
<!-- Shared Terminal Component -->
<div class="collapse collapse-arrow bg-base-100 shadow mb-4" id="terminal-collapse">
    <input type="checkbox" id="terminal-toggle" />
    <div class="collapse-title font-medium flex items-center gap-2">
        {{ terminal() }} Terminal Output
        <span id="terminal-spinner" class="loading loading-spinner loading-sm hidden"></span>
    </div>
    <div class="collapse-content">
        <div id="terminal-container" class="bg-[#1a1a2e] rounded-lg h-[300px] border border-white/10 resize-y overflow-hidden">
            <div id="terminal-output" class="h-full"></div>
        </div>
    </div>
</div>
67 src/compose_farm/web/templates/service.html Normal file
@@ -0,0 +1,67 @@
{% extends "base.html" %}
{% from "partials/components.html" import collapse, action_btn %}
{% from "partials/icons.html" import play, square, rotate_cw, download, cloud_download, file_text, save, file_code, terminal, settings %}
{% block title %}{{ name }} - Compose Farm{% endblock %}

{% block content %}
<div class="max-w-5xl">
    <div class="mb-6">
        <h1 class="text-3xl font-bold rainbow-hover">{{ name }}</h1>
        <div class="flex flex-wrap items-center gap-2 mt-2">
            {% if current_host %}
            <span class="badge badge-success">Running on {{ current_host }}</span>
            {% else %}
            <span class="badge badge-neutral">Not running</span>
            {% endif %}
            <span class="badge badge-outline">{{ hosts | join(', ') }}</span>
        </div>
    </div>

    <!-- Action Buttons -->
    <div class="flex flex-wrap gap-2 mb-6">
        <!-- Lifecycle -->
        {{ action_btn("Up", "/api/service/" ~ name ~ "/up", "primary", "Start service (docker compose up -d)", play()) }}
        {{ action_btn("Down", "/api/service/" ~ name ~ "/down", "outline", "Stop service (docker compose down)", square()) }}
        {{ action_btn("Restart", "/api/service/" ~ name ~ "/restart", "secondary", "Restart service (down + up)", rotate_cw()) }}
        {{ action_btn("Update", "/api/service/" ~ name ~ "/update", "accent", "Update to latest (pull + build + down + up)", download()) }}

        <div class="divider divider-horizontal mx-0"></div>

        <!-- Other -->
        {{ action_btn("Pull", "/api/service/" ~ name ~ "/pull", "outline", "Pull latest images (no restart)", cloud_download()) }}
        {{ action_btn("Logs", "/api/service/" ~ name ~ "/logs", "outline", "Show recent logs", file_text()) }}
        <button id="save-btn" class="btn btn-outline">{{ save() }} Save All</button>
    </div>

    {% call collapse("Compose File", badge=compose_path, icon=file_code()) %}
    <div class="editor-wrapper yaml-wrapper">
        <div id="compose-editor" class="yaml-editor" data-content="{{ compose_content | e }}" data-save-url="/api/service/{{ name }}/compose"></div>
    </div>
    {% endcall %}

    {% call collapse(".env File", badge=env_path, icon=settings()) %}
    <div class="editor-wrapper env-wrapper">
        <div id="env-editor" class="env-editor" data-content="{{ env_content | e }}" data-save-url="/api/service/{{ name }}/env"></div>
    </div>
    {% endcall %}

    {% include "partials/terminal.html" %}

    <!-- Exec Terminal -->
    {% if current_host %}
    {% call collapse("Container Shell", id="exec-collapse", checked=True, icon=terminal()) %}
    <div id="containers-list" class="mb-4"
         hx-get="/api/service/{{ name }}/containers"
         hx-trigger="load"
         hx-target="this"
         hx-select="unset"
         hx-swap="innerHTML">
        <span class="loading loading-spinner loading-sm"></span> Loading containers...
    </div>
    <div id="exec-terminal-container" class="bg-[#1a1a2e] rounded-lg h-[400px] border border-white/10 hidden">
        <div id="exec-terminal" class="h-full"></div>
    </div>
    {% endcall %}
    {% endif %}
</div>
{% endblock %}
236 src/compose_farm/web/ws.py Normal file
@@ -0,0 +1,236 @@
|
||||
"""WebSocket handler for terminal streaming."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import fcntl
|
||||
import json
|
||||
import os
|
||||
import pty
|
||||
import signal
|
||||
import struct
|
||||
import termios
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import asyncssh
|
||||
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
||||
|
||||
from compose_farm.executor import is_local
|
||||
from compose_farm.web.deps import get_config
|
||||
from compose_farm.web.streaming import CRLF, DIM, GREEN, RED, RESET, tasks
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from compose_farm.config import Host
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
def _parse_resize(msg: str) -> tuple[int, int] | None:
|
||||
"""Parse a resize message, return (cols, rows) or None if not a resize."""
|
||||
try:
|
||||
data = json.loads(msg)
|
||||
if data.get("type") == "resize":
|
||||
return int(data["cols"]), int(data["rows"])
|
||||
except (json.JSONDecodeError, KeyError, TypeError, ValueError):
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def _resize_pty(
|
||||
fd: int, cols: int, rows: int, proc: asyncio.subprocess.Process | None = None
|
||||
) -> None:
|
||||
"""Resize a local PTY and send SIGWINCH to the process."""
|
||||
    winsize = struct.pack("HHHH", rows, cols, 0, 0)
    fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
    # Explicitly send SIGWINCH so docker exec forwards it to the container
    if proc and proc.pid:
        os.kill(proc.pid, signal.SIGWINCH)


async def _bridge_websocket_to_fd(
    websocket: WebSocket,
    master_fd: int,
    proc: asyncio.subprocess.Process,
) -> None:
    """Bridge WebSocket to a local PTY file descriptor."""
    loop = asyncio.get_event_loop()

    async def read_output() -> None:
        while proc.returncode is None:
            try:
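                # os.read would stall the event loop, so it runs in a worker thread;
                # the fd is O_NONBLOCK (set in _run_local_exec), so an empty PTY raises BlockingIOError.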
                data = await loop.run_in_executor(None, lambda: os.read(master_fd, 4096))
            except BlockingIOError:
                await asyncio.sleep(0.01)
                continue
            except OSError:
                break
            if not data:
                break
            await websocket.send_text(data.decode("utf-8", errors="replace"))

    read_task = asyncio.create_task(read_output())

    try:
        while proc.returncode is None:
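            # Short receive timeout so the loop also notices process exit, not just client input.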
            try:
                msg = await asyncio.wait_for(websocket.receive_text(), timeout=0.1)
            except TimeoutError:
                continue
            if size := _parse_resize(msg):
                _resize_pty(master_fd, *size, proc)
            else:
                os.write(master_fd, msg.encode())
    finally:
        read_task.cancel()
        os.close(master_fd)
        if proc.returncode is None:
            proc.terminate()


async def _bridge_websocket_to_ssh(
    websocket: WebSocket,
    proc: Any,  # asyncssh.SSHClientProcess
) -> None:
    """Bridge WebSocket to an SSH process with PTY."""
    assert proc.stdout is not None
    assert proc.stdin is not None

    async def read_stdout() -> None:
        while proc.returncode is None:
            data = await proc.stdout.read(4096)
            if not data:
                break
            text = data if isinstance(data, str) else data.decode()
            await websocket.send_text(text)

    read_task = asyncio.create_task(read_stdout())

    try:
        while proc.returncode is None:
            try:
                msg = await asyncio.wait_for(websocket.receive_text(), timeout=0.1)
            except TimeoutError:
                continue
            if size := _parse_resize(msg):
                proc.change_terminal_size(*size)
            else:
                proc.stdin.write(msg)
    finally:
        read_task.cancel()
        proc.terminate()


async def _run_local_exec(websocket: WebSocket, exec_cmd: str) -> None:
    """Run docker exec locally with PTY."""
    master_fd, slave_fd = pty.openpty()

    proc = await asyncio.create_subprocess_shell(
        exec_cmd,
        stdin=slave_fd,
        stdout=slave_fd,
        stderr=slave_fd,
        close_fds=True,
    )
    os.close(slave_fd)
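    # The child inherited the slave end; the parent only needs the master side from here on.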

    # Set non-blocking
    flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
    fcntl.fcntl(master_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    await _bridge_websocket_to_fd(websocket, master_fd, proc)


async def _run_remote_exec(websocket: WebSocket, host: Host, exec_cmd: str) -> None:
    """Run docker exec on remote host via SSH with PTY."""
    async with asyncssh.connect(
        host.address,
        port=host.port,
        username=host.user,
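        # known_hosts=None disables SSH host key verification for this connection.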
        known_hosts=None,
    ) as conn:
        proc: asyncssh.SSHClientProcess[Any] = await conn.create_process(
            exec_cmd,
            term_type="xterm-256color",
            term_size=(80, 24),
        )
        async with proc:
            await _bridge_websocket_to_ssh(websocket, proc)


async def _run_exec_session(
    websocket: WebSocket,
    container: str,
    host_name: str,
) -> None:
    """Run an interactive docker exec session over WebSocket."""
    config = get_config()
    host = config.hosts.get(host_name)
    if not host:
        await websocket.send_text(f"{RED}Host '{host_name}' not found{RESET}{CRLF}")
        return

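    # Prefer bash when the image provides it; fall back to plain sh otherwise.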
exec_cmd = f"docker exec -it {container} /bin/sh -c 'command -v bash >/dev/null && exec bash || exec sh'"
|
||||
|
||||
if is_local(host):
|
||||
await _run_local_exec(websocket, exec_cmd)
|
||||
else:
|
||||
await _run_remote_exec(websocket, host, exec_cmd)
|
||||
|
||||
|
||||
@router.websocket("/ws/exec/{service}/{container}/{host}")
|
||||
async def exec_websocket(
|
||||
websocket: WebSocket,
|
||||
service: str, # noqa: ARG001
|
||||
container: str,
|
||||
host: str,
|
||||
) -> None:
|
||||
"""WebSocket endpoint for interactive container exec."""
|
||||
await websocket.accept()
|
||||
|
||||
try:
|
||||
await websocket.send_text(f"{DIM}[Connecting to {container} on {host}...]{RESET}{CRLF}")
|
||||
await _run_exec_session(websocket, container, host)
|
||||
await websocket.send_text(f"{CRLF}{DIM}[Disconnected]{RESET}{CRLF}")
|
||||
except WebSocketDisconnect:
|
||||
pass
|
||||
except Exception as e:
|
||||
with contextlib.suppress(Exception):
|
||||
await websocket.send_text(f"{RED}Error: {e}{RESET}{CRLF}")
|
||||
finally:
|
||||
with contextlib.suppress(Exception):
|
||||
await websocket.close()
|
||||
|
||||
|
||||
@router.websocket("/ws/terminal/{task_id}")
|
||||
async def terminal_websocket(websocket: WebSocket, task_id: str) -> None:
|
||||
"""WebSocket endpoint for terminal streaming."""
|
||||
await websocket.accept()
|
||||
|
||||
if task_id not in tasks:
|
||||
await websocket.send_text(f"{RED}Error: Task not found{RESET}{CRLF}")
|
||||
await websocket.close(code=4004)
|
||||
return
|
||||
|
||||
task = tasks[task_id]
|
||||
sent_count = 0
|
||||
|
||||
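    # Replay everything buffered so far, then keep streaming new chunks until the task finishes.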
    try:
        while True:
            # Send any new output
            while sent_count < len(task["output"]):
                await websocket.send_text(task["output"][sent_count])
                sent_count += 1

            if task["status"] in ("completed", "failed"):
                status = "[Done]" if task["status"] == "completed" else "[Failed]"
                color = GREEN if task["status"] == "completed" else RED
                await websocket.send_text(f"{CRLF}{color}{status}{RESET}{CRLF}")
                await websocket.close()
                break

            await asyncio.sleep(0.05)
    except WebSocketDisconnect:
        pass
    finally:
        tasks.pop(task_id, None)
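For orientation, the wire protocol these endpoints expect is deliberately small: plain text frames are forwarded to the PTY as keystrokes, while JSON frames shaped like the resize payload above adjust the terminal size. A rough client sketch, assuming the third-party `websockets` package and a server listening on localhost:8000 (both assumptions for illustration, not part of this diff):

import asyncio
import json

import websockets  # third-party client library, assumed installed


async def demo() -> None:
    # Route shape matches exec_websocket above: /ws/exec/{service}/{container}/{host}
    uri = "ws://localhost:8000/ws/exec/plex/plex/server-1"
    async with websockets.connect(uri) as ws:
        # Announce the terminal size first, as a JSON resize frame...
        await ws.send(json.dumps({"type": "resize", "cols": 120, "rows": 32}))
        # ...then send keystrokes as plain text and read whatever the PTY emits.
        await ws.send("echo hello\n")
        print(await ws.recv())


asyncio.run(demo())

Anything the PTY emits comes back as text frames, so a client only needs send/recv.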
tests/test_cli_lifecycle.py (new file, 426 lines)
@@ -0,0 +1,426 @@
"""Tests for CLI lifecycle commands (apply, down --orphaned)."""
|
||||
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
import typer
|
||||
|
||||
from compose_farm.cli.lifecycle import apply, down
|
||||
from compose_farm.config import Config, Host
|
||||
from compose_farm.executor import CommandResult
|
||||
|
||||
|
||||
def _make_config(tmp_path: Path, services: dict[str, str] | None = None) -> Config:
|
||||
"""Create a minimal config for testing."""
|
||||
compose_dir = tmp_path / "compose"
|
||||
compose_dir.mkdir()
|
||||
|
||||
svc_dict = services or {"svc1": "host1", "svc2": "host2"}
|
||||
for svc in svc_dict:
|
||||
svc_dir = compose_dir / svc
|
||||
svc_dir.mkdir()
|
||||
(svc_dir / "docker-compose.yml").write_text("services: {}\n")
|
||||
|
||||
config_path = tmp_path / "compose-farm.yaml"
|
||||
config_path.write_text("")
|
||||
|
||||
return Config(
|
||||
compose_dir=compose_dir,
|
||||
hosts={"host1": Host(address="localhost"), "host2": Host(address="localhost")},
|
||||
services=svc_dict,
|
||||
config_path=config_path,
|
||||
)
|
||||
|
||||
|
||||
def _make_result(service: str, success: bool = True) -> CommandResult:
|
||||
"""Create a command result."""
|
||||
return CommandResult(
|
||||
service=service,
|
||||
exit_code=0 if success else 1,
|
||||
success=success,
|
||||
stdout="",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
|
||||
class TestApplyCommand:
|
||||
"""Tests for the apply command."""
|
||||
|
||||
def test_apply_nothing_to_do(self, tmp_path: Path, capsys: pytest.CaptureFixture[str]) -> None:
|
||||
"""When no migrations, orphans, or missing services, prints success message."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Nothing to apply" in captured.out
|
||||
|
||||
def test_apply_dry_run_shows_preview(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""Dry run shows what would be done without executing."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host1"),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
):
|
||||
apply(dry_run=True, no_orphans=False, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Services to migrate" in captured.out
|
||||
assert "svc1" in captured.out
|
||||
assert "Orphaned services to stop" in captured.out
|
||||
assert "old-svc" in captured.out
|
||||
assert "dry-run" in captured.out
|
||||
|
||||
# Should not have called the actual operations
|
||||
mock_stop.assert_not_called()
|
||||
mock_up.assert_not_called()
|
||||
|
||||
def test_apply_executes_migrations(self, tmp_path: Path) -> None:
|
||||
"""Apply runs migrations when services need migration."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host1"),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=False, config=None)
|
||||
|
||||
mock_up.assert_called_once()
|
||||
call_args = mock_up.call_args
|
||||
assert call_args[0][1] == ["svc1"] # services list
|
||||
|
||||
def test_apply_executes_orphan_cleanup(self, tmp_path: Path) -> None:
|
||||
"""Apply stops orphaned services."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("old-svc@host1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=False, config=None)
|
||||
|
||||
mock_stop.assert_called_once_with(cfg)
|
||||
|
||||
def test_apply_no_orphans_skips_orphan_cleanup(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""--no-orphans flag skips orphan cleanup."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host1"),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=True, full=False, config=None)
|
||||
|
||||
# Should run migrations but not orphan cleanup
|
||||
mock_up.assert_called_once()
|
||||
mock_stop.assert_not_called()
|
||||
|
||||
# Orphans should not appear in output
|
||||
captured = capsys.readouterr()
|
||||
assert "old-svc" not in captured.out
|
||||
|
||||
def test_apply_no_orphans_nothing_to_do(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""--no-orphans with only orphans means nothing to do."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=True, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Nothing to apply" in captured.out
|
||||
|
||||
def test_apply_starts_missing_services(self, tmp_path: Path) -> None:
|
||||
"""Apply starts services that are in config but not in state."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_not_in_state",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=False, config=None)
|
||||
|
||||
mock_up.assert_called_once()
|
||||
call_args = mock_up.call_args
|
||||
assert call_args[0][1] == ["svc1"]
|
||||
|
||||
def test_apply_dry_run_shows_missing_services(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""Dry run shows services that would be started."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_not_in_state",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
):
|
||||
apply(dry_run=True, no_orphans=False, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Services to start" in captured.out
|
||||
assert "svc1" in captured.out
|
||||
assert "dry-run" in captured.out
|
||||
|
||||
def test_apply_full_refreshes_all_services(self, tmp_path: Path) -> None:
|
||||
"""--full runs up on all services to pick up config changes."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1"), _make_result("svc2")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=True, config=None)
|
||||
|
||||
mock_up.assert_called_once()
|
||||
call_args = mock_up.call_args
|
||||
# Should refresh all services in config
|
||||
assert set(call_args[0][1]) == {"svc1", "svc2"}
|
||||
|
||||
def test_apply_full_dry_run_shows_refresh(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""--full --dry-run shows services that would be refreshed."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
):
|
||||
apply(dry_run=True, no_orphans=False, full=True, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Services to refresh" in captured.out
|
||||
assert "svc1" in captured.out
|
||||
assert "svc2" in captured.out
|
||||
assert "dry-run" in captured.out
|
||||
|
||||
def test_apply_full_excludes_already_handled_services(self, tmp_path: Path) -> None:
|
||||
"""--full doesn't double-process services that are migrating or starting."""
|
||||
cfg = _make_config(tmp_path, {"svc1": "host1", "svc2": "host2", "svc3": "host1"})
|
||||
mock_results = [_make_result("svc1"), _make_result("svc3")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_not_in_state",
|
||||
return_value=["svc2"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host2"),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=True, config=None)
|
||||
|
||||
# up_services should be called 3 times: migrate, start, refresh
|
||||
assert mock_up.call_count == 3
|
||||
# Get the third call (refresh) and check it only has svc3
|
||||
refresh_call = mock_up.call_args_list[2]
|
||||
assert refresh_call[0][1] == ["svc3"]
|
||||
|
||||
|
||||
class TestDownOrphaned:
|
||||
"""Tests for down --orphaned flag."""
|
||||
|
||||
def test_down_orphaned_no_orphans(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""When no orphans exist, prints success message."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
):
|
||||
down(
|
||||
services=None,
|
||||
all_services=False,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "No orphaned services to stop" in captured.out
|
||||
|
||||
def test_down_orphaned_stops_services(self, tmp_path: Path) -> None:
|
||||
"""--orphaned stops orphaned services."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("old-svc@host1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
down(
|
||||
services=None,
|
||||
all_services=False,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
mock_stop.assert_called_once_with(cfg)
|
||||
|
||||
def test_down_orphaned_with_services_errors(self) -> None:
|
||||
"""--orphaned cannot be combined with service arguments."""
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
down(
|
||||
services=["svc1"],
|
||||
all_services=False,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
assert exc_info.value.exit_code == 1
|
||||
|
||||
def test_down_orphaned_with_all_errors(self) -> None:
|
||||
"""--orphaned cannot be combined with --all."""
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
down(
|
||||
services=None,
|
||||
all_services=True,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
assert exc_info.value.exit_code == 1
|
||||
|
||||
def test_down_orphaned_with_host_errors(self) -> None:
|
||||
"""--orphaned cannot be combined with --host."""
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
down(
|
||||
services=None,
|
||||
all_services=False,
|
||||
orphaned=True,
|
||||
host="host1",
|
||||
config=None,
|
||||
)
|
||||
|
||||
assert exc_info.value.exit_code == 1
|
||||
tests/test_cli_logs.py (new file, 207 lines)
@@ -0,0 +1,207 @@
"""Tests for CLI logs command."""
|
||||
|
||||
from collections.abc import Coroutine
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
import typer
|
||||
|
||||
from compose_farm.cli.monitoring import logs
|
||||
from compose_farm.config import Config, Host
|
||||
from compose_farm.executor import CommandResult
|
||||
|
||||
|
||||
def _make_config(tmp_path: Path) -> Config:
|
||||
"""Create a minimal config for testing."""
|
||||
compose_dir = tmp_path / "compose"
|
||||
compose_dir.mkdir()
|
||||
for svc in ("svc1", "svc2", "svc3"):
|
||||
svc_dir = compose_dir / svc
|
||||
svc_dir.mkdir()
|
||||
(svc_dir / "docker-compose.yml").write_text("services: {}\n")
|
||||
|
||||
return Config(
|
||||
compose_dir=compose_dir,
|
||||
hosts={"local": Host(address="localhost"), "remote": Host(address="192.168.1.10")},
|
||||
services={"svc1": "local", "svc2": "local", "svc3": "remote"},
|
||||
)
|
||||
|
||||
|
||||
def _make_result(service: str) -> CommandResult:
|
||||
"""Create a successful command result."""
|
||||
return CommandResult(service=service, exit_code=0, success=True, stdout="", stderr="")
|
||||
|
||||
|
||||
def _mock_run_async_factory(
|
||||
services: list[str],
|
||||
) -> tuple[Any, list[CommandResult]]:
|
||||
"""Create a mock run_async that returns results for given services."""
|
||||
results = [_make_result(s) for s in services]
|
||||
|
||||
def mock_run_async(_coro: Coroutine[Any, Any, Any]) -> list[CommandResult]:
|
||||
return results
|
||||
|
||||
return mock_run_async, results
|
||||
|
||||
|
||||
class TestLogsContextualDefault:
|
||||
"""Tests for logs --tail contextual default behavior."""
|
||||
|
||||
def test_logs_all_services_defaults_to_20(self, tmp_path: Path) -> None:
|
||||
"""When --all is specified, default tail should be 20."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
):
|
||||
mock_run.return_value = None
|
||||
|
||||
logs(services=None, all_services=True, host=None, follow=False, tail=None, config=None)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
assert call_args[0][2] == "logs --tail 20"
|
||||
|
||||
def test_logs_single_service_defaults_to_100(self, tmp_path: Path) -> None:
|
||||
"""When specific services are specified, default tail should be 100."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=["svc1"],
|
||||
all_services=False,
|
||||
host=None,
|
||||
follow=False,
|
||||
tail=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
assert call_args[0][2] == "logs --tail 100"
|
||||
|
||||
def test_logs_explicit_tail_overrides_default(self, tmp_path: Path) -> None:
|
||||
"""When --tail is explicitly provided, it should override the default."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=None,
|
||||
all_services=True,
|
||||
host=None,
|
||||
follow=False,
|
||||
tail=50,
|
||||
config=None,
|
||||
)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
assert call_args[0][2] == "logs --tail 50"
|
||||
|
||||
def test_logs_follow_appends_flag(self, tmp_path: Path) -> None:
|
||||
"""When --follow is specified, -f should be appended to command."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=["svc1"],
|
||||
all_services=False,
|
||||
host=None,
|
||||
follow=True,
|
||||
tail=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
assert call_args[0][2] == "logs --tail 100 -f"
|
||||
|
||||
|
||||
class TestLogsHostFilter:
|
||||
"""Tests for logs --host filter behavior."""
|
||||
|
||||
def test_logs_host_filter_selects_services_on_host(self, tmp_path: Path) -> None:
|
||||
"""When --host is specified, only services on that host are included."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=None,
|
||||
all_services=False,
|
||||
host="local",
|
||||
follow=False,
|
||||
tail=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
# svc1 and svc2 are on "local", svc3 is on "remote"
|
||||
assert set(call_args[0][1]) == {"svc1", "svc2"}
|
||||
|
||||
def test_logs_host_filter_defaults_to_20_lines(self, tmp_path: Path) -> None:
|
||||
"""When --host is specified, default tail should be 20 (multiple services)."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=None,
|
||||
all_services=False,
|
||||
host="local",
|
||||
follow=False,
|
||||
tail=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
assert call_args[0][2] == "logs --tail 20"
|
||||
|
||||
def test_logs_all_and_host_mutually_exclusive(self) -> None:
|
||||
"""Using --all and --host together should error."""
|
||||
# No config mock needed - error is raised before config is loaded
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
logs(
|
||||
services=None,
|
||||
all_services=True,
|
||||
host="local",
|
||||
follow=False,
|
||||
tail=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
assert exc_info.value.exit_code == 1
|
||||
tests/test_cli_startup.py (new file, 58 lines)
@@ -0,0 +1,58 @@
"""Test CLI startup performance."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
# Thresholds in seconds, per OS
|
||||
if sys.platform == "win32":
|
||||
CLI_STARTUP_THRESHOLD = 2.0
|
||||
elif sys.platform == "darwin":
|
||||
CLI_STARTUP_THRESHOLD = 0.35
|
||||
else: # Linux
|
||||
CLI_STARTUP_THRESHOLD = 0.25
|
||||
|
||||
|
||||
def test_cli_startup_time() -> None:
|
||||
"""Verify CLI startup time stays within acceptable bounds.
|
||||
|
||||
This test ensures we don't accidentally introduce slow imports
|
||||
that degrade the user experience.
|
||||
"""
|
||||
cf_path = shutil.which("cf")
|
||||
assert cf_path is not None, "cf command not found in PATH"
|
||||
|
||||
# Run up to 6 times, return early if we hit the threshold
|
||||
times: list[float] = []
|
||||
for _ in range(6):
|
||||
start = time.perf_counter()
|
||||
result = subprocess.run(
|
||||
[cf_path, "--help"],
|
||||
check=False,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
elapsed = time.perf_counter() - start
|
||||
times.append(elapsed)
|
||||
|
||||
# Verify the command succeeded
|
||||
assert result.returncode == 0, f"CLI failed: {result.stderr}"
|
||||
|
||||
# Pass early if under threshold
|
||||
if elapsed < CLI_STARTUP_THRESHOLD:
|
||||
print(f"\nCLI startup: {elapsed:.3f}s (threshold: {CLI_STARTUP_THRESHOLD}s)")
|
||||
return
|
||||
|
||||
# All attempts exceeded threshold
|
||||
best_time = min(times)
|
||||
msg = (
|
||||
f"\nCLI startup times: {[f'{t:.3f}s' for t in times]}\n"
|
||||
f"Best: {best_time:.3f}s, Threshold: {CLI_STARTUP_THRESHOLD}s"
|
||||
)
|
||||
print(msg)
|
||||
|
||||
err_msg = f"CLI startup too slow!\n{msg}\nCheck for slow imports."
|
||||
raise AssertionError(err_msg)
|
||||
@@ -128,6 +128,8 @@ class TestLoadConfig:
 
     def test_load_config_not_found(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
         monkeypatch.chdir(tmp_path)
+        monkeypatch.delenv("CF_CONFIG", raising=False)
+        monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "empty_config"))
         with pytest.raises(FileNotFoundError, match="Config file not found"):
             load_config()
 
tests/test_config_cmd.py (new file, 230 lines)
@@ -0,0 +1,230 @@
"""Tests for config command module."""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from typer.testing import CliRunner
|
||||
|
||||
from compose_farm.cli import app
|
||||
from compose_farm.cli.config import (
|
||||
_generate_template,
|
||||
_get_config_file,
|
||||
_get_editor,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def runner() -> CliRunner:
|
||||
return CliRunner()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def valid_config_data() -> dict[str, Any]:
|
||||
return {
|
||||
"compose_dir": "/opt/compose",
|
||||
"hosts": {"server1": "192.168.1.10"},
|
||||
"services": {"nginx": "server1"},
|
||||
}
|
||||
|
||||
|
||||
class TestGetEditor:
|
||||
"""Tests for _get_editor function."""
|
||||
|
||||
def test_uses_editor_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setenv("EDITOR", "code")
|
||||
monkeypatch.delenv("VISUAL", raising=False)
|
||||
assert _get_editor() == "code"
|
||||
|
||||
def test_uses_visual_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.delenv("EDITOR", raising=False)
|
||||
monkeypatch.setenv("VISUAL", "subl")
|
||||
assert _get_editor() == "subl"
|
||||
|
||||
def test_editor_takes_precedence(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setenv("EDITOR", "vim")
|
||||
monkeypatch.setenv("VISUAL", "code")
|
||||
assert _get_editor() == "vim"
|
||||
|
||||
|
||||
class TestGetConfigFile:
|
||||
"""Tests for _get_config_file function."""
|
||||
|
||||
def test_explicit_path(self, tmp_path: Path) -> None:
|
||||
config_file = tmp_path / "my-config.yaml"
|
||||
config_file.touch()
|
||||
result = _get_config_file(config_file)
|
||||
assert result == config_file.resolve()
|
||||
|
||||
def test_cf_config_env(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
config_file = tmp_path / "env-config.yaml"
|
||||
config_file.touch()
|
||||
monkeypatch.setenv("CF_CONFIG", str(config_file))
|
||||
result = _get_config_file(None)
|
||||
assert result == config_file.resolve()
|
||||
|
||||
def test_returns_none_when_not_found(
|
||||
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
# Set XDG_CONFIG_HOME to a nonexistent path - config_search_paths() will
|
||||
# now return paths that don't exist
|
||||
monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "nonexistent"))
|
||||
result = _get_config_file(None)
|
||||
assert result is None
|
||||
|
||||
|
||||
class TestGenerateTemplate:
|
||||
"""Tests for _generate_template function."""
|
||||
|
||||
def test_generates_valid_yaml(self) -> None:
|
||||
template = _generate_template()
|
||||
# Should be valid YAML
|
||||
data = yaml.safe_load(template)
|
||||
assert "compose_dir" in data
|
||||
assert "hosts" in data
|
||||
assert "services" in data
|
||||
|
||||
def test_has_documentation_comments(self) -> None:
|
||||
template = _generate_template()
|
||||
assert "# Compose Farm configuration" in template
|
||||
assert "hosts:" in template
|
||||
assert "services:" in template
|
||||
|
||||
|
||||
class TestConfigInit:
|
||||
"""Tests for cf config init command."""
|
||||
|
||||
def test_init_creates_file(
|
||||
self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "new-config.yaml"
|
||||
result = runner.invoke(app, ["config", "init", "-p", str(config_file)])
|
||||
assert result.exit_code == 0
|
||||
assert config_file.exists()
|
||||
assert "Config file created" in result.stdout
|
||||
|
||||
def test_init_force_overwrites(
|
||||
self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "existing.yaml"
|
||||
config_file.write_text("old content")
|
||||
result = runner.invoke(app, ["config", "init", "-p", str(config_file), "-f"])
|
||||
assert result.exit_code == 0
|
||||
content = config_file.read_text()
|
||||
assert "old content" not in content
|
||||
assert "compose_dir" in content
|
||||
|
||||
def test_init_prompts_on_existing(
|
||||
self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "existing.yaml"
|
||||
config_file.write_text("old content")
|
||||
result = runner.invoke(app, ["config", "init", "-p", str(config_file)], input="n\n")
|
||||
assert result.exit_code == 0
|
||||
assert "Aborted" in result.stdout
|
||||
assert config_file.read_text() == "old content"
|
||||
|
||||
|
||||
class TestConfigPath:
|
||||
"""Tests for cf config path command."""
|
||||
|
||||
def test_path_shows_config(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
config_file.write_text(yaml.dump(valid_config_data))
|
||||
result = runner.invoke(app, ["config", "path"])
|
||||
assert result.exit_code == 0
|
||||
assert str(config_file) in result.stdout
|
||||
|
||||
def test_path_with_explicit_path(self, runner: CliRunner, tmp_path: Path) -> None:
|
||||
# When explicitly provided, path is returned even if file doesn't exist
|
||||
nonexistent = tmp_path / "nonexistent.yaml"
|
||||
result = runner.invoke(app, ["config", "path", "-p", str(nonexistent)])
|
||||
assert result.exit_code == 0
|
||||
assert str(nonexistent) in result.stdout
|
||||
|
||||
|
||||
class TestConfigShow:
|
||||
"""Tests for cf config show command."""
|
||||
|
||||
def test_show_displays_content(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
config_file.write_text(yaml.dump(valid_config_data))
|
||||
result = runner.invoke(app, ["config", "show"])
|
||||
assert result.exit_code == 0
|
||||
assert "Config file:" in result.stdout
|
||||
|
||||
def test_show_raw_output(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
content = yaml.dump(valid_config_data)
|
||||
config_file.write_text(content)
|
||||
result = runner.invoke(app, ["config", "show", "-r"])
|
||||
assert result.exit_code == 0
|
||||
assert content in result.stdout
|
||||
|
||||
|
||||
class TestConfigValidate:
|
||||
"""Tests for cf config validate command."""
|
||||
|
||||
def test_validate_valid_config(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
config_file.write_text(yaml.dump(valid_config_data))
|
||||
result = runner.invoke(app, ["config", "validate"])
|
||||
assert result.exit_code == 0
|
||||
assert "Valid config" in result.stdout
|
||||
assert "Hosts: 1" in result.stdout
|
||||
assert "Services: 1" in result.stdout
|
||||
|
||||
def test_validate_invalid_config(self, runner: CliRunner, tmp_path: Path) -> None:
|
||||
config_file = tmp_path / "invalid.yaml"
|
||||
config_file.write_text("invalid: [yaml: content")
|
||||
result = runner.invoke(app, ["config", "validate", "-p", str(config_file)])
|
||||
assert result.exit_code == 1
|
||||
# Error goes to stderr (captured in output when using CliRunner)
|
||||
output = result.stdout + (result.stderr or "")
|
||||
assert "Invalid config" in output or "✗" in output
|
||||
|
||||
def test_validate_missing_config(self, runner: CliRunner, tmp_path: Path) -> None:
|
||||
nonexistent = tmp_path / "nonexistent.yaml"
|
||||
result = runner.invoke(app, ["config", "validate", "-p", str(nonexistent)])
|
||||
assert result.exit_code == 1
|
||||
# Error goes to stderr
|
||||
output = result.stdout + (result.stderr or "")
|
||||
assert "Config file not found" in output or "not found" in output.lower()
|
||||
@@ -8,10 +8,10 @@ import pytest
 from compose_farm.config import Config, Host
 from compose_farm.executor import (
     CommandResult,
-    _is_local,
     _run_local_command,
     check_networks_exist,
     check_paths_exist,
+    is_local,
     run_command,
     run_compose,
     run_on_services,
@@ -22,7 +22,7 @@ linux_only = pytest.mark.skipif(sys.platform != "linux", reason="Linux-only shel
 
 
 class TestIsLocal:
-    """Tests for _is_local function."""
+    """Tests for is_local function."""
 
     @pytest.mark.parametrize(
         "address",
@@ -30,7 +30,7 @@
     )
     def test_local_addresses(self, address: str) -> None:
        host = Host(address=address)
-        assert _is_local(host) is True
+        assert is_local(host) is True
 
     @pytest.mark.parametrize(
         "address",
@@ -38,7 +38,7 @@
     )
     def test_remote_addresses(self, address: str) -> None:
         host = Host(address=address)
-        assert _is_local(host) is False
+        assert is_local(host) is False
 
 
 class TestRunLocalCommand:
@@ -9,7 +9,14 @@ import pytest
 
 from compose_farm.config import Config, Host
 from compose_farm.executor import CommandResult
-from compose_farm.logs import _parse_images_output, snapshot_services
+from compose_farm.logs import (
+    _parse_images_output,
+    collect_service_entries,
+    isoformat,
+    load_existing_entries,
+    merge_entries,
+    write_toml,
+)
 
 
 def test_parse_images_output_handles_list_and_lines() -> None:
@@ -55,26 +62,29 @@ async def test_snapshot_preserves_first_seen(tmp_path: Path) -> None:
 
     log_path = tmp_path / "dockerfarm-log.toml"
 
     # First snapshot
     first_time = datetime(2025, 1, 1, tzinfo=UTC)
-    await snapshot_services(
-        config,
-        ["svc"],
-        log_path=log_path,
-        now=first_time,
-        run_compose_fn=fake_run_compose,
+    first_entries = await collect_service_entries(
+        config, "svc", now=first_time, run_compose_fn=fake_run_compose
     )
+    first_iso = isoformat(first_time)
+    merged = merge_entries([], first_entries, now_iso=first_iso)
+    meta = {"generated_at": first_iso, "compose_dir": str(config.compose_dir)}
+    write_toml(log_path, meta=meta, entries=merged)
 
     after_first = tomllib.loads(log_path.read_text())
     first_seen = after_first["entries"][0]["first_seen"]
 
     # Second snapshot
     second_time = datetime(2025, 2, 1, tzinfo=UTC)
-    await snapshot_services(
-        config,
-        ["svc"],
-        log_path=log_path,
-        now=second_time,
-        run_compose_fn=fake_run_compose,
+    second_entries = await collect_service_entries(
+        config, "svc", now=second_time, run_compose_fn=fake_run_compose
     )
+    second_iso = isoformat(second_time)
+    existing = load_existing_entries(log_path)
+    merged = merge_entries(existing, second_entries, now_iso=second_iso)
+    meta = {"generated_at": second_iso, "compose_dir": str(config.compose_dir)}
+    write_toml(log_path, meta=meta, entries=merged)
 
     after_second = tomllib.loads(log_path.read_text())
     entry = after_second["entries"][0]
 
tests/test_operations.py (new file, 111 lines)
@@ -0,0 +1,111 @@
"""Tests for operations module."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from compose_farm.cli import lifecycle
|
||||
from compose_farm.config import Config, Host
|
||||
from compose_farm.executor import CommandResult
|
||||
from compose_farm.operations import _migrate_service
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def basic_config(tmp_path: Path) -> Config:
|
||||
"""Create a basic test config."""
|
||||
compose_dir = tmp_path / "compose"
|
||||
service_dir = compose_dir / "test-service"
|
||||
service_dir.mkdir(parents=True)
|
||||
(service_dir / "docker-compose.yml").write_text("services: {}")
|
||||
return Config(
|
||||
compose_dir=compose_dir,
|
||||
hosts={
|
||||
"host1": Host(address="localhost"),
|
||||
"host2": Host(address="localhost"),
|
||||
},
|
||||
services={"test-service": "host2"},
|
||||
)
|
||||
|
||||
|
||||
class TestMigrationCommands:
|
||||
"""Tests for migration command sequence."""
|
||||
|
||||
@pytest.fixture
|
||||
def config(self, tmp_path: Path) -> Config:
|
||||
"""Create a test config."""
|
||||
compose_dir = tmp_path / "compose"
|
||||
service_dir = compose_dir / "test-service"
|
||||
service_dir.mkdir(parents=True)
|
||||
(service_dir / "docker-compose.yml").write_text("services: {}")
|
||||
return Config(
|
||||
compose_dir=compose_dir,
|
||||
hosts={
|
||||
"host1": Host(address="localhost"),
|
||||
"host2": Host(address="localhost"),
|
||||
},
|
||||
services={"test-service": "host2"},
|
||||
)
|
||||
|
||||
async def test_migration_uses_pull_ignore_buildable(self, config: Config) -> None:
|
||||
"""Migration should use 'pull --ignore-buildable' to skip buildable images."""
|
||||
commands_called: list[str] = []
|
||||
|
||||
async def mock_run_compose_step(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
command: str,
|
||||
*,
|
||||
raw: bool,
|
||||
host: str | None = None,
|
||||
) -> CommandResult:
|
||||
commands_called.append(command)
|
||||
return CommandResult(
|
||||
service=service,
|
||||
exit_code=0,
|
||||
success=True,
|
||||
)
|
||||
|
||||
with patch(
|
||||
"compose_farm.operations._run_compose_step",
|
||||
side_effect=mock_run_compose_step,
|
||||
):
|
||||
await _migrate_service(
|
||||
config,
|
||||
"test-service",
|
||||
current_host="host1",
|
||||
target_host="host2",
|
||||
prefix="[test]",
|
||||
raw=False,
|
||||
)
|
||||
|
||||
# Migration should call pull with --ignore-buildable, then build, then down
|
||||
assert "pull --ignore-buildable" in commands_called
|
||||
assert "build" in commands_called
|
||||
assert "down" in commands_called
|
||||
# pull should come before build
|
||||
pull_idx = commands_called.index("pull --ignore-buildable")
|
||||
build_idx = commands_called.index("build")
|
||||
assert pull_idx < build_idx
|
||||
|
||||
|
||||
class TestUpdateCommandSequence:
|
||||
"""Tests for update command sequence."""
|
||||
|
||||
def test_update_command_sequence_includes_build(self) -> None:
|
||||
"""Update command should use pull --ignore-buildable and build."""
|
||||
# This is a static check of the command sequence in lifecycle.py
|
||||
# The actual command sequence is defined in the update function
|
||||
|
||||
source = inspect.getsource(lifecycle.update)
|
||||
|
||||
# Verify the command sequence includes pull --ignore-buildable
|
||||
assert "pull --ignore-buildable" in source
|
||||
# Verify build is included
|
||||
assert '"build"' in source or "'build'" in source
|
||||
# Verify the sequence is pull, build, down, up
|
||||
assert "down" in source
|
||||
assert "up -d" in source
|
||||
@@ -1,15 +1,13 @@
 """Tests for sync command and related functions."""
 
 from pathlib import Path
 from typing import Any
 from unittest.mock import AsyncMock, patch
 
 import pytest
 
-from compose_farm import cli as cli_module
-from compose_farm import executor as executor_module
 from compose_farm import operations as operations_module
-from compose_farm import state as state_module
+from compose_farm.cli import management as cli_management_module
 from compose_farm.config import Config, Host
 from compose_farm.executor import CommandResult, check_service_running
 
@@ -95,48 +93,12 @@
         assert result is False
 
 
-class TestDiscoverRunningServices:
-    """Tests for discover_running_services function."""
-
-    @pytest.mark.asyncio
-    async def test_discovers_on_assigned_host(self, mock_config: Config) -> None:
-        """Discovers service running on its assigned host."""
-        with patch.object(
-            operations_module, "check_service_running", new_callable=AsyncMock
-        ) as mock_check:
-            # plex running on nas01, jellyfin not running, sonarr on nas02
-            async def check_side_effect(_cfg: Any, service: str, host: str) -> bool:
-                return (service == "plex" and host == "nas01") or (
-                    service == "sonarr" and host == "nas02"
-                )
-
-            mock_check.side_effect = check_side_effect
-
-            result = await operations_module.discover_running_services(mock_config)
-            assert result == {"plex": "nas01", "sonarr": "nas02"}
-
-    @pytest.mark.asyncio
-    async def test_discovers_on_different_host(self, mock_config: Config) -> None:
-        """Discovers service running on non-assigned host (after migration)."""
-        with patch.object(
-            operations_module, "check_service_running", new_callable=AsyncMock
-        ) as mock_check:
-            # plex migrated to nas02
-            async def check_side_effect(_cfg: Any, service: str, host: str) -> bool:
-                return service == "plex" and host == "nas02"
-
-            mock_check.side_effect = check_side_effect
-
-            result = await operations_module.discover_running_services(mock_config)
-            assert result == {"plex": "nas02"}
-
-
 class TestReportSyncChanges:
     """Tests for _report_sync_changes function."""
 
     def test_reports_added(self, capsys: pytest.CaptureFixture[str]) -> None:
         """Reports newly discovered services."""
-        cli_module._report_sync_changes(
+        cli_management_module._report_sync_changes(
             added=["plex", "jellyfin"],
             removed=[],
             changed=[],
@@ -150,7 +112,7 @@
 
     def test_reports_removed(self, capsys: pytest.CaptureFixture[str]) -> None:
         """Reports services that are no longer running."""
-        cli_module._report_sync_changes(
+        cli_management_module._report_sync_changes(
             added=[],
             removed=["sonarr"],
             changed=[],
@@ -163,7 +125,7 @@
 
     def test_reports_changed(self, capsys: pytest.CaptureFixture[str]) -> None:
         """Reports services that moved to a different host."""
-        cli_module._report_sync_changes(
+        cli_management_module._report_sync_changes(
             added=[],
             removed=[],
             changed=[("plex", "nas01", "nas02")],
@@ -6,7 +6,9 @@ import pytest
 
 from compose_farm.config import Config, Host
 from compose_farm.state import (
+    get_orphaned_services,
     get_service_host,
+    get_services_not_in_state,
     load_state,
     remove_service,
     save_state,
@@ -130,3 +132,110 @@
 
         result = load_state(config)
         assert result["plex"] == "nas01"
+
+
+class TestGetOrphanedServices:
+    """Tests for get_orphaned_services function."""
+
+    def test_no_orphans(self, config: Config) -> None:
+        """Returns empty dict when all services in state are in config."""
+        state_file = config.get_state_path()
+        state_file.write_text("deployed:\n plex: nas01\n")
+
+        result = get_orphaned_services(config)
+        assert result == {}
+
+    def test_finds_orphaned_service(self, config: Config) -> None:
+        """Returns services in state but not in config."""
+        state_file = config.get_state_path()
+        state_file.write_text("deployed:\n plex: nas01\n jellyfin: nas02\n")
+
+        result = get_orphaned_services(config)
+        # plex is in config, jellyfin is not
+        assert result == {"jellyfin": "nas02"}
+
+    def test_finds_orphaned_multi_host_service(self, config: Config) -> None:
+        """Returns multi-host orphaned services with host list."""
+        state_file = config.get_state_path()
+        state_file.write_text("deployed:\n plex: nas01\n dozzle:\n - nas01\n - nas02\n")
+
+        result = get_orphaned_services(config)
+        assert result == {"dozzle": ["nas01", "nas02"]}
+
+    def test_empty_state(self, config: Config) -> None:
+        """Returns empty dict when state is empty."""
+        result = get_orphaned_services(config)
+        assert result == {}
+
+    def test_all_orphaned(self, tmp_path: Path) -> None:
+        """Returns all services when none are in config."""
+        config_path = tmp_path / "compose-farm.yaml"
+        config_path.write_text("")
+        cfg = Config(
+            compose_dir=tmp_path / "compose",
+            hosts={"nas01": Host(address="192.168.1.10")},
+            services={},  # No services in config
+            config_path=config_path,
+        )
+        state_file = cfg.get_state_path()
+        state_file.write_text("deployed:\n plex: nas01\n jellyfin: nas02\n")
+
+        result = get_orphaned_services(cfg)
+        assert result == {"plex": "nas01", "jellyfin": "nas02"}
+
+
+class TestGetServicesNotInState:
+    """Tests for get_services_not_in_state function."""
+
+    def test_all_in_state(self, config: Config) -> None:
+        """Returns empty list when all services are in state."""
+        state_file = config.get_state_path()
+        state_file.write_text("deployed:\n plex: nas01\n")
+
+        result = get_services_not_in_state(config)
+        assert result == []
+
+    def test_finds_missing_service(self, tmp_path: Path) -> None:
+        """Returns services in config but not in state."""
+        config_path = tmp_path / "compose-farm.yaml"
+        config_path.write_text("")
+        cfg = Config(
+            compose_dir=tmp_path / "compose",
+            hosts={"nas01": Host(address="192.168.1.10")},
+            services={"plex": "nas01", "jellyfin": "nas01"},
+            config_path=config_path,
+        )
+        state_file = cfg.get_state_path()
+        state_file.write_text("deployed:\n plex: nas01\n")
+
+        result = get_services_not_in_state(cfg)
+        assert result == ["jellyfin"]
+
+    def test_empty_state(self, tmp_path: Path) -> None:
+        """Returns all services when state is empty."""
+        config_path = tmp_path / "compose-farm.yaml"
+        config_path.write_text("")
+        cfg = Config(
+            compose_dir=tmp_path / "compose",
+            hosts={"nas01": Host(address="192.168.1.10")},
+            services={"plex": "nas01", "jellyfin": "nas01"},
+            config_path=config_path,
+        )
+
+        result = get_services_not_in_state(cfg)
+        assert set(result) == {"plex", "jellyfin"}
+
+    def test_empty_config(self, config: Config) -> None:
+        """Returns empty list when config has no services."""
+        # config fixture has plex: nas01, but we need empty config
+        config_path = config.config_path
+        config_path.write_text("")
+        cfg = Config(
+            compose_dir=config.compose_dir,
+            hosts={"nas01": Host(address="192.168.1.10")},
+            services={},
+            config_path=config_path,
+        )
+
+        result = get_services_not_in_state(cfg)
+        assert result == []
tests/web/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Web UI tests."""
87
tests/web/conftest.py
Normal file
87
tests/web/conftest.py
Normal file
@@ -0,0 +1,87 @@
"""Fixtures for web UI tests."""

from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

import pytest

if TYPE_CHECKING:
    from compose_farm.config import Config


@pytest.fixture
def compose_dir(tmp_path: Path) -> Path:
    """Create a temporary compose directory with sample services."""
    compose_path = tmp_path / "compose"
    compose_path.mkdir()

    # Create a sample service
    plex_dir = compose_path / "plex"
    plex_dir.mkdir()
    (plex_dir / "compose.yaml").write_text("""
services:
  plex:
    image: plexinc/pms-docker
    container_name: plex
    ports:
      - "32400:32400"
""")
    (plex_dir / ".env").write_text("PLEX_CLAIM=claim-xxx\n")

    # Create another service
    sonarr_dir = compose_path / "sonarr"
    sonarr_dir.mkdir()
    (sonarr_dir / "compose.yaml").write_text("""
services:
  sonarr:
    image: linuxserver/sonarr
""")

    return compose_path


@pytest.fixture
def config_file(tmp_path: Path, compose_dir: Path) -> Path:
    """Create a temporary config file and state file."""
    config_path = tmp_path / "compose-farm.yaml"
    config_path.write_text(f"""
compose_dir: {compose_dir}

hosts:
  server-1:
    address: 192.168.1.10
    user: docker
  server-2:
    address: 192.168.1.11

services:
  plex: server-1
  sonarr: server-2
""")

    # State file must be alongside the config file
    state_path = tmp_path / "compose-farm-state.yaml"
    state_path.write_text("""
deployed:
  plex: server-1
""")

    return config_path


@pytest.fixture
def mock_config(config_file: Path, monkeypatch: pytest.MonkeyPatch) -> Config:
    """Patch get_config to return a test config."""
    from compose_farm.config import load_config
    from compose_farm.web import deps as web_deps
    from compose_farm.web.routes import api as web_api

    config = load_config(config_file)

    # Patch in all modules that import get_config
    monkeypatch.setattr(web_deps, "get_config", lambda: config)
    monkeypatch.setattr(web_api, "get_config", lambda: config)

    return config
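As a hypothetical usage note (not part of the diff): any test that declares mock_config as a parameter receives the loaded Config with get_config patched in both the deps and api modules, so route code resolves the test config transparently.

# Illustrative only; assumes Config.services loads as a plain mapping
# of service name to host name, matching the config_file fixture above.
def test_mock_config_is_patched(mock_config) -> None:
    assert mock_config.services == {"plex": "server-1", "sonarr": "server-2"}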
106  tests/web/test_helpers.py  Normal file
@@ -0,0 +1,106 @@
"""Tests for web API helper functions."""

from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

import pytest
from fastapi import HTTPException

if TYPE_CHECKING:
    from compose_farm.config import Config


class TestValidateYaml:
    """Tests for _validate_yaml helper."""

    def test_valid_yaml(self) -> None:
        from compose_farm.web.routes.api import _validate_yaml

        # Should not raise
        _validate_yaml("key: value")
        _validate_yaml("list:\n - item1\n - item2")
        _validate_yaml("")

    def test_invalid_yaml(self) -> None:
        from compose_farm.web.routes.api import _validate_yaml

        with pytest.raises(HTTPException) as exc_info:
            _validate_yaml("key: [unclosed")

        assert exc_info.value.status_code == 400
        assert "Invalid YAML" in exc_info.value.detail


class TestGetServiceComposePath:
    """Tests for _get_service_compose_path helper."""

    def test_service_found(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _get_service_compose_path

        path = _get_service_compose_path("plex")
        assert isinstance(path, Path)
        assert path.name == "compose.yaml"
        assert path.parent.name == "plex"

    def test_service_not_found(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _get_service_compose_path

        with pytest.raises(HTTPException) as exc_info:
            _get_service_compose_path("nonexistent")

        assert exc_info.value.status_code == 404
        assert "not found" in exc_info.value.detail


class TestRenderContainers:
    """Tests for container template rendering."""

    def test_render_running_container(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _render_containers

        containers = [{"Name": "plex", "State": "running"}]
        html = _render_containers("plex", "server-1", containers)

        assert "badge-success" in html
        assert "plex" in html
        assert "initExecTerminal" in html

    def test_render_unknown_state(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _render_containers

        containers = [{"Name": "plex", "State": "unknown"}]
        html = _render_containers("plex", "server-1", containers)

        assert "loading-spinner" in html

    def test_render_other_state(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _render_containers

        containers = [{"Name": "plex", "State": "exited"}]
        html = _render_containers("plex", "server-1", containers)

        assert "badge-warning" in html
        assert "exited" in html

    def test_render_with_header(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _render_containers

        containers = [{"Name": "plex", "State": "running"}]
        html = _render_containers("plex", "server-1", containers, show_header=True)

        assert "server-1" in html
        assert "font-semibold" in html

    def test_render_multiple_containers(self, mock_config: Config) -> None:
        from compose_farm.web.routes.api import _render_containers

        containers = [
            {"Name": "app-web-1", "State": "running"},
            {"Name": "app-db-1", "State": "running"},
        ]
        html = _render_containers("app", "server-1", containers)

        assert "app-web-1" in html
        assert "app-db-1" in html
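Finally, a minimal sketch of a _validate_yaml that would satisfy TestValidateYaml above, assuming the real helper simply wraps yaml.safe_load and converts parse failures into HTTP 400 responses; the actual route code may differ.

# Hedged sketch, not the actual helper from compose_farm.web.routes.api.
import yaml
from fastapi import HTTPException


def _validate_yaml_sketch(content: str) -> None:
    try:
        yaml.safe_load(content)
    except yaml.YAMLError as exc:  # covers both scanner and parser errors
        raise HTTPException(status_code=400, detail=f"Invalid YAML: {exc}") from exc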