Mirror of https://github.com/basnijholt/compose-farm.git
Synced 2026-02-03 14:13:26 +00:00

Compare commits: 49 commits
| SHA1 |
|---|
| cf94a62f37 |
| 81b4074827 |
| 455657c8df |
| ee5a92788a |
| 2ba396a419 |
| 7144d58160 |
| 279fa2e5ef |
| dbe0b8b597 |
| b7315d255a |
| f003d2931f |
| 6f7c557065 |
| ecb6ee46b1 |
| 354967010f |
| 57122f31a3 |
| cbbcec0d14 |
| de38c35b8a |
| def996ddf4 |
| 790e32e96b |
| fd75c4d87f |
| 411a99cbc4 |
| d2c6ab72b2 |
| 3656584eda |
| 8be370098d |
| 45057cb6df |
| 3f24484d60 |
| b6d50a22b4 |
| 8a658210e1 |
| 583aaaa080 |
| 22ca4f64e8 |
| 32e798fcaa |
| ced81c8b50 |
| 7ec4b71101 |
| 94aa58d380 |
| f8d88e6f97 |
| a95f6309b0 |
| 502de018af |
| a3e8daad33 |
| 78a2f65c94 |
| 1689a6833a |
| 6d2f32eadf |
| c549dd50c9 |
| 82312e9421 |
| e13b367188 |
| d73049cc1b |
| 4373b23cd3 |
| 73eb6ccf41 |
| 6ca48d0d56 |
| b82599005e |
| b044053674 |
`.github/workflows/ci.yml` (vendored, 8 changed lines)

```diff
@@ -16,10 +16,10 @@ jobs:
         python-version: ["3.11", "3.12", "3.13"]

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6

       - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7

       - name: Set up Python ${{ matrix.python-version }}
         run: uv python install ${{ matrix.python-version }}
@@ -39,10 +39,10 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6

       - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7

       - name: Set up Python
         run: uv python install 3.12
```
`.github/workflows/docker.yml` (vendored, new file, 92 lines)

```yaml
name: Build and Push Docker Image

on:
  workflow_run:
    workflows: ["Upload Python Package"]
    types: [completed]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to build (leave empty for latest)'
        required: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    # Only run if PyPI upload succeeded (or manual dispatch)
    if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }}
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "workflow_run" ]; then
            # Get version from the tag that triggered the release
            VERSION="${{ github.event.workflow_run.head_branch }}"
            # Strip 'v' prefix if present
            VERSION="${VERSION#v}"
          elif [ -n "${{ github.event.inputs.version }}" ]; then
            VERSION="${{ github.event.inputs.version }}"
          else
            VERSION=""
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Wait for PyPI
        if: steps.version.outputs.version != ''
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          echo "Waiting for compose-farm==$VERSION on PyPI..."
          for i in {1..30}; do
            if curl -sf "https://pypi.org/pypi/compose-farm/$VERSION/json" > /dev/null; then
              echo "✓ Version $VERSION available on PyPI"
              exit 0
            fi
            echo "Attempt $i: not yet available, waiting 10s..."
            sleep 10
          done
          echo "✗ Timeout waiting for PyPI"
          exit 1

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}},value=v${{ steps.version.outputs.version }}
            type=semver,pattern={{major}}.{{minor}},value=v${{ steps.version.outputs.version }}
            type=semver,pattern={{major}},value=v${{ steps.version.outputs.version }}
            type=raw,value=latest

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            VERSION=${{ steps.version.outputs.version }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
```
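The `workflow_dispatch` trigger means a specific release can also be rebuilt by hand. A sketch with the GitHub CLI (assuming an authenticated `gh`; the version value is illustrative):

```bash
# Rebuild the image for a pinned release via the optional `version` input
gh workflow run docker.yml -f version=0.5.0

# Or omit the input to build the latest PyPI release
gh workflow run docker.yml
```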
`.github/workflows/release.yml` (vendored, 4 changed lines)

```diff
@@ -13,9 +13,9 @@ jobs:
     permissions:
       id-token: write
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
       - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7
       - name: Build
        run: uv build
      - name: Publish package distributions to PyPI
```
`.github/workflows/update-readme.yml` (vendored, 6 changed lines)

```diff
@@ -11,16 +11,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           persist-credentials: false
           fetch-depth: 0

       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6

       - name: Install uv
-        uses: astral-sh/setup-uv@v6
+        uses: astral-sh/setup-uv@v7

       - name: Run markdown-code-runner
         env:
```
`CLAUDE.md` (25 changed lines)

````diff
@@ -10,14 +10,22 @@

 ```
 compose_farm/
-├── cli.py           # Typer commands (thin layer, delegates to operations)
+├── cli/              # CLI subpackage
+│   ├── __init__.py   # Imports modules to trigger command registration
+│   ├── app.py        # Shared Typer app instance, version callback
+│   ├── common.py     # Shared helpers, options, progress bar utilities
+│   ├── config.py     # Config subcommand (init, show, path, validate, edit)
+│   ├── lifecycle.py  # up, down, pull, restart, update commands
+│   ├── management.py # sync, check, init-network, traefik-file commands
+│   └── monitoring.py # logs, ps, stats commands
 ├── config.py        # Pydantic models, YAML loading
 ├── compose.py       # Compose file parsing (.env, ports, volumes, networks)
+├── console.py       # Shared Rich console instances
 ├── executor.py      # SSH/local command execution, streaming output
 ├── operations.py    # Business logic (up, migrate, discover, preflight checks)
 ├── state.py         # Deployment state tracking (which service on which host)
 ├── logs.py          # Image digest snapshots (dockerfarm-log.toml)
 └── traefik.py       # Traefik file-provider config generation from labels
 ```

 ## Key Design Decisions
@@ -59,3 +67,4 @@ CLI available as `cf` or `compose-farm`.
 | `check` | Validate config, traefik labels, mounts, networks; show host compatibility |
 | `init-network` | Create Docker network on hosts with consistent subnet/gateway |
 | `traefik-file` | Generate Traefik file-provider config from compose labels |
+| `config` | Manage config files (init, show, path, validate, edit) |
````
`Dockerfile` (new file, 16 lines)

```dockerfile
# syntax=docker/dockerfile:1
FROM ghcr.io/astral-sh/uv:python3.14-alpine

# Install SSH client (required for remote host connections)
RUN apk add --no-cache openssh-client

# Install compose-farm from PyPI
ARG VERSION
RUN uv tool install compose-farm${VERSION:+==$VERSION}

# Add uv tool bin to PATH
ENV PATH="/root/.local/bin:$PATH"

# Default entrypoint
ENTRYPOINT ["cf"]
CMD ["--help"]
```
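The `${VERSION:+==$VERSION}` expansion is what makes the build arg optional: with `VERSION` unset or empty the suffix vanishes and the latest release is installed; otherwise the install is pinned. A quick shell check (the version value is illustrative):

```bash
VERSION=""
echo "compose-farm${VERSION:+==$VERSION}"   # prints: compose-farm          (installs latest)

VERSION="0.5.0"
echo "compose-farm${VERSION:+==$VERSION}"   # prints: compose-farm==0.5.0   (pinned)
```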
`LICENSE` (new file, 21 lines)

```text
MIT License

Copyright (c) 2025 Bas Nijholt

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
`PLAN.md` (deleted, 35 lines removed). Former content:

```markdown
# Compose Farm – Traefik Multihost Ingress Plan

## Goal
Generate a Traefik file-provider fragment from existing docker-compose Traefik labels (no config duplication) so a single front-door Traefik on 192.168.1.66 with wildcard `*.lab.mydomain.org` can route to services running on other hosts. Keep the current simplicity (SSH + docker compose); no Swarm/K8s.

## Requirements
- Traefik stays on main host; keep current `dynamic.yml` and Docker provider for local containers.
- Add a watched directory provider (any path works) and load a generated fragment (e.g., `compose-farm.generated.yml`).
- No edits to compose files: reuse existing `traefik.*` labels as the single source of truth; Compose Farm only reads them.
- Generator infers routing from labels and reachability from `ports:` mappings; prefer host-published ports so Traefik can reach services across hosts. Upstreams point to `<host address>:<published host port>`; warn if no published port is found.
- Only minimal data in `compose-farm.yaml`: hosts map and service→host mapping (already present).
- No new orchestration/discovery layers; respect KISS/YAGNI/DRY.

## Non-Goals
- No Swarm/Kubernetes adoption.
- No global Docker provider across hosts.
- No health checks/service discovery layer.

## Current State (Dec 2025)
- Compose Farm: Typer CLI wrapping `docker compose` over SSH; config in `compose-farm.yaml`; parallel by default; snapshot/log tooling present.
- Traefik: single instance on 192.168.1.66, wildcard `*.lab.mydomain.org`, Docker provider for local services, file provider via `dynamic.yml` already in use.

## Proposed Implementation Steps
1) Add generator command: `compose-farm traefik-file --output <path>`.
2) Resolve per-service host from `compose-farm.yaml`; read compose file at `{compose_dir}/{service}/docker-compose.yml`.
3) Parse `traefik.*` labels to build routers/services/middlewares as in compose; map container port to published host port (from `ports:`) to form upstream URLs with host address.
4) Emit file-provider YAML to the watched directory (recommended default: `/mnt/data/traefik/dynamic.d/compose-farm.generated.yml`, but user chooses via `--output`).
5) Warnings: if no published port is found, warn that cross-host reachability requires L3 reachability to container IPs.
6) Tests: label parsing, port mapping, YAML render; scenario with published port; scenario without published port.
7) Docs: update README/CLAUDE to describe directory provider flags and the generator workflow; note that compose files remain unchanged.

## Open Questions
- How to derive target host address: use `hosts.<name>.address` verbatim, or allow override per service? (Default: use host address.)
- Should we support multiple hosts/backends per service for LB/HA? (Start with single server.)
- Where to store generated file by default? (Default to user-specified `--output`; maybe fallback to `./compose-farm-traefik.yml`.)
```
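To make step 3 of the plan concrete: for a service whose compose file carries a router-rule label and publishes a port (say `32400:32400`) on a host at `192.168.1.67`, the emitted fragment would look roughly like this sketch (service name, rule, and addresses are illustrative, not taken from the repo):

```yaml
# Hypothetical fragment emitted by `compose-farm traefik-file`
http:
  routers:
    plex:
      rule: Host(`plex.lab.mydomain.org`)  # derived from the service's traefik.* labels
      service: plex
  services:
    plex:
      loadBalancer:
        servers:
          - url: "http://192.168.1.67:32400"  # <host address>:<published host port>
```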
`README.md` (158 changed lines)

````diff
@@ -1,5 +1,10 @@
 # Compose Farm

+[](https://pypi.org/project/compose-farm/)
+[](https://pypi.org/project/compose-farm/)
+[](LICENSE)
+[](https://github.com/basnijholt/compose-farm/stargazers)
+
 <img src="http://files.nijho.lt/compose-farm.png" align="right" style="width: 300px;" />

 A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
@@ -20,21 +25,41 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
 - [Installation](#installation)
 - [Configuration](#configuration)
+- [Multi-Host Services](#multi-host-services)
+- [Config Command](#config-command)
 - [Usage](#usage)
 - [Auto-Migration](#auto-migration)
 - [Traefik Multihost Ingress (File Provider)](#traefik-multihost-ingress-file-provider)
+- [Comparison with Alternatives](#comparison-with-alternatives)
 - [License](#license)

 <!-- END doctoc generated TOC please keep comment here to allow auto update -->

 ## Why Compose Farm?

-I run 100+ Docker Compose stacks on an LXC container that frequently runs out of memory. I needed a way to distribute services across multiple machines without the complexity of:
+I used to run 100+ Docker Compose stacks on a single machine that kept running out of memory. I needed a way to distribute services across multiple machines without the complexity of:

 - **Kubernetes**: Overkill for my use case. I don't need pods, services, ingress controllers, or YAML manifests 10x the size of my compose files.
 - **Docker Swarm**: Effectively in maintenance mode—no longer being invested in by Docker.

-**Compose Farm is intentionally simple**: one YAML config mapping services to hosts, and a CLI that runs `docker compose` commands over SSH. That's it.
+Both require changes to your compose files. **Compose Farm requires zero changes**—your existing `docker-compose.yml` files work as-is.

+I also wanted a declarative setup—one config file that defines where everything runs. Change the config, run `up`, and services migrate automatically. See [Comparison with Alternatives](#comparison-with-alternatives) for how this compares to other approaches.
+
+<p align="center">
+  <a href="https://xkcd.com/927/">
+    <img src="https://imgs.xkcd.com/comics/standards.png" alt="xkcd: Standards" width="400" />
+  </a>
+</p>
+
+Before you say it—no, this is not a new standard. I changed nothing about my existing setup. When I added more hosts, I just mounted my drives at the same paths, and everything worked. You can do all of this manually today—SSH into a host and run `docker compose up`.
+
+Compose Farm just automates what you'd do by hand:
+- Runs `docker compose` commands over SSH
+- Tracks which service runs on which host
+- Auto-migrates services when you change the host assignment
+- Generates Traefik file-provider config for cross-host routing
+
+**It's a convenience wrapper, not a new paradigm.**
+
 ## How It Works
@@ -108,6 +133,23 @@
 uv tool install compose-farm
 pip install compose-farm
 ```

+<details><summary>🐳 Docker</summary>
+
+Using the provided `docker-compose.yml`:
+```bash
+docker compose run --rm cf up --all
+```
+
+Or directly:
+```bash
+docker run --rm \
+  -v $SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent \
+  -v ./compose-farm.yaml:/root/.config/compose-farm/compose-farm.yaml:ro \
+  ghcr.io/basnijholt/compose-farm up --all
+```
+
+</details>
+
 ## Configuration

 Create `~/.config/compose-farm/compose-farm.yaml` (or `./compose-farm.yaml` in your working directory):
@@ -166,10 +208,43 @@ When you run `cf up autokuma`, it starts the service on all hosts in parallel.
 - Show output with `[service@host]` prefix for each host
 - Track all running hosts in state

+### Config Command
+
+Compose Farm includes a `config` subcommand to help manage configuration files:
+
+```bash
+cf config init      # Create a new config file with documented example
+cf config show      # Display current config with syntax highlighting
+cf config path      # Print the config file path (useful for scripting)
+cf config validate  # Validate config syntax and schema
+cf config edit      # Open config in $EDITOR
+```
+
+Use `cf config init` to get started with a fully documented template.
+
 ## Usage

+The CLI is available as both `compose-farm` and the shorter `cf` alias.
+
+| Command | Description |
+|---------|-------------|
+| `cf up <svc>` | Start service (auto-migrates if host changed) |
+| `cf down <svc>` | Stop service |
+| `cf restart <svc>` | down + up |
+| `cf update <svc>` | pull + down + up |
+| `cf pull <svc>` | Pull latest images |
+| `cf logs -f <svc>` | Follow logs |
+| `cf ps` | Show status of all services |
+| `cf sync` | Discover running services + capture image digests |
+| `cf check` | Validate config, mounts, networks |
+| `cf init-network` | Create Docker network on hosts |
+| `cf traefik-file` | Generate Traefik file-provider config |
+| `cf config <cmd>` | Manage config files (init, show, path, validate, edit) |
+
+All commands support `--all` to operate on all services.
+
 Each command replaces: look up host → SSH → find compose file → run `ssh host "cd /opt/compose/plex && docker compose up -d"`.

 ```bash
 # Start services (auto-migrates if host changed in config)
 cf up plex jellyfin
@@ -209,6 +284,60 @@ cf logs -f plex  # follow
 cf ps
 ```

+<details>
+<summary>See the output of <code>cf --help</code></summary>
+
+<!-- CODE:BASH:START -->
+<!-- echo '```yaml' -->
+<!-- export NO_COLOR=1 -->
+<!-- export TERM=dumb -->
+<!-- export TERMINAL_WIDTH=90 -->
+<!-- cf --help -->
+<!-- echo '```' -->
+<!-- CODE:END -->
+<!-- OUTPUT:START -->
+<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
+```yaml
+
+ Usage: cf [OPTIONS] COMMAND [ARGS]...
+
+ Compose Farm - run docker compose commands across multiple hosts
+
+╭─ Options ────────────────────────────────────────────────────────────────────╮
+│ --version             -v    Show version and exit                            │
+│ --install-completion        Install completion for the current shell.        │
+│ --show-completion           Show completion for the current shell, to        │
+│                             copy it or customize the installation.           │
+│ --help                -h    Show this message and exit.                      │
+╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Lifecycle ──────────────────────────────────────────────────────────────────╮
+│ up        Start services (docker compose up -d). Auto-migrates if host       │
+│           changed.                                                           │
+│ down      Stop services (docker compose down).                               │
+│ pull      Pull latest images (docker compose pull).                          │
+│ restart   Restart services (down + up).                                      │
+│ update    Update services (pull + down + up).                                │
+╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Configuration ──────────────────────────────────────────────────────────────╮
+│ traefik-file   Generate a Traefik file-provider fragment from compose        │
+│                Traefik labels.                                               │
+│ sync           Sync local state with running services.                       │
+│ check          Validate configuration, traefik labels, mounts, and networks. │
+│ init-network   Create Docker network on hosts with consistent settings.      │
+│ config         Manage compose-farm configuration files.                      │
+╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Monitoring ─────────────────────────────────────────────────────────────────╮
+│ logs    Show service logs.                                                   │
+│ ps      Show status of all services.                                         │
+│ stats   Show overview statistics for hosts and services.                     │
+╰──────────────────────────────────────────────────────────────────────────────╯
+
+```
+
+<!-- OUTPUT:END -->
+
+</details>
+
 ### Auto-Migration

 When you change a service's host assignment in config and run `up`, Compose Farm automatically:
@@ -329,6 +458,31 @@ Update your Traefik config to use directory watching instead of a single file:
 - --providers.file.watch=true
 ```

+## Comparison with Alternatives
+
+There are many ways to run containers on multiple hosts. Here is where Compose Farm sits:
+
+| | Compose Farm | Docker Contexts | K8s / Swarm | Ansible / Terraform | Portainer / Coolify |
+|---|:---:|:---:|:---:|:---:|:---:|
+| No compose rewrites | ✅ | ✅ | ❌ | ✅ | ✅ |
+| Version controlled | ✅ | ✅ | ✅ | ✅ | ❌ |
+| State tracking | ✅ | ❌ | ✅ | ✅ | ✅ |
+| Auto-migration | ✅ | ❌ | ✅ | ❌ | ❌ |
+| Interactive CLI | ✅ | ❌ | ❌ | ❌ | ❌ |
+| Parallel execution | ✅ | ❌ | ✅ | ✅ | ✅ |
+| Agentless | ✅ | ✅ | ❌ | ✅ | ❌ |
+| High availability | ❌ | ❌ | ✅ | ❌ | ❌ |
+
+**Docker Contexts** — You can use `docker context create remote ssh://...` and `docker compose --context remote up`. But it's manual: you must remember which host runs which service, there's no global view, no parallel execution, and no auto-migration.
+
+**Kubernetes / Docker Swarm** — Full orchestration that abstracts away the hardware. But they require cluster initialization, separate control planes, and often rewriting compose files. They introduce complexity (consensus, overlay networks) unnecessary for static "pet" servers.
+
+**Ansible / Terraform** — Infrastructure-as-Code tools that can SSH in and deploy containers. But they're push-based configuration management, not interactive CLIs. Great for setting up state, clumsy for day-to-day operations like `cf logs -f` or quickly restarting a service.
+
+**Portainer / Coolify** — Web-based management UIs. But they're UI-first and often require agents on your servers. Compose Farm is CLI-first and agentless.
+
+**Compose Farm is the middle ground:** a robust CLI that productizes the manual SSH pattern. You get the "cluster feel" (unified commands, state tracking) without the "cluster cost" (complexity, agents, control planes).
+
 ## License

 MIT
````
`docker-compose.yml` (new file, 11 lines)

```yaml
services:
  cf:
    image: ghcr.io/basnijholt/compose-farm:latest
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      # Compose directory (contains compose files AND compose-farm.yaml config)
      - ${CF_COMPOSE_DIR:-/opt/compose}:${CF_COMPOSE_DIR:-/opt/compose}
    environment:
      - SSH_AUTH_SOCK=/ssh-agent
      # Config file path (state stored alongside it)
      - CF_CONFIG=${CF_COMPOSE_DIR:-/opt/compose}/compose-farm.yaml
```
Another compose file (paperless-ngx; file path not captured in this view):

```diff
@@ -11,7 +11,7 @@
 name: paperless-ngx
 services:
   redis:
-    image: redis:7
+    image: redis:8
     container_name: paperless-redis
     restart: unless-stopped
     networks:
```
`pyproject.toml`

```diff
@@ -4,6 +4,7 @@ dynamic = ["version"]
 description = "Compose Farm - run docker compose commands across multiple hosts"
 readme = "README.md"
 license = "MIT"
+license-files = ["LICENSE"]
 authors = [
     { name = "Bas Nijholt", email = "bas@nijho.lt" }
 ]
@@ -34,6 +35,7 @@ classifiers = [
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3.14",
     "Topic :: System :: Systems Administration",
     "Topic :: Utilities",
     "Typing :: Typed",
```
`src/compose_farm/cli/__init__.py` (new file, 19 lines)

```python
"""CLI interface using Typer."""

from __future__ import annotations

# Import command modules to trigger registration via @app.command() decorators
from compose_farm.cli import (
    config,  # noqa: F401
    lifecycle,  # noqa: F401
    management,  # noqa: F401
    monitoring,  # noqa: F401
)

# Import the shared app instance
from compose_farm.cli.app import app

__all__ = ["app"]

if __name__ == "__main__":
    app()
```
`src/compose_farm/cli/app.py` (new file, 42 lines)

```python
"""Shared Typer app instance."""

from __future__ import annotations

from typing import Annotated

import typer

from compose_farm import __version__

__all__ = ["app"]


def _version_callback(value: bool) -> None:
    """Print version and exit."""
    if value:
        typer.echo(f"compose-farm {__version__}")
        raise typer.Exit


app = typer.Typer(
    name="compose-farm",
    help="Compose Farm - run docker compose commands across multiple hosts",
    no_args_is_help=True,
    context_settings={"help_option_names": ["-h", "--help"]},
)


@app.callback()
def main(
    version: Annotated[
        bool,
        typer.Option(
            "--version",
            "-v",
            help="Show version and exit",
            callback=_version_callback,
            is_eager=True,
        ),
    ] = False,
) -> None:
    """Compose Farm - run docker compose commands across multiple hosts."""
```
`src/compose_farm/cli/common.py` (new file, 203 lines)

```python
"""Shared CLI helpers, options, and utilities."""

from __future__ import annotations

import asyncio
import contextlib
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, TypeVar

import typer
from rich.progress import (
    BarColumn,
    MofNCompleteColumn,
    Progress,
    SpinnerColumn,
    TaskID,
    TextColumn,
    TimeElapsedColumn,
)

from compose_farm.config import Config, load_config
from compose_farm.console import console, err_console
from compose_farm.executor import CommandResult  # noqa: TC001
from compose_farm.traefik import generate_traefik_config, render_traefik_config

if TYPE_CHECKING:
    from collections.abc import Callable, Coroutine, Generator

_T = TypeVar("_T")


# --- Shared CLI Options ---
ServicesArg = Annotated[
    list[str] | None,
    typer.Argument(help="Services to operate on"),
]
AllOption = Annotated[
    bool,
    typer.Option("--all", "-a", help="Run on all services"),
]
ConfigOption = Annotated[
    Path | None,
    typer.Option("--config", "-c", help="Path to config file"),
]
LogPathOption = Annotated[
    Path | None,
    typer.Option("--log-path", "-l", help="Path to Dockerfarm TOML log"),
]
HostOption = Annotated[
    str | None,
    typer.Option("--host", "-H", help="Filter to services on this host"),
]

# --- Constants (internal) ---
_MISSING_PATH_PREVIEW_LIMIT = 2
_STATS_PREVIEW_LIMIT = 3  # Max number of pending migrations to show by name


@contextlib.contextmanager
def progress_bar(label: str, total: int) -> Generator[tuple[Progress, TaskID], None, None]:
    """Create a standardized progress bar with consistent styling.

    Yields (progress, task_id). Use progress.update(task_id, advance=1, description=...)
    to advance.
    """
    with Progress(
        SpinnerColumn(),
        TextColumn(f"[bold blue]{label}[/]"),
        BarColumn(),
        MofNCompleteColumn(),
        TextColumn("•"),
        TimeElapsedColumn(),
        TextColumn("•"),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task("", total=total)
        yield progress, task_id


def load_config_or_exit(config_path: Path | None) -> Config:
    """Load config or exit with a friendly error message."""
    try:
        return load_config(config_path)
    except FileNotFoundError as e:
        err_console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1) from e


def get_services(
    services: list[str],
    all_services: bool,
    config_path: Path | None,
) -> tuple[list[str], Config]:
    """Resolve service list and load config."""
    config = load_config_or_exit(config_path)

    if all_services:
        return list(config.services.keys()), config
    if not services:
        err_console.print("[red]✗[/] Specify services or use --all")
        raise typer.Exit(1)
    return list(services), config


def run_async(coro: Coroutine[None, None, _T]) -> _T:
    """Run async coroutine."""
    return asyncio.run(coro)


def report_results(results: list[CommandResult]) -> None:
    """Report command results and exit with appropriate code."""
    succeeded = [r for r in results if r.success]
    failed = [r for r in results if not r.success]

    # Always print summary when there are multiple results
    if len(results) > 1:
        console.print()  # Blank line before summary
        if failed:
            for r in failed:
                err_console.print(
                    f"[red]✗[/] [cyan]{r.service}[/] failed with exit code {r.exit_code}"
                )
            console.print()
            console.print(
                f"[green]✓[/] {len(succeeded)}/{len(results)} services succeeded, "
                f"[red]✗[/] {len(failed)} failed"
            )
        else:
            console.print(f"[green]✓[/] All {len(results)} services succeeded")

    elif failed:
        # Single service failed
        r = failed[0]
        err_console.print(f"[red]✗[/] [cyan]{r.service}[/] failed with exit code {r.exit_code}")

    if failed:
        raise typer.Exit(1)


def maybe_regenerate_traefik(cfg: Config) -> None:
    """Regenerate traefik config if traefik_file is configured."""
    if cfg.traefik_file is None:
        return

    try:
        dynamic, warnings = generate_traefik_config(cfg, list(cfg.services.keys()))
        new_content = render_traefik_config(dynamic)

        # Check if content changed
        old_content = ""
        if cfg.traefik_file.exists():
            old_content = cfg.traefik_file.read_text()

        if new_content != old_content:
            cfg.traefik_file.parent.mkdir(parents=True, exist_ok=True)
            cfg.traefik_file.write_text(new_content)
            console.print()  # Ensure we're on a new line after streaming output
            console.print(f"[green]✓[/] Traefik config updated: {cfg.traefik_file}")

        for warning in warnings:
            err_console.print(f"[yellow]![/] {warning}")
    except (FileNotFoundError, ValueError) as exc:
        err_console.print(f"[yellow]![/] Failed to update traefik config: {exc}")


def validate_host_for_service(cfg: Config, service: str, host: str) -> None:
    """Validate that a host is valid for a service."""
    if host not in cfg.hosts:
        err_console.print(f"[red]✗[/] Host '{host}' not found in config")
        raise typer.Exit(1)
    allowed_hosts = cfg.get_hosts(service)
    if host not in allowed_hosts:
        err_console.print(
            f"[red]✗[/] Service '{service}' is not configured for host '{host}' "
            f"(configured: {', '.join(allowed_hosts)})"
        )
        raise typer.Exit(1)


def run_host_operation(
    cfg: Config,
    svc_list: list[str],
    host: str,
    command: str,
    action_verb: str,
    state_callback: Callable[[Config, str, str], None],
) -> None:
    """Run an operation on a specific host for multiple services."""
    from compose_farm.executor import run_compose_on_host  # noqa: PLC0415

    results: list[CommandResult] = []
    for service in svc_list:
        validate_host_for_service(cfg, service, host)
        console.print(f"[cyan]\\[{service}][/] {action_verb} on [magenta]{host}[/]...")
        result = run_async(run_compose_on_host(cfg, service, host, command, raw=True))
        print()  # Newline after raw output
        results.append(result)
        if result.success:
            state_callback(cfg, service, host)
    maybe_regenerate_traefik(cfg)
    report_results(results)
```
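The `progress_bar` helper above is what the other CLI modules lean on for consistent progress output; a minimal usage sketch, with made-up service names:

```python
# Hypothetical caller of compose_farm.cli.common.progress_bar
from compose_farm.cli.common import progress_bar

services = ["plex", "jellyfin", "grafana"]  # illustrative
with progress_bar("Deploying", total=len(services)) as (progress, task_id):
    for svc in services:
        ...  # per-service work would go here
        progress.update(task_id, advance=1, description=f"[cyan]{svc}[/]")
```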
`src/compose_farm/cli/config.py` (new file, 265 lines)

```python
"""Configuration management commands for compose-farm."""

from __future__ import annotations

import os
import platform
import shlex
import shutil
import subprocess
from importlib import resources
from pathlib import Path
from typing import Annotated

import typer

from compose_farm.cli.app import app
from compose_farm.config import load_config, xdg_config_home
from compose_farm.console import console, err_console

config_app = typer.Typer(
    name="config",
    help="Manage compose-farm configuration files.",
    no_args_is_help=True,
)

# Default config location (internal)
_USER_CONFIG_PATH = xdg_config_home() / "compose-farm" / "compose-farm.yaml"

# Search paths for existing config (internal)
_CONFIG_PATHS = [
    Path("compose-farm.yaml"),
    _USER_CONFIG_PATH,
]

# --- CLI Options (same pattern as cli.py) ---
_PathOption = Annotated[
    Path | None,
    typer.Option("--path", "-p", help="Path to config file. Uses auto-detection if not specified."),
]
_ForceOption = Annotated[
    bool,
    typer.Option("--force", "-f", help="Overwrite existing config without confirmation."),
]
_RawOption = Annotated[
    bool,
    typer.Option("--raw", "-r", help="Output raw file contents (for copy-paste)."),
]


def _get_editor() -> str:
    """Get the user's preferred editor.

    Checks $EDITOR, then $VISUAL, then falls back to platform defaults.
    """
    for env_var in ("EDITOR", "VISUAL"):
        editor = os.environ.get(env_var)
        if editor:
            return editor

    if platform.system() == "Windows":
        return "notepad"

    # Try common editors on Unix-like systems
    for editor in ("nano", "vim", "vi"):
        if shutil.which(editor):
            return editor

    return "vi"


def _generate_template() -> str:
    """Generate a config template with documented schema."""
    try:
        template_file = resources.files("compose_farm") / "example-config.yaml"
        return template_file.read_text(encoding="utf-8")
    except FileNotFoundError as e:
        err_console.print("[red]Example config template is missing from the package.[/red]")
        err_console.print("Reinstall compose-farm or report this issue.")
        raise typer.Exit(1) from e


def _get_config_file(path: Path | None) -> Path | None:
    """Resolve config path, or auto-detect from standard locations."""
    if path:
        return path.expanduser().resolve()

    # Check environment variable
    if env_path := os.environ.get("CF_CONFIG"):
        p = Path(env_path)
        if p.exists():
            return p.resolve()

    # Check standard locations
    for p in _CONFIG_PATHS:
        if p.exists():
            return p.resolve()

    return None


@config_app.command("init")
def config_init(
    path: _PathOption = None,
    force: _ForceOption = False,
) -> None:
    """Create a new config file with documented example.

    The generated config file serves as a template showing all available
    options with explanatory comments.
    """
    target_path = (path.expanduser().resolve() if path else None) or _USER_CONFIG_PATH

    if target_path.exists() and not force:
        console.print(
            f"[bold yellow]Config file already exists at:[/bold yellow] [cyan]{target_path}[/cyan]",
        )
        if not typer.confirm("Overwrite existing config file?"):
            console.print("[dim]Aborted.[/dim]")
            raise typer.Exit(0)

    # Create parent directories
    target_path.parent.mkdir(parents=True, exist_ok=True)

    # Generate and write template
    template_content = _generate_template()
    target_path.write_text(template_content, encoding="utf-8")

    console.print(f"[green]✓[/] Config file created at: {target_path}")
    console.print("\n[dim]Edit the file to customize your settings:[/dim]")
    console.print("  [cyan]cf config edit[/cyan]")


@config_app.command("edit")
def config_edit(
    path: _PathOption = None,
) -> None:
    """Open the config file in your default editor.

    The editor is determined by: $EDITOR > $VISUAL > platform default.
    """
    config_file = _get_config_file(path)

    if config_file is None:
        console.print("[yellow]No config file found.[/yellow]")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        console.print("\nSearched locations:")
        for p in _CONFIG_PATHS:
            console.print(f"  - {p}")
        raise typer.Exit(1)

    if not config_file.exists():
        console.print("[yellow]Config file not found.[/yellow]")
        console.print(f"\nProvided path does not exist: [cyan]{config_file}[/cyan]")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        raise typer.Exit(1)

    editor = _get_editor()
    console.print(f"[dim]Opening {config_file} with {editor}...[/dim]")

    try:
        editor_cmd = shlex.split(editor, posix=os.name != "nt")
    except ValueError as e:
        err_console.print("[red]Invalid editor command. Check $EDITOR/$VISUAL.[/red]")
        raise typer.Exit(1) from e

    if not editor_cmd:
        err_console.print("[red]Editor command is empty.[/red]")
        raise typer.Exit(1)

    try:
        subprocess.run([*editor_cmd, str(config_file)], check=True)
    except FileNotFoundError:
        err_console.print(f"[red]Editor '{editor_cmd[0]}' not found.[/red]")
        err_console.print("Set $EDITOR environment variable to your preferred editor.")
        raise typer.Exit(1) from None
    except subprocess.CalledProcessError as e:
        err_console.print(f"[red]Editor exited with error code {e.returncode}[/red]")
        raise typer.Exit(e.returncode) from None


@config_app.command("show")
def config_show(
    path: _PathOption = None,
    raw: _RawOption = False,
) -> None:
    """Display the config file location and contents."""
    config_file = _get_config_file(path)

    if config_file is None:
        console.print("[yellow]No config file found.[/yellow]")
        console.print("\nSearched locations:")
        for p in _CONFIG_PATHS:
            status = "[green]exists[/green]" if p.exists() else "[dim]not found[/dim]"
            console.print(f"  - {p} ({status})")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        raise typer.Exit(0)

    if not config_file.exists():
        console.print("[yellow]Config file not found.[/yellow]")
        console.print(f"\nProvided path does not exist: [cyan]{config_file}[/cyan]")
        console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.")
        raise typer.Exit(1)

    content = config_file.read_text(encoding="utf-8")

    if raw:
        print(content, end="")
        return

    from rich.syntax import Syntax  # noqa: PLC0415

    console.print(f"[bold green]Config file:[/bold green] [cyan]{config_file}[/cyan]")
    console.print()
    syntax = Syntax(content, "yaml", theme="monokai", line_numbers=True, word_wrap=True)
    console.print(syntax)
    console.print()
    console.print("[dim]Tip: Use -r for copy-paste friendly output[/dim]")


@config_app.command("path")
def config_path(
    path: _PathOption = None,
) -> None:
    """Print the config file path (useful for scripting)."""
    config_file = _get_config_file(path)

    if config_file is None:
        console.print("[yellow]No config file found.[/yellow]")
        console.print("\nSearched locations:")
        for p in _CONFIG_PATHS:
            status = "[green]exists[/green]" if p.exists() else "[dim]not found[/dim]"
            console.print(f"  - {p} ({status})")
        raise typer.Exit(1)

    # Just print the path for easy piping
    print(config_file)


@config_app.command("validate")
def config_validate(
    path: _PathOption = None,
) -> None:
    """Validate the config file syntax and schema."""
    config_file = _get_config_file(path)

    if config_file is None:
        err_console.print("[red]✗[/] No config file found")
        raise typer.Exit(1)

    try:
        cfg = load_config(config_file)
    except FileNotFoundError as e:
        err_console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1) from e
    except Exception as e:
        err_console.print(f"[red]✗[/] Invalid config: {e}")
        raise typer.Exit(1) from e

    console.print(f"[green]✓[/] Valid config: {config_file}")
    console.print(f"  Hosts: {len(cfg.hosts)}")
    console.print(f"  Services: {len(cfg.services)}")


# Register config subcommand on the shared app
app.add_typer(config_app, name="config", rich_help_panel="Configuration")
```
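Because `config path` prints a bare path on stdout, it composes with ordinary shell tools; a small illustrative example (the backup filename is made up):

```bash
# Back up whichever config file auto-detection resolves to
cp "$(cf config path)" ./compose-farm.yaml.bak
```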
`src/compose_farm/cli/lifecycle.py` (new file, 144 lines)

```python
"""Lifecycle commands: up, down, pull, restart, update."""

from __future__ import annotations

from typing import Annotated

import typer

from compose_farm.cli.app import app
from compose_farm.cli.common import (
    AllOption,
    ConfigOption,
    HostOption,
    ServicesArg,
    get_services,
    load_config_or_exit,
    maybe_regenerate_traefik,
    report_results,
    run_async,
    run_host_operation,
)
from compose_farm.console import console
from compose_farm.executor import run_on_services, run_sequential_on_services
from compose_farm.operations import up_services
from compose_farm.state import (
    add_service_to_host,
    get_services_needing_migration,
    remove_service,
    remove_service_from_host,
)


@app.command(rich_help_panel="Lifecycle")
def up(
    services: ServicesArg = None,
    all_services: AllOption = False,
    migrate: Annotated[
        bool, typer.Option("--migrate", "-m", help="Only services needing migration")
    ] = False,
    host: HostOption = None,
    config: ConfigOption = None,
) -> None:
    """Start services (docker compose up -d). Auto-migrates if host changed."""
    from compose_farm.console import err_console  # noqa: PLC0415

    if migrate and host:
        err_console.print("[red]✗[/] Cannot use --migrate and --host together")
        raise typer.Exit(1)

    if migrate:
        cfg = load_config_or_exit(config)
        svc_list = get_services_needing_migration(cfg)
        if not svc_list:
            console.print("[green]✓[/] No services need migration")
            return
        console.print(f"[cyan]Migrating {len(svc_list)} service(s):[/] {', '.join(svc_list)}")
    else:
        svc_list, cfg = get_services(services or [], all_services, config)

    # Per-host operation: run on specific host only
    if host:
        run_host_operation(cfg, svc_list, host, "up -d", "Starting", add_service_to_host)
        return

    # Normal operation: use up_services with migration logic
    results = run_async(up_services(cfg, svc_list, raw=True))
    maybe_regenerate_traefik(cfg)
    report_results(results)


@app.command(rich_help_panel="Lifecycle")
def down(
    services: ServicesArg = None,
    all_services: AllOption = False,
    host: HostOption = None,
    config: ConfigOption = None,
) -> None:
    """Stop services (docker compose down)."""
    svc_list, cfg = get_services(services or [], all_services, config)

    # Per-host operation: run on specific host only
    if host:
        run_host_operation(cfg, svc_list, host, "down", "Stopping", remove_service_from_host)
        return

    # Normal operation
    raw = len(svc_list) == 1
    results = run_async(run_on_services(cfg, svc_list, "down", raw=raw))

    # Remove from state on success
    # For multi-host services, result.service is "svc@host", extract base name
    removed_services: set[str] = set()
    for result in results:
        if result.success:
            base_service = result.service.split("@")[0]
            if base_service not in removed_services:
                remove_service(cfg, base_service)
                removed_services.add(base_service)

    maybe_regenerate_traefik(cfg)
    report_results(results)


@app.command(rich_help_panel="Lifecycle")
def pull(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Pull latest images (docker compose pull)."""
    svc_list, cfg = get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = run_async(run_on_services(cfg, svc_list, "pull", raw=raw))
    report_results(results)


@app.command(rich_help_panel="Lifecycle")
def restart(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Restart services (down + up)."""
    svc_list, cfg = get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = run_async(run_sequential_on_services(cfg, svc_list, ["down", "up -d"], raw=raw))
    maybe_regenerate_traefik(cfg)
    report_results(results)


@app.command(rich_help_panel="Lifecycle")
def update(
    services: ServicesArg = None,
    all_services: AllOption = False,
    config: ConfigOption = None,
) -> None:
    """Update services (pull + down + up)."""
    svc_list, cfg = get_services(services or [], all_services, config)
    raw = len(svc_list) == 1
    results = run_async(
        run_sequential_on_services(cfg, svc_list, ["pull", "down", "up -d"], raw=raw)
    )
    maybe_regenerate_traefik(cfg)
    report_results(results)
```
File diff suppressed because it is too large.
235
src/compose_farm/cli/monitoring.py
Normal file
235
src/compose_farm/cli/monitoring.py
Normal file
@@ -0,0 +1,235 @@
|
||||
"""Monitoring commands: logs, ps, stats."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
from typing import TYPE_CHECKING, Annotated
|
||||
|
||||
import typer
|
||||
from rich.progress import Progress, TaskID # noqa: TC002
|
||||
from rich.table import Table
|
||||
|
||||
from compose_farm.cli.app import app
|
||||
from compose_farm.cli.common import (
|
||||
_STATS_PREVIEW_LIMIT,
|
||||
AllOption,
|
||||
ConfigOption,
|
||||
HostOption,
|
||||
ServicesArg,
|
||||
get_services,
|
||||
load_config_or_exit,
|
||||
progress_bar,
|
||||
report_results,
|
||||
run_async,
|
||||
)
|
||||
from compose_farm.config import Config # noqa: TC001
|
||||
from compose_farm.console import console, err_console
|
||||
from compose_farm.executor import run_command, run_on_services
|
||||
from compose_farm.state import get_services_needing_migration, load_state
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Mapping
|
||||
|
||||
|
||||
def _group_services_by_host(
|
||||
services: dict[str, str | list[str]],
|
||||
hosts: Mapping[str, object],
|
||||
all_hosts: list[str] | None = None,
|
||||
) -> dict[str, list[str]]:
|
||||
"""Group services by their assigned host(s).
|
||||
|
||||
For multi-host services (list or "all"), the service appears in multiple host lists.
|
||||
"""
|
||||
by_host: dict[str, list[str]] = {h: [] for h in hosts}
|
||||
for service, host_value in services.items():
|
||||
if isinstance(host_value, list):
|
||||
# Explicit list of hosts
|
||||
for host_name in host_value:
|
||||
if host_name in by_host:
|
||||
by_host[host_name].append(service)
|
||||
elif host_value == "all" and all_hosts:
|
||||
# "all" keyword - add to all hosts
|
||||
for host_name in all_hosts:
|
||||
if host_name in by_host:
|
||||
by_host[host_name].append(service)
|
||||
elif host_value in by_host:
|
||||
# Single host
|
||||
by_host[host_value].append(service)
|
||||
return by_host
|
||||
|
||||
|
||||
def _get_container_counts(cfg: Config) -> dict[str, int]:
|
||||
"""Get container counts from all hosts with a progress bar."""
|
||||
|
||||
async def get_count(host_name: str) -> tuple[str, int]:
|
||||
host = cfg.hosts[host_name]
|
||||
result = await run_command(host, "docker ps -q | wc -l", host_name, stream=False)
|
||||
count = 0
|
||||
if result.success:
|
||||
with contextlib.suppress(ValueError):
|
||||
count = int(result.stdout.strip())
|
||||
return host_name, count
|
||||
|
||||
async def gather_with_progress(progress: Progress, task_id: TaskID) -> dict[str, int]:
|
||||
hosts = list(cfg.hosts.keys())
|
||||
tasks = [asyncio.create_task(get_count(h)) for h in hosts]
|
||||
results: dict[str, int] = {}
|
||||
for coro in asyncio.as_completed(tasks):
|
||||
host_name, count = await coro
|
||||
results[host_name] = count
|
||||
progress.update(task_id, advance=1, description=f"[cyan]{host_name}[/]")
|
||||
return results
|
||||
|
||||
with progress_bar("Querying hosts", len(cfg.hosts)) as (progress, task_id):
|
||||
return asyncio.run(gather_with_progress(progress, task_id))
|
||||
|
||||
|
||||
def _build_host_table(
|
||||
cfg: Config,
|
||||
services_by_host: dict[str, list[str]],
|
||||
running_by_host: dict[str, list[str]],
|
||||
container_counts: dict[str, int],
|
||||
*,
|
||||
show_containers: bool,
|
||||
) -> Table:
|
||||
"""Build the hosts table."""
|
||||
table = Table(title="Hosts", show_header=True, header_style="bold cyan")
|
||||
table.add_column("Host", style="magenta")
|
||||
table.add_column("Address")
|
||||
table.add_column("Configured", justify="right")
|
||||
table.add_column("Running", justify="right")
|
||||
if show_containers:
|
||||
table.add_column("Containers", justify="right")
|
||||
|
||||
for host_name in sorted(cfg.hosts.keys()):
|
||||
host = cfg.hosts[host_name]
|
||||
configured = len(services_by_host[host_name])
|
        running = len(running_by_host[host_name])

        row = [
            host_name,
            host.address,
            str(configured),
            str(running) if running > 0 else "[dim]0[/]",
        ]
        if show_containers:
            count = container_counts.get(host_name, 0)
            row.append(str(count) if count > 0 else "[dim]0[/]")

        table.add_row(*row)
    return table


def _build_summary_table(
    cfg: Config, state: dict[str, str | list[str]], pending: list[str]
) -> Table:
    """Build the summary table."""
    on_disk = cfg.discover_compose_dirs()

    table = Table(title="Summary", show_header=False)
    table.add_column("Label", style="dim")
    table.add_column("Value", style="bold")

    table.add_row("Total hosts", str(len(cfg.hosts)))
    table.add_row("Services (configured)", str(len(cfg.services)))
    table.add_row("Services (tracked)", str(len(state)))
    table.add_row("Compose files on disk", str(len(on_disk)))

    if pending:
        preview = ", ".join(pending[:_STATS_PREVIEW_LIMIT])
        suffix = "..." if len(pending) > _STATS_PREVIEW_LIMIT else ""
        table.add_row("Pending migrations", f"[yellow]{len(pending)}[/] ({preview}{suffix})")
    else:
        table.add_row("Pending migrations", "[green]0[/]")

    return table


# --- Command functions ---


@app.command(rich_help_panel="Monitoring")
def logs(
    services: ServicesArg = None,
    all_services: AllOption = False,
    host: HostOption = None,
    follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False,
    tail: Annotated[
        int | None,
        typer.Option("--tail", "-n", help="Number of lines (default: 20 for --all, 100 otherwise)"),
    ] = None,
    config: ConfigOption = None,
) -> None:
    """Show service logs."""
    if all_services and host is not None:
        err_console.print("[red]✗[/] Cannot use --all and --host together")
        raise typer.Exit(1)

    cfg = load_config_or_exit(config)

    # Determine service list based on options
    if host is not None:
        if host not in cfg.hosts:
            err_console.print(f"[red]✗[/] Host '{host}' not found in config")
            raise typer.Exit(1)
        # Include services where host is in the list of configured hosts
        svc_list = [s for s in cfg.services if host in cfg.get_hosts(s)]
        if not svc_list:
            err_console.print(f"[yellow]![/] No services configured for host '{host}'")
            return
    else:
        svc_list, cfg = get_services(services or [], all_services, config)

    # Default to fewer lines when showing multiple services
    many_services = all_services or host is not None or len(svc_list) > 1
    effective_tail = tail if tail is not None else (20 if many_services else 100)
    cmd = f"logs --tail {effective_tail}"
    if follow:
        cmd += " -f"
    results = run_async(run_on_services(cfg, svc_list, cmd))
    report_results(results)


@app.command(rich_help_panel="Monitoring")
def ps(
    config: ConfigOption = None,
) -> None:
    """Show status of all services."""
    cfg = load_config_or_exit(config)
    results = run_async(run_on_services(cfg, list(cfg.services.keys()), "ps"))
    report_results(results)


@app.command(rich_help_panel="Monitoring")
def stats(
    live: Annotated[
        bool,
        typer.Option("--live", "-l", help="Query Docker for live container stats"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Show overview statistics for hosts and services.

    Without --live: shows config/state info (hosts, services, pending migrations).
    With --live: also queries Docker on each host for container counts.
    """
    cfg = load_config_or_exit(config)
    state = load_state(cfg)
    pending = get_services_needing_migration(cfg)

    all_hosts = list(cfg.hosts.keys())
    services_by_host = _group_services_by_host(cfg.services, cfg.hosts, all_hosts)
    running_by_host = _group_services_by_host(state, cfg.hosts, all_hosts)

    container_counts: dict[str, int] = {}
    if live:
        container_counts = _get_container_counts(cfg)

    host_table = _build_host_table(
        cfg, services_by_host, running_by_host, container_counts, show_containers=live
    )
    console.print(host_table)

    console.print()
    console.print(_build_summary_table(cfg, state, pending))
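For reference, `_group_services_by_host` is called above with both `cfg.services` and the tracked `state`, but its body is outside this excerpt. A minimal sketch of the grouping it presumably performs (the signature is taken from the call sites; the body is an assumption):

    def _group_services_by_host(
        mapping: dict[str, str | list[str]],
        hosts: dict[str, Host],
        all_hosts: list[str],
    ) -> dict[str, list[str]]:
        # Hypothetical reconstruction from the call sites above, not the actual implementation.
        by_host: dict[str, list[str]] = {h: [] for h in all_hosts}
        for service, value in mapping.items():
            targets = all_hosts if value == "all" else (value if isinstance(value, list) else [value])
            for host_name in targets:
                if host_name in by_host:  # ignore hosts no longer in the config
                    by_host[host_name].append(service)
        return by_host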
@@ -18,10 +18,10 @@ if TYPE_CHECKING:
     from .config import Config
 
 # Port parsing constants
-SINGLE_PART = 1
-PUBLISHED_TARGET_PARTS = 2
-HOST_PUBLISHED_PARTS = 3
-MIN_VOLUME_PARTS = 2
+_SINGLE_PART = 1
+_PUBLISHED_TARGET_PARTS = 2
+_HOST_PUBLISHED_PARTS = 3
+_MIN_VOLUME_PARTS = 2
 
 _VAR_PATTERN = re.compile(r"\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-(.*?))?\}")
 
@@ -34,7 +34,7 @@ class PortMapping:
     published: int | None
 
 
-def load_env(compose_path: Path) -> dict[str, str]:
+def _load_env(compose_path: Path) -> dict[str, str]:
     """Load environment variables for compose interpolation.
 
     Reads from .env file in the same directory as compose file,
@@ -59,7 +59,7 @@ def load_env(compose_path: Path) -> dict[str, str]:
     return env
 
 
-def interpolate(value: str, env: dict[str, str]) -> str:
+def _interpolate(value: str, env: dict[str, str]) -> str:
     """Perform ${VAR} and ${VAR:-default} interpolation."""
 
     def replace(match: re.Match[str]) -> str:
@@ -73,7 +73,7 @@ def interpolate(value: str, env: dict[str, str]) -> str:
     return _VAR_PATTERN.sub(replace, value)
 
 
-def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
+def _parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
     """Parse port specifications from compose file.
 
     Handles string formats like "8080", "8080:80", "0.0.0.0:8080:80",
@@ -87,18 +87,22 @@ def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
 
     for item in items:
         if isinstance(item, str):
-            interpolated = interpolate(item, env)
+            interpolated = _interpolate(item, env)
             port_spec, _, _ = interpolated.partition("/")
             parts = port_spec.split(":")
             published: int | None = None
             target: int | None = None
 
-            if len(parts) == SINGLE_PART and parts[0].isdigit():
+            if len(parts) == _SINGLE_PART and parts[0].isdigit():
                 target = int(parts[0])
-            elif len(parts) == PUBLISHED_TARGET_PARTS and parts[0].isdigit() and parts[1].isdigit():
+            elif (
+                len(parts) == _PUBLISHED_TARGET_PARTS and parts[0].isdigit() and parts[1].isdigit()
+            ):
                 published = int(parts[0])
                 target = int(parts[1])
-            elif len(parts) == HOST_PUBLISHED_PARTS and parts[-2].isdigit() and parts[-1].isdigit():
+            elif (
+                len(parts) == _HOST_PUBLISHED_PARTS and parts[-2].isdigit() and parts[-1].isdigit()
+            ):
                 published = int(parts[-2])
                 target = int(parts[-1])
 
@@ -107,7 +111,7 @@ def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
         elif isinstance(item, dict):
             target_raw = item.get("target")
             if isinstance(target_raw, str):
-                target_raw = interpolate(target_raw, env)
+                target_raw = _interpolate(target_raw, env)
             if target_raw is None:
                 continue
             try:
@@ -117,7 +121,7 @@ def parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]:  # noqa: PLR0912
 
             published_raw = item.get("published")
             if isinstance(published_raw, str):
-                published_raw = interpolate(published_raw, env)
+                published_raw = _interpolate(published_raw, env)
             published_val: int | None
             try:
                 published_val = int(str(published_raw)) if published_raw is not None else None
@@ -144,14 +148,14 @@ def _parse_volume_item(
 ) -> str | None:
     """Parse a single volume item and return host path if it's a bind mount."""
     if isinstance(item, str):
-        interpolated = interpolate(item, env)
+        interpolated = _interpolate(item, env)
         parts = interpolated.split(":")
-        if len(parts) >= MIN_VOLUME_PARTS:
+        if len(parts) >= _MIN_VOLUME_PARTS:
             return _resolve_host_path(parts[0], compose_dir)
     elif isinstance(item, dict) and item.get("type") == "bind":
         source = item.get("source")
         if source:
-            interpolated = interpolate(str(source), env)
+            interpolated = _interpolate(str(source), env)
             return _resolve_host_path(interpolated, compose_dir)
     return None
@@ -166,7 +170,7 @@ def parse_host_volumes(config: Config, service: str) -> list[str]:
     if not compose_path.exists():
         return []
 
-    env = load_env(compose_path)
+    env = _load_env(compose_path)
     compose_data = yaml.safe_load(compose_path.read_text()) or {}
     raw_services = compose_data.get("services", {})
     if not isinstance(raw_services, dict):
@@ -234,7 +238,7 @@ def load_compose_services(
         message = f"[{stack}] Compose file not found: {compose_path}"
         raise FileNotFoundError(message)
 
-    env = load_env(compose_path)
+    env = _load_env(compose_path)
     compose_data = yaml.safe_load(compose_path.read_text()) or {}
     raw_services = compose_data.get("services", {})
     if not isinstance(raw_services, dict):
@@ -248,7 +252,7 @@ def normalize_labels(raw: Any, env: dict[str, str]) -> dict[str, str]:
         return {}
     if isinstance(raw, dict):
         return {
-            interpolate(str(k), env): interpolate(str(v), env)
+            _interpolate(str(k), env): _interpolate(str(v), env)
             for k, v in raw.items()
             if k is not None
         }
@@ -258,8 +262,8 @@ def normalize_labels(raw: Any, env: dict[str, str]) -> dict[str, str]:
             if not isinstance(item, str) or "=" not in item:
                 continue
             key_raw, value_raw = item.split("=", 1)
-            key = interpolate(key_raw.strip(), env)
-            value = interpolate(value_raw.strip(), env)
+            key = _interpolate(key_raw.strip(), env)
+            value = _interpolate(value_raw.strip(), env)
             labels[key] = value
         return labels
     return {}
@@ -278,5 +282,5 @@ def get_ports_for_service(
     if ref_service in all_services:
         ref_def = all_services[ref_service]
         if isinstance(ref_def, dict):
-            return parse_ports(ref_def.get("ports"), env)
-    return parse_ports(definition.get("ports"), env)
+            return _parse_ports(ref_def.get("ports"), env)
+    return _parse_ports(definition.get("ports"), env)
|
||||
"""Load configuration from YAML file.
|
||||
|
||||
Search order:
|
||||
1. Explicit path if provided
|
||||
2. ./compose-farm.yaml
|
||||
3. $XDG_CONFIG_HOME/compose-farm/compose-farm.yaml (defaults to ~/.config)
|
||||
1. Explicit path if provided via --config
|
||||
2. CF_CONFIG environment variable
|
||||
3. ./compose-farm.yaml
|
||||
4. $XDG_CONFIG_HOME/compose-farm/compose-farm.yaml (defaults to ~/.config)
|
||||
"""
|
||||
search_paths = [
|
||||
Path("compose-farm.yaml"),
|
||||
@@ -159,6 +160,8 @@ def load_config(path: Path | None = None) -> Config:
|
||||
|
||||
if path:
|
||||
config_path = path
|
||||
elif env_path := os.environ.get("CF_CONFIG"):
|
||||
config_path = Path(env_path)
|
||||
else:
|
||||
config_path = None
|
||||
for p in search_paths:
|
||||
@@ -170,6 +173,13 @@ def load_config(path: Path | None = None) -> Config:
|
||||
msg = f"Config file not found. Searched: {', '.join(str(p) for p in search_paths)}"
|
||||
raise FileNotFoundError(msg)
|
||||
|
||||
if config_path.is_dir():
|
||||
msg = (
|
||||
f"Config path is a directory, not a file: {config_path}\n"
|
||||
"This often happens when Docker creates an empty directory for a missing mount."
|
||||
)
|
||||
raise FileNotFoundError(msg)
|
||||
|
||||
with config_path.open() as f:
|
||||
raw = yaml.safe_load(f)
|
||||
|
||||
|
||||
src/compose_farm/example-config.yaml (new file, 89 lines)
@@ -0,0 +1,89 @@
# Compose Farm configuration
# Documentation: https://github.com/basnijholt/compose-farm
#
# This file configures compose-farm to manage Docker Compose services
# across multiple hosts via SSH.
#
# Place this file at:
#   - ./compose-farm.yaml (current directory)
#   - ~/.config/compose-farm/compose-farm.yaml
#   - Or specify with: cf --config /path/to/config.yaml
#   - Or set the CF_CONFIG environment variable

# ------------------------------------------------------------------------------
# compose_dir: Directory containing service subdirectories with compose files
# ------------------------------------------------------------------------------
# Each subdirectory should contain a compose.yaml (or docker-compose.yml).
# This path must be the same on all hosts (NFS mount recommended).
#
compose_dir: /opt/compose

# ------------------------------------------------------------------------------
# hosts: SSH connection details for each host
# ------------------------------------------------------------------------------
# Simple form:
#   hostname: ip-or-fqdn
#
# Full form:
#   hostname:
#     address: ip-or-fqdn
#     user: ssh-username   # default: current user
#     port: 22             # default: 22
#
# Note: "all" is a reserved keyword and cannot be used as a host name.
#
hosts:
  # Example: simple form (uses current user, port 22)
  server1: 192.168.1.10

  # Example: full form with explicit user
  server2:
    address: 192.168.1.20
    user: admin

  # Example: full form with custom port
  server3:
    address: 192.168.1.30
    user: root
    port: 2222

# ------------------------------------------------------------------------------
# services: Map service names to their target host(s)
# ------------------------------------------------------------------------------
# Each service name must match a subdirectory in compose_dir.
#
# Single host:
#   service-name: hostname
#
# Multiple hosts (explicit list):
#   service-name: [host1, host2]
#
# All hosts:
#   service-name: all
#
services:
  # Example: service runs on a single host
  nginx: server1
  postgres: server2

  # Example: service runs on multiple specific hosts
  # prometheus: [server1, server2]

  # Example: service runs on ALL hosts (e.g., monitoring agents)
  # node-exporter: all

# ------------------------------------------------------------------------------
# traefik_file: (optional) Auto-generate Traefik file-provider config
# ------------------------------------------------------------------------------
# When set, compose-farm automatically regenerates this file after
# up/down/restart/update commands. Traefik watches this file for changes.
#
# traefik_file: /opt/compose/traefik/dynamic.d/compose-farm.yml

# ------------------------------------------------------------------------------
# traefik_service: (optional) Service name running Traefik
# ------------------------------------------------------------------------------
# When generating traefik_file, services on the same host as Traefik are
# skipped (they're handled by Traefik's Docker provider directly).
#
# traefik_service: traefik
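How the three service forms above are consumed: throughout this changeset, `cfg.get_hosts(service)` normalizes a service entry to a list of host names, and `cfg.is_multi_host(service)` flags list and "all" entries. A hedged sketch of that normalization as a `Config` method (the body is an assumption inferred from the call sites, not the actual implementation):

    def get_hosts(self, service: str) -> list[str]:
        # Hypothetical sketch of the normalization implied by the config forms above.
        value = self.services[service]
        if value == "all":            # reserved keyword: every configured host
            return list(self.hosts)
        if isinstance(value, list):   # explicit multi-host list
            return value
        return [value]                # single host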
@@ -12,8 +12,7 @@ from typing import TYPE_CHECKING, Any
 import asyncssh
 from rich.markup import escape
 
-from .console import console as _console
-from .console import err_console as _err_console
+from .console import console, err_console
 
 if TYPE_CHECKING:
     from collections.abc import Callable
@@ -55,7 +54,7 @@ class CommandResult:
     stderr: str = ""
 
 
-def _is_local(host: Host) -> bool:
+def is_local(host: Host) -> bool:
     """Check if host should run locally (no SSH)."""
     addr = host.address.lower()
     if addr in LOCAL_ADDRESSES:
@@ -101,14 +100,14 @@ async def _run_local_command(
         *,
         is_stderr: bool = False,
     ) -> None:
-        console = _err_console if is_stderr else _console
+        out = err_console if is_stderr else console
         while True:
             line = await reader.readline()
             if not line:
                 break
             text = line.decode()
             if text.strip():  # Skip empty lines
-                console.print(f"[cyan]\\[{prefix}][/] {escape(text)}", end="")
+                out.print(f"[cyan]\\[{prefix}][/] {escape(text)}", end="")
 
     await asyncio.gather(
         read_stream(proc.stdout, service),
@@ -130,7 +129,7 @@ async def _run_local_command(
             stderr=stderr_data.decode() if stderr_data else "",
         )
     except OSError as e:
-        _err_console.print(f"[cyan]\\[{service}][/] [red]Local error:[/] {e}")
+        err_console.print(f"[cyan]\\[{service}][/] [red]Local error:[/] {e}")
         return CommandResult(service=service, exit_code=1, success=False)
@@ -174,10 +173,10 @@ async def _run_ssh_command(
         *,
         is_stderr: bool = False,
     ) -> None:
-        console = _err_console if is_stderr else _console
+        out = err_console if is_stderr else console
         async for line in reader:
             if line.strip():  # Skip empty lines
-                console.print(f"[cyan]\\[{prefix}][/] {escape(line)}", end="")
+                out.print(f"[cyan]\\[{prefix}][/] {escape(line)}", end="")
 
     await asyncio.gather(
         read_stream(proc.stdout, service),
@@ -199,7 +198,7 @@ async def _run_ssh_command(
             stderr=stderr_data,
         )
     except (OSError, asyncssh.Error) as e:
-        _err_console.print(f"[cyan]\\[{service}][/] [red]SSH error:[/] {e}")
+        err_console.print(f"[cyan]\\[{service}][/] [red]SSH error:[/] {e}")
         return CommandResult(service=service, exit_code=1, success=False)
@@ -212,7 +211,7 @@ async def run_command(
     raw: bool = False,
 ) -> CommandResult:
     """Run a command on a host (locally or via SSH)."""
-    if _is_local(host):
+    if is_local(host):
         return await _run_local_command(command, service, stream=stream, raw=raw)
     return await _run_ssh_command(host, command, service, stream=stream, raw=raw)
@@ -253,23 +252,6 @@ async def run_compose_on_host(
     return await run_command(host, command, service, stream=stream, raw=raw)
 
 
-async def run_compose_multi_host(
-    config: Config,
-    service: str,
-    compose_cmd: str,
-    *,
-    stream: bool = True,
-    raw: bool = False,
-) -> list[CommandResult]:
-    """Run a docker compose command on all hosts for a multi-host service.
-
-    Returns a list of results, one per host.
-    """
-    return await run_sequential_commands_multi_host(
-        config, service, [compose_cmd], stream=stream, raw=raw
-    )
-
-
 async def run_on_services(
     config: Config,
     services: list[str],
@@ -286,7 +268,7 @@ async def run_on_services(
     return await run_sequential_on_services(config, services, [compose_cmd], stream=stream, raw=raw)
 
 
-async def run_sequential_commands(
+async def _run_sequential_commands(
     config: Config,
     service: str,
     commands: list[str],
@@ -302,7 +284,7 @@ async def run_sequential_commands(
     return CommandResult(service=service, exit_code=0, success=True)
 
 
-async def run_sequential_commands_multi_host(
+async def _run_sequential_commands_multi_host(
    config: Config,
    service: str,
    commands: list[str],
@@ -356,13 +338,13 @@ async def run_sequential_on_services(
     for service in services:
         if config.is_multi_host(service):
             multi_host_tasks.append(
-                run_sequential_commands_multi_host(
+                _run_sequential_commands_multi_host(
                     config, service, commands, stream=stream, raw=raw
                 )
             )
         else:
             single_host_tasks.append(
-                run_sequential_commands(config, service, commands, stream=stream, raw=raw)
+                _run_sequential_commands(config, service, commands, stream=stream, raw=raw)
             )
 
     # Gather results separately to maintain type safety
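The newly public `is_local` is what `run_command` dispatches on. A usage sketch (the contents of `LOCAL_ADDRESSES` are not shown in this diff, so the specific addresses below are assumptions):

    from compose_farm.config import Host
    from compose_farm.executor import is_local

    # "localhost" is presumably in LOCAL_ADDRESSES; a LAN IP is not.
    print(is_local(Host(address="localhost")))     # expected: True  -> local subprocess
    print(is_local(Host(address="192.168.1.10")))  # expected: False -> asyncssh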
@@ -20,7 +20,7 @@ if TYPE_CHECKING:
 
 
 DEFAULT_LOG_PATH = xdg_config_home() / "compose-farm" / "dockerfarm-log.toml"
-DIGEST_HEX_LENGTH = 64
+_DIGEST_HEX_LENGTH = 64
 
 
 @dataclass(frozen=True)
@@ -47,7 +47,8 @@ class SnapshotEntry:
     }
 
 
-def _isoformat(dt: datetime) -> str:
+def isoformat(dt: datetime) -> str:
     """Format a datetime as an ISO 8601 string with Z suffix for UTC."""
     return dt.astimezone(UTC).replace(microsecond=0).isoformat().replace("+00:00", "Z")
@@ -96,13 +97,13 @@ def _extract_image_fields(record: dict[str, Any]) -> tuple[str, str]:
         or ""
     )
 
-    if digest and not digest.startswith("sha256:") and len(digest) == DIGEST_HEX_LENGTH:
+    if digest and not digest.startswith("sha256:") and len(digest) == _DIGEST_HEX_LENGTH:
         digest = f"sha256:{digest}"
 
     return image, digest
 
 
-async def _collect_service_entries(
+async def collect_service_entries(
     config: Config,
     service: str,
     *,
@@ -139,19 +140,21 @@ async def collect_service_entries(
     return entries
 
 
-def _load_existing_entries(log_path: Path) -> list[dict[str, str]]:
+def load_existing_entries(log_path: Path) -> list[dict[str, str]]:
     """Load existing snapshot entries from a TOML log file."""
     if not log_path.exists():
         return []
     data = tomllib.loads(log_path.read_text())
     return list(data.get("entries", []))
 
 
-def _merge_entries(
+def merge_entries(
     existing: Iterable[dict[str, str]],
     new_entries: Iterable[SnapshotEntry],
     *,
     now_iso: str,
 ) -> list[dict[str, str]]:
     """Merge new snapshot entries with existing ones, preserving first_seen timestamps."""
     merged: dict[tuple[str, str, str], dict[str, str]] = {
         (e["service"], e["host"], e["digest"]): dict(e) for e in existing
     }
@@ -164,7 +167,8 @@ def merge_entries(
     return list(merged.values())
 
 
-def _write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str, str]]) -> None:
+def write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str, str]]) -> None:
     """Write snapshot entries to a TOML log file."""
     lines: list[str] = ["[meta]"]
     lines.extend(f'{key} = "{_escape(meta[key])}"' for key in sorted(meta))
@@ -189,45 +193,3 @@ def write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str,
     content = "\n".join(lines).rstrip() + "\n"
     log_path.parent.mkdir(parents=True, exist_ok=True)
     log_path.write_text(content)
-
-
-async def snapshot_services(
-    config: Config,
-    services: list[str],
-    *,
-    log_path: Path | None = None,
-    now: datetime | None = None,
-    run_compose_fn: Callable[..., Awaitable[CommandResult]] = run_compose,
-) -> Path:
-    """Capture current image digests for services and write them to a TOML log.
-
-    - Preserves the earliest `first_seen` per (service, host, digest)
-    - Updates `last_seen` for digests observed in this snapshot
-    - Leaves untouched digests that were not part of this run (history is kept)
-    """
-    if not services:
-        error = "No services specified for snapshot"
-        raise RuntimeError(error)
-
-    log_path = log_path or DEFAULT_LOG_PATH
-    now_dt = now or datetime.now(UTC)
-    now_iso = _isoformat(now_dt)
-
-    existing_entries = _load_existing_entries(log_path)
-
-    snapshot_entries: list[SnapshotEntry] = []
-    for service in services:
-        snapshot_entries.extend(
-            await _collect_service_entries(
-                config, service, now=now_dt, run_compose_fn=run_compose_fn
-            )
-        )
-
-    if not snapshot_entries:
-        error = "No image digests were captured"
-        raise RuntimeError(error)
-
-    merged_entries = _merge_entries(existing_entries, snapshot_entries, now_iso=now_iso)
-    meta = {"generated_at": now_iso, "compose_dir": str(config.compose_dir)}
-    _write_toml(log_path, meta=meta, entries=merged_entries)
-    return log_path
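With `snapshot_services` gone, callers now compose the public pieces themselves. A sketch of the replacement sequence, mirroring the removed function body and the updated test near the end of this diff:

    async def snapshot_once(config, services, log_path, now_dt):
        # Sketch: collect digests, merge with history (preserving first_seen), write TOML.
        entries = []
        for service in services:
            entries.extend(
                await collect_service_entries(config, service, now=now_dt, run_compose_fn=run_compose)
            )
        now_iso = isoformat(now_dt)
        merged = merge_entries(load_existing_entries(log_path), entries, now_iso=now_iso)
        meta = {"generated_at": now_iso, "compose_dir": str(config.compose_dir)}
        write_toml(log_path, meta=meta, entries=merged)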
@@ -6,7 +6,6 @@ CLI commands are thin wrappers around these functions.
 
 from __future__ import annotations
 
-import asyncio
 from typing import TYPE_CHECKING
 
 from .compose import parse_external_networks, parse_host_volumes
@@ -15,7 +14,6 @@ from .executor import (
     CommandResult,
     check_networks_exist,
     check_paths_exist,
-    check_service_running,
     run_command,
     run_compose,
     run_compose_on_host,
@@ -23,8 +21,6 @@ from .executor import (
 from .state import get_service_host, set_multi_host_service, set_service_host
 
 if TYPE_CHECKING:
-    from collections.abc import Awaitable, Callable
-
     from .config import Config
 
 
@@ -35,7 +31,7 @@ def get_service_paths(cfg: Config, service: str) -> list[str]:
     return paths
 
 
-async def check_mounts_for_migration(
+async def _check_mounts_for_migration(
     cfg: Config,
     service: str,
     target_host: str,
@@ -46,7 +42,7 @@ async def _check_mounts_for_migration(
     return [p for p, found in exists.items() if not found]
 
 
-async def check_networks_for_migration(
+async def _check_networks_for_migration(
     cfg: Config,
     service: str,
     target_host: str,
@@ -59,7 +55,7 @@ async def _check_networks_for_migration(
     return [n for n, found in exists.items() if not found]
 
 
-async def preflight_check(
+async def _preflight_check(
     cfg: Config,
     service: str,
     target_host: str,
@@ -68,12 +64,12 @@ async def _preflight_check(
 
     Returns (missing_paths, missing_networks).
     """
-    missing_paths = await check_mounts_for_migration(cfg, service, target_host)
-    missing_networks = await check_networks_for_migration(cfg, service, target_host)
+    missing_paths = await _check_mounts_for_migration(cfg, service, target_host)
+    missing_networks = await _check_networks_for_migration(cfg, service, target_host)
     return missing_paths, missing_networks
 
 
-def report_preflight_failures(
+def _report_preflight_failures(
     service: str,
     target_host: str,
     missing_paths: list[str],
@@ -104,9 +100,9 @@ async def _up_multi_host_service(
 
     # Pre-flight checks on all hosts
     for host_name in host_names:
-        missing_paths, missing_networks = await preflight_check(cfg, service, host_name)
+        missing_paths, missing_networks = await _preflight_check(cfg, service, host_name)
         if missing_paths or missing_networks:
-            report_preflight_failures(service, host_name, missing_paths, missing_networks)
+            _report_preflight_failures(service, host_name, missing_paths, missing_networks)
             results.append(
                 CommandResult(service=f"{service}@{host_name}", exit_code=1, success=False)
             )
@@ -134,6 +130,52 @@ async def _up_multi_host_service(
     return results
 
 
+async def _migrate_service(
+    cfg: Config,
+    service: str,
+    current_host: str,
+    target_host: str,
+    prefix: str,
+    *,
+    raw: bool = False,
+) -> CommandResult | None:
+    """Migrate a service from current_host to target_host.
+
+    Pre-pulls/builds images on target, then stops service on current host.
+    Returns failure result if migration prep fails, None on success.
+    """
+    console.print(
+        f"{prefix} Migrating from [magenta]{current_host}[/] → [magenta]{target_host}[/]..."
+    )
+    # Prepare images on target host before stopping old service to minimize downtime.
+    # Pull handles image-based services; build handles Dockerfile-based services.
+    # Each command is a no-op for the other type (exit 0, no work done).
+    pull_result = await run_compose(cfg, service, "pull", raw=raw)
+    if raw:
+        print()  # Ensure newline after raw output
+    if not pull_result.success:
+        err_console.print(
+            f"{prefix} [red]✗[/] Pull failed on [magenta]{target_host}[/], "
+            "leaving service on current host"
+        )
+        return pull_result
+    build_result = await run_compose(cfg, service, "build", raw=raw)
+    if raw:
+        print()  # Ensure newline after raw output
+    if not build_result.success:
+        err_console.print(
+            f"{prefix} [red]✗[/] Build failed on [magenta]{target_host}[/], "
+            "leaving service on current host"
+        )
+        return build_result
+    down_result = await run_compose_on_host(cfg, service, current_host, "down", raw=raw)
+    if raw:
+        print()  # Ensure newline after raw output
+    if not down_result.success:
+        return down_result
+    return None
+
+
 async def up_services(
     cfg: Config,
     services: list[str],
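The ordering inside `_migrate_service` is the point of the refactor: images are fetched on the target while the service keeps running on the source, so downtime shrinks to roughly the down/up window. As a timeline sketch (the final `up` happens in `up_services` after this helper returns, per the next hunk):

    # cf up svc, where svc moves from host A to host B:
    #   1. pull on B   (svc still serving on A)
    #   2. build on B  (no-op for image-only services, as pull is for build-only ones)
    #   3. down on A   (downtime starts; a failure in steps 1-3 leaves svc on A)
    #   4. up on B     (downtime ends; performed by up_services, not this helper)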
@@ -157,24 +199,20 @@ async def up_services(
         current_host = get_service_host(cfg, service)
 
         # Pre-flight check: verify paths and networks exist on target
-        missing_paths, missing_networks = await preflight_check(cfg, service, target_host)
+        missing_paths, missing_networks = await _preflight_check(cfg, service, target_host)
         if missing_paths or missing_networks:
-            report_preflight_failures(service, target_host, missing_paths, missing_networks)
+            _report_preflight_failures(service, target_host, missing_paths, missing_networks)
             results.append(CommandResult(service=service, exit_code=1, success=False))
             continue
 
         # If service is deployed elsewhere, migrate it
         if current_host and current_host != target_host:
             if current_host in cfg.hosts:
-                console.print(
-                    f"{prefix} Migrating from "
-                    f"[magenta]{current_host}[/] → [magenta]{target_host}[/]..."
-                )
-                down_result = await run_compose_on_host(cfg, service, current_host, "down", raw=raw)
-                if raw:
-                    print()  # Ensure newline after raw output
-                if not down_result.success:
-                    results.append(down_result)
+                failure = await _migrate_service(
+                    cfg, service, current_host, target_host, prefix, raw=raw
+                )
+                if failure:
+                    results.append(failure)
                     continue
             else:
                 err_console.print(
@@ -196,45 +234,6 @@ async def up_services(
     return results
 
 
-async def discover_running_services(cfg: Config) -> dict[str, str | list[str]]:
-    """Discover which services are running on which hosts.
-
-    Returns a dict mapping service names to host name(s).
-    Multi-host services return a list of hosts where they're running.
-    """
-    discovered: dict[str, str | list[str]] = {}
-
-    for service in cfg.services:
-        assigned_hosts = cfg.get_hosts(service)
-
-        if cfg.is_multi_host(service):
-            # For multi-host services, find all hosts where it's running (check in parallel)
-            checks = await asyncio.gather(
-                *[check_service_running(cfg, service, h) for h in assigned_hosts]
-            )
-            running_hosts = [
-                h for h, running in zip(assigned_hosts, checks, strict=True) if running
-            ]
-            if running_hosts:
-                discovered[service] = running_hosts
-        else:
-            # Single-host service - check assigned host first
-            assigned_host = assigned_hosts[0]
-            if await check_service_running(cfg, service, assigned_host):
-                discovered[service] = assigned_host
-                continue
-
-            # Check other hosts in case service was migrated but state is stale
-            for host_name in cfg.hosts:
-                if host_name == assigned_host:
-                    continue
-                if await check_service_running(cfg, service, host_name):
-                    discovered[service] = host_name
-                    break
-
-    return discovered
-
-
 async def check_host_compatibility(
     cfg: Config,
     service: str,
@@ -253,50 +252,3 @@ async def check_host_compatibility(
         results[host_name] = (found, len(paths), missing)
 
     return results
-
-
-async def _check_resources(
-    cfg: Config,
-    services: list[str],
-    get_resources: Callable[[Config, str], list[str]],
-    check_exists: Callable[[Config, str, list[str]], Awaitable[dict[str, bool]]],
-) -> list[tuple[str, str, str]]:
-    """Generic check for resources (mounts, networks) on configured hosts."""
-    missing: list[tuple[str, str, str]] = []
-
-    for service in services:
-        host_names = cfg.get_hosts(service)
-        resources = get_resources(cfg, service)
-        if not resources:
-            continue
-
-        for host_name in host_names:
-            exists = await check_exists(cfg, host_name, resources)
-
-            for item, found in exists.items():
-                if not found:
-                    missing.append((service, host_name, item))
-
-    return missing
-
-
-async def check_mounts_on_configured_hosts(
-    cfg: Config,
-    services: list[str],
-) -> list[tuple[str, str, str]]:
-    """Check mount paths exist on configured hosts.
-
-    Returns list of (service, host, missing_path) tuples.
-    """
-    return await _check_resources(cfg, services, get_service_paths, check_paths_exist)
-
-
-async def check_networks_on_configured_hosts(
-    cfg: Config,
-    services: list[str],
-) -> list[tuple[str, str, str]]:
-    """Check Docker networks exist on configured hosts.
-
-    Returns list of (service, host, missing_network) tuples.
-    """
-    return await _check_resources(cfg, services, parse_external_networks, check_networks_exist)
@@ -64,17 +64,6 @@ def get_service_host(config: Config, service: str) -> str | None:
     return value
 
 
-def get_service_hosts(config: Config, service: str) -> list[str]:
-    """Get all hosts where a service is currently deployed."""
-    state = load_state(config)
-    value = state.get(service)
-    if value is None:
-        return []
-    if isinstance(value, list):
-        return value
-    return [value]
-
-
 def set_service_host(config: Config, service: str, host: str) -> None:
     """Record that a service is deployed on a host."""
     with _modify_state(config) as state:
@@ -26,7 +26,7 @@ if TYPE_CHECKING:
 
 
 @dataclass
-class TraefikServiceSource:
+class _TraefikServiceSource:
     """Source information to build an upstream for a Traefik service."""
 
     traefik_service: str
@@ -38,9 +38,9 @@ class _TraefikServiceSource:
     scheme: str | None = None
 
 
-LIST_VALUE_KEYS = {"entrypoints", "middlewares"}
-MIN_ROUTER_PARTS = 3
-MIN_SERVICE_LABEL_PARTS = 6
+_LIST_VALUE_KEYS = {"entrypoints", "middlewares"}
+_MIN_ROUTER_PARTS = 3
+_MIN_SERVICE_LABEL_PARTS = 6
 
 
 def _parse_value(key: str, raw_value: str) -> Any:
@@ -51,7 +51,7 @@ def _parse_value(key: str, raw_value: str) -> Any:
     if value.isdigit():
         return int(value)
     last_segment = key.rsplit(".", 1)[-1]
-    if last_segment in LIST_VALUE_KEYS:
+    if last_segment in _LIST_VALUE_KEYS:
         parts = [v.strip() for v in value.split(",")] if "," in value else [value]
         return [part for part in parts if part]
     return value
@@ -102,7 +102,7 @@ def _insert(root: dict[str, Any], key_path: list[str], value: Any) -> None:  # noqa
         current = container_list[list_index]
 
 
-def _resolve_published_port(source: TraefikServiceSource) -> tuple[int | None, str | None]:
+def _resolve_published_port(source: _TraefikServiceSource) -> tuple[int | None, str | None]:
     """Resolve host-published port for a Traefik service.
 
     Returns (published_port, warning_message).
@@ -140,7 +140,7 @@ def _resolve_published_port(source: _TraefikServiceSource) -> tuple[int | None, str | None]:
 
 def _finalize_http_services(
     dynamic: dict[str, Any],
-    sources: dict[str, TraefikServiceSource],
+    sources: dict[str, _TraefikServiceSource],
     warnings: list[str],
 ) -> None:
     for traefik_service, source in sources.items():
@@ -211,7 +211,7 @@ def _process_router_label(
     if not key_without_prefix.startswith("http.routers."):
         return
     router_parts = key_without_prefix.split(".")
-    if len(router_parts) < MIN_ROUTER_PARTS:
+    if len(router_parts) < _MIN_ROUTER_PARTS:
         return
     router_name = router_parts[2]
     router_remainder = router_parts[3:]
@@ -229,12 +229,12 @@ def _process_service_label(
     host_address: str,
     ports: list[PortMapping],
     service_names: set[str],
-    sources: dict[str, TraefikServiceSource],
+    sources: dict[str, _TraefikServiceSource],
 ) -> None:
     if not key_without_prefix.startswith("http.services."):
         return
     parts = key_without_prefix.split(".")
-    if len(parts) < MIN_SERVICE_LABEL_PARTS:
+    if len(parts) < _MIN_SERVICE_LABEL_PARTS:
         return
     traefik_service = parts[2]
     service_names.add(traefik_service)
@@ -242,7 +242,7 @@ def _process_service_label(
 
     source = sources.get(traefik_service)
     if source is None:
-        source = TraefikServiceSource(
+        source = _TraefikServiceSource(
             traefik_service=traefik_service,
             stack=stack,
             compose_service=compose_service,
@@ -267,7 +267,7 @@ def _process_service_labels(
     host_address: str,
     env: dict[str, str],
     dynamic: dict[str, Any],
-    sources: dict[str, TraefikServiceSource],
+    sources: dict[str, _TraefikServiceSource],
     warnings: list[str],
 ) -> None:
     labels = normalize_labels(definition.get("labels"), env)
@@ -328,7 +328,7 @@ def generate_traefik_config(
     """
     dynamic: dict[str, Any] = {}
    warnings: list[str] = []
-    sources: dict[str, TraefikServiceSource] = {}
+    sources: dict[str, _TraefikServiceSource] = {}
 
     # Determine Traefik's host from service assignment
     traefik_host = None
@@ -366,7 +366,7 @@ def generate_traefik_config(
     return dynamic, warnings
 
 
-TRAEFIK_CONFIG_HEADER = """\
+_TRAEFIK_CONFIG_HEADER = """\
 # Auto-generated by compose-farm
 # https://github.com/basnijholt/compose-farm
 #
@@ -382,4 +382,4 @@ TRAEFIK_CONFIG_HEADER = """\
 def render_traefik_config(dynamic: dict[str, Any]) -> str:
     """Render Traefik dynamic config as YAML with a header comment."""
     body = yaml.safe_dump(dynamic, sort_keys=False)
-    return TRAEFIK_CONFIG_HEADER + body
+    return _TRAEFIK_CONFIG_HEADER + body
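A quick illustration of `_parse_value`, whose module constants were privatized above (the label keys are illustrative):

    _parse_value("traefik.http.routers.app.priority", "10")
    # -> 10, since digit strings become ints
    _parse_value("traefik.http.routers.app.entrypoints", "web,websecure")
    # -> ["web", "websecure"], since "entrypoints" is in _LIST_VALUE_KEYS
    _parse_value("traefik.http.routers.app.rule", "Host(`app.example.com`)")
    # -> "Host(`app.example.com`)", everything else stays a string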
@@ -8,7 +8,7 @@ from unittest.mock import patch
 import pytest
 import typer
 
-from compose_farm.cli import logs
+from compose_farm.cli.monitoring import logs
 from compose_farm.config import Config, Host
 from compose_farm.executor import CommandResult
@@ -37,7 +37,7 @@ def _make_result(service: str) -> CommandResult:
 def _mock_run_async_factory(
     services: list[str],
 ) -> tuple[Any, list[CommandResult]]:
-    """Create a mock _run_async that returns results for given services."""
+    """Create a mock run_async that returns results for given services."""
     results = [_make_result(s) for s in services]
 
     def mock_run_async(_coro: Coroutine[Any, Any, Any]) -> list[CommandResult]:
@@ -55,9 +55,10 @@ class TestLogsContextualDefault:
         mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
 
         with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            patch("compose_farm.cli._run_async", side_effect=mock_run_async),
-            patch("compose_farm.cli.run_on_services") as mock_run,
+            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
+            patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
         ):
             mock_run.return_value = None
@@ -73,9 +74,10 @@ class TestLogsContextualDefault:
         mock_run_async, _ = _mock_run_async_factory(["svc1"])
 
         with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            patch("compose_farm.cli._run_async", side_effect=mock_run_async),
-            patch("compose_farm.cli.run_on_services") as mock_run,
+            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
+            patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
         ):
             logs(
                 services=["svc1"],
@@ -96,9 +98,10 @@ class TestLogsContextualDefault:
         mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
 
         with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            patch("compose_farm.cli._run_async", side_effect=mock_run_async),
-            patch("compose_farm.cli.run_on_services") as mock_run,
+            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
+            patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
         ):
             logs(
                 services=None,
@@ -119,9 +122,10 @@ class TestLogsContextualDefault:
         mock_run_async, _ = _mock_run_async_factory(["svc1"])
 
         with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            patch("compose_farm.cli._run_async", side_effect=mock_run_async),
-            patch("compose_farm.cli.run_on_services") as mock_run,
+            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
+            patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
         ):
             logs(
                 services=["svc1"],
@@ -146,9 +150,9 @@ class TestLogsHostFilter:
         mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
 
         with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            patch("compose_farm.cli._run_async", side_effect=mock_run_async),
-            patch("compose_farm.cli.run_on_services") as mock_run,
+            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
+            patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
         ):
             logs(
                 services=None,
@@ -170,9 +174,9 @@ class TestLogsHostFilter:
         mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
 
         with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            patch("compose_farm.cli._run_async", side_effect=mock_run_async),
-            patch("compose_farm.cli.run_on_services") as mock_run,
+            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
+            patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
+            patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
        ):
            logs(
                services=None,
@@ -187,14 +191,10 @@ class TestLogsHostFilter:
             call_args = mock_run.call_args
             assert call_args[0][2] == "logs --tail 20"
 
-    def test_logs_all_and_host_mutually_exclusive(self, tmp_path: Path) -> None:
+    def test_logs_all_and_host_mutually_exclusive(self) -> None:
         """Using --all and --host together should error."""
-        cfg = _make_config(tmp_path)
-
-        with (
-            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
-            pytest.raises(typer.Exit) as exc_info,
-        ):
+        # No config mock needed - error is raised before config is loaded
+        with pytest.raises(typer.Exit) as exc_info:
             logs(
                 services=None,
                 all_services=True,
@@ -128,6 +128,8 @@ class TestLoadConfig:
 
     def test_load_config_not_found(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
         monkeypatch.chdir(tmp_path)
+        monkeypatch.delenv("CF_CONFIG", raising=False)
+        monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "empty_config"))
         with pytest.raises(FileNotFoundError, match="Config file not found"):
             load_config()
tests/test_config_cmd.py (new file, 238 lines)
@@ -0,0 +1,238 @@
"""Tests for config command module."""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from typer.testing import CliRunner
|
||||
|
||||
import compose_farm.cli.config as config_cmd_module
|
||||
from compose_farm.cli import app
|
||||
from compose_farm.cli.config import (
|
||||
_generate_template,
|
||||
_get_config_file,
|
||||
_get_editor,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def runner() -> CliRunner:
|
||||
return CliRunner()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def valid_config_data() -> dict[str, Any]:
|
||||
return {
|
||||
"compose_dir": "/opt/compose",
|
||||
"hosts": {"server1": "192.168.1.10"},
|
||||
"services": {"nginx": "server1"},
|
||||
}
|
||||
|
||||
|
||||
class TestGetEditor:
|
||||
"""Tests for _get_editor function."""
|
||||
|
||||
def test_uses_editor_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setenv("EDITOR", "code")
|
||||
monkeypatch.delenv("VISUAL", raising=False)
|
||||
assert _get_editor() == "code"
|
||||
|
||||
def test_uses_visual_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.delenv("EDITOR", raising=False)
|
||||
monkeypatch.setenv("VISUAL", "subl")
|
||||
assert _get_editor() == "subl"
|
||||
|
||||
def test_editor_takes_precedence(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setenv("EDITOR", "vim")
|
||||
monkeypatch.setenv("VISUAL", "code")
|
||||
assert _get_editor() == "vim"
|
||||
|
||||
|
||||
class TestGetConfigFile:
|
||||
"""Tests for _get_config_file function."""
|
||||
|
||||
def test_explicit_path(self, tmp_path: Path) -> None:
|
||||
config_file = tmp_path / "my-config.yaml"
|
||||
config_file.touch()
|
||||
result = _get_config_file(config_file)
|
||||
assert result == config_file.resolve()
|
||||
|
||||
def test_cf_config_env(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
config_file = tmp_path / "env-config.yaml"
|
||||
config_file.touch()
|
||||
monkeypatch.setenv("CF_CONFIG", str(config_file))
|
||||
result = _get_config_file(None)
|
||||
assert result == config_file.resolve()
|
||||
|
||||
def test_returns_none_when_not_found(
|
||||
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "nonexistent"))
|
||||
# Monkeypatch _CONFIG_PATHS to avoid finding existing files
|
||||
monkeypatch.setattr(
|
||||
config_cmd_module,
|
||||
"_CONFIG_PATHS",
|
||||
[
|
||||
tmp_path / "compose-farm.yaml",
|
||||
tmp_path / "nonexistent" / "compose-farm" / "compose-farm.yaml",
|
||||
],
|
||||
)
|
||||
result = _get_config_file(None)
|
||||
assert result is None
|
||||
|
||||
|
||||
class TestGenerateTemplate:
|
||||
"""Tests for _generate_template function."""
|
||||
|
||||
def test_generates_valid_yaml(self) -> None:
|
||||
template = _generate_template()
|
||||
# Should be valid YAML
|
||||
data = yaml.safe_load(template)
|
||||
assert "compose_dir" in data
|
||||
assert "hosts" in data
|
||||
assert "services" in data
|
||||
|
||||
def test_has_documentation_comments(self) -> None:
|
||||
template = _generate_template()
|
||||
assert "# Compose Farm configuration" in template
|
||||
assert "hosts:" in template
|
||||
assert "services:" in template
|
||||
|
||||
|
||||
class TestConfigInit:
|
||||
"""Tests for cf config init command."""
|
||||
|
||||
def test_init_creates_file(
|
||||
self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "new-config.yaml"
|
||||
result = runner.invoke(app, ["config", "init", "-p", str(config_file)])
|
||||
assert result.exit_code == 0
|
||||
assert config_file.exists()
|
||||
assert "Config file created" in result.stdout
|
||||
|
||||
def test_init_force_overwrites(
|
||||
self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "existing.yaml"
|
||||
config_file.write_text("old content")
|
||||
result = runner.invoke(app, ["config", "init", "-p", str(config_file), "-f"])
|
||||
assert result.exit_code == 0
|
||||
content = config_file.read_text()
|
||||
assert "old content" not in content
|
||||
assert "compose_dir" in content
|
||||
|
||||
def test_init_prompts_on_existing(
|
||||
self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "existing.yaml"
|
||||
config_file.write_text("old content")
|
||||
result = runner.invoke(app, ["config", "init", "-p", str(config_file)], input="n\n")
|
||||
assert result.exit_code == 0
|
||||
assert "Aborted" in result.stdout
|
||||
assert config_file.read_text() == "old content"
|
||||
|
||||
|
||||
class TestConfigPath:
|
||||
"""Tests for cf config path command."""
|
||||
|
||||
def test_path_shows_config(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
config_file.write_text(yaml.dump(valid_config_data))
|
||||
result = runner.invoke(app, ["config", "path"])
|
||||
assert result.exit_code == 0
|
||||
assert str(config_file) in result.stdout
|
||||
|
||||
def test_path_with_explicit_path(self, runner: CliRunner, tmp_path: Path) -> None:
|
||||
# When explicitly provided, path is returned even if file doesn't exist
|
||||
nonexistent = tmp_path / "nonexistent.yaml"
|
||||
result = runner.invoke(app, ["config", "path", "-p", str(nonexistent)])
|
||||
assert result.exit_code == 0
|
||||
assert str(nonexistent) in result.stdout
|
||||
|
||||
|
||||
class TestConfigShow:
|
||||
"""Tests for cf config show command."""
|
||||
|
||||
def test_show_displays_content(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
config_file.write_text(yaml.dump(valid_config_data))
|
||||
result = runner.invoke(app, ["config", "show"])
|
||||
assert result.exit_code == 0
|
||||
assert "Config file:" in result.stdout
|
||||
|
||||
def test_show_raw_output(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
content = yaml.dump(valid_config_data)
|
||||
config_file.write_text(content)
|
||||
result = runner.invoke(app, ["config", "show", "-r"])
|
||||
assert result.exit_code == 0
|
||||
assert content in result.stdout
|
||||
|
||||
|
||||
class TestConfigValidate:
|
||||
"""Tests for cf config validate command."""
|
||||
|
||||
def test_validate_valid_config(
|
||||
self,
|
||||
runner: CliRunner,
|
||||
tmp_path: Path,
|
||||
valid_config_data: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
monkeypatch.chdir(tmp_path)
|
||||
monkeypatch.delenv("CF_CONFIG", raising=False)
|
||||
config_file = tmp_path / "compose-farm.yaml"
|
||||
config_file.write_text(yaml.dump(valid_config_data))
|
||||
result = runner.invoke(app, ["config", "validate"])
|
||||
assert result.exit_code == 0
|
||||
assert "Valid config" in result.stdout
|
||||
assert "Hosts: 1" in result.stdout
|
||||
assert "Services: 1" in result.stdout
|
||||
|
||||
def test_validate_invalid_config(self, runner: CliRunner, tmp_path: Path) -> None:
|
||||
config_file = tmp_path / "invalid.yaml"
|
||||
config_file.write_text("invalid: [yaml: content")
|
||||
result = runner.invoke(app, ["config", "validate", "-p", str(config_file)])
|
||||
assert result.exit_code == 1
|
||||
# Error goes to stderr (captured in output when using CliRunner)
|
||||
output = result.stdout + (result.stderr or "")
|
||||
assert "Invalid config" in output or "✗" in output
|
||||
|
||||
def test_validate_missing_config(self, runner: CliRunner, tmp_path: Path) -> None:
|
||||
nonexistent = tmp_path / "nonexistent.yaml"
|
||||
result = runner.invoke(app, ["config", "validate", "-p", str(nonexistent)])
|
||||
assert result.exit_code == 1
|
||||
# Error goes to stderr
|
||||
output = result.stdout + (result.stderr or "")
|
||||
assert "Config file not found" in output or "not found" in output.lower()
|
||||
@@ -8,10 +8,10 @@ import pytest
 from compose_farm.config import Config, Host
 from compose_farm.executor import (
     CommandResult,
-    _is_local,
     _run_local_command,
     check_networks_exist,
     check_paths_exist,
+    is_local,
     run_command,
     run_compose,
     run_on_services,
@@ -22,7 +22,7 @@ linux_only = pytest.mark.skipif(sys.platform != "linux", reason="Linux-only shel
 
 
 class TestIsLocal:
-    """Tests for _is_local function."""
+    """Tests for is_local function."""
 
     @pytest.mark.parametrize(
         "address",
@@ -30,7 +30,7 @@ class TestIsLocal:
     )
     def test_local_addresses(self, address: str) -> None:
         host = Host(address=address)
-        assert _is_local(host) is True
+        assert is_local(host) is True
 
     @pytest.mark.parametrize(
         "address",
@@ -38,7 +38,7 @@ class TestIsLocal:
     )
     def test_remote_addresses(self, address: str) -> None:
         host = Host(address=address)
-        assert _is_local(host) is False
+        assert is_local(host) is False
 
 
 class TestRunLocalCommand:
@@ -9,7 +9,14 @@ import pytest
 
 from compose_farm.config import Config, Host
 from compose_farm.executor import CommandResult
-from compose_farm.logs import _parse_images_output, snapshot_services
+from compose_farm.logs import (
+    _parse_images_output,
+    collect_service_entries,
+    isoformat,
+    load_existing_entries,
+    merge_entries,
+    write_toml,
+)
 
 
 def test_parse_images_output_handles_list_and_lines() -> None:
@@ -55,26 +62,29 @@ async def test_snapshot_preserves_first_seen(tmp_path: Path) -> None:
 
     log_path = tmp_path / "dockerfarm-log.toml"
 
     # First snapshot
     first_time = datetime(2025, 1, 1, tzinfo=UTC)
-    await snapshot_services(
-        config,
-        ["svc"],
-        log_path=log_path,
-        now=first_time,
-        run_compose_fn=fake_run_compose,
+    first_entries = await collect_service_entries(
+        config, "svc", now=first_time, run_compose_fn=fake_run_compose
     )
+    first_iso = isoformat(first_time)
+    merged = merge_entries([], first_entries, now_iso=first_iso)
+    meta = {"generated_at": first_iso, "compose_dir": str(config.compose_dir)}
+    write_toml(log_path, meta=meta, entries=merged)
 
     after_first = tomllib.loads(log_path.read_text())
     first_seen = after_first["entries"][0]["first_seen"]
 
     # Second snapshot
     second_time = datetime(2025, 2, 1, tzinfo=UTC)
-    await snapshot_services(
-        config,
-        ["svc"],
-        log_path=log_path,
-        now=second_time,
-        run_compose_fn=fake_run_compose,
+    second_entries = await collect_service_entries(
+        config, "svc", now=second_time, run_compose_fn=fake_run_compose
     )
+    second_iso = isoformat(second_time)
+    existing = load_existing_entries(log_path)
+    merged = merge_entries(existing, second_entries, now_iso=second_iso)
+    meta = {"generated_at": second_iso, "compose_dir": str(config.compose_dir)}
+    write_toml(log_path, meta=meta, entries=merged)
 
     after_second = tomllib.loads(log_path.read_text())
     entry = after_second["entries"][0]
@@ -1,15 +1,13 @@
 """Tests for sync command and related functions."""
 
 from pathlib import Path
 from typing import Any
 from unittest.mock import AsyncMock, patch
 
 import pytest
 
-from compose_farm import cli as cli_module
 from compose_farm import executor as executor_module
-from compose_farm import operations as operations_module
 from compose_farm import state as state_module
+from compose_farm.cli import management as cli_management_module
 from compose_farm.config import Config, Host
 from compose_farm.executor import CommandResult, check_service_running
@@ -95,48 +93,12 @@ class TestCheckServiceRunning:
         assert result is False
 
 
-class TestDiscoverRunningServices:
-    """Tests for discover_running_services function."""
-
-    @pytest.mark.asyncio
-    async def test_discovers_on_assigned_host(self, mock_config: Config) -> None:
-        """Discovers service running on its assigned host."""
-        with patch.object(
-            operations_module, "check_service_running", new_callable=AsyncMock
-        ) as mock_check:
-            # plex running on nas01, jellyfin not running, sonarr on nas02
-            async def check_side_effect(_cfg: Any, service: str, host: str) -> bool:
-                return (service == "plex" and host == "nas01") or (
-                    service == "sonarr" and host == "nas02"
-                )
-
-            mock_check.side_effect = check_side_effect
-
-            result = await operations_module.discover_running_services(mock_config)
-            assert result == {"plex": "nas01", "sonarr": "nas02"}
-
-    @pytest.mark.asyncio
-    async def test_discovers_on_different_host(self, mock_config: Config) -> None:
-        """Discovers service running on non-assigned host (after migration)."""
-        with patch.object(
-            operations_module, "check_service_running", new_callable=AsyncMock
-        ) as mock_check:
-            # plex migrated to nas02
-            async def check_side_effect(_cfg: Any, service: str, host: str) -> bool:
-                return service == "plex" and host == "nas02"
-
-            mock_check.side_effect = check_side_effect
-
-            result = await operations_module.discover_running_services(mock_config)
-            assert result == {"plex": "nas02"}
-
-
 class TestReportSyncChanges:
     """Tests for _report_sync_changes function."""
 
     def test_reports_added(self, capsys: pytest.CaptureFixture[str]) -> None:
         """Reports newly discovered services."""
-        cli_module._report_sync_changes(
+        cli_management_module._report_sync_changes(
             added=["plex", "jellyfin"],
             removed=[],
             changed=[],
@@ -150,7 +112,7 @@ class TestReportSyncChanges:
 
     def test_reports_removed(self, capsys: pytest.CaptureFixture[str]) -> None:
         """Reports services that are no longer running."""
-        cli_module._report_sync_changes(
+        cli_management_module._report_sync_changes(
             added=[],
             removed=["sonarr"],
             changed=[],
@@ -163,7 +125,7 @@ class TestReportSyncChanges:
 
     def test_reports_changed(self, capsys: pytest.CaptureFixture[str]) -> None:
         """Reports services that moved to a different host."""
-        cli_module._report_sync_changes(
+        cli_management_module._report_sync_changes(
             added=[],
             removed=[],
             changed=[("plex", "nas01", "nas02")],