mirror of
https://github.com/basnijholt/compose-farm.git
synced 2026-02-03 14:13:26 +00:00
Compare commits
37 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
471936439e | ||
|
|
36e4bef46d | ||
|
|
2cac0bf263 | ||
|
|
3d07cbdff0 | ||
|
|
0f67c17281 | ||
|
|
bd22a1a55e | ||
|
|
cc54e89b33 | ||
|
|
f71e5cffd6 | ||
|
|
0e32729763 | ||
|
|
b0b501fa98 | ||
|
|
7e00596046 | ||
|
|
d1e4d9b05c | ||
|
|
3fbae630f9 | ||
|
|
3e3c919714 | ||
|
|
59b797a89d | ||
|
|
7caf006e07 | ||
|
|
45040b75f1 | ||
|
|
fa1c5c1044 | ||
|
|
67e832f687 | ||
|
|
da986fab6a | ||
|
|
5dd6e2ca05 | ||
|
|
16435065de | ||
|
|
5921b5e405 | ||
|
|
f0cd85b5f5 | ||
|
|
fe95443733 | ||
|
|
8df9288156 | ||
|
|
124bde7575 | ||
|
|
350947ad12 | ||
|
|
bb019bcae6 | ||
|
|
6d50f90344 | ||
|
|
474b7ca044 | ||
|
|
7555d8443b | ||
|
|
de46c3ff0f | ||
|
|
fff064cf03 | ||
|
|
187f83b61d | ||
|
|
d2b9113b9d | ||
|
|
be77eb7c75 |
2
.github/check_readme_commands.py
vendored
2
.github/check_readme_commands.py
vendored
@@ -24,7 +24,7 @@ def get_all_commands(typer_app: typer.Typer, prefix: str = "cf") -> set[str]:
|
||||
continue
|
||||
name = command.name
|
||||
if not name and command.callback:
|
||||
name = command.callback.__name__
|
||||
name = getattr(command.callback, "__name__", None)
|
||||
if name:
|
||||
commands.add(f"{prefix} {name}")
|
||||
|
||||
|
||||
2
.github/workflows/ci.yml
vendored
2
.github/workflows/ci.yml
vendored
@@ -54,7 +54,7 @@ jobs:
|
||||
run: uv run playwright install chromium --with-deps
|
||||
|
||||
- name: Run browser tests
|
||||
run: uv run pytest -m browser -v --no-cov
|
||||
run: uv run pytest -m browser -n auto -v
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
12
.github/workflows/docs.yml
vendored
12
.github/workflows/docs.yml
vendored
@@ -7,6 +7,11 @@ on:
|
||||
- "docs/**"
|
||||
- "zensical.toml"
|
||||
- ".github/workflows/docs.yml"
|
||||
pull_request:
|
||||
paths:
|
||||
- "docs/**"
|
||||
- "zensical.toml"
|
||||
- ".github/workflows/docs.yml"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
@@ -15,8 +20,8 @@ permissions:
|
||||
id-token: write
|
||||
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
group: "pages-${{ github.ref }}"
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -39,14 +44,17 @@ jobs:
|
||||
run: zensical build
|
||||
|
||||
- name: Setup Pages
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: actions/configure-pages@v5
|
||||
|
||||
- name: Upload artifact
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: "./site"
|
||||
|
||||
deploy:
|
||||
if: github.event_name != 'pull_request'
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -44,3 +44,4 @@ compose-farm.yaml
|
||||
coverage.xml
|
||||
.env
|
||||
homepage/
|
||||
site/
|
||||
|
||||
@@ -25,12 +25,18 @@ repos:
|
||||
args: [--fix]
|
||||
- id: ruff-format
|
||||
|
||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||
rev: v1.14.0
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: mypy
|
||||
additional_dependencies:
|
||||
- pydantic>=2.0.0
|
||||
- typer>=0.9.0
|
||||
- asyncssh>=2.14.0
|
||||
- types-PyYAML
|
||||
name: mypy (type checker)
|
||||
entry: uv run mypy src tests
|
||||
language: system
|
||||
types: [python]
|
||||
pass_filenames: false
|
||||
|
||||
- id: ty
|
||||
name: ty (type checker)
|
||||
entry: uv run ty check
|
||||
language: system
|
||||
types: [python]
|
||||
pass_filenames: false
|
||||
|
||||
@@ -75,6 +75,30 @@ Check for conflicts between documentation files:
|
||||
- Command tables match across files
|
||||
- Config examples are consistent
|
||||
|
||||
### 8. Recent Changes Check
|
||||
|
||||
Before starting the review:
|
||||
|
||||
- Run `git log --oneline -20` to see recent commits
|
||||
- Look for commits with `feat:`, `fix:`, or that mention new options/commands
|
||||
- Cross-reference these against the documentation to catch undocumented features
|
||||
|
||||
### 9. Auto-Generated Content
|
||||
|
||||
For README.md or docs with `<!-- CODE:BASH:START -->` blocks:
|
||||
|
||||
- Run `uv run markdown-code-runner <file>` to regenerate outputs
|
||||
- Check for missing `<!-- OUTPUT:START -->` markers (blocks that never ran)
|
||||
- Verify help output matches current CLI behavior
|
||||
|
||||
### 10. CLI Options Completeness
|
||||
|
||||
For each command, run `cf <command> --help` and verify:
|
||||
|
||||
- Every option shown in help is documented
|
||||
- Short flags (-x) are listed alongside long flags (--xxx)
|
||||
- Default values in help match documented defaults
|
||||
|
||||
## Output Format
|
||||
|
||||
Provide findings in these categories:
|
||||
|
||||
53
CLAUDE.md
53
CLAUDE.md
@@ -15,7 +15,7 @@ src/compose_farm/
|
||||
│ ├── app.py # Shared Typer app instance, version callback
|
||||
│ ├── common.py # Shared helpers, options, progress bar utilities
|
||||
│ ├── config.py # Config subcommand (init, show, path, validate, edit, symlink)
|
||||
│ ├── lifecycle.py # up, down, pull, restart, update, apply commands
|
||||
│ ├── lifecycle.py # up, down, stop, pull, restart, update, apply, compose commands
|
||||
│ ├── management.py # refresh, check, init-network, traefik-file commands
|
||||
│ ├── monitoring.py # logs, ps, stats commands
|
||||
│ ├── ssh.py # SSH key management (setup, status, keygen)
|
||||
@@ -25,7 +25,7 @@ src/compose_farm/
|
||||
├── console.py # Shared Rich console instances
|
||||
├── executor.py # SSH/local command execution, streaming output
|
||||
├── operations.py # Business logic (up, migrate, discover, preflight checks)
|
||||
├── state.py # Deployment state tracking (which service on which host)
|
||||
├── state.py # Deployment state tracking (which stack on which host)
|
||||
├── logs.py # Image digest snapshots (dockerfarm-log.toml)
|
||||
├── paths.py # Path utilities, config file discovery
|
||||
├── ssh_keys.py # SSH key path constants and utilities
|
||||
@@ -46,34 +46,49 @@ Icons use [Lucide](https://lucide.dev/). Add new icons as macros in `web/templat
|
||||
## Key Design Decisions
|
||||
|
||||
1. **Hybrid SSH approach**: asyncssh for parallel streaming with prefixes; native `ssh -t` for raw mode (progress bars)
|
||||
2. **Parallel by default**: Multiple services run concurrently via `asyncio.gather`
|
||||
3. **Streaming output**: Real-time stdout/stderr with `[service]` prefix using Rich
|
||||
2. **Parallel by default**: Multiple stacks run concurrently via `asyncio.gather`
|
||||
3. **Streaming output**: Real-time stdout/stderr with `[stack]` prefix using Rich
|
||||
4. **SSH key auth only**: Uses ssh-agent, no password handling (YAGNI)
|
||||
5. **NFS assumption**: Compose files at same path on all hosts
|
||||
6. **Local IP auto-detection**: Skips SSH when target host matches local machine's IP
|
||||
7. **State tracking**: Tracks where services are deployed for auto-migration
|
||||
7. **State tracking**: Tracks where stacks are deployed for auto-migration
|
||||
8. **Pre-flight checks**: Verifies NFS mounts and Docker networks exist before starting/migrating
|
||||
|
||||
## Code Style
|
||||
|
||||
- **Imports at top level**: Never add imports inside functions unless they are explicitly marked with `# noqa: PLC0415` and a comment explaining it speeds up CLI startup. Heavy modules like `pydantic`, `yaml`, and `rich.table` are lazily imported to keep `cf --help` fast.
|
||||
|
||||
## Development Commands
|
||||
|
||||
Use `just` for common tasks. Run `just` to list available commands:
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `just install` | Install dev dependencies |
|
||||
| `just test` | Run all tests |
|
||||
| `just test-cli` | Run CLI tests (parallel) |
|
||||
| `just test-web` | Run web UI tests (parallel) |
|
||||
| `just lint` | Lint, format, and type check |
|
||||
| `just web` | Start web UI (port 9001) |
|
||||
| `just doc` | Build and serve docs (port 9002) |
|
||||
| `just clean` | Clean build artifacts |
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests with `uv run pytest`. Browser tests require Chromium (system-installed or via `playwright install chromium`):
|
||||
Run tests with `just test` or `uv run pytest`. Browser tests require Chromium (system-installed or via `playwright install chromium`):
|
||||
|
||||
```bash
|
||||
# Unit tests only (skip browser tests, can parallelize)
|
||||
# Unit tests only (parallel)
|
||||
uv run pytest -m "not browser" -n auto
|
||||
|
||||
# Browser tests only (run sequentially, no coverage)
|
||||
uv run pytest -m browser --no-cov
|
||||
# Browser tests only (parallel)
|
||||
uv run pytest -m browser -n auto
|
||||
|
||||
# All tests
|
||||
uv run pytest --no-cov
|
||||
uv run pytest
|
||||
```
|
||||
|
||||
Browser tests are marked with `@pytest.mark.browser`. They use Playwright to test HTMX behavior, JavaScript functionality (sidebar filter, command palette, terminals), and content stability during navigation. Run sequentially (no `-n`) to avoid resource contention.
|
||||
Browser tests are marked with `@pytest.mark.browser`. They use Playwright to test HTMX behavior, JavaScript functionality (sidebar filter, command palette, terminals), and content stability during navigation.
|
||||
|
||||
## Communication Notes
|
||||
|
||||
@@ -114,16 +129,18 @@ CLI available as `cf` or `compose-farm`.
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `up` | Start services (`docker compose up -d`), auto-migrates if host changed |
|
||||
| `down` | Stop services (`docker compose down`). Use `--orphaned` to stop services removed from config |
|
||||
| `up` | Start stacks (`docker compose up -d`), auto-migrates if host changed |
|
||||
| `down` | Stop stacks (`docker compose down`). Use `--orphaned` to stop stacks removed from config |
|
||||
| `stop` | Stop services without removing containers (`docker compose stop`) |
|
||||
| `pull` | Pull latest images |
|
||||
| `restart` | `down` + `up -d` |
|
||||
| `update` | `pull` + `build` + `down` + `up -d` |
|
||||
| `apply` | Make reality match config: migrate services + stop orphans. Use `--dry-run` to preview |
|
||||
| `logs` | Show service logs |
|
||||
| `ps` | Show status of all services |
|
||||
| `stats` | Show overview (hosts, services, pending migrations; `--live` for container counts) |
|
||||
| `refresh` | Update state from reality: discover running services, capture image digests |
|
||||
| `apply` | Make reality match config: migrate stacks + stop orphans. Use `--dry-run` to preview |
|
||||
| `compose` | Run any docker compose command on a stack (passthrough) |
|
||||
| `logs` | Show stack logs |
|
||||
| `ps` | Show status of all stacks |
|
||||
| `stats` | Show overview (hosts, stacks, pending migrations; `--live` for container counts) |
|
||||
| `refresh` | Update state from reality: discover running stacks, capture image digests |
|
||||
| `check` | Validate config, traefik labels, mounts, networks; show host compatibility |
|
||||
| `init-network` | Create Docker network on hosts with consistent subnet/gateway |
|
||||
| `traefik-file` | Generate Traefik file-provider config from compose labels |
|
||||
|
||||
466
README.md
466
README.md
@@ -10,7 +10,19 @@
|
||||
A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
|
||||
|
||||
> [!NOTE]
|
||||
> Run `docker compose` commands across multiple hosts via SSH. One YAML maps services to hosts. Run `cf apply` and reality matches your config—services start, migrate, or stop as needed. No Kubernetes, no Swarm, no magic.
|
||||
> Run `docker compose` commands across multiple hosts via SSH. One YAML maps stacks to hosts. Run `cf apply` and reality matches your config—stacks start, migrate, or stop as needed. No Kubernetes, no Swarm, no magic.
|
||||
|
||||
## Quick Demo
|
||||
|
||||
**CLI:**
|
||||
|
||||

|
||||
|
||||
**Web UI:**
|
||||
|
||||

|
||||
|
||||
## Table of Contents
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
@@ -19,7 +31,7 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
|
||||
- [How It Works](#how-it-works)
|
||||
- [Requirements](#requirements)
|
||||
- [Limitations & Best Practices](#limitations--best-practices)
|
||||
- [What breaks when you move a service](#what-breaks-when-you-move-a-service)
|
||||
- [What breaks when you move a stack](#what-breaks-when-you-move-a-stack)
|
||||
- [Best practices](#best-practices)
|
||||
- [What Compose Farm doesn't do](#what-compose-farm-doesnt-do)
|
||||
- [Installation](#installation)
|
||||
@@ -27,7 +39,9 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
|
||||
- [SSH Agent (default)](#ssh-agent-default)
|
||||
- [Dedicated SSH Key (recommended for Docker/Web UI)](#dedicated-ssh-key-recommended-for-dockerweb-ui)
|
||||
- [Configuration](#configuration)
|
||||
- [Multi-Host Services](#multi-host-services)
|
||||
- [Single-host example](#single-host-example)
|
||||
- [Multi-host example](#multi-host-example)
|
||||
- [Multi-Host Stacks](#multi-host-stacks)
|
||||
- [Config Command](#config-command)
|
||||
- [Usage](#usage)
|
||||
- [CLI `--help` Output](#cli---help-output)
|
||||
@@ -40,14 +54,14 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
|
||||
|
||||
## Why Compose Farm?
|
||||
|
||||
I used to run 100+ Docker Compose stacks on a single machine that kept running out of memory. I needed a way to distribute services across multiple machines without the complexity of:
|
||||
I used to run 100+ Docker Compose stacks on a single machine that kept running out of memory. I needed a way to distribute stacks across multiple machines without the complexity of:
|
||||
|
||||
- **Kubernetes**: Overkill for my use case. I don't need pods, services, ingress controllers, or YAML manifests 10x the size of my compose files.
|
||||
- **Docker Swarm**: Effectively in maintenance mode—no longer being invested in by Docker.
|
||||
|
||||
Both require changes to your compose files. **Compose Farm requires zero changes**—your existing `docker-compose.yml` files work as-is.
|
||||
|
||||
I also wanted a declarative setup—one config file that defines where everything runs. Change the config, run `cf apply`, and everything reconciles—services start, migrate, or stop as needed. See [Comparison with Alternatives](#comparison-with-alternatives) for how this compares to other approaches.
|
||||
I also wanted a declarative setup—one config file that defines where everything runs. Change the config, run `cf apply`, and everything reconciles—stacks start, migrate, or stop as needed. See [Comparison with Alternatives](#comparison-with-alternatives) for how this compares to other approaches.
|
||||
|
||||
<p align="center">
|
||||
<a href="https://xkcd.com/927/">
|
||||
@@ -59,8 +73,8 @@ Before you say it—no, this is not a new standard. I changed nothing about my e
|
||||
|
||||
Compose Farm just automates what you'd do by hand:
|
||||
- Runs `docker compose` commands over SSH
|
||||
- Tracks which service runs on which host
|
||||
- **One command (`cf apply`) to reconcile everything**—start missing services, migrate moved ones, stop removed ones
|
||||
- Tracks which stack runs on which host
|
||||
- **One command (`cf apply`) to reconcile everything**—start missing stacks, migrate moved ones, stop removed ones
|
||||
- Generates Traefik file-provider config for cross-host routing
|
||||
|
||||
**It's a convenience wrapper, not a new paradigm.**
|
||||
@@ -70,13 +84,13 @@ Compose Farm just automates what you'd do by hand:
|
||||
**The declarative way** — run `cf apply` and reality matches your config:
|
||||
|
||||
1. Compose Farm compares your config to what's actually running
|
||||
2. Services in config but not running? **Starts them**
|
||||
3. Services on the wrong host? **Migrates them** (stops on old host, starts on new)
|
||||
4. Services running but removed from config? **Stops them**
|
||||
2. Stacks in config but not running? **Starts them**
|
||||
3. Stacks on the wrong host? **Migrates them** (stops on old host, starts on new)
|
||||
4. Stacks running but removed from config? **Stops them**
|
||||
|
||||
**Under the hood** — each service operation is just SSH + docker compose:
|
||||
**Under the hood** — each stack operation is just SSH + docker compose:
|
||||
|
||||
1. Look up which host runs the service (e.g., `plex` → `server-1`)
|
||||
1. Look up which host runs the stack (e.g., `plex` → `server-1`)
|
||||
2. SSH to `server-1` (or run locally if `localhost`)
|
||||
3. Execute `docker compose -f /opt/compose/plex/docker-compose.yml up -d`
|
||||
4. Stream output back with `[plex]` prefix
|
||||
@@ -104,13 +118,13 @@ nas:/volume1/compose → /opt/compose (on server-2)
|
||||
nas:/volume1/compose → /opt/compose (on server-3)
|
||||
```
|
||||
|
||||
Compose Farm simply runs `docker compose -f /opt/compose/{service}/docker-compose.yml` on the appropriate host—it doesn't copy or sync files.
|
||||
Compose Farm simply runs `docker compose -f /opt/compose/{stack}/docker-compose.yml` on the appropriate host—it doesn't copy or sync files.
|
||||
|
||||
## Limitations & Best Practices
|
||||
|
||||
Compose Farm moves containers between hosts but **does not provide cross-host networking**. Docker's internal DNS and networks don't span hosts.
|
||||
|
||||
### What breaks when you move a service
|
||||
### What breaks when you move a stack
|
||||
|
||||
- **Docker DNS** - `http://redis:6379` won't resolve from another host
|
||||
- **Docker networks** - Containers can't reach each other via network names
|
||||
@@ -120,7 +134,7 @@ Compose Farm moves containers between hosts but **does not provide cross-host ne
|
||||
|
||||
1. **Keep dependent services together** - If an app needs a database, redis, or worker, keep them in the same compose file on the same host
|
||||
|
||||
2. **Only migrate standalone services** - Services that don't talk to other containers (or only talk to external APIs) are safe to move
|
||||
2. **Only migrate standalone stacks** - Stacks whose services don't talk to other containers (or only talk to external APIs) are safe to move
|
||||
|
||||
3. **Expose ports for cross-host communication** - If services must communicate across hosts, publish ports and use IP addresses instead of container names:
|
||||
```yaml
|
||||
@@ -141,7 +155,7 @@ If you need containers on different hosts to communicate seamlessly, you need Do
|
||||
|
||||
```bash
|
||||
# One-liner (installs uv if needed)
|
||||
curl -fsSL https://raw.githubusercontent.com/basnijholt/compose-farm/main/bootstrap.sh | sh
|
||||
curl -fsSL https://compose-farm.nijho.lt/install | sh
|
||||
|
||||
# Or if you already have uv/pip
|
||||
uv tool install compose-farm
|
||||
@@ -197,7 +211,7 @@ This creates `~/.ssh/compose-farm/id_ed25519` (ED25519, no passphrase) and copie
|
||||
|
||||
<details><summary>🐳 Docker volume options for SSH keys</summary>
|
||||
|
||||
When running in Docker, mount a volume to persist the SSH keys. Choose ONE option and use it for both `cf` and `web` services:
|
||||
When running in Docker, mount a volume to persist the SSH keys. Choose ONE option and use it for both `cf` and `web` Compose services:
|
||||
|
||||
**Option 1: Host path (default)** - keys at `~/.ssh/compose-farm/id_ed25519`
|
||||
```yaml
|
||||
@@ -223,8 +237,25 @@ The keys will persist across restarts.
|
||||
|
||||
## Configuration
|
||||
|
||||
Create `~/.config/compose-farm/compose-farm.yaml` (or `./compose-farm.yaml` in your working directory):
|
||||
Create `compose-farm.yaml` in the directory where you'll run commands (e.g., `/opt/stacks`). This keeps config near your stacks. Alternatively, use `~/.config/compose-farm/compose-farm.yaml` for a global config, or symlink from one to the other with `cf config symlink`.
|
||||
|
||||
### Single-host example
|
||||
|
||||
No SSH, shared storage, or Traefik file-provider required.
|
||||
|
||||
```yaml
|
||||
compose_dir: /opt/stacks
|
||||
|
||||
hosts:
|
||||
local: localhost # Run locally without SSH
|
||||
|
||||
stacks:
|
||||
plex: local
|
||||
jellyfin: local
|
||||
traefik: local
|
||||
```
|
||||
|
||||
### Multi-host example
|
||||
```yaml
|
||||
compose_dir: /opt/compose # Must be the same path on all hosts
|
||||
|
||||
@@ -235,24 +266,24 @@ hosts:
|
||||
server-2:
|
||||
address: 192.168.1.11
|
||||
# user defaults to current user
|
||||
local: localhost # Run locally without SSH
|
||||
|
||||
services:
|
||||
stacks:
|
||||
plex: server-1
|
||||
jellyfin: server-2
|
||||
sonarr: server-1
|
||||
radarr: local # Runs on the machine where you invoke compose-farm
|
||||
grafana: server-1
|
||||
|
||||
# Multi-host services (run on multiple/all hosts)
|
||||
# Multi-host stacks (run on multiple/all hosts)
|
||||
autokuma: all # Runs on ALL configured hosts
|
||||
dozzle: [server-1, server-2] # Explicit list of hosts
|
||||
```
|
||||
|
||||
Compose files are expected at `{compose_dir}/{service}/compose.yaml` (also supports `compose.yml`, `docker-compose.yml`, `docker-compose.yaml`).
|
||||
For cross-host HTTP routing, add Traefik labels to your compose files and set `traefik_file` so Compose Farm can generate the file-provider config.
|
||||
|
||||
### Multi-Host Services
|
||||
Each entry in `stacks:` maps to a folder under `compose_dir` that contains a compose file. Compose files are expected at `{compose_dir}/{stack}/compose.yaml` (also supports `compose.yml`, `docker-compose.yml`, `docker-compose.yaml`).
|
||||
|
||||
Some services need to run on every host. This is typically required for tools that access **host-local resources** like the Docker socket (`/var/run/docker.sock`), which cannot be accessed remotely without security risks.
|
||||
### Multi-Host Stacks
|
||||
|
||||
Some stacks need to run on every host. This is typically required for tools that access **host-local resources** like the Docker socket (`/var/run/docker.sock`), which cannot be accessed remotely without security risks.
|
||||
|
||||
Common use cases:
|
||||
- **AutoKuma** - auto-creates Uptime Kuma monitors from container labels (needs local Docker socket)
|
||||
@@ -265,7 +296,7 @@ This is the same pattern as Docker Swarm's `deploy.mode: global`.
|
||||
Use the `all` keyword or an explicit list:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
# Run on all configured hosts
|
||||
autokuma: all
|
||||
dozzle: all
|
||||
@@ -274,9 +305,9 @@ services:
|
||||
node-exporter: [server-1, server-2, server-3]
|
||||
```
|
||||
|
||||
When you run `cf up autokuma`, it starts the service on all hosts in parallel. Multi-host services:
|
||||
When you run `cf up autokuma`, it starts the stack on all hosts in parallel. Multi-host stacks:
|
||||
- Are excluded from migration logic (they always run everywhere)
|
||||
- Show output with `[service@host]` prefix for each host
|
||||
- Show output with `[stack@host]` prefix for each host
|
||||
- Track all running hosts in state
|
||||
|
||||
### Config Command
|
||||
@@ -300,20 +331,21 @@ The CLI is available as both `compose-farm` and the shorter `cf` alias.
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| **`cf apply`** | **Make reality match config (start + migrate + stop orphans)** |
|
||||
| `cf up <svc>` | Start service (auto-migrates if host changed) |
|
||||
| `cf down <svc>` | Stop service |
|
||||
| `cf restart <svc>` | down + up |
|
||||
| `cf update <svc>` | pull + down + up |
|
||||
| `cf pull <svc>` | Pull latest images |
|
||||
| `cf logs -f <svc>` | Follow logs |
|
||||
| `cf ps` | Show status of all services |
|
||||
| `cf refresh` | Update state from running services |
|
||||
| `cf up <stack>` | Start stack (auto-migrates if host changed) |
|
||||
| `cf down <stack>` | Stop and remove stack containers |
|
||||
| `cf stop <stack>` | Stop stack without removing containers |
|
||||
| `cf restart <stack>` | down + up |
|
||||
| `cf update <stack>` | pull + build + down + up |
|
||||
| `cf pull <stack>` | Pull latest images |
|
||||
| `cf logs -f <stack>` | Follow logs |
|
||||
| `cf ps` | Show status of all stacks |
|
||||
| `cf refresh` | Update state from running stacks |
|
||||
| `cf check` | Validate config, mounts, networks |
|
||||
| `cf init-network` | Create Docker network on hosts |
|
||||
| `cf traefik-file` | Generate Traefik file-provider config |
|
||||
| `cf config <cmd>` | Manage config files (init, show, path, validate, edit, symlink) |
|
||||
|
||||
All commands support `--all` to operate on all services.
|
||||
All commands support `--all` to operate on all stacks.
|
||||
|
||||
Each command replaces: look up host → SSH → find compose file → run `ssh host "cd /opt/compose/plex && docker compose up -d"`.
|
||||
|
||||
@@ -321,14 +353,14 @@ Each command replaces: look up host → SSH → find compose file → run `ssh h
|
||||
# The main command: make reality match your config
|
||||
cf apply # start missing + migrate + stop orphans
|
||||
cf apply --dry-run # preview what would change
|
||||
cf apply --no-orphans # skip stopping orphaned services
|
||||
cf apply --full # also refresh all services (picks up config changes)
|
||||
cf apply --no-orphans # skip stopping orphaned stacks
|
||||
cf apply --full # also refresh all stacks (picks up config changes)
|
||||
|
||||
# Or operate on individual services
|
||||
cf up plex jellyfin # start services (auto-migrates if host changed)
|
||||
# Or operate on individual stacks
|
||||
cf up plex jellyfin # start stacks (auto-migrates if host changed)
|
||||
cf up --all
|
||||
cf down plex # stop services
|
||||
cf down --orphaned # stop services removed from config
|
||||
cf down plex # stop stacks
|
||||
cf down --orphaned # stop stacks removed from config
|
||||
|
||||
# Pull latest images
|
||||
cf pull --all
|
||||
@@ -336,19 +368,19 @@ cf pull --all
|
||||
# Restart (down + up)
|
||||
cf restart plex
|
||||
|
||||
# Update (pull + down + up) - the end-to-end update command
|
||||
# Update (pull + build + down + up) - the end-to-end update command
|
||||
cf update --all
|
||||
|
||||
# Update state from reality (discovers running services + captures digests)
|
||||
cf refresh # updates state.yaml and dockerfarm-log.toml
|
||||
# Update state from reality (discovers running stacks + captures digests)
|
||||
cf refresh # updates compose-farm-state.yaml and dockerfarm-log.toml
|
||||
cf refresh --dry-run # preview without writing
|
||||
|
||||
# Validate config, traefik labels, mounts, and networks
|
||||
cf check # full validation (includes SSH checks)
|
||||
cf check --local # fast validation (skip SSH)
|
||||
cf check jellyfin # check service + show which hosts can run it
|
||||
cf check jellyfin # check stack + show which hosts can run it
|
||||
|
||||
# Create Docker network on new hosts (before migrating services)
|
||||
# Create Docker network on new hosts (before migrating stacks)
|
||||
cf init-network nuc hp # create mynetwork on specific hosts
|
||||
cf init-network # create on all hosts
|
||||
|
||||
@@ -391,27 +423,33 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Lifecycle ──────────────────────────────────────────────────────────────────╮
|
||||
│ up Start services (docker compose up -d). Auto-migrates if host │
|
||||
│ up Start stacks (docker compose up -d). Auto-migrates if host │
|
||||
│ changed. │
|
||||
│ down Stop services (docker compose down). │
|
||||
│ down Stop stacks (docker compose down). │
|
||||
│ stop Stop services without removing containers (docker compose │
|
||||
│ stop). │
|
||||
│ pull Pull latest images (docker compose pull). │
|
||||
│ restart Restart services (down + up). │
|
||||
│ update Update services (pull + build + down + up). │
|
||||
│ restart Restart stacks (down + up). With --service, restarts just │
|
||||
│ that service. │
|
||||
│ update Update stacks (pull + build + down + up). With --service, │
|
||||
│ updates just that service. │
|
||||
│ apply Make reality match config (start, migrate, stop as needed). │
|
||||
│ compose Run any docker compose command on a stack. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Configuration ──────────────────────────────────────────────────────────────╮
|
||||
│ traefik-file Generate a Traefik file-provider fragment from compose │
|
||||
│ Traefik labels. │
|
||||
│ refresh Update local state from running services. │
|
||||
│ refresh Update local state from running stacks. │
|
||||
│ check Validate configuration, traefik labels, mounts, and networks. │
|
||||
│ init-network Create Docker network on hosts with consistent settings. │
|
||||
│ config Manage compose-farm configuration files. │
|
||||
│ ssh Manage SSH keys for passwordless authentication. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Monitoring ─────────────────────────────────────────────────────────────────╮
|
||||
│ logs Show service logs. │
|
||||
│ ps Show status of services. │
|
||||
│ stats Show overview statistics for hosts and services. │
|
||||
│ logs Show stack logs. With --service, shows logs for just that │
|
||||
│ service. │
|
||||
│ ps Show status of stacks. │
|
||||
│ stats Show overview statistics for hosts and stacks. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Server ─────────────────────────────────────────────────────────────────────╮
|
||||
│ web Start the web UI server. │
|
||||
@@ -440,18 +478,19 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf up [OPTIONS] [SERVICES]...
|
||||
Usage: cf up [OPTIONS] [STACKS]...
|
||||
|
||||
Start services (docker compose up -d). Auto-migrates if host changed.
|
||||
Start stacks (docker compose up -d). Auto-migrates if host changed.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --host -H TEXT Filter to services on this host │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --host -H TEXT Filter to stacks on this host │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
@@ -475,18 +514,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf down [OPTIONS] [SERVICES]...
|
||||
Usage: cf down [OPTIONS] [STACKS]...
|
||||
|
||||
Stop services (docker compose down).
|
||||
Stop stacks (docker compose down).
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --orphaned Stop orphaned services (in state but removed from │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --orphaned Stop orphaned stacks (in state but removed from │
|
||||
│ config) │
|
||||
│ --host -H TEXT Filter to services on this host │
|
||||
│ --host -H TEXT Filter to stacks on this host │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
@@ -497,6 +536,41 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See the output of <code>cf stop --help</code></summary>
|
||||
|
||||
<!-- CODE:BASH:START -->
|
||||
<!-- echo '```yaml' -->
|
||||
<!-- export NO_COLOR=1 -->
|
||||
<!-- export TERM=dumb -->
|
||||
<!-- export TERMINAL_WIDTH=90 -->
|
||||
<!-- cf stop --help -->
|
||||
<!-- echo '```' -->
|
||||
<!-- CODE:END -->
|
||||
<!-- OUTPUT:START -->
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf stop [OPTIONS] [STACKS]...
|
||||
|
||||
Stop services without removing containers (docker compose stop).
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all stacks │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
|
||||
<!-- OUTPUT:END -->
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See the output of <code>cf pull --help</code></summary>
|
||||
|
||||
@@ -512,17 +586,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf pull [OPTIONS] [SERVICES]...
|
||||
Usage: cf pull [OPTIONS] [STACKS]...
|
||||
|
||||
Pull latest images (docker compose pull).
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
@@ -546,17 +621,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf restart [OPTIONS] [SERVICES]...
|
||||
Usage: cf restart [OPTIONS] [STACKS]...
|
||||
|
||||
Restart services (down + up).
|
||||
Restart stacks (down + up). With --service, restarts just that service.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
@@ -580,17 +656,19 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf update [OPTIONS] [SERVICES]...
|
||||
Usage: cf update [OPTIONS] [STACKS]...
|
||||
|
||||
Update services (pull + build + down + up).
|
||||
Update stacks (pull + build + down + up). With --service, updates just that
|
||||
service.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
@@ -618,21 +696,21 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
|
||||
Make reality match config (start, migrate, stop as needed).
|
||||
|
||||
This is the "reconcile" command that ensures running services match your
|
||||
This is the "reconcile" command that ensures running stacks match your
|
||||
config file. It will:
|
||||
|
||||
1. Stop orphaned services (in state but removed from config)
|
||||
2. Migrate services on wrong host (host in state ≠ host in config)
|
||||
3. Start missing services (in config but not in state)
|
||||
1. Stop orphaned stacks (in state but removed from config)
|
||||
2. Migrate stacks on wrong host (host in state ≠ host in config)
|
||||
3. Start missing stacks (in config but not in state)
|
||||
|
||||
Use --dry-run to preview changes before applying.
|
||||
Use --no-orphans to only migrate/start without stopping orphaned services.
|
||||
Use --full to also run 'up' on all services (picks up compose/env changes).
|
||||
Use --no-orphans to only migrate/start without stopping orphaned stacks.
|
||||
Use --full to also run 'up' on all stacks (picks up compose/env changes).
|
||||
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --dry-run -n Show what would change without executing │
|
||||
│ --no-orphans Only migrate, don't stop orphaned services │
|
||||
│ --full -f Also run up on all services to apply config │
|
||||
│ --no-orphans Only migrate, don't stop orphaned stacks │
|
||||
│ --full -f Also run up on all stacks to apply config │
|
||||
│ changes │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
@@ -644,6 +722,53 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See the output of <code>cf compose --help</code></summary>
|
||||
|
||||
<!-- CODE:BASH:START -->
|
||||
<!-- echo '```yaml' -->
|
||||
<!-- export NO_COLOR=1 -->
|
||||
<!-- export TERM=dumb -->
|
||||
<!-- export TERMINAL_WIDTH=90 -->
|
||||
<!-- cf compose --help -->
|
||||
<!-- echo '```' -->
|
||||
<!-- CODE:END -->
|
||||
<!-- OUTPUT:START -->
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf compose [OPTIONS] STACK COMMAND [ARGS]...
|
||||
|
||||
Run any docker compose command on a stack.
|
||||
|
||||
Passthrough to docker compose for commands not wrapped by cf.
|
||||
Options after COMMAND are passed to docker compose, not cf.
|
||||
|
||||
Examples:
|
||||
cf compose mystack --help - show docker compose help
|
||||
cf compose mystack top - view running processes
|
||||
cf compose mystack images - list images
|
||||
cf compose mystack exec web bash - interactive shell
|
||||
cf compose mystack config - view parsed config
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ * stack TEXT Stack to operate on (use '.' for current dir) │
|
||||
│ [required] │
|
||||
│ * command TEXT Docker compose command [required] │
|
||||
│ args [ARGS]... Additional arguments │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --host -H TEXT Filter to stacks on this host │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
|
||||
<!-- OUTPUT:END -->
|
||||
|
||||
</details>
|
||||
|
||||
**Configuration**
|
||||
|
||||
<details>
|
||||
@@ -661,15 +786,15 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf traefik-file [OPTIONS] [SERVICES]...
|
||||
Usage: cf traefik-file [OPTIONS] [STACKS]...
|
||||
|
||||
Generate a Traefik file-provider fragment from compose Traefik labels.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --output -o PATH Write Traefik file-provider YAML to this path │
|
||||
│ (stdout if omitted) │
|
||||
│ --config -c PATH Path to config file │
|
||||
@@ -697,17 +822,24 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf refresh [OPTIONS]
|
||||
Usage: cf refresh [OPTIONS] [STACKS]...
|
||||
|
||||
Update local state from running services.
|
||||
Update local state from running stacks.
|
||||
|
||||
Discovers which services are running on which hosts, updates the state
|
||||
Discovers which stacks are running on which hosts, updates the state
|
||||
file, and captures image digests. This is a read operation - it updates
|
||||
your local state to match reality, not the other way around.
|
||||
|
||||
Without arguments: refreshes all stacks (same as --all).
|
||||
With stack names: refreshes only those stacks.
|
||||
|
||||
Use 'cf apply' to make reality match your config (stop orphans, migrate).
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all stacks │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --log-path -l PATH Path to Dockerfarm TOML log │
|
||||
│ --dry-run -n Show what would change without writing │
|
||||
@@ -736,18 +868,17 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf check [OPTIONS] [SERVICES]...
|
||||
Usage: cf check [OPTIONS] [STACKS]...
|
||||
|
||||
Validate configuration, traefik labels, mounts, and networks.
|
||||
|
||||
Without arguments: validates all services against configured hosts.
|
||||
With service arguments: validates specific services and shows host
|
||||
compatibility.
|
||||
Without arguments: validates all stacks against configured hosts.
|
||||
With stack arguments: validates specific stacks and shows host compatibility.
|
||||
|
||||
Use --local to skip SSH-based checks for faster validation.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --local Skip SSH-based checks (faster) │
|
||||
@@ -781,7 +912,7 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
|
||||
Create Docker network on hosts with consistent settings.
|
||||
|
||||
Creates an external Docker network that services can use for cross-host
|
||||
Creates an external Docker network that stacks can use for cross-host
|
||||
communication. Uses the same subnet/gateway on all hosts to ensure
|
||||
consistent networking.
|
||||
|
||||
@@ -853,6 +984,26 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- cf ssh --help -->
|
||||
<!-- echo '```' -->
|
||||
<!-- CODE:END -->
|
||||
<!-- OUTPUT:START -->
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf ssh [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
Manage SSH keys for passwordless authentication.
|
||||
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Commands ───────────────────────────────────────────────────────────────────╮
|
||||
│ keygen Generate SSH key (does not distribute to hosts). │
|
||||
│ setup Generate SSH key and distribute to all configured hosts. │
|
||||
│ status Show SSH key status and host connectivity. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
|
||||
<!-- OUTPUT:END -->
|
||||
|
||||
</details>
|
||||
|
||||
@@ -873,21 +1024,22 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf logs [OPTIONS] [SERVICES]...
|
||||
Usage: cf logs [OPTIONS] [STACKS]...
|
||||
|
||||
Show service logs.
|
||||
Show stack logs. With --service, shows logs for just that service.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --host -H TEXT Filter to services on this host │
|
||||
│ --follow -f Follow logs │
|
||||
│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 │
|
||||
│ otherwise) │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --host -H TEXT Filter to stacks on this host │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --follow -f Follow logs │
|
||||
│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 │
|
||||
│ otherwise) │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
@@ -912,22 +1064,24 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf ps [OPTIONS] [SERVICES]...
|
||||
Usage: cf ps [OPTIONS] [STACKS]...
|
||||
|
||||
Show status of services.
|
||||
Show status of stacks.
|
||||
|
||||
Without arguments: shows all services (same as --all).
|
||||
With service names: shows only those services.
|
||||
With --host: shows services on that host.
|
||||
Without arguments: shows all stacks (same as --all).
|
||||
With stack names: shows only those stacks.
|
||||
With --host: shows stacks on that host.
|
||||
With --service: filters to a specific service within the stack.
|
||||
|
||||
╭─ Arguments ──────────────────────────────────────────────────────────────────╮
|
||||
│ services [SERVICES]... Services to operate on │
|
||||
│ stacks [STACKS]... Stacks to operate on │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --all -a Run on all services │
|
||||
│ --host -H TEXT Filter to services on this host │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
│ --all -a Run on all stacks │
|
||||
│ --host -H TEXT Filter to stacks on this host │
|
||||
│ --service -s TEXT Target a specific service within the stack │
|
||||
│ --config -c PATH Path to config file │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
@@ -954,9 +1108,9 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
|
||||
Usage: cf stats [OPTIONS]
|
||||
|
||||
Show overview statistics for hosts and services.
|
||||
Show overview statistics for hosts and stacks.
|
||||
|
||||
Without --live: Shows config/state info (hosts, services, pending migrations).
|
||||
Without --live: Shows config/state info (hosts, stacks, pending migrations).
|
||||
With --live: Also queries Docker on each host for container counts.
|
||||
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
@@ -984,41 +1138,59 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
|
||||
<!-- cf web --help -->
|
||||
<!-- echo '```' -->
|
||||
<!-- CODE:END -->
|
||||
<!-- OUTPUT:START -->
|
||||
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
|
||||
```yaml
|
||||
|
||||
Usage: cf web [OPTIONS]
|
||||
|
||||
Start the web UI server.
|
||||
|
||||
╭─ Options ────────────────────────────────────────────────────────────────────╮
|
||||
│ --host -H TEXT Host to bind to [default: 0.0.0.0] │
|
||||
│ --port -p INTEGER Port to listen on [default: 8000] │
|
||||
│ --reload -r Enable auto-reload for development │
|
||||
│ --help -h Show this message and exit. │
|
||||
╰──────────────────────────────────────────────────────────────────────────────╯
|
||||
|
||||
```
|
||||
|
||||
<!-- OUTPUT:END -->
|
||||
|
||||
</details>
|
||||
|
||||
### Auto-Migration
|
||||
|
||||
When you change a service's host assignment in config and run `up`, Compose Farm automatically:
|
||||
When you change a stack's host assignment in config and run `up`, Compose Farm automatically:
|
||||
1. Checks that required mounts and networks exist on the new host (aborts if missing)
|
||||
2. Runs `down` on the old host
|
||||
3. Runs `up -d` on the new host
|
||||
4. Updates state tracking
|
||||
|
||||
Use `cf apply` to automatically reconcile all services—it finds and migrates services on wrong hosts, stops orphaned services, and starts missing services.
|
||||
Use `cf apply` to automatically reconcile all stacks—it finds and migrates stacks on wrong hosts, stops orphaned stacks, and starts missing stacks.
|
||||
|
||||
```yaml
|
||||
# Before: plex runs on server-1
|
||||
services:
|
||||
stacks:
|
||||
plex: server-1
|
||||
|
||||
# After: change to server-2, then run `cf up plex`
|
||||
services:
|
||||
stacks:
|
||||
plex: server-2 # Compose Farm will migrate automatically
|
||||
```
|
||||
|
||||
**Orphaned services**: When you remove (or comment out) a service from config, it becomes "orphaned"—tracked in state but no longer in config. Use these commands to handle orphans:
|
||||
**Orphaned stacks**: When you remove (or comment out) a stack from config, it becomes "orphaned"—tracked in state but no longer in config. Use these commands to handle orphans:
|
||||
|
||||
- `cf apply` — Migrate services AND stop orphans (the full reconcile)
|
||||
- `cf down --orphaned` — Only stop orphaned services
|
||||
- `cf apply` — Migrate stacks AND stop orphans (the full reconcile)
|
||||
- `cf down --orphaned` — Only stop orphaned stacks
|
||||
- `cf apply --dry-run` — Preview what would change before applying
|
||||
|
||||
This makes the config truly declarative: comment out a service, run `cf apply`, and it stops.
|
||||
This makes the config truly declarative: comment out a stack, run `cf apply`, and it stops.
|
||||
|
||||
## Traefik Multihost Ingress (File Provider)
|
||||
|
||||
If you run a single Traefik instance on one "front‑door" host and want it to route to
|
||||
Compose Farm services on other hosts, Compose Farm can generate a Traefik file‑provider
|
||||
Compose Farm stacks on other hosts, Compose Farm can generate a Traefik file‑provider
|
||||
fragment from your existing compose labels.
|
||||
|
||||
**How it works**
|
||||
@@ -1028,11 +1200,11 @@ fragment from your existing compose labels.
|
||||
- Labels and port specs may use `${VAR}` / `${VAR:-default}`; Compose Farm resolves these
|
||||
using the stack's `.env` file and your current environment, just like Docker Compose.
|
||||
- Publish a host port for that container (via `ports:`). The generator prefers
|
||||
host‑published ports so Traefik can reach the service across hosts; if none are found,
|
||||
host‑published ports so Traefik can reach the stack across hosts; if none are found,
|
||||
it warns and you'd need L3 reachability to container IPs.
|
||||
- If a router label doesn't specify `traefik.http.routers.<name>.service` and there's only
|
||||
one Traefik service defined on that container, Compose Farm wires the router to it.
|
||||
- `compose-farm.yaml` stays unchanged: just `hosts` and `services: service → host`.
|
||||
- `compose-farm.yaml` stays unchanged: just `hosts` and `stacks: stack → host`.
|
||||
|
||||
Example `docker-compose.yml` pattern:
|
||||
|
||||
@@ -1066,7 +1238,7 @@ providers:
|
||||
cf traefik-file --all --output /mnt/data/traefik/dynamic.d/compose-farm.yml
|
||||
```
|
||||
|
||||
Re‑run this after changing Traefik labels, moving a service to another host, or changing
|
||||
Re‑run this after changing Traefik labels, moving a stack to another host, or changing
|
||||
published ports.
|
||||
|
||||
**Auto-regeneration**
|
||||
@@ -1077,17 +1249,17 @@ add `traefik_file` to your config:
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml # auto-regenerate on up/down/restart/update
|
||||
traefik_service: traefik # skip services on same host (docker provider handles them)
|
||||
traefik_stack: traefik # skip stacks on same host (docker provider handles them)
|
||||
|
||||
hosts:
|
||||
# ...
|
||||
services:
|
||||
stacks:
|
||||
traefik: server-1 # Traefik runs here
|
||||
plex: server-2 # Services on other hosts get file-provider entries
|
||||
plex: server-2 # Stacks on other hosts get file-provider entries
|
||||
# ...
|
||||
```
|
||||
|
||||
The `traefik_service` option specifies which service runs Traefik. Services on the same host
|
||||
The `traefik_stack` option specifies which stack runs Traefik. Stacks on the same host
|
||||
are skipped in the file-provider config since Traefik's docker provider handles them directly.
|
||||
|
||||
Now `cf up plex` will update the Traefik config automatically—no separate
|
||||
@@ -1130,11 +1302,11 @@ There are many ways to run containers on multiple hosts. Here is where Compose F
|
||||
| Agentless | ✅ | ✅ | ❌ | ✅ | ❌ |
|
||||
| High availability | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
|
||||
**Docker Contexts** — You can use `docker context create remote ssh://...` and `docker compose --context remote up`. But it's manual: you must remember which host runs which service, there's no global view, no parallel execution, and no auto-migration.
|
||||
**Docker Contexts** — You can use `docker context create remote ssh://...` and `docker compose --context remote up`. But it's manual: you must remember which host runs which stack, there's no global view, no parallel execution, and no auto-migration.
|
||||
|
||||
**Kubernetes / Docker Swarm** — Full orchestration that abstracts away the hardware. But they require cluster initialization, separate control planes, and often rewriting compose files. They introduce complexity (consensus, overlay networks) unnecessary for static "pet" servers.
|
||||
|
||||
**Ansible / Terraform** — Infrastructure-as-Code tools that can SSH in and deploy containers. But they're push-based configuration management, not interactive CLIs. Great for setting up state, clumsy for day-to-day operations like `cf logs -f` or quickly restarting a service.
|
||||
**Ansible / Terraform** — Infrastructure-as-Code tools that can SSH in and deploy containers. But they're push-based configuration management, not interactive CLIs. Great for setting up state, clumsy for day-to-day operations like `cf logs -f` or quickly restarting a stack.
|
||||
|
||||
**Portainer / Coolify** — Web-based management UIs. But they're UI-first and often require agents on your servers. Compose Farm is CLI-first and agentless.
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ compose_dir: /opt/compose
|
||||
|
||||
# Optional: Auto-regenerate Traefik file-provider config after up/down/restart/update
|
||||
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
|
||||
traefik_service: traefik # Skip services on same host (docker provider handles them)
|
||||
traefik_stack: traefik # Skip stacks on same host (docker provider handles them)
|
||||
|
||||
hosts:
|
||||
# Full form with all options
|
||||
@@ -20,11 +20,11 @@ hosts:
|
||||
# Local execution (no SSH)
|
||||
local: localhost
|
||||
|
||||
services:
|
||||
# Map service names to hosts
|
||||
# Compose file expected at: {compose_dir}/{service}/compose.yaml
|
||||
stacks:
|
||||
# Map stack names to hosts
|
||||
# Compose file expected at: {compose_dir}/{stack}/compose.yaml
|
||||
traefik: server-1 # Traefik runs here
|
||||
plex: server-2 # Services on other hosts get file-provider entries
|
||||
plex: server-2 # Stacks on other hosts get file-provider entries
|
||||
jellyfin: server-2
|
||||
sonarr: server-1
|
||||
radarr: local
|
||||
grafana: server-1
|
||||
nextcloud: local
|
||||
|
||||
@@ -32,7 +32,7 @@ services:
|
||||
- SSH_AUTH_SOCK=/ssh-agent
|
||||
- CF_CONFIG=${CF_COMPOSE_DIR:-/opt/stacks}/compose-farm.yaml
|
||||
# Used to detect self-updates and run via SSH to survive container restart
|
||||
- CF_WEB_SERVICE=compose-farm
|
||||
- CF_WEB_STACK=compose-farm
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.compose-farm.rule=Host(`compose-farm.${DOMAIN}`)
|
||||
|
||||
@@ -46,13 +46,12 @@ Compose Farm follows three core principles:
|
||||
|
||||
Pydantic models for YAML configuration:
|
||||
|
||||
- **Config** - Root configuration with compose_dir, hosts, services
|
||||
- **HostConfig** - Host address and SSH user
|
||||
- **ServiceConfig** - Service-to-host mappings
|
||||
- **Config** - Root configuration with compose_dir, hosts, stacks
|
||||
- **Host** - Host address, SSH user, and port
|
||||
|
||||
Key features:
|
||||
- Validation with Pydantic
|
||||
- Multi-host service expansion (`all` → list of hosts)
|
||||
- Multi-host stack expansion (`all` → list of hosts)
|
||||
- YAML loading with sensible defaults
|
||||
|
||||
### State Tracking (`src/compose_farm/state.py`)
|
||||
@@ -62,22 +61,22 @@ Tracks deployment state in `compose-farm-state.yaml` (stored alongside the confi
|
||||
```yaml
|
||||
deployed:
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
grafana: nuc
|
||||
```
|
||||
|
||||
Used for:
|
||||
- Detecting migrations (service moved to different host)
|
||||
- Identifying orphans (services removed from config)
|
||||
- Detecting migrations (stack moved to different host)
|
||||
- Identifying orphans (stacks removed from config)
|
||||
- `cf ps` status display
|
||||
|
||||
### Operations (`src/compose_farm/operations.py`)
|
||||
|
||||
Business logic for service operations:
|
||||
Business logic for stack operations:
|
||||
|
||||
- **up** - Start service, handle migration if needed
|
||||
- **down** - Stop service
|
||||
- **up** - Start stack, handle migration if needed
|
||||
- **down** - Stop stack
|
||||
- **preflight checks** - Verify mounts, networks exist before operations
|
||||
- **discover** - Find running services on hosts
|
||||
- **discover** - Find running stacks on hosts
|
||||
- **migrate** - Down on old host, up on new host
|
||||
|
||||
### Executor (`src/compose_farm/executor.py`)
|
||||
@@ -85,8 +84,8 @@ Business logic for service operations:
|
||||
SSH and local command execution:
|
||||
|
||||
- **Hybrid SSH approach**: asyncssh for parallel streaming, native `ssh -t` for raw mode
|
||||
- **Parallel by default**: Multiple services via `asyncio.gather`
|
||||
- **Streaming output**: Real-time stdout/stderr with `[service]` prefix
|
||||
- **Parallel by default**: Multiple stacks via `asyncio.gather`
|
||||
- **Streaming output**: Real-time stdout/stderr with `[stack]` prefix
|
||||
- **Local detection**: Skips SSH when target matches local machine IP
|
||||
|
||||
### CLI (`src/compose_farm/cli/`)
|
||||
@@ -98,7 +97,7 @@ cli/
|
||||
├── app.py # Shared Typer app, version callback
|
||||
├── common.py # Shared helpers, options, progress utilities
|
||||
├── config.py # config subcommand (init, show, path, validate, edit, symlink)
|
||||
├── lifecycle.py # up, down, pull, restart, update, apply
|
||||
├── lifecycle.py # up, down, stop, pull, restart, update, apply, compose
|
||||
├── management.py # refresh, check, init-network, traefik-file
|
||||
├── monitoring.py # logs, ps, stats
|
||||
├── ssh.py # SSH key management (setup, status, keygen)
|
||||
@@ -112,7 +111,7 @@ cli/
|
||||
```
|
||||
1. Load configuration
|
||||
└─► Parse compose-farm.yaml
|
||||
└─► Validate service exists
|
||||
└─► Validate stack exists
|
||||
|
||||
2. Check state
|
||||
└─► Load state.yaml
|
||||
@@ -129,7 +128,7 @@ cli/
|
||||
└─► SSH to old host
|
||||
└─► Run: docker compose down
|
||||
|
||||
5. Start service
|
||||
5. Start stack
|
||||
└─► SSH to target host
|
||||
└─► cd /opt/compose/plex
|
||||
└─► Run: docker compose up -d
|
||||
@@ -154,7 +153,7 @@ cli/
|
||||
3. Stop orphans
|
||||
└─► For each orphan: cf down
|
||||
|
||||
4. Migrate services
|
||||
4. Migrate stacks
|
||||
└─► For each migration: down old, up new
|
||||
|
||||
5. Start missing
|
||||
@@ -176,7 +175,7 @@ async def run_command(host, command):
|
||||
return result.stdout, result.stderr
|
||||
```
|
||||
|
||||
Multiple services run concurrently via `asyncio.gather`.
|
||||
Multiple stacks run concurrently via `asyncio.gather`.
|
||||
|
||||
### Raw Mode (native ssh)
|
||||
|
||||
@@ -208,7 +207,7 @@ Location: `compose-farm-state.yaml` (stored alongside the config file)
|
||||
```yaml
|
||||
deployed:
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
grafana: nuc
|
||||
```
|
||||
|
||||
Image digests are stored separately in `dockerfarm-log.toml` (also in the config directory).
|
||||
@@ -218,8 +217,8 @@ Image digests are stored separately in `dockerfarm-log.toml` (also in the config
|
||||
```
|
||||
Config Change State Change Action
|
||||
─────────────────────────────────────────────────────
|
||||
Add service Missing cf up
|
||||
Remove service Orphaned cf down
|
||||
Add stack Missing cf up
|
||||
Remove stack Orphaned cf down
|
||||
Change host Migration down old, up new
|
||||
No change No change none (or refresh)
|
||||
```
|
||||
@@ -236,10 +235,10 @@ Updates state.yaml to match what's actually running.
|
||||
|
||||
## Compose File Discovery
|
||||
|
||||
For each service, Compose Farm looks for compose files in:
|
||||
For each stack, Compose Farm looks for compose files in:
|
||||
|
||||
```
|
||||
{compose_dir}/{service}/
|
||||
{compose_dir}/{stack}/
|
||||
├── compose.yaml # preferred
|
||||
├── compose.yml
|
||||
├── docker-compose.yml
|
||||
@@ -255,7 +254,7 @@ First match wins.
|
||||
Compose Farm parses Traefik labels from compose files:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
plex:
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
@@ -300,9 +299,9 @@ If checks fail, operation aborts with clear error.
|
||||
|
||||
### Partial Failures
|
||||
|
||||
When operating on multiple services:
|
||||
- Each service is independent
|
||||
- Failures are logged, but other services continue
|
||||
When operating on multiple stacks:
|
||||
- Each stack is independent
|
||||
- Failures are logged, but other stacks continue
|
||||
- Exit code reflects overall success/failure
|
||||
|
||||
## Performance Considerations
|
||||
@@ -313,7 +312,7 @@ Services are started/stopped in parallel:
|
||||
|
||||
```python
|
||||
await asyncio.gather(*[
|
||||
up_service(service) for service in services
|
||||
up_stack(stack) for stack in stacks
|
||||
])
|
||||
```
|
||||
|
||||
|
||||
3
docs/assets/web-console.gif
Normal file
3
docs/assets/web-console.gif
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:dac5660cfe6574857ec055fac7822f25b7c5fcb10a836b19c86142515e2fbf75
|
||||
size 1816075
|
||||
3
docs/assets/web-console.webm
Normal file
3
docs/assets/web-console.webm
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:d4efec8ef5a99f2cb31d55cd71cdbf0bb8dd0cd6281571886b7c1f8b41c3f9da
|
||||
size 1660764
|
||||
3
docs/assets/web-navigation.gif
Normal file
3
docs/assets/web-navigation.gif
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:9348dd36e79192344476d61fbbffdb122a96ecc5829fbece1818590cfc521521
|
||||
size 3373003
|
||||
3
docs/assets/web-navigation.webm
Normal file
3
docs/assets/web-navigation.webm
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:bebbf8151434ba37bf5e46566a4e8b57812944281926f579d056bdc835ca26aa
|
||||
size 2729799
|
||||
3
docs/assets/web-shell.gif
Normal file
3
docs/assets/web-shell.gif
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:3712afff6fcde00eb951264bb24d4301deb085d082b4e95ed4c1893a571938ee
|
||||
size 1528294
|
||||
3
docs/assets/web-shell.webm
Normal file
3
docs/assets/web-shell.webm
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:0b218d400836a50661c9cdcce2d2b1e285cc5fe592cb42f58aae41f3e7d60684
|
||||
size 1327413
|
||||
3
docs/assets/web-stack.gif
Normal file
3
docs/assets/web-stack.gif
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:6a232ddc1b9ddd9bf6b5d99c05153e1094be56f1952f02636ca498eb7484e096
|
||||
size 3808675
|
||||
3
docs/assets/web-stack.webm
Normal file
3
docs/assets/web-stack.webm
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:5a7c9f5f6d47074a6af135190fda6d0a1936cd7a0b04b3aa04ea7d99167a9e05
|
||||
size 3333014
|
||||
3
docs/assets/web-themes.gif
Normal file
3
docs/assets/web-themes.gif
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:66f4547ed2e83b302d795875588d9a085af76071a480f1096f2bb64344b80c42
|
||||
size 5428670
|
||||
3
docs/assets/web-themes.webm
Normal file
3
docs/assets/web-themes.webm
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:75c8cdeefbbdcab2a240821d3410539f2a2cbe0a015897f4135404c80c3ac32c
|
||||
size 6578366
|
||||
3
docs/assets/web-workflow.gif
Normal file
3
docs/assets/web-workflow.gif
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:ff2e3ca5a46397efcd5f3a595e7d3c179266cc4f3f5f528b428f5ef2a423028e
|
||||
size 12649149
|
||||
3
docs/assets/web-workflow.webm
Normal file
3
docs/assets/web-workflow.webm
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:2d739c5f77ddd9d90b609e31df620b35988081b7341fe225eb717d71a87caa88
|
||||
size 12284953
|
||||
@@ -12,7 +12,7 @@ Tips, limitations, and recommendations for using Compose Farm effectively.
|
||||
|
||||
Compose Farm moves containers between hosts but **does not provide cross-host networking**. Docker's internal DNS and networks don't span hosts.
|
||||
|
||||
**What breaks when you move a service:**
|
||||
**What breaks when you move a stack:**
|
||||
|
||||
| Feature | Works? | Why |
|
||||
|---------|--------|-----|
|
||||
@@ -29,7 +29,7 @@ Compose Farm moves containers between hosts but **does not provide cross-host ne
|
||||
- No health checks or restart policies beyond Docker's
|
||||
- No secrets management beyond Docker's
|
||||
|
||||
## Service Organization
|
||||
## Stack Organization
|
||||
|
||||
### Keep Dependencies Together
|
||||
|
||||
@@ -53,16 +53,16 @@ services:
|
||||
|
||||
```yaml
|
||||
# compose-farm.yaml
|
||||
services:
|
||||
stacks:
|
||||
myapp: nuc # All three containers stay together
|
||||
```
|
||||
|
||||
### Separate Standalone Services
|
||||
### Separate Standalone Stacks
|
||||
|
||||
Services that don't talk to other containers can be anywhere:
|
||||
Stacks whose services don't talk to other containers can be anywhere:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
# These can run on any host
|
||||
plex: nuc
|
||||
jellyfin: hp
|
||||
@@ -92,14 +92,14 @@ services:
|
||||
- "5432:5432"
|
||||
```
|
||||
|
||||
## Multi-Host Services
|
||||
## Multi-Host Stacks
|
||||
|
||||
### When to Use `all`
|
||||
|
||||
Use `all` for services that need local access to each host:
|
||||
Use `all` for stacks that need local access to each host:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
# Need Docker socket
|
||||
dozzle: all # Log viewer
|
||||
portainer-agent: all # Portainer agents
|
||||
@@ -112,10 +112,10 @@ services:
|
||||
|
||||
### Host-Specific Lists
|
||||
|
||||
For services on specific hosts only:
|
||||
For stacks on specific hosts only:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
# Only on compute nodes
|
||||
gitlab-runner: [nuc, hp]
|
||||
|
||||
@@ -146,7 +146,7 @@ Before migrating, Compose Farm verifies:
|
||||
# 1. Preview changes
|
||||
cf apply --dry-run
|
||||
|
||||
# 2. Verify target host can run the service
|
||||
# 2. Verify target host can run the stack
|
||||
cf check myservice
|
||||
|
||||
# 3. Apply changes
|
||||
@@ -221,7 +221,7 @@ Keep config and data separate:
|
||||
|
||||
/opt/appdata/ # Local: per-host app data
|
||||
├── plex/
|
||||
└── sonarr/
|
||||
└── grafana/
|
||||
```
|
||||
|
||||
## Performance
|
||||
@@ -235,7 +235,7 @@ Compose Farm runs operations in parallel. For large deployments:
|
||||
cf up --all
|
||||
|
||||
# Avoid: sequential updates when possible
|
||||
for svc in plex sonarr radarr; do
|
||||
for svc in plex grafana nextcloud; do
|
||||
cf update $svc
|
||||
done
|
||||
```
|
||||
@@ -249,28 +249,28 @@ SSH connections are reused within a command. For many operations:
|
||||
cf update --all
|
||||
|
||||
# Multiple commands, multiple connections (slower)
|
||||
cf update plex && cf update sonarr && cf update radarr
|
||||
cf update plex && cf update grafana && cf update nextcloud
|
||||
```
|
||||
|
||||
## Traefik Setup
|
||||
|
||||
### Service Placement
|
||||
### Stack Placement
|
||||
|
||||
Put Traefik on a reliable host:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
traefik: nuc # Primary host with good uptime
|
||||
```
|
||||
|
||||
### Same-Host Services
|
||||
### Same-Host Stacks
|
||||
|
||||
Services on the same host as Traefik use Docker provider:
|
||||
Stacks on the same host as Traefik use Docker provider:
|
||||
|
||||
```yaml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
|
||||
services:
|
||||
stacks:
|
||||
traefik: nuc
|
||||
portainer: nuc # Docker provider handles this
|
||||
plex: hp # File provider handles this
|
||||
@@ -297,7 +297,7 @@ http:
|
||||
|------|----------|--------|
|
||||
| Compose Farm config | `~/.config/compose-farm/` | Git or copy |
|
||||
| Compose files | `/opt/compose/` | Git |
|
||||
| State file | `~/.config/compose-farm/state.yaml` | Optional (can refresh) |
|
||||
| State file | `~/.config/compose-farm/compose-farm-state.yaml` | Optional (can refresh) |
|
||||
| App data | `/opt/appdata/` | Backup solution |
|
||||
|
||||
### Disaster Recovery
|
||||
@@ -317,7 +317,7 @@ cf apply
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Service won't start:**
|
||||
**Stack won't start:**
|
||||
```bash
|
||||
cf check myservice # Verify mounts/networks
|
||||
cf logs myservice # Check container logs
|
||||
@@ -341,15 +341,6 @@ cf ssh status # Check key status
|
||||
cf ssh setup # Re-setup keys
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
For more verbose output:
|
||||
|
||||
```bash
|
||||
# See exact commands being run
|
||||
cf --verbose up myservice
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### SSH Keys
|
||||
@@ -374,7 +365,7 @@ cf --verbose up myservice
|
||||
|
||||
| Scenario | Solution |
|
||||
|----------|----------|
|
||||
| 2-10 hosts, static services | **Compose Farm** |
|
||||
| 2-10 hosts, static stacks | **Compose Farm** |
|
||||
| Cross-host container networking | Docker Swarm |
|
||||
| Auto-scaling, self-healing | Kubernetes |
|
||||
| Infrastructure as code | Ansible + Compose Farm |
|
||||
|
||||
248
docs/commands.md
248
docs/commands.md
@@ -11,13 +11,15 @@ The Compose Farm CLI is available as both `compose-farm` and the shorter alias `
|
||||
| Category | Command | Description |
|
||||
|----------|---------|-------------|
|
||||
| **Lifecycle** | `apply` | Make reality match config |
|
||||
| | `up` | Start services |
|
||||
| | `down` | Stop services |
|
||||
| | `restart` | Restart services (down + up) |
|
||||
| | `update` | Update services (pull + down + up) |
|
||||
| | `up` | Start stacks |
|
||||
| | `down` | Stop stacks |
|
||||
| | `stop` | Stop services without removing containers |
|
||||
| | `restart` | Restart stacks (down + up) |
|
||||
| | `update` | Update stacks (pull + build + down + up) |
|
||||
| | `pull` | Pull latest images |
|
||||
| **Monitoring** | `ps` | Show service status |
|
||||
| | `logs` | Show service logs |
|
||||
| | `compose` | Run any docker compose command |
|
||||
| **Monitoring** | `ps` | Show stack status |
|
||||
| | `logs` | Show stack logs |
|
||||
| | `stats` | Show overview statistics |
|
||||
| **Configuration** | `check` | Validate config and mounts |
|
||||
| | `refresh` | Sync state from reality |
|
||||
@@ -43,7 +45,7 @@ cf --help, -h # Show help
|
||||
Make reality match your configuration. The primary reconciliation command.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="assets/apply.webm" type="video/webm">
|
||||
<source src="/assets/apply.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
```bash
|
||||
@@ -55,15 +57,15 @@ cf apply [OPTIONS]
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--dry-run, -n` | Preview changes without executing |
|
||||
| `--no-orphans` | Skip stopping orphaned services |
|
||||
| `--full, -f` | Also refresh running services |
|
||||
| `--no-orphans` | Skip stopping orphaned stacks |
|
||||
| `--full, -f` | Also refresh running stacks |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**What it does:**
|
||||
|
||||
1. Stops orphaned services (in state but removed from config)
|
||||
2. Migrates services on wrong host
|
||||
3. Starts missing services (in config but not running)
|
||||
1. Stops orphaned stacks (in state but removed from config)
|
||||
2. Migrates stacks on wrong host
|
||||
3. Starts missing stacks (in config but not running)
|
||||
|
||||
**Examples:**
|
||||
|
||||
@@ -77,7 +79,7 @@ cf apply
|
||||
# Only start/migrate, don't stop orphans
|
||||
cf apply --no-orphans
|
||||
|
||||
# Also refresh all running services
|
||||
# Also refresh all running stacks
|
||||
cf apply --full
|
||||
```
|
||||
|
||||
@@ -85,36 +87,40 @@ cf apply --full
|
||||
|
||||
### cf up
|
||||
|
||||
Start services. Auto-migrates if host assignment changed.
|
||||
Start stacks. Auto-migrates if host assignment changed.
|
||||
|
||||
```bash
|
||||
cf up [OPTIONS] [SERVICES]...
|
||||
cf up [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Start all services |
|
||||
| `--host, -H TEXT` | Filter to services on this host |
|
||||
| `--all, -a` | Start all stacks |
|
||||
| `--host, -H TEXT` | Filter to stacks on this host |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Start specific services
|
||||
cf up plex sonarr
|
||||
# Start specific stacks
|
||||
cf up plex grafana
|
||||
|
||||
# Start all services
|
||||
# Start all stacks
|
||||
cf up --all
|
||||
|
||||
# Start all services on a specific host
|
||||
# Start all stacks on a specific host
|
||||
cf up --all --host nuc
|
||||
|
||||
# Start a specific service within a stack
|
||||
cf up immich --service database
|
||||
```
|
||||
|
||||
**Auto-migration:**
|
||||
|
||||
If you change a service's host in config and run `cf up`:
|
||||
If you change a stack's host in config and run `cf up`:
|
||||
|
||||
1. Verifies mounts/networks exist on new host
|
||||
2. Runs `down` on old host
|
||||
@@ -125,52 +131,84 @@ If you change a service's host in config and run `cf up`:
|
||||
|
||||
### cf down
|
||||
|
||||
Stop services.
|
||||
Stop stacks.
|
||||
|
||||
```bash
|
||||
cf down [OPTIONS] [SERVICES]...
|
||||
cf down [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Stop all services |
|
||||
| `--orphaned` | Stop orphaned services only |
|
||||
| `--host, -H TEXT` | Filter to services on this host |
|
||||
| `--all, -a` | Stop all stacks |
|
||||
| `--orphaned` | Stop orphaned stacks only |
|
||||
| `--host, -H TEXT` | Filter to stacks on this host |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Stop specific services
|
||||
# Stop specific stacks
|
||||
cf down plex
|
||||
|
||||
# Stop all services
|
||||
# Stop all stacks
|
||||
cf down --all
|
||||
|
||||
# Stop services removed from config
|
||||
# Stop stacks removed from config
|
||||
cf down --orphaned
|
||||
|
||||
# Stop all services on a host
|
||||
# Stop all stacks on a host
|
||||
cf down --all --host nuc
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### cf stop
|
||||
|
||||
Stop services without removing containers.
|
||||
|
||||
```bash
|
||||
cf stop [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Stop all stacks |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Stop specific stacks
|
||||
cf stop plex
|
||||
|
||||
# Stop all stacks
|
||||
cf stop --all
|
||||
|
||||
# Stop a specific service within a stack
|
||||
cf stop immich --service database
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### cf restart
|
||||
|
||||
Restart services (down + up).
|
||||
Restart stacks (down + up). With `--service`, restarts just that service.
|
||||
|
||||
```bash
|
||||
cf restart [OPTIONS] [SERVICES]...
|
||||
cf restart [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Restart all services |
|
||||
| `--all, -a` | Restart all stacks |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
@@ -178,37 +216,44 @@ cf restart [OPTIONS] [SERVICES]...
|
||||
```bash
|
||||
cf restart plex
|
||||
cf restart --all
|
||||
|
||||
# Restart a specific service
|
||||
cf restart immich --service database
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### cf update
|
||||
|
||||
Update services (pull + build + down + up).
|
||||
Update stacks (pull + build + down + up). With `--service`, updates just that service.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="assets/update.webm" type="video/webm">
|
||||
<source src="/assets/update.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
```bash
|
||||
cf update [OPTIONS] [SERVICES]...
|
||||
cf update [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Update all services |
|
||||
| `--all, -a` | Update all stacks |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Update specific service
|
||||
# Update specific stack
|
||||
cf update plex
|
||||
|
||||
# Update all services
|
||||
# Update all stacks
|
||||
cf update --all
|
||||
|
||||
# Update a specific service
|
||||
cf update immich --service database
|
||||
```
|
||||
|
||||
---
|
||||
@@ -218,14 +263,15 @@ cf update --all
|
||||
Pull latest images.
|
||||
|
||||
```bash
|
||||
cf pull [OPTIONS] [SERVICES]...
|
||||
cf pull [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Pull for all services |
|
||||
| `--all, -a` | Pull for all stacks |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
@@ -233,6 +279,56 @@ cf pull [OPTIONS] [SERVICES]...
|
||||
```bash
|
||||
cf pull plex
|
||||
cf pull --all
|
||||
|
||||
# Pull a specific service
|
||||
cf pull immich --service database
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### cf compose
|
||||
|
||||
Run any docker compose command on a stack. This is a passthrough to docker compose for commands not wrapped by cf.
|
||||
|
||||
```bash
|
||||
cf compose [OPTIONS] STACK COMMAND [ARGS]...
|
||||
```
|
||||
|
||||
**Arguments:**
|
||||
|
||||
| Argument | Description |
|
||||
|----------|-------------|
|
||||
| `STACK` | Stack to operate on (use `.` for current dir) |
|
||||
| `COMMAND` | Docker compose command to run |
|
||||
| `ARGS` | Additional arguments passed to docker compose |
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--host, -H TEXT` | Filter to stacks on this host (required for multi-host stacks) |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Show docker compose help
|
||||
cf compose mystack --help
|
||||
|
||||
# View running processes
|
||||
cf compose mystack top
|
||||
|
||||
# List images
|
||||
cf compose mystack images
|
||||
|
||||
# Interactive shell
|
||||
cf compose mystack exec web bash
|
||||
|
||||
# View parsed config
|
||||
cf compose mystack config
|
||||
|
||||
# Use current directory as stack
|
||||
cf compose . ps
|
||||
```
|
||||
|
||||
---
|
||||
@@ -241,53 +337,58 @@ cf pull --all
|
||||
|
||||
### cf ps
|
||||
|
||||
Show status of services.
|
||||
Show status of stacks.
|
||||
|
||||
```bash
|
||||
cf ps [OPTIONS] [SERVICES]...
|
||||
cf ps [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Show all services (default) |
|
||||
| `--host, -H TEXT` | Filter to services on this host |
|
||||
| `--all, -a` | Show all stacks (default) |
|
||||
| `--host, -H TEXT` | Filter to stacks on this host |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Show all services
|
||||
# Show all stacks
|
||||
cf ps
|
||||
|
||||
# Show specific services
|
||||
cf ps plex sonarr
|
||||
# Show specific stacks
|
||||
cf ps plex grafana
|
||||
|
||||
# Filter by host
|
||||
cf ps --host nuc
|
||||
|
||||
# Show status of a specific service
|
||||
cf ps immich --service database
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### cf logs
|
||||
|
||||
Show service logs.
|
||||
Show stack logs.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="assets/logs.webm" type="video/webm">
|
||||
<source src="/assets/logs.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
```bash
|
||||
cf logs [OPTIONS] [SERVICES]...
|
||||
cf logs [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Show logs for all services |
|
||||
| `--host, -H TEXT` | Filter to services on this host |
|
||||
| `--all, -a` | Show logs for all stacks |
|
||||
| `--host, -H TEXT` | Filter to stacks on this host |
|
||||
| `--service, -s TEXT` | Target a specific service within the stack |
|
||||
| `--follow, -f` | Follow logs (live stream) |
|
||||
| `--tail, -n INTEGER` | Number of lines (default: 20 for --all, 100 otherwise) |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
@@ -301,11 +402,14 @@ cf logs plex
|
||||
# Follow logs
|
||||
cf logs -f plex
|
||||
|
||||
# Show last 50 lines of multiple services
|
||||
cf logs -n 50 plex sonarr
|
||||
# Show last 50 lines of multiple stacks
|
||||
cf logs -n 50 plex grafana
|
||||
|
||||
# Show last 20 lines of all services
|
||||
# Show last 20 lines of all stacks
|
||||
cf logs --all
|
||||
|
||||
# Show logs for a specific service
|
||||
cf logs immich --service database
|
||||
```
|
||||
|
||||
---
|
||||
@@ -344,7 +448,7 @@ cf stats --live
|
||||
Validate configuration, mounts, and networks.
|
||||
|
||||
```bash
|
||||
cf check [OPTIONS] [SERVICES]...
|
||||
cf check [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
@@ -363,7 +467,7 @@ cf check
|
||||
# Fast local-only validation
|
||||
cf check --local
|
||||
|
||||
# Check specific service and show host compatibility
|
||||
# Check specific stack and show host compatibility
|
||||
cf check jellyfin
|
||||
```
|
||||
|
||||
@@ -371,28 +475,34 @@ cf check jellyfin
|
||||
|
||||
### cf refresh
|
||||
|
||||
Update local state from running services.
|
||||
Update local state from running stacks.
|
||||
|
||||
```bash
|
||||
cf refresh [OPTIONS]
|
||||
cf refresh [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Refresh all stacks |
|
||||
| `--dry-run, -n` | Show what would change |
|
||||
| `--log-path, -l PATH` | Path to Dockerfarm TOML log |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
Without arguments, refreshes all stacks (same as `--all`). With stack names, refreshes only those stacks.
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Sync state with reality
|
||||
# Sync state with reality (all stacks)
|
||||
cf refresh
|
||||
|
||||
# Preview changes
|
||||
cf refresh --dry-run
|
||||
|
||||
# Refresh specific stacks only
|
||||
cf refresh plex sonarr
|
||||
```
|
||||
|
||||
---
|
||||
@@ -434,14 +544,14 @@ cf init-network -n production -s 10.0.0.0/16 -g 10.0.0.1
|
||||
Generate Traefik file-provider config from compose labels.
|
||||
|
||||
```bash
|
||||
cf traefik-file [OPTIONS] [SERVICES]...
|
||||
cf traefik-file [OPTIONS] [STACKS]...
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--all, -a` | Generate for all services |
|
||||
| `--all, -a` | Generate for all stacks |
|
||||
| `--output, -o PATH` | Output file (stdout if omitted) |
|
||||
| `--config, -c PATH` | Path to config file |
|
||||
|
||||
@@ -454,7 +564,7 @@ cf traefik-file --all
|
||||
# Write to file
|
||||
cf traefik-file --all -o /opt/traefik/dynamic.d/cf.yml
|
||||
|
||||
# Specific services
|
||||
# Specific stacks
|
||||
cf traefik-file plex jellyfin -o /opt/traefik/cf.yml
|
||||
```
|
||||
|
||||
@@ -604,7 +714,7 @@ cf web --reload
|
||||
cf ps
|
||||
cf stats --live
|
||||
|
||||
# Update a specific service
|
||||
# Update a specific stack
|
||||
cf update plex
|
||||
|
||||
# View logs
|
||||
@@ -614,7 +724,7 @@ cf logs -f plex
|
||||
### Maintenance
|
||||
|
||||
```bash
|
||||
# Update all services
|
||||
# Update all stacks
|
||||
cf update --all
|
||||
|
||||
# Refresh state after manual changes
|
||||
@@ -627,7 +737,7 @@ cf refresh
|
||||
# Preview what would change
|
||||
cf apply --dry-run
|
||||
|
||||
# Move a service: edit config, then
|
||||
# Move a stack: edit config, then
|
||||
cf up plex # auto-migrates
|
||||
|
||||
# Or reconcile everything
|
||||
@@ -641,7 +751,7 @@ cf apply
|
||||
cf check --local
|
||||
cf check
|
||||
|
||||
# Check specific service
|
||||
# Check specific stack
|
||||
cf check jellyfin
|
||||
|
||||
# Sync state
|
||||
|
||||
@@ -4,7 +4,7 @@ icon: lucide/settings
|
||||
|
||||
# Configuration Reference
|
||||
|
||||
Compose Farm uses a YAML configuration file to define hosts and service assignments.
|
||||
Compose Farm uses a YAML configuration file to define hosts and stack assignments.
|
||||
|
||||
## Config File Location
|
||||
|
||||
@@ -27,15 +27,34 @@ Or set the environment variable:
|
||||
export CF_CONFIG=/path/to/config.yaml
|
||||
```
|
||||
|
||||
## Full Example
|
||||
## Examples
|
||||
|
||||
### Single host (local-only)
|
||||
|
||||
```yaml
|
||||
# Required: directory containing compose files
|
||||
compose_dir: /opt/stacks
|
||||
|
||||
# Define local host
|
||||
hosts:
|
||||
local: localhost
|
||||
|
||||
# Map stacks to the local host
|
||||
stacks:
|
||||
plex: local
|
||||
grafana: local
|
||||
nextcloud: local
|
||||
```
|
||||
|
||||
### Multi-host (full example)
|
||||
|
||||
```yaml
|
||||
# Required: directory containing compose files (same path on all hosts)
|
||||
compose_dir: /opt/compose
|
||||
|
||||
# Optional: auto-regenerate Traefik config
|
||||
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
|
||||
# Define Docker hosts
|
||||
hosts:
|
||||
@@ -45,17 +64,15 @@ hosts:
|
||||
hp:
|
||||
address: 192.168.1.11
|
||||
user: admin
|
||||
local: localhost
|
||||
|
||||
# Map services to hosts
|
||||
services:
|
||||
# Single-host services
|
||||
# Map stacks to hosts
|
||||
stacks:
|
||||
# Single-host stacks
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
radarr: hp
|
||||
jellyfin: local
|
||||
grafana: nuc
|
||||
nextcloud: hp
|
||||
|
||||
# Multi-host services
|
||||
# Multi-host stacks
|
||||
dozzle: all # Run on ALL hosts
|
||||
node-exporter: [nuc, hp] # Run on specific hosts
|
||||
```
|
||||
@@ -64,7 +81,7 @@ services:
|
||||
|
||||
### compose_dir (required)
|
||||
|
||||
Directory containing your compose service folders. Must be the same path on all hosts.
|
||||
Directory containing your compose stack folders. Must be the same path on all hosts.
|
||||
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
@@ -77,7 +94,7 @@ compose_dir: /opt/compose
|
||||
├── plex/
|
||||
│ ├── docker-compose.yml # or compose.yaml
|
||||
│ └── .env # optional environment file
|
||||
├── sonarr/
|
||||
├── grafana/
|
||||
│ └── docker-compose.yml
|
||||
└── ...
|
||||
```
|
||||
@@ -96,12 +113,12 @@ Path to auto-generated Traefik file-provider config. When set, Compose Farm rege
|
||||
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
|
||||
```
|
||||
|
||||
### traefik_service
|
||||
### traefik_stack
|
||||
|
||||
Service name running Traefik. Services on the same host are skipped in file-provider config (Traefik's docker provider handles them).
|
||||
Stack name running Traefik. Stacks on the same host are skipped in file-provider config (Traefik's docker provider handles them).
|
||||
|
||||
```yaml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
```
|
||||
|
||||
## Hosts Configuration
|
||||
@@ -137,14 +154,14 @@ hosts:
|
||||
|
||||
### Localhost
|
||||
|
||||
For services running on the same machine where you invoke Compose Farm:
|
||||
For stacks running on the same machine where you invoke Compose Farm:
|
||||
|
||||
```yaml
|
||||
hosts:
|
||||
local: localhost
|
||||
```
|
||||
|
||||
No SSH is used for localhost services.
|
||||
No SSH is used for localhost stacks.
|
||||
|
||||
### Multiple Hosts
|
||||
|
||||
@@ -161,23 +178,23 @@ hosts:
|
||||
local: localhost
|
||||
```
|
||||
|
||||
## Services Configuration
|
||||
## Stacks Configuration
|
||||
|
||||
### Single-Host Service
|
||||
### Single-Host Stack
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
radarr: hp
|
||||
grafana: nuc
|
||||
nextcloud: hp
|
||||
```
|
||||
|
||||
### Multi-Host Service
|
||||
### Multi-Host Stack
|
||||
|
||||
For services that need to run on every host (e.g., log shippers, monitoring agents):
|
||||
For stacks that need to run on every host (e.g., log shippers, monitoring agents):
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
# Run on ALL configured hosts
|
||||
dozzle: all
|
||||
promtail: all
|
||||
@@ -186,19 +203,19 @@ services:
|
||||
node-exporter: [nuc, hp, truenas]
|
||||
```
|
||||
|
||||
**Common multi-host services:**
|
||||
**Common multi-host stacks:**
|
||||
- **Dozzle** - Docker log viewer (needs local socket)
|
||||
- **Promtail/Alloy** - Log shipping (needs local socket)
|
||||
- **node-exporter** - Host metrics (needs /proc, /sys)
|
||||
- **AutoKuma** - Uptime Kuma monitors (needs local socket)
|
||||
|
||||
### Service Names
|
||||
### Stack Names
|
||||
|
||||
Service names must match directory names in `compose_dir`:
|
||||
Stack names must match directory names in `compose_dir`:
|
||||
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
services:
|
||||
stacks:
|
||||
plex: nuc # expects /opt/compose/plex/docker-compose.yml
|
||||
my-app: hp # expects /opt/compose/my-app/docker-compose.yml
|
||||
```
|
||||
@@ -212,10 +229,10 @@ For example, if your config is at `~/.config/compose-farm/compose-farm.yaml`, th
|
||||
```yaml
|
||||
deployed:
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
grafana: nuc
|
||||
```
|
||||
|
||||
This file records which services are deployed and on which host.
|
||||
This file records which stacks are deployed and on which host.
|
||||
|
||||
**Don't edit manually.** Use `cf refresh` to sync state with reality.
|
||||
|
||||
@@ -237,7 +254,7 @@ Compose Farm runs `docker compose` which handles `.env` automatically.
|
||||
|
||||
When generating Traefik config, Compose Farm resolves `${VAR}` and `${VAR:-default}` from:
|
||||
|
||||
1. The service's `.env` file
|
||||
1. The stack's `.env` file
|
||||
2. Current environment
|
||||
|
||||
## Config Commands
|
||||
@@ -303,7 +320,7 @@ cf check --local
|
||||
|
||||
Checks:
|
||||
- Config syntax
|
||||
- Service-to-host mappings
|
||||
- Stack-to-host mappings
|
||||
- Compose file existence
|
||||
|
||||
### Full Validation
|
||||
@@ -318,13 +335,13 @@ Additional SSH-based checks:
|
||||
- Docker network existence
|
||||
- Traefik label validation
|
||||
|
||||
### Service-Specific Check
|
||||
### Stack-Specific Check
|
||||
|
||||
```bash
|
||||
cf check jellyfin
|
||||
```
|
||||
|
||||
Shows which hosts can run the service (have required mounts/networks).
|
||||
Shows which hosts can run the stack (have required mounts/networks).
|
||||
|
||||
## Example Configurations
|
||||
|
||||
@@ -336,7 +353,7 @@ compose_dir: /opt/compose
|
||||
hosts:
|
||||
server: 192.168.1.10
|
||||
|
||||
services:
|
||||
stacks:
|
||||
myapp: server
|
||||
```
|
||||
|
||||
@@ -353,11 +370,11 @@ hosts:
|
||||
address: 192.168.1.100
|
||||
user: admin
|
||||
|
||||
services:
|
||||
stacks:
|
||||
# Media
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
radarr: nuc
|
||||
jellyfin: nuc
|
||||
immich: nuc
|
||||
|
||||
# Infrastructure
|
||||
traefik: nuc
|
||||
@@ -371,9 +388,8 @@ services:
|
||||
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
network: production
|
||||
traefik_file: /opt/traefik/dynamic.d/cf.yml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
|
||||
hosts:
|
||||
web-1:
|
||||
@@ -386,7 +402,7 @@ hosts:
|
||||
address: 10.0.1.20
|
||||
user: deploy
|
||||
|
||||
services:
|
||||
stacks:
|
||||
# Load balanced
|
||||
api: [web-1, web-2]
|
||||
|
||||
|
||||
@@ -1,26 +1,17 @@
|
||||
# Terminal Demos
|
||||
# Demo Recordings
|
||||
|
||||
[VHS](https://github.com/charmbracelet/vhs) tape files for recording terminal demos.
|
||||
Demo recording infrastructure for Compose Farm documentation.
|
||||
|
||||
## Demos
|
||||
## Structure
|
||||
|
||||
| File | Shows |
|
||||
|------|-------|
|
||||
| `install.tape` | Installing with `uv tool install` |
|
||||
| `quickstart.tape` | `cf ps`, `cf up`, `cf logs` |
|
||||
| `logs.tape` | Viewing logs |
|
||||
| `update.tape` | `cf update` |
|
||||
| `migration.tape` | Service migration |
|
||||
| `apply.tape` | `cf apply` |
|
||||
|
||||
## Recording
|
||||
|
||||
```bash
|
||||
# Record all demos (outputs to docs/assets/)
|
||||
./docs/demos/record.sh
|
||||
|
||||
# Single demo
|
||||
cd /opt/stacks && vhs /path/to/docs/demos/quickstart.tape
|
||||
```
|
||||
docs/demos/
|
||||
├── cli/ # VHS-based CLI terminal recordings
|
||||
└── web/ # Playwright-based web UI recordings
|
||||
```
|
||||
|
||||
Output files (GIF + WebM) are tracked with Git LFS.
|
||||
## Output
|
||||
|
||||
All recordings output to `docs/assets/` as WebM (primary) and GIF (fallback).
|
||||
|
||||
See subdirectory READMEs for usage.
|
||||
|
||||
32
docs/demos/cli/README.md
Normal file
32
docs/demos/cli/README.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# CLI Demo Recordings
|
||||
|
||||
VHS-based terminal demo recordings for Compose Farm CLI.
|
||||
|
||||
## Requirements
|
||||
|
||||
- [VHS](https://github.com/charmbracelet/vhs): `go install github.com/charmbracelet/vhs@latest`
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Record all demos
|
||||
./docs/demos/cli/record.sh
|
||||
|
||||
# Record single demo
|
||||
cd /opt/stacks && vhs docs/demos/cli/quickstart.tape
|
||||
```
|
||||
|
||||
## Demos
|
||||
|
||||
| Tape | Description |
|
||||
|------|-------------|
|
||||
| `install.tape` | Installing with `uv tool install` |
|
||||
| `quickstart.tape` | `cf ps`, `cf up`, `cf logs` |
|
||||
| `logs.tape` | Viewing logs |
|
||||
| `update.tape` | `cf update` |
|
||||
| `migration.tape` | Service migration |
|
||||
| `apply.tape` | `cf apply` |
|
||||
|
||||
## Output
|
||||
|
||||
GIF and WebM files saved to `docs/assets/`.
|
||||
@@ -1,5 +1,5 @@
|
||||
# Logs Demo
|
||||
# Shows viewing service logs
|
||||
# Shows viewing stack logs
|
||||
|
||||
Output docs/assets/logs.gif
|
||||
Output docs/assets/logs.webm
|
||||
@@ -1,5 +1,5 @@
|
||||
# Migration Demo
|
||||
# Shows automatic service migration when host changes
|
||||
# Shows automatic stack migration when host changes
|
||||
|
||||
Output docs/assets/migration.gif
|
||||
Output docs/assets/migration.webm
|
||||
@@ -25,7 +25,7 @@ Sleep 1s
|
||||
|
||||
Type "nvim /opt/stacks/compose-farm.yaml"
|
||||
Enter
|
||||
Wait+Screen /services:/
|
||||
Wait+Screen /stacks:/
|
||||
|
||||
# Search for audiobookshelf
|
||||
Type "/audiobookshelf"
|
||||
@@ -13,7 +13,7 @@ Set FontFamily "FiraCode Nerd Font"
|
||||
Set TypingSpeed 50ms
|
||||
Env BAT_PAGING "always"
|
||||
|
||||
Type "# Config is just: service host"
|
||||
Type "# Config is just: stack host"
|
||||
Enter
|
||||
Sleep 500ms
|
||||
|
||||
@@ -27,7 +27,7 @@ Sleep 3s
|
||||
Type "q"
|
||||
Sleep 500ms
|
||||
|
||||
Type "# Then map each service to a host"
|
||||
Type "# Then map each stack to a host"
|
||||
Enter
|
||||
Sleep 500ms
|
||||
|
||||
@@ -37,7 +37,7 @@ Sleep 3s
|
||||
Type "q"
|
||||
Sleep 500ms
|
||||
|
||||
Type "# Check service status"
|
||||
Type "# Check stack status"
|
||||
Enter
|
||||
Sleep 500ms
|
||||
|
||||
@@ -45,7 +45,7 @@ Type "cf ps immich"
|
||||
Enter
|
||||
Wait+Screen /PORTS/
|
||||
|
||||
Type "# Start a service"
|
||||
Type "# Start a stack"
|
||||
Enter
|
||||
Sleep 500ms
|
||||
|
||||
@@ -5,7 +5,8 @@
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
DOCS_DIR="$(dirname "$SCRIPT_DIR")"
|
||||
DEMOS_DIR="$(dirname "$SCRIPT_DIR")"
|
||||
DOCS_DIR="$(dirname "$DEMOS_DIR")"
|
||||
REPO_DIR="$(dirname "$DOCS_DIR")"
|
||||
OUTPUT_DIR="$DOCS_DIR/assets"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Update Demo
|
||||
# Shows updating services (pull + down + up)
|
||||
# Shows updating stacks (pull + build + down + up)
|
||||
|
||||
Output docs/assets/update.gif
|
||||
Output docs/assets/update.webm
|
||||
@@ -11,7 +11,7 @@ Set Height 500
|
||||
Set Theme "Catppuccin Mocha"
|
||||
Set TypingSpeed 50ms
|
||||
|
||||
Type "# Update a single service"
|
||||
Type "# Update a single stack"
|
||||
Enter
|
||||
Sleep 500ms
|
||||
|
||||
45
docs/demos/web/README.md
Normal file
45
docs/demos/web/README.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# Web UI Demo Recordings
|
||||
|
||||
Playwright-based demo recording for Compose Farm web UI.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Chromium: `playwright install chromium`
|
||||
- ffmpeg: `apt install ffmpeg` or `brew install ffmpeg`
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Record all demos
|
||||
python docs/demos/web/record.py
|
||||
|
||||
# Record specific demo
|
||||
python docs/demos/web/record.py navigation
|
||||
```
|
||||
|
||||
## Demos
|
||||
|
||||
| Demo | Description |
|
||||
|------|-------------|
|
||||
| `navigation` | Command palette fuzzy search and navigation |
|
||||
| `stack` | Stack restart/logs via command palette |
|
||||
| `themes` | Theme switching with arrow key preview |
|
||||
| `workflow` | Full workflow: filter, navigate, logs, themes |
|
||||
| `console` | Console terminal running cf commands |
|
||||
| `shell` | Container shell exec with top |
|
||||
|
||||
## Output
|
||||
|
||||
WebM and GIF files saved to `docs/assets/web-{demo}.{webm,gif}`.
|
||||
|
||||
## Files
|
||||
|
||||
- `record.py` - Orchestration script
|
||||
- `conftest.py` - Playwright fixtures, helper functions
|
||||
- `demo_*.py` - Individual demo scripts
|
||||
|
||||
## Notes
|
||||
|
||||
- Uses real config at `/opt/stacks/compose-farm.yaml`
|
||||
- Adjust `pause(page, ms)` calls to control timing
|
||||
- Viewport: 1280x720
|
||||
1
docs/demos/web/__init__.py
Normal file
1
docs/demos/web/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Web UI demo recording scripts."""
|
||||
224
docs/demos/web/conftest.py
Normal file
224
docs/demos/web/conftest.py
Normal file
@@ -0,0 +1,224 @@
|
||||
"""Shared fixtures for web UI demo recordings.
|
||||
|
||||
Based on tests/web/test_htmx_browser.py patterns for consistency.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
import uvicorn
|
||||
|
||||
from compose_farm.config import Config as CFConfig
|
||||
from compose_farm.config import load_config
|
||||
from compose_farm.state import load_state as _original_load_state
|
||||
from compose_farm.web.app import create_app
|
||||
from compose_farm.web.cdn import CDN_ASSETS, ensure_vendor_cache
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Generator
|
||||
|
||||
from playwright.sync_api import BrowserContext, Page, Route
|
||||
|
||||
# Stacks to exclude from demo recordings (exact match)
|
||||
DEMO_EXCLUDE_STACKS = {"arr"}
|
||||
|
||||
|
||||
def _get_filtered_config() -> CFConfig:
|
||||
"""Load config but filter out excluded stacks."""
|
||||
config = load_config()
|
||||
filtered_stacks = {
|
||||
name: host for name, host in config.stacks.items() if name not in DEMO_EXCLUDE_STACKS
|
||||
}
|
||||
return CFConfig(
|
||||
compose_dir=config.compose_dir,
|
||||
hosts=config.hosts,
|
||||
stacks=filtered_stacks,
|
||||
traefik_file=config.traefik_file,
|
||||
traefik_stack=config.traefik_stack,
|
||||
config_path=config.config_path,
|
||||
)
|
||||
|
||||
|
||||
def _get_filtered_state(config: CFConfig) -> dict[str, str | list[str]]:
|
||||
"""Load state but filter out excluded stacks."""
|
||||
state = _original_load_state(config)
|
||||
return {name: host for name, host in state.items() if name not in DEMO_EXCLUDE_STACKS}
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def vendor_cache(request: pytest.FixtureRequest) -> Path:
|
||||
"""Download CDN assets once and cache to disk for faster recordings."""
|
||||
cache_dir = Path(str(request.config.rootdir)) / ".pytest_cache" / "vendor"
|
||||
return ensure_vendor_cache(cache_dir)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def browser_type_launch_args() -> dict[str, str]:
|
||||
"""Configure Playwright to use system Chromium if available."""
|
||||
for name in ["chromium", "chromium-browser", "google-chrome", "chrome"]:
|
||||
path = shutil.which(name)
|
||||
if path:
|
||||
return {"executable_path": path}
|
||||
return {}
|
||||
|
||||
|
||||
# Path to real compose-farm config
|
||||
REAL_CONFIG_PATH = Path("/opt/stacks/compose-farm.yaml")
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def server_url() -> Generator[str, None, None]:
|
||||
"""Start demo server using real config (with filtered stacks) and return URL."""
|
||||
os.environ["CF_CONFIG"] = str(REAL_CONFIG_PATH)
|
||||
|
||||
# Patch at source module level so all callers get filtered versions
|
||||
patches = [
|
||||
# Patch load_state at source - all functions calling it get filtered state
|
||||
patch("compose_farm.state.load_state", _get_filtered_state),
|
||||
# Patch get_config where imported
|
||||
patch("compose_farm.web.routes.pages.get_config", _get_filtered_config),
|
||||
patch("compose_farm.web.routes.api.get_config", _get_filtered_config),
|
||||
patch("compose_farm.web.routes.actions.get_config", _get_filtered_config),
|
||||
patch("compose_farm.web.app.get_config", _get_filtered_config),
|
||||
patch("compose_farm.web.ws.get_config", _get_filtered_config),
|
||||
]
|
||||
|
||||
for p in patches:
|
||||
p.start()
|
||||
|
||||
with socket.socket() as s:
|
||||
s.bind(("127.0.0.1", 0))
|
||||
port = s.getsockname()[1]
|
||||
|
||||
app = create_app()
|
||||
uvicorn_config = uvicorn.Config(app, host="127.0.0.1", port=port, log_level="error")
|
||||
server = uvicorn.Server(uvicorn_config)
|
||||
|
||||
thread = threading.Thread(target=server.run, daemon=True)
|
||||
thread.start()
|
||||
|
||||
url = f"http://127.0.0.1:{port}"
|
||||
server_ready = False
|
||||
for _ in range(50):
|
||||
try:
|
||||
urllib.request.urlopen(url, timeout=0.5) # noqa: S310
|
||||
server_ready = True
|
||||
break
|
||||
except Exception:
|
||||
time.sleep(0.1)
|
||||
|
||||
if not server_ready:
|
||||
msg = f"Demo server failed to start on {url}"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
yield url
|
||||
|
||||
server.should_exit = True
|
||||
thread.join(timeout=2)
|
||||
os.environ.pop("CF_CONFIG", None)
|
||||
|
||||
for p in patches:
|
||||
p.stop()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def recording_output_dir(tmp_path_factory: pytest.TempPathFactory) -> Path:
|
||||
"""Directory for video recordings."""
|
||||
return Path(tmp_path_factory.mktemp("recordings"))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def recording_context(
|
||||
browser: Any, # pytest-playwright's browser fixture
|
||||
vendor_cache: Path,
|
||||
recording_output_dir: Path,
|
||||
) -> Generator[BrowserContext, None, None]:
|
||||
"""Browser context with video recording enabled."""
|
||||
context = browser.new_context(
|
||||
viewport={"width": 1280, "height": 720},
|
||||
record_video_dir=str(recording_output_dir),
|
||||
record_video_size={"width": 1280, "height": 720},
|
||||
)
|
||||
|
||||
# Set up CDN interception
|
||||
cache = {url: (vendor_cache / f, ct) for url, (f, ct) in CDN_ASSETS.items()}
|
||||
|
||||
def handle_cdn(route: Route) -> None:
|
||||
url = route.request.url
|
||||
for url_prefix, (filepath, content_type) in cache.items():
|
||||
if url.startswith(url_prefix):
|
||||
route.fulfill(status=200, content_type=content_type, body=filepath.read_bytes())
|
||||
return
|
||||
route.abort("failed")
|
||||
|
||||
context.route(re.compile(r"https://(cdn\.jsdelivr\.net|unpkg\.com)/.*"), handle_cdn)
|
||||
|
||||
yield context
|
||||
context.close()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def recording_page(recording_context: BrowserContext) -> Generator[Page, None, None]:
|
||||
"""Page with recording and slow motion enabled."""
|
||||
page = recording_context.new_page()
|
||||
yield page
|
||||
page.close()
|
||||
|
||||
|
||||
# Demo helper functions
|
||||
|
||||
|
||||
def pause(page: Page, ms: int = 500) -> None:
|
||||
"""Pause for visibility in recording."""
|
||||
page.wait_for_timeout(ms)
|
||||
|
||||
|
||||
def slow_type(page: Page, selector: str, text: str, delay: int = 100) -> None:
|
||||
"""Type with visible delay between keystrokes."""
|
||||
page.type(selector, text, delay=delay)
|
||||
|
||||
|
||||
def open_command_palette(page: Page) -> None:
|
||||
"""Open command palette with Ctrl+K."""
|
||||
page.keyboard.press("Control+k")
|
||||
page.wait_for_selector("#cmd-palette[open]", timeout=2000)
|
||||
pause(page, 300)
|
||||
|
||||
|
||||
def close_command_palette(page: Page) -> None:
|
||||
"""Close command palette with Escape."""
|
||||
page.keyboard.press("Escape")
|
||||
page.wait_for_selector("#cmd-palette:not([open])", timeout=2000)
|
||||
pause(page, 200)
|
||||
|
||||
|
||||
def wait_for_sidebar(page: Page) -> None:
|
||||
"""Wait for sidebar to load with stacks."""
|
||||
page.wait_for_selector("#sidebar-stacks", timeout=5000)
|
||||
pause(page, 300)
|
||||
|
||||
|
||||
def navigate_to_stack(page: Page, stack: str) -> None:
|
||||
"""Navigate to a stack page via sidebar click."""
|
||||
page.locator("#sidebar-stacks a", has_text=stack).click()
|
||||
page.wait_for_url(f"**/stack/{stack}", timeout=5000)
|
||||
pause(page, 500)
|
||||
|
||||
|
||||
def select_command(page: Page, command: str) -> None:
|
||||
"""Filter and select a command from the palette."""
|
||||
page.locator("#cmd-input").fill(command)
|
||||
pause(page, 300)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 200)
|
||||
73
docs/demos/web/demo_console.py
Normal file
73
docs/demos/web/demo_console.py
Normal file
@@ -0,0 +1,73 @@
|
||||
"""Demo: Console terminal.
|
||||
|
||||
Records a ~30 second demo showing:
|
||||
- Navigating to Console page
|
||||
- Running cf commands in the terminal
|
||||
- Showing the Compose Farm config in Monaco editor
|
||||
|
||||
Run: pytest docs/demos/web/demo_console.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import (
|
||||
pause,
|
||||
slow_type,
|
||||
wait_for_sidebar,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
@pytest.mark.browser # type: ignore[misc]
|
||||
def test_demo_console(recording_page: Page, server_url: str) -> None:
|
||||
"""Record console terminal demo."""
|
||||
page = recording_page
|
||||
|
||||
# Start on dashboard
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 800)
|
||||
|
||||
# Navigate to Console page via sidebar menu
|
||||
page.locator(".menu a", has_text="Console").click()
|
||||
page.wait_for_url("**/console", timeout=5000)
|
||||
pause(page, 1000)
|
||||
|
||||
# Wait for terminal to be ready (auto-connects)
|
||||
page.wait_for_selector("#console-terminal .xterm", timeout=10000)
|
||||
pause(page, 1500)
|
||||
|
||||
# Run fastfetch first
|
||||
slow_type(page, "#console-terminal .xterm-helper-textarea", "fastfetch", delay=80)
|
||||
pause(page, 300)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 2500) # Wait for output
|
||||
|
||||
# Type cf stats command
|
||||
slow_type(page, "#console-terminal .xterm-helper-textarea", "cf stats", delay=80)
|
||||
pause(page, 300)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 3000) # Wait for output
|
||||
|
||||
# Type cf ps command
|
||||
slow_type(page, "#console-terminal .xterm-helper-textarea", "cf ps grocy", delay=80)
|
||||
pause(page, 300)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 2500) # Wait for output
|
||||
|
||||
# Scroll down to show the Editor section with Compose Farm config
|
||||
editor_section = page.locator(".collapse", has_text="Editor").first
|
||||
editor_section.scroll_into_view_if_needed()
|
||||
pause(page, 800)
|
||||
|
||||
# Wait for Monaco editor to load with config content
|
||||
page.wait_for_selector("#console-editor .monaco-editor", timeout=10000)
|
||||
pause(page, 2500) # Let viewer see the Compose Farm config file
|
||||
|
||||
# Final pause
|
||||
pause(page, 800)
|
||||
74
docs/demos/web/demo_navigation.py
Normal file
74
docs/demos/web/demo_navigation.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""Demo: Command palette navigation.
|
||||
|
||||
Records a ~15 second demo showing:
|
||||
- Opening command palette with Ctrl+K
|
||||
- Fuzzy search filtering
|
||||
- Arrow key navigation
|
||||
- Stack and page navigation
|
||||
|
||||
Run: pytest docs/demos/web/demo_navigation.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import (
|
||||
open_command_palette,
|
||||
pause,
|
||||
slow_type,
|
||||
wait_for_sidebar,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
@pytest.mark.browser # type: ignore[misc]
|
||||
def test_demo_navigation(recording_page: Page, server_url: str) -> None:
|
||||
"""Record command palette navigation demo."""
|
||||
page = recording_page
|
||||
|
||||
# Start on dashboard
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 1000) # Let viewer see dashboard
|
||||
|
||||
# Open command palette with keyboard shortcut
|
||||
open_command_palette(page)
|
||||
pause(page, 500)
|
||||
|
||||
# Type partial stack name for fuzzy search
|
||||
slow_type(page, "#cmd-input", "grocy", delay=120)
|
||||
pause(page, 800)
|
||||
|
||||
# Arrow down to show selection movement
|
||||
page.keyboard.press("ArrowDown")
|
||||
pause(page, 400)
|
||||
page.keyboard.press("ArrowUp")
|
||||
pause(page, 400)
|
||||
|
||||
# Press Enter to navigate to stack
|
||||
page.keyboard.press("Enter")
|
||||
page.wait_for_url("**/stack/grocy", timeout=5000)
|
||||
pause(page, 1500) # Show stack page
|
||||
|
||||
# Open palette again to navigate elsewhere
|
||||
open_command_palette(page)
|
||||
pause(page, 400)
|
||||
|
||||
# Navigate to another stack (immich) to show more navigation
|
||||
slow_type(page, "#cmd-input", "imm", delay=120)
|
||||
pause(page, 600)
|
||||
page.keyboard.press("Enter")
|
||||
page.wait_for_url("**/stack/immich", timeout=5000)
|
||||
pause(page, 1200) # Show immich stack page
|
||||
|
||||
# Open palette one more time, navigate back to dashboard
|
||||
open_command_palette(page)
|
||||
slow_type(page, "#cmd-input", "dashb", delay=120)
|
||||
pause(page, 500)
|
||||
page.keyboard.press("Enter")
|
||||
page.wait_for_url(server_url, timeout=5000)
|
||||
pause(page, 1000) # Final dashboard view
|
||||
71
docs/demos/web/demo_shell.py
Normal file
71
docs/demos/web/demo_shell.py
Normal file
@@ -0,0 +1,71 @@
|
||||
"""Demo: Container shell exec.
|
||||
|
||||
Records a ~25 second demo showing:
|
||||
- Navigating to a stack page
|
||||
- Clicking Shell button on a container
|
||||
- Running top command inside the container
|
||||
|
||||
Run: pytest docs/demos/web/demo_shell.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import (
|
||||
pause,
|
||||
slow_type,
|
||||
wait_for_sidebar,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
@pytest.mark.browser # type: ignore[misc]
|
||||
def test_demo_shell(recording_page: Page, server_url: str) -> None:
|
||||
"""Record container shell demo."""
|
||||
page = recording_page
|
||||
|
||||
# Start on dashboard
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 800)
|
||||
|
||||
# Navigate to a stack with a running container (grocy)
|
||||
page.locator("#sidebar-stacks a", has_text="grocy").click()
|
||||
page.wait_for_url("**/stack/grocy", timeout=5000)
|
||||
pause(page, 1500)
|
||||
|
||||
# Wait for containers list to load (loaded via HTMX)
|
||||
page.wait_for_selector("#containers-list button", timeout=10000)
|
||||
pause(page, 800)
|
||||
|
||||
# Click Shell button on the first container
|
||||
shell_btn = page.locator("#containers-list button", has_text="Shell").first
|
||||
shell_btn.click()
|
||||
pause(page, 1000)
|
||||
|
||||
# Wait for exec terminal to appear
|
||||
page.wait_for_selector("#exec-terminal .xterm", timeout=10000)
|
||||
|
||||
# Scroll down to make the terminal visible
|
||||
page.locator("#exec-terminal").scroll_into_view_if_needed()
|
||||
pause(page, 2000)
|
||||
|
||||
# Run top command
|
||||
slow_type(page, "#exec-terminal .xterm-helper-textarea", "top", delay=100)
|
||||
pause(page, 300)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 4000) # Let top run for a bit
|
||||
|
||||
# Press q to quit top
|
||||
page.keyboard.press("q")
|
||||
pause(page, 1000)
|
||||
|
||||
# Run another command to show it's interactive
|
||||
slow_type(page, "#exec-terminal .xterm-helper-textarea", "ps aux | head", delay=60)
|
||||
pause(page, 300)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 2000)
|
||||
96
docs/demos/web/demo_stack.py
Normal file
96
docs/demos/web/demo_stack.py
Normal file
@@ -0,0 +1,96 @@
|
||||
"""Demo: Stack actions.
|
||||
|
||||
Records a ~30 second demo showing:
|
||||
- Navigating to a stack page
|
||||
- Viewing compose file in Monaco editor
|
||||
- Triggering Restart action via command palette
|
||||
- Watching terminal output stream
|
||||
- Triggering Logs action
|
||||
|
||||
Run: pytest docs/demos/web/demo_stack.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import (
|
||||
open_command_palette,
|
||||
pause,
|
||||
slow_type,
|
||||
wait_for_sidebar,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
@pytest.mark.browser # type: ignore[misc]
|
||||
def test_demo_stack(recording_page: Page, server_url: str) -> None:
|
||||
"""Record stack actions demo."""
|
||||
page = recording_page
|
||||
|
||||
# Start on dashboard
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 800)
|
||||
|
||||
# Navigate to grocy via command palette
|
||||
open_command_palette(page)
|
||||
pause(page, 400)
|
||||
slow_type(page, "#cmd-input", "grocy", delay=100)
|
||||
pause(page, 500)
|
||||
page.keyboard.press("Enter")
|
||||
page.wait_for_url("**/stack/grocy", timeout=5000)
|
||||
pause(page, 1000) # Show stack page
|
||||
|
||||
# Click on Compose File collapse to show the Monaco editor
|
||||
# The collapse uses a checkbox input, click it via the parent collapse div
|
||||
compose_collapse = page.locator(".collapse", has_text="Compose File").first
|
||||
compose_collapse.locator("input[type=checkbox]").click(force=True)
|
||||
pause(page, 500)
|
||||
|
||||
# Wait for Monaco editor to load and show content
|
||||
page.wait_for_selector("#compose-editor .monaco-editor", timeout=10000)
|
||||
pause(page, 2000) # Let viewer see the compose file
|
||||
|
||||
# Scroll down slightly to show more of the editor
|
||||
page.locator("#compose-editor").scroll_into_view_if_needed()
|
||||
pause(page, 1500)
|
||||
|
||||
# Close the compose file section
|
||||
compose_collapse.locator("input[type=checkbox]").click(force=True)
|
||||
pause(page, 500)
|
||||
|
||||
# Open command palette for stack actions
|
||||
open_command_palette(page)
|
||||
pause(page, 400)
|
||||
|
||||
# Filter to Restart action
|
||||
slow_type(page, "#cmd-input", "restart", delay=120)
|
||||
pause(page, 600)
|
||||
|
||||
# Execute Restart
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 300)
|
||||
|
||||
# Wait for terminal to expand and show output
|
||||
page.wait_for_selector("#terminal-output .xterm", timeout=5000)
|
||||
pause(page, 2500) # Let viewer see terminal streaming
|
||||
|
||||
# Open palette again for Logs
|
||||
open_command_palette(page)
|
||||
pause(page, 400)
|
||||
|
||||
# Filter to Logs action
|
||||
slow_type(page, "#cmd-input", "logs", delay=120)
|
||||
pause(page, 600)
|
||||
|
||||
# Execute Logs
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 300)
|
||||
|
||||
# Show log output
|
||||
page.wait_for_selector("#terminal-output .xterm", timeout=5000)
|
||||
pause(page, 2500) # Final view of logs
|
||||
81
docs/demos/web/demo_themes.py
Normal file
81
docs/demos/web/demo_themes.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""Demo: Theme switching.
|
||||
|
||||
Records a ~15 second demo showing:
|
||||
- Opening theme picker via theme button
|
||||
- Live theme preview on arrow navigation
|
||||
- Selecting different themes
|
||||
- Theme persistence
|
||||
|
||||
Run: pytest docs/demos/web/demo_themes.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import (
|
||||
pause,
|
||||
slow_type,
|
||||
wait_for_sidebar,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
@pytest.mark.browser # type: ignore[misc]
|
||||
def test_demo_themes(recording_page: Page, server_url: str) -> None:
|
||||
"""Record theme switching demo."""
|
||||
page = recording_page
|
||||
|
||||
# Start on dashboard
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 1000) # Show initial theme
|
||||
|
||||
# Click theme button to open theme picker
|
||||
page.locator("#theme-btn").click()
|
||||
page.wait_for_selector("#cmd-palette[open]", timeout=2000)
|
||||
pause(page, 600)
|
||||
|
||||
# Arrow through many themes to show live preview effect
|
||||
for _ in range(12):
|
||||
page.keyboard.press("ArrowDown")
|
||||
pause(page, 350) # Show each preview
|
||||
|
||||
# Go back up through a few (land on valentine, not cyberpunk)
|
||||
for _ in range(4):
|
||||
page.keyboard.press("ArrowUp")
|
||||
pause(page, 350)
|
||||
|
||||
# Select current theme with Enter
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 1000)
|
||||
|
||||
# Close palette with Escape
|
||||
page.keyboard.press("Escape")
|
||||
pause(page, 800)
|
||||
|
||||
# Open again and use search to find specific theme
|
||||
page.locator("#theme-btn").click()
|
||||
page.wait_for_selector("#cmd-palette[open]", timeout=2000)
|
||||
pause(page, 400)
|
||||
|
||||
# Type to filter to a light theme (theme button pre-populates "theme:")
|
||||
slow_type(page, "#cmd-input", "cup", delay=100)
|
||||
pause(page, 500)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 1000)
|
||||
|
||||
# Close and return to dark
|
||||
page.keyboard.press("Escape")
|
||||
pause(page, 500)
|
||||
page.locator("#theme-btn").click()
|
||||
page.wait_for_selector("#cmd-palette[open]", timeout=2000)
|
||||
pause(page, 300)
|
||||
|
||||
slow_type(page, "#cmd-input", "dark", delay=100)
|
||||
pause(page, 400)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 800)
|
||||
201
docs/demos/web/demo_workflow.py
Normal file
201
docs/demos/web/demo_workflow.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""Demo: Full workflow.
|
||||
|
||||
Records a comprehensive demo (~60 seconds) combining all major features:
|
||||
1. Console page: terminal with fastfetch, cf pull command
|
||||
2. Editor showing Compose Farm YAML config
|
||||
3. Command palette navigation to grocy stack
|
||||
4. Stack actions: up, logs
|
||||
5. Switch to mealie stack via command palette, run update
|
||||
6. Dashboard overview
|
||||
7. Theme cycling via command palette
|
||||
|
||||
This demo is used on the homepage and Web UI page as the main showcase.
|
||||
|
||||
Run: pytest docs/demos/web/demo_workflow.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import open_command_palette, pause, slow_type, wait_for_sidebar
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
def _demo_console_terminal(page: Page, server_url: str) -> None:
|
||||
"""Demo part 1: Console page with terminal and editor."""
|
||||
# Start on dashboard briefly
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 800)
|
||||
|
||||
# Navigate to Console page via command palette
|
||||
open_command_palette(page)
|
||||
pause(page, 300)
|
||||
slow_type(page, "#cmd-input", "cons", delay=100)
|
||||
pause(page, 400)
|
||||
page.keyboard.press("Enter")
|
||||
page.wait_for_url("**/console", timeout=5000)
|
||||
pause(page, 800)
|
||||
|
||||
# Wait for terminal to be ready
|
||||
page.wait_for_selector("#console-terminal .xterm", timeout=10000)
|
||||
pause(page, 1000)
|
||||
|
||||
# Run fastfetch first
|
||||
slow_type(page, "#console-terminal .xterm-helper-textarea", "fastfetch", delay=60)
|
||||
pause(page, 200)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 2000) # Wait for output
|
||||
|
||||
# Run cf pull on a stack to show Compose Farm in action
|
||||
slow_type(page, "#console-terminal .xterm-helper-textarea", "cf pull grocy", delay=60)
|
||||
pause(page, 200)
|
||||
page.keyboard.press("Enter")
|
||||
pause(page, 3000) # Wait for pull output
|
||||
|
||||
|
||||
def _demo_config_editor(page: Page) -> None:
|
||||
"""Demo part 2: Show the Compose Farm config in editor."""
|
||||
# Smoothly scroll down to show the Editor section
|
||||
# Use JavaScript for smooth scrolling animation
|
||||
page.evaluate("""
|
||||
const editor = document.getElementById('console-editor');
|
||||
if (editor) {
|
||||
editor.scrollIntoView({ behavior: 'smooth', block: 'center' });
|
||||
}
|
||||
""")
|
||||
pause(page, 1200) # Wait for smooth scroll animation
|
||||
|
||||
# Wait for Monaco editor to load with config content
|
||||
page.wait_for_selector("#console-editor .monaco-editor", timeout=10000)
|
||||
pause(page, 2000) # Let viewer see the Compose Farm config file
|
||||
|
||||
|
||||
def _demo_stack_actions(page: Page) -> None:
    """Demo part 3: Navigate to stack and run actions.

    Visits the grocy stack page, opens and closes the compose-file editor,
    runs the Up and Logs actions, then switches to the mealie stack and runs
    the Update action — all driven through the command palette.
    """
    # Click on sidebar to take focus away from terminal, then use command palette
    page.locator("#sidebar-stacks").click()
    pause(page, 300)

    # Navigate to grocy via command palette
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "grocy", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    page.wait_for_url("**/stack/grocy", timeout=5000)
    pause(page, 1000)

    # Open Compose File editor to show the compose.yaml
    # (the section is a DaisyUI-style collapse toggled by a hidden checkbox,
    # hence force=True)
    compose_collapse = page.locator(".collapse", has_text="Compose File").first
    compose_collapse.locator("input[type=checkbox]").click(force=True)
    pause(page, 500)

    # Wait for Monaco editor to load and show content
    page.wait_for_selector("#compose-editor .monaco-editor", timeout=10000)
    pause(page, 2000)  # Let viewer see the compose file

    # Close the compose file section
    compose_collapse.locator("input[type=checkbox]").click(force=True)
    pause(page, 500)

    # Run Up action via command palette
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "up", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 200)

    # Wait for terminal output
    page.wait_for_selector("#terminal-output .xterm", timeout=5000)
    pause(page, 2500)

    # Show logs
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "logs", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 200)

    page.wait_for_selector("#terminal-output .xterm", timeout=5000)
    pause(page, 2500)

    # Switch to mealie via command palette
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "mealie", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    page.wait_for_url("**/stack/mealie", timeout=5000)
    pause(page, 1000)

    # Run update action
    # NOTE(review): "upda" presumably disambiguates from the "up" action in
    # the palette's fuzzy matching — confirm against the palette implementation.
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "upda", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 200)

    page.wait_for_selector("#terminal-output .xterm", timeout=5000)
    pause(page, 2500)
|
||||
|
||||
|
||||
def _demo_dashboard_and_themes(page: Page, server_url: str) -> None:
    """Demo part 4: Dashboard and theme cycling.

    Returns to the dashboard, opens the theme picker, arrows through the
    theme list with live preview down to Luxury, applies it, then restores
    the dark theme.
    """
    # Navigate to dashboard via command palette
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "dash", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    page.wait_for_url(server_url, timeout=5000)
    pause(page, 800)

    # Scroll to top of page to ensure dashboard is fully visible
    page.evaluate("window.scrollTo(0, 0)")
    pause(page, 600)

    # Open theme picker and arrow down to Luxury (shows live preview)
    # Theme order: light, dark, cupcake, bumblebee, emerald, corporate, synthwave,
    # retro, cyberpunk, valentine, halloween, garden, forest, aqua, lofi, pastel,
    # fantasy, wireframe, black, luxury (index 19)
    page.locator("#theme-btn").click()
    page.wait_for_selector("#cmd-palette[open]", timeout=2000)
    pause(page, 400)

    # Arrow down through themes with live preview until we reach Luxury
    # (19 presses — tied to the theme order listed above)
    for _ in range(19):
        page.keyboard.press("ArrowDown")
        pause(page, 180)

    # Select Luxury theme
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 1000)

    # Return to dark theme
    # NOTE(review): the leading space in " dark" presumably matters to the
    # palette's filter mode — confirm before changing.
    page.locator("#theme-btn").click()
    page.wait_for_selector("#cmd-palette[open]", timeout=2000)
    pause(page, 300)
    slow_type(page, "#cmd-input", " dark", delay=80)
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 1000)
|
||||
|
||||
|
||||
@pytest.mark.browser  # type: ignore[misc]
def test_demo_workflow(recording_page: Page, server_url: str) -> None:
    """Record the full workflow demo by running all four segments in order."""
    _demo_console_terminal(recording_page, server_url)
    _demo_config_editor(recording_page)
    _demo_stack_actions(recording_page)
    _demo_dashboard_and_themes(recording_page, server_url)
|
||||
260
docs/demos/web/record.py
Executable file
260
docs/demos/web/record.py
Executable file
@@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Record all web UI demos.
|
||||
|
||||
This script orchestrates recording of web UI demos using Playwright,
|
||||
then converts the WebM recordings to GIF format.
|
||||
|
||||
Usage:
|
||||
python docs/demos/web/record.py # Record all demos
|
||||
python docs/demos/web/record.py navigation # Record specific demo
|
||||
|
||||
Requirements:
|
||||
- Playwright with Chromium: playwright install chromium
|
||||
- ffmpeg for GIF conversion: apt install ffmpeg / brew install ffmpeg
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
console = Console()
|
||||
|
||||
SCRIPT_DIR = Path(__file__).parent
|
||||
REPO_DIR = SCRIPT_DIR.parent.parent.parent
|
||||
OUTPUT_DIR = REPO_DIR / "docs" / "assets"
|
||||
|
||||
DEMOS = [
|
||||
"navigation",
|
||||
"stack",
|
||||
"themes",
|
||||
"workflow",
|
||||
"console",
|
||||
"shell",
|
||||
]
|
||||
|
||||
# High-quality ffmpeg settings for VP8 encoding
|
||||
# See: https://github.com/microsoft/playwright/issues/10855
|
||||
# See: https://github.com/microsoft/playwright/issues/31424
|
||||
#
|
||||
# MAX_QUALITY: Lossless-like, largest files
|
||||
# BALANCED_QUALITY: ~43% file size, nearly indistinguishable quality
|
||||
MAX_QUALITY_ARGS = "-c:v vp8 -qmin 0 -qmax 0 -crf 0 -deadline best -speed 0 -b:v 0 -threads 0"
|
||||
BALANCED_QUALITY_ARGS = "-c:v vp8 -qmin 0 -qmax 10 -crf 4 -deadline best -speed 0 -b:v 0 -threads 0"
|
||||
|
||||
# Choose which quality to use
|
||||
VIDEO_QUALITY_ARGS = MAX_QUALITY_ARGS
|
||||
|
||||
|
||||
def patch_playwright_video_quality() -> None:
    """Patch Playwright's videoRecorder.js to use high-quality encoding settings.

    Rewrites the ffmpeg argument string inside Playwright's bundled
    ``videoRecorder.js`` so recordings use ``VIDEO_QUALITY_ARGS``. Safe to run
    repeatedly: it no-ops when the file already contains the target args, and
    re-patches correctly when switching between quality presets.

    Raises:
        FileNotFoundError: If the bundled videoRecorder.js cannot be located.
        ValueError: If the expected ffmpeg args pattern is not found.
    """
    from playwright._impl._driver import compute_driver_executable  # noqa: PLC0415

    # compute_driver_executable returns (node_path, cli_path) in newer
    # Playwright versions, a single path in older ones.
    result = compute_driver_executable()
    node_path = result[0] if isinstance(result, tuple) else result
    driver_path = Path(node_path).parent

    video_recorder = driver_path / "package" / "lib" / "server" / "chromium" / "videoRecorder.js"

    if not video_recorder.exists():
        msg = f"videoRecorder.js not found at {video_recorder}"
        raise FileNotFoundError(msg)

    content = video_recorder.read_text()

    # Already patched with the *current* target args? Checking the exact
    # string (rather than a loose marker like "deadline best", which appears
    # in both MAX and BALANCED presets) means switching presets re-patches
    # instead of silently returning early.
    if VIDEO_QUALITY_ARGS in content:
        return

    # Pattern matches both Playwright's stock args and any previously
    # patched variant, so re-patching with different settings also works.
    pattern = (
        r"-c:v vp8 -qmin \d+ -qmax \d+ -crf \d+ -deadline \w+ -speed \d+ -b:v \w+ -threads \d+"
    )

    if not re.search(pattern, content):
        msg = "Could not find ffmpeg args pattern in videoRecorder.js"
        raise ValueError(msg)

    # Replace with high-quality settings
    new_content = re.sub(pattern, VIDEO_QUALITY_ARGS, content)
    video_recorder.write_text(new_content)
    console.print("[green]Patched Playwright for high-quality video recording[/green]")
|
||||
|
||||
|
||||
def record_demo(name: str) -> Path | None:
    """Run a single demo via pytest and return the recorded video path.

    Args:
        name: Demo name (one of ``DEMOS``); maps to ``demo_<name>.py``.

    Returns:
        Path to the newest recorded WebM file, or ``None`` if the demo file
        is missing, pytest failed, or no video was produced.
    """
    console.print(f"[green]Recording:[/green] web-{name}")

    demo_file = SCRIPT_DIR / f"demo_{name}.py"
    if not demo_file.exists():
        console.print(f"[red] Demo file not found: {demo_file}[/red]")
        return None

    # Per-demo temp dir: the parent .recordings dir is only cleaned up at the
    # end of the whole run, so without this a leftover video from an earlier
    # demo could be mistaken for this demo's recording.
    temp_dir = SCRIPT_DIR / ".recordings" / name
    temp_dir.mkdir(parents=True, exist_ok=True)

    # Run pytest with video recording.
    # Set PYTHONPATH so conftest.py imports work.
    env = {**os.environ, "PYTHONPATH": str(SCRIPT_DIR)}
    result = subprocess.run(
        [
            sys.executable,
            "-m",
            "pytest",
            str(demo_file),
            "-v",
            "--no-cov",
            "-x",  # Stop on first failure
            f"--basetemp={temp_dir}",
        ],
        check=False,
        cwd=REPO_DIR,
        capture_output=True,
        text=True,
        env=env,
    )

    if result.returncode != 0:
        console.print(f"[red] Failed to record {name}[/red]")
        console.print(result.stdout)
        console.print(result.stderr)
        return None

    # Find the recorded video
    videos = list(temp_dir.rglob("*.webm"))
    if not videos:
        console.print(f"[red] No video found for {name}[/red]")
        return None

    # Use the most recent video
    video = max(videos, key=lambda p: p.stat().st_mtime)
    console.print(f"[green] Recorded: {video.name}[/green]")
    return video
|
||||
|
||||
|
||||
def convert_to_gif(webm_path: Path, output_name: str) -> Path:
    """Convert WebM to GIF using ffmpeg with palette optimization.

    Uses the two-pass palettegen/paletteuse approach for better color
    fidelity than a single-pass conversion.

    Args:
        webm_path: Source WebM recording.
        output_name: Base name (no extension) for the GIF placed in OUTPUT_DIR.

    Returns:
        Path to the generated GIF.

    Raises:
        subprocess.CalledProcessError: If either ffmpeg pass fails.
    """
    gif_path = OUTPUT_DIR / f"{output_name}.gif"
    palette_path = webm_path.parent / "palette.png"

    try:
        # Pass 1: Generate palette
        subprocess.run(
            [  # noqa: S607
                "ffmpeg",
                "-y",
                "-i",
                str(webm_path),
                "-vf",
                "fps=10,scale=1280:-1:flags=lanczos,palettegen=stats_mode=diff",
                str(palette_path),
            ],
            check=True,
            capture_output=True,
        )

        # Pass 2: Generate GIF with palette
        subprocess.run(
            [  # noqa: S607
                "ffmpeg",
                "-y",
                "-i",
                str(webm_path),
                "-i",
                str(palette_path),
                "-lavfi",
                "fps=10,scale=1280:-1:flags=lanczos[x];[x][1:v]paletteuse=dither=bayer:bayer_scale=5:diff_mode=rectangle",
                str(gif_path),
            ],
            check=True,
            capture_output=True,
        )
    finally:
        # Remove the intermediate palette even when a pass fails; previously
        # a pass-2 failure leaked palette.png in the recording directory.
        palette_path.unlink(missing_ok=True)

    return gif_path
|
||||
|
||||
|
||||
def move_recording(video_path: Path, name: str) -> tuple[Path, Path]:
    """Copy the WebM into OUTPUT_DIR and produce a GIF alongside it.

    Args:
        video_path: Recorded WebM file to publish.
        name: Demo name; outputs are named ``web-<name>.webm`` / ``.gif``.

    Returns:
        Tuple of (WebM destination path, GIF path).
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    stem = f"web-{name}"

    webm_dest = OUTPUT_DIR / f"{stem}.webm"
    shutil.copy2(video_path, webm_dest)
    console.print(f"[blue] WebM: {webm_dest.relative_to(REPO_DIR)}[/blue]")

    gif_path = convert_to_gif(video_path, stem)
    console.print(f"[blue] GIF: {gif_path.relative_to(REPO_DIR)}[/blue]")

    return webm_dest, gif_path
|
||||
|
||||
|
||||
def cleanup() -> None:
    """Remove the temporary .recordings directory, if present."""
    recordings_dir = SCRIPT_DIR / ".recordings"
    if recordings_dir.exists():
        shutil.rmtree(recordings_dir)
|
||||
|
||||
|
||||
def main() -> int:
    """Record all (or selected) web UI demos and convert them to GIFs.

    Demo names may be passed as command-line arguments; with no arguments all
    demos in ``DEMOS`` are recorded. Temporary recordings are cleaned up even
    when a demo fails.

    Returns:
        Process exit code: 0 if every requested demo recorded, 1 otherwise.
    """
    console.print("[blue]Recording web UI demos...[/blue]")
    console.print(f"Output directory: {OUTPUT_DIR}")
    console.print()

    # Patch Playwright for high-quality video recording
    patch_playwright_video_quality()

    # Determine which demos to record. Reject any unknown name explicitly:
    # previously unknown names mixed with valid ones were silently dropped.
    if len(sys.argv) > 1:
        requested = sys.argv[1:]
        unknown = [d for d in requested if d not in DEMOS]
        if unknown:
            console.print(
                f"[red]Unknown demo(s): {', '.join(unknown)}. "
                f"Available: {', '.join(DEMOS)}[/red]"
            )
            return 1
        demos_to_record = requested
    else:
        demos_to_record = DEMOS

    results: dict[str, tuple[Path | None, Path | None]] = {}

    try:
        for i, demo in enumerate(demos_to_record, 1):
            console.print(f"[yellow]=== Demo {i}/{len(demos_to_record)}: {demo} ===[/yellow]")

            video_path = record_demo(demo)
            if video_path:
                webm, gif = move_recording(video_path, demo)
                results[demo] = (webm, gif)
            else:
                results[demo] = (None, None)
            console.print()
    finally:
        # Always drop the temporary .recordings dir, even on failure.
        cleanup()

    # Summary
    console.print("[blue]=== Summary ===[/blue]")
    success_count = sum(1 for w, _ in results.values() if w is not None)
    console.print(f"Recorded: {success_count}/{len(demos_to_record)} demos")
    console.print()

    for demo, (webm, gif) in results.items():
        status = "[green]OK[/green]" if webm else "[red]FAILED[/red]"
        console.print(f" {demo}: {status}")
        if webm:
            console.print(f" {webm.relative_to(REPO_DIR)}")
        if gif:
            console.print(f" {gif.relative_to(REPO_DIR)}")

    return 0 if success_count == len(demos_to_record) else 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -1,90 +0,0 @@
|
||||
# Docker Swarm Overlay Networks with Compose Farm
|
||||
|
||||
Notes from testing Docker Swarm's attachable overlay networks as a way to get cross-host container networking while still using `docker compose`.
|
||||
|
||||
## The Idea
|
||||
|
||||
Docker Swarm overlay networks can be made "attachable", allowing regular `docker compose` containers (not just swarm services) to join them. This would give us:
|
||||
|
||||
- Cross-host Docker DNS (containers find each other by name)
|
||||
- No need to publish ports for inter-container communication
|
||||
- Keep using `docker compose up` instead of `docker stack deploy`
|
||||
|
||||
## Setup Steps
|
||||
|
||||
```bash
|
||||
# On manager node
|
||||
docker swarm init --advertise-addr <manager-ip>
|
||||
|
||||
# On worker nodes (use token from init output)
|
||||
docker swarm join --token <token> <manager-ip>:2377
|
||||
|
||||
# Create attachable overlay network (on manager)
|
||||
docker network create --driver overlay --attachable my-network
|
||||
|
||||
# In compose files, add the network
|
||||
networks:
|
||||
my-network:
|
||||
external: true
|
||||
```
|
||||
|
||||
## Required Ports
|
||||
|
||||
Docker Swarm requires these ports open **bidirectionally** between all nodes:
|
||||
|
||||
| Port | Protocol | Purpose |
|
||||
|------|----------|---------|
|
||||
| 2377 | TCP | Cluster management |
|
||||
| 7946 | TCP + UDP | Node communication |
|
||||
| 4789 | UDP | Overlay network traffic (VXLAN) |
|
||||
|
||||
## Test Results (2024-12-13)
|
||||
|
||||
- docker-debian (192.168.1.66) as manager
|
||||
- dev-lxc (192.168.1.167) as worker
|
||||
|
||||
### What worked
|
||||
|
||||
- Swarm init and join
|
||||
- Overlay network creation
|
||||
- Nodes showed as Ready
|
||||
|
||||
### What failed
|
||||
|
||||
- Container on dev-lxc couldn't attach to overlay network
|
||||
- Error: `attaching to network failed... context deadline exceeded`
|
||||
- Cause: Port 7946 blocked from docker-debian → dev-lxc
|
||||
|
||||
### Root cause
|
||||
|
||||
Firewall on dev-lxc wasn't configured to allow swarm ports. Opening these ports requires sudo access on each node.
|
||||
|
||||
## Conclusion
|
||||
|
||||
Docker Swarm overlay networks are **not plug-and-play**. Requirements:
|
||||
|
||||
1. Swarm init/join on all nodes
|
||||
2. Firewall rules on all nodes (needs sudo/root)
|
||||
3. All nodes must have bidirectional connectivity on 3 ports
|
||||
|
||||
For a simpler alternative, consider:
|
||||
|
||||
- **Tailscale**: VPN mesh, containers use host's Tailscale IP
|
||||
- **Host networking + published ports**: What compose-farm does today
|
||||
- **Keep dependent services together**: Avoid cross-host networking entirely
|
||||
|
||||
## Future Work
|
||||
|
||||
If we decide to support overlay networks:
|
||||
|
||||
1. Add a `compose-farm network create` command that:
|
||||
- Initializes swarm if needed
|
||||
- Creates attachable overlay network
|
||||
- Documents required firewall rules
|
||||
|
||||
2. Add network config to compose-farm.yaml:
|
||||
```yaml
|
||||
overlay_network: compose-farm-net
|
||||
```
|
||||
|
||||
3. Auto-inject network into compose files (or document manual setup)
|
||||
@@ -1,128 +0,0 @@
|
||||
# Future Improvements
|
||||
|
||||
Low-priority improvements identified during code review. These are not currently causing issues but could be addressed if they become pain points.
|
||||
|
||||
## 1. State Module Efficiency (LOW)
|
||||
|
||||
**Current:** Every state operation reads and writes the entire file.
|
||||
|
||||
```python
|
||||
def set_service_host(config, service, host):
|
||||
state = load_state(config) # Read file
|
||||
state[service] = host
|
||||
save_state(config, state) # Write file
|
||||
```
|
||||
|
||||
**Impact:** With 87 services, this is fine. With 1000+, it would be slow.
|
||||
|
||||
**Potential fix:** Add batch operations:
|
||||
```python
|
||||
def update_state(config, updates: dict[str, str | None]) -> None:
|
||||
"""Batch update: set services to hosts, None means remove."""
|
||||
state = load_state(config)
|
||||
for service, host in updates.items():
|
||||
if host is None:
|
||||
state.pop(service, None)
|
||||
else:
|
||||
state[service] = host
|
||||
save_state(config, state)
|
||||
```
|
||||
|
||||
**When to do:** Only if state operations become noticeably slow.
|
||||
|
||||
---
|
||||
|
||||
## 2. Remote-Aware Compose Path Resolution (LOW)
|
||||
|
||||
**Current:** `config.get_compose_path()` checks if files exist on the local filesystem:
|
||||
|
||||
```python
|
||||
def get_compose_path(self, service: str) -> Path:
|
||||
for filename in ("compose.yaml", "compose.yml", ...):
|
||||
candidate = service_dir / filename
|
||||
if candidate.exists(): # Local check!
|
||||
return candidate
|
||||
```
|
||||
|
||||
**Why this works:** NFS/shared storage means local = remote.
|
||||
|
||||
**Why it could break:** If running compose-farm from a machine without the NFS mount, it returns `compose.yaml` (the default) even if `docker-compose.yml` exists on the remote host.
|
||||
|
||||
**Potential fix:** Query the remote host for file existence, or accept this limitation and document it.
|
||||
|
||||
**When to do:** Only if users need to run compose-farm from non-NFS machines.
|
||||
|
||||
---
|
||||
|
||||
## 3. Add Integration Tests for CLI Commands (MEDIUM)
|
||||
|
||||
**Current:** No integration tests for the actual CLI commands. Tests cover the underlying functions but not the Typer commands themselves.
|
||||
|
||||
**Potential fix:** Add integration tests using `CliRunner` from Typer:
|
||||
|
||||
```python
|
||||
from typer.testing import CliRunner
|
||||
from compose_farm.cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
def test_check_command_validates_config():
|
||||
result = runner.invoke(app, ["check", "--local"])
|
||||
assert result.exit_code == 0
|
||||
```
|
||||
|
||||
**When to do:** When CLI behavior becomes complex enough to warrant dedicated testing.
|
||||
|
||||
---
|
||||
|
||||
## 4. Add Tests for operations.py (MEDIUM)
|
||||
|
||||
**Current:** Operations module has 30% coverage. Most logic is tested indirectly through test_sync.py.
|
||||
|
||||
**Potential fix:** Add dedicated tests for:
|
||||
- `up_services()` with migration scenarios
|
||||
- `preflight_check()`
|
||||
- `check_host_compatibility()`
|
||||
|
||||
**When to do:** When adding new operations or modifying migration logic.
|
||||
|
||||
---
|
||||
|
||||
## 5. Consider Structured Logging (LOW)
|
||||
|
||||
**Current:** Operations print directly to console using Rich. This couples the operations module to the Rich library.
|
||||
|
||||
**Potential fix:** Use Python's logging module with a custom Rich handler:
|
||||
|
||||
```python
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# In operations:
|
||||
logger.info("Migrating %s from %s to %s", service, old_host, new_host)
|
||||
|
||||
# In cli.py - configure Rich handler:
|
||||
from rich.logging import RichHandler
|
||||
logging.basicConfig(handlers=[RichHandler()])
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- Operations become testable without capturing stdout
|
||||
- Logs can be redirected to files
|
||||
- Log levels provide filtering
|
||||
|
||||
**When to do:** Only if console output coupling becomes a problem for testing or extensibility.
|
||||
|
||||
---
|
||||
|
||||
## Design Decisions to Keep
|
||||
|
||||
These patterns are working well and should be preserved:
|
||||
|
||||
1. **asyncio + asyncssh** - Solid async foundation
|
||||
2. **Pydantic models** - Clean validation
|
||||
3. **Rich for output** - Good UX
|
||||
4. **Test structure** - Good coverage
|
||||
5. **Module separation** - cli/operations/executor/compose pattern
|
||||
6. **KISS principle** - Don't over-engineer
|
||||
@@ -18,13 +18,13 @@ Before you begin, ensure you have:
|
||||
## Installation
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="assets/install.webm" type="video/webm">
|
||||
<source src="/assets/install.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
### One-liner (recommended)
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/basnijholt/compose-farm/main/bootstrap.sh | sh
|
||||
curl -fsSL https://compose-farm.nijho.lt/install | sh
|
||||
```
|
||||
|
||||
This installs [uv](https://docs.astral.sh/uv/) if needed, then installs compose-farm.
|
||||
@@ -111,9 +111,9 @@ nas:/volume1/compose /opt/compose nfs defaults 0 0
|
||||
/opt/compose/ # compose_dir in config
|
||||
├── plex/
|
||||
│ └── docker-compose.yml
|
||||
├── sonarr/
|
||||
├── grafana/
|
||||
│ └── docker-compose.yml
|
||||
├── radarr/
|
||||
├── nextcloud/
|
||||
│ └── docker-compose.yml
|
||||
└── jellyfin/
|
||||
└── docker-compose.yml
|
||||
@@ -123,8 +123,38 @@ nas:/volume1/compose /opt/compose nfs defaults 0 0
|
||||
|
||||
### Create Config File
|
||||
|
||||
Create `~/.config/compose-farm/compose-farm.yaml`:
|
||||
Create `compose-farm.yaml` in the directory where you'll run commands. For example, if your stacks are in `/opt/stacks`, place the config there too:
|
||||
|
||||
```bash
|
||||
cd /opt/stacks
|
||||
cf config init
|
||||
```
|
||||
|
||||
Alternatively, use `~/.config/compose-farm/compose-farm.yaml` for a global config. You can also symlink a working directory config to the global location:
|
||||
|
||||
```bash
|
||||
# Create config in your stacks directory, symlink to ~/.config
|
||||
cf config symlink /opt/stacks/compose-farm.yaml
|
||||
```
|
||||
|
||||
This way, `cf` commands work from anywhere while the config lives with your stacks.
|
||||
|
||||
#### Single host example
|
||||
|
||||
```yaml
|
||||
# Where compose files are located (one folder per stack)
|
||||
compose_dir: /opt/stacks
|
||||
|
||||
hosts:
|
||||
local: localhost
|
||||
|
||||
stacks:
|
||||
plex: local
|
||||
grafana: local
|
||||
nextcloud: local
|
||||
```
|
||||
|
||||
#### Multi-host example
|
||||
```yaml
|
||||
# Where compose files are located (same path on all hosts)
|
||||
compose_dir: /opt/compose
|
||||
@@ -137,16 +167,17 @@ hosts:
|
||||
hp:
|
||||
address: 192.168.1.11
|
||||
# user defaults to current user
|
||||
local: localhost # Run locally without SSH
|
||||
|
||||
# Map services to hosts
|
||||
services:
|
||||
# Map stacks to hosts
|
||||
stacks:
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
radarr: hp
|
||||
jellyfin: local
|
||||
grafana: nuc
|
||||
nextcloud: hp
|
||||
```
|
||||
|
||||
Each entry in `stacks:` maps to a folder under `compose_dir` that contains a compose file.
|
||||
|
||||
For cross-host HTTP routing, add Traefik labels and configure `traefik_file` (see [Traefik Integration](traefik.md)).
|
||||
### Validate Configuration
|
||||
|
||||
```bash
|
||||
@@ -167,20 +198,20 @@ cf check
|
||||
cf ps
|
||||
```
|
||||
|
||||
Shows all configured services and their status.
|
||||
Shows all configured stacks and their status.
|
||||
|
||||
### Start All Services
|
||||
### Start All Stacks
|
||||
|
||||
```bash
|
||||
cf up --all
|
||||
```
|
||||
|
||||
Starts all services on their assigned hosts.
|
||||
Starts all stacks on their assigned hosts.
|
||||
|
||||
### Start Specific Services
|
||||
### Start Specific Stacks
|
||||
|
||||
```bash
|
||||
cf up plex sonarr
|
||||
cf up plex grafana
|
||||
```
|
||||
|
||||
### Apply Configuration
|
||||
@@ -193,13 +224,13 @@ cf apply # Execute changes
|
||||
```
|
||||
|
||||
This will:
|
||||
1. Start services in config but not running
|
||||
2. Migrate services on wrong host
|
||||
3. Stop services removed from config
|
||||
1. Start stacks in config but not running
|
||||
2. Migrate stacks on wrong host
|
||||
3. Stop stacks removed from config
|
||||
|
||||
## Docker Network Setup
|
||||
|
||||
If your services use an external Docker network:
|
||||
If your stacks use an external Docker network:
|
||||
|
||||
```bash
|
||||
# Create network on all hosts
|
||||
@@ -213,25 +244,28 @@ Default network: `mynetwork` with subnet `172.20.0.0/16`
|
||||
|
||||
## Example Workflow
|
||||
|
||||
### 1. Add a New Service
|
||||
### 1. Add a New Stack
|
||||
|
||||
Create the compose file:
|
||||
|
||||
```bash
|
||||
# On any host (shared storage)
|
||||
mkdir -p /opt/compose/prowlarr
|
||||
cat > /opt/compose/prowlarr/docker-compose.yml << 'EOF'
|
||||
mkdir -p /opt/compose/gitea
|
||||
cat > /opt/compose/gitea/docker-compose.yml << 'EOF'
|
||||
services:
|
||||
prowlarr:
|
||||
image: lscr.io/linuxserver/prowlarr:latest
|
||||
container_name: prowlarr
|
||||
gitea:
|
||||
image: docker.gitea.com/gitea:latest
|
||||
container_name: gitea
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- USER_UID=1000
|
||||
- USER_GID=1000
|
||||
volumes:
|
||||
- /opt/config/prowlarr:/config
|
||||
- /opt/config/gitea:/data
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
ports:
|
||||
- "9696:9696"
|
||||
- "3000:3000"
|
||||
- "2222:22"
|
||||
restart: unless-stopped
|
||||
EOF
|
||||
```
|
||||
@@ -239,23 +273,23 @@ EOF
|
||||
Add to config:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
# ... existing services
|
||||
prowlarr: nuc
|
||||
stacks:
|
||||
# ... existing stacks
|
||||
gitea: nuc
|
||||
```
|
||||
|
||||
Start the service:
|
||||
Start the stack:
|
||||
|
||||
```bash
|
||||
cf up prowlarr
|
||||
cf up gitea
|
||||
```
|
||||
|
||||
### 2. Move a Service to Another Host
|
||||
### 2. Move a Stack to Another Host
|
||||
|
||||
Edit `compose-farm.yaml`:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
stacks:
|
||||
plex: hp # Changed from nuc
|
||||
```
|
||||
|
||||
@@ -272,11 +306,11 @@ Or use apply to reconcile everything:
|
||||
cf apply
|
||||
```
|
||||
|
||||
### 3. Update All Services
|
||||
### 3. Update All Stacks
|
||||
|
||||
```bash
|
||||
cf update --all
|
||||
# Runs: pull + down + up for each service
|
||||
# Runs: pull + build + down + up for each stack
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
@@ -8,14 +8,21 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
|
||||
|
||||
## What is Compose Farm?
|
||||
|
||||
Compose Farm lets you manage Docker Compose services across multiple machines from a single command line. Think [Dockge](https://dockge.kuma.pet/) but with a CLI and web interface, designed for multi-host deployments.
|
||||
Compose Farm lets you manage Docker Compose stacks across multiple machines from a single command line. Think [Dockge](https://dockge.kuma.pet/) but with a CLI and web interface, designed for multi-host deployments.
|
||||
|
||||
Define which services run where in one YAML file, then use `cf apply` to make reality match your configuration.
|
||||
Define which stacks run where in one YAML file, then use `cf apply` to make reality match your configuration.
|
||||
It also works great on a single host with one folder per stack; just map stacks to `localhost`.
|
||||
|
||||
## Quick Demo
|
||||
|
||||
**CLI:**
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="assets/quickstart.webm" type="video/webm">
|
||||
<source src="/assets/quickstart.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
**[Web UI](web-ui.md):**
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-workflow.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
## Why Compose Farm?
|
||||
@@ -31,6 +38,31 @@ Define which services run where in one YAML file, then use `cf apply` to make re
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Single host
|
||||
|
||||
No SSH, shared storage, or Traefik file-provider required.
|
||||
|
||||
```yaml
|
||||
# compose-farm.yaml
|
||||
compose_dir: /opt/stacks
|
||||
|
||||
hosts:
|
||||
local: localhost
|
||||
|
||||
stacks:
|
||||
plex: local
|
||||
jellyfin: local
|
||||
traefik: local
|
||||
```
|
||||
|
||||
```bash
|
||||
cf apply # Start/stop stacks to match config
|
||||
```
|
||||
|
||||
### Multi-host
|
||||
|
||||
Requires SSH plus a shared `compose_dir` path on all hosts (NFS or sync).
|
||||
|
||||
```yaml
|
||||
# compose-farm.yaml
|
||||
compose_dir: /opt/compose
|
||||
@@ -41,16 +73,19 @@ hosts:
|
||||
server-2:
|
||||
address: 192.168.1.11
|
||||
|
||||
services:
|
||||
stacks:
|
||||
plex: server-1
|
||||
jellyfin: server-2
|
||||
sonarr: server-1
|
||||
grafana: server-1
|
||||
```
|
||||
|
||||
```bash
|
||||
cf apply # Services start, migrate, or stop as needed
|
||||
cf apply # Stacks start, migrate, or stop as needed
|
||||
```
|
||||
|
||||
Each entry in `stacks:` maps to a folder under `compose_dir` that contains a compose file.
|
||||
|
||||
For cross-host HTTP routing, add Traefik labels and configure `traefik_file` to generate file-provider config.
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
@@ -61,7 +96,7 @@ pip install compose-farm
|
||||
|
||||
### Configuration
|
||||
|
||||
Create `~/.config/compose-farm/compose-farm.yaml`:
|
||||
Create `compose-farm.yaml` in the directory where you'll run commands (e.g., `/opt/stacks`), or in `~/.config/compose-farm/`:
|
||||
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
@@ -73,20 +108,22 @@ hosts:
|
||||
hp:
|
||||
address: 192.168.1.11
|
||||
|
||||
services:
|
||||
stacks:
|
||||
plex: nuc
|
||||
sonarr: nuc
|
||||
radarr: hp
|
||||
grafana: nuc
|
||||
nextcloud: hp
|
||||
```
|
||||
|
||||
See [Configuration](configuration.md) for all options and the full search order.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# Make reality match config
|
||||
cf apply
|
||||
|
||||
# Start specific services
|
||||
cf up plex sonarr
|
||||
# Start specific stacks
|
||||
cf up plex grafana
|
||||
|
||||
# Check status
|
||||
cf ps
|
||||
@@ -98,13 +135,13 @@ cf logs -f plex
|
||||
## Key Features
|
||||
|
||||
- **Declarative configuration**: One YAML defines where everything runs
|
||||
- **Auto-migration**: Change a host assignment, run `cf up`, service moves automatically
|
||||
- **Auto-migration**: Change a host assignment, run `cf up`, stack moves automatically
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="assets/migration.webm" type="video/webm">
|
||||
<source src="/assets/migration.webm" type="video/webm">
|
||||
</video>
|
||||
- **Parallel execution**: Multiple services start/stop concurrently
|
||||
- **State tracking**: Knows which services are running where
|
||||
- **Parallel execution**: Multiple stacks start/stop concurrently
|
||||
- **State tracking**: Knows which stacks are running where
|
||||
- **Traefik integration**: Generate file-provider config for cross-host routing
|
||||
- **Zero changes**: Your compose files work as-is
|
||||
|
||||
@@ -120,6 +157,7 @@ cf logs -f plex
|
||||
- [Getting Started](getting-started.md) - Installation and first steps
|
||||
- [Configuration](configuration.md) - All configuration options
|
||||
- [Commands](commands.md) - CLI reference
|
||||
- [Web UI](web-ui.md) - Browser-based management interface
|
||||
- [Architecture](architecture.md) - How it works under the hood
|
||||
- [Traefik Integration](traefik.md) - Multi-host routing setup
|
||||
- [Best Practices](best-practices.md) - Tips and limitations
|
||||
|
||||
2
bootstrap.sh → docs/install
Executable file → Normal file
2
bootstrap.sh → docs/install
Executable file → Normal file
@@ -1,6 +1,6 @@
|
||||
#!/bin/sh
|
||||
# Compose Farm bootstrap script
|
||||
# Usage: curl -fsSL https://raw.githubusercontent.com/basnijholt/compose-farm/main/bootstrap.sh | sh
|
||||
# Usage: curl -fsSL https://compose-farm.nijho.lt/install | sh
|
||||
#
|
||||
# This script installs uv (if needed) and then installs compose-farm as a uv tool.
|
||||
|
||||
21
docs/javascripts/video-fix.js
Normal file
21
docs/javascripts/video-fix.js
Normal file
@@ -0,0 +1,21 @@
|
||||
// Fix Safari video autoplay issues
|
||||
(function() {
|
||||
function initVideos() {
|
||||
document.querySelectorAll('video[autoplay]').forEach(function(video) {
|
||||
video.load();
|
||||
video.play().catch(function() {});
|
||||
});
|
||||
}
|
||||
|
||||
// For initial page load (needed for Chrome)
|
||||
if (document.readyState === 'loading') {
|
||||
document.addEventListener('DOMContentLoaded', initVideos);
|
||||
} else {
|
||||
initVideos();
|
||||
}
|
||||
|
||||
// For MkDocs instant navigation (needed for Safari)
|
||||
if (typeof document$ !== 'undefined') {
|
||||
document$.subscribe(initVideos);
|
||||
}
|
||||
})();
|
||||
@@ -5,7 +5,7 @@
|
||||
- I made a CLI to run Docker Compose across multiple hosts without Kubernetes or Swarm
|
||||
---
|
||||
|
||||
I've been running 100+ Docker Compose stacks on a single machine, and it kept running out of memory. I needed to spread services across multiple hosts, but:
|
||||
I've been running 100+ Docker Compose stacks on a single machine, and it kept running out of memory. I needed to spread stacks across multiple hosts, but:
|
||||
|
||||
- **Kubernetes** felt like overkill. I don't need pods, ingress controllers, or 10x more YAML.
|
||||
- **Docker Swarm** is basically in maintenance mode.
|
||||
@@ -15,7 +15,7 @@ So I built **Compose Farm**, a simple CLI that runs `docker compose` commands ov
|
||||
|
||||
## How it works
|
||||
|
||||
One YAML file maps services to hosts:
|
||||
One YAML file maps stacks to hosts:
|
||||
|
||||
```yaml
|
||||
compose_dir: /opt/stacks
|
||||
@@ -24,11 +24,11 @@ hosts:
|
||||
nuc: 192.168.1.10
|
||||
hp: 192.168.1.11
|
||||
|
||||
services:
|
||||
stacks:
|
||||
plex: nuc
|
||||
jellyfin: hp
|
||||
sonarr: nuc
|
||||
radarr: nuc
|
||||
grafana: nuc
|
||||
nextcloud: nuc
|
||||
```
|
||||
|
||||
Then just:
|
||||
@@ -43,7 +43,7 @@ cf ps # shows status across all hosts
|
||||
|
||||
## Auto-migration
|
||||
|
||||
Change a service's host in the config and run `cf up`. It stops the service on the old host and starts it on the new one. No manual SSH needed.
|
||||
Change a stack's host in the config and run `cf up`. It stops the stack on the old host and starts it on the new one. No manual SSH needed.
|
||||
|
||||
```yaml
|
||||
# Before
|
||||
@@ -65,7 +65,7 @@ cf up plex # migrates automatically
|
||||
|
||||
## What it doesn't do
|
||||
|
||||
- No high availability (if a host goes down, services don't auto-migrate)
|
||||
- No high availability (if a host goes down, stacks don't auto-migrate)
|
||||
- No overlay networking (containers on different hosts can't talk via Docker DNS)
|
||||
- No health checks or automatic restarts
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ Compose Farm can generate Traefik file-provider configuration for routing traffi
|
||||
|
||||
## The Problem
|
||||
|
||||
When you run Traefik on one host but services on others, Traefik's docker provider can't see remote containers. The file provider bridges this gap.
|
||||
When you run Traefik on one host but stacks on others, Traefik's docker provider can't see remote containers. The file provider bridges this gap.
|
||||
|
||||
```
|
||||
Internet
|
||||
@@ -20,7 +20,7 @@ When you run Traefik on one host but services on others, Traefik's docker provid
|
||||
│ ┌─────────┐ │
|
||||
│ │ Traefik │◄─── Docker provider sees local containers │
|
||||
│ │ │ │
|
||||
│ │ │◄─── File provider sees remote services │
|
||||
│ │ │◄─── File provider sees remote stacks │
|
||||
│ └────┬────┘ (from compose-farm.yml) │
|
||||
│ │ │
|
||||
└───────┼─────────────────────────────────────────────────────┘
|
||||
@@ -40,7 +40,7 @@ When you run Traefik on one host but services on others, Traefik's docker provid
|
||||
1. Your compose files have standard Traefik labels
|
||||
2. Compose Farm reads labels and generates file-provider config
|
||||
3. Traefik watches the generated file
|
||||
4. Traffic routes to remote services via host IP + published port
|
||||
4. Traffic routes to remote stacks via host IP + published port
|
||||
|
||||
## Setup
|
||||
|
||||
@@ -122,7 +122,7 @@ Configure automatic regeneration in `compose-farm.yaml`:
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
|
||||
hosts:
|
||||
nuc:
|
||||
@@ -130,10 +130,10 @@ hosts:
|
||||
hp:
|
||||
address: 192.168.1.11
|
||||
|
||||
services:
|
||||
stacks:
|
||||
traefik: nuc # Traefik runs here
|
||||
plex: hp # Routed via file-provider
|
||||
sonarr: hp
|
||||
grafana: hp
|
||||
```
|
||||
|
||||
With `traefik_file` set, these commands auto-regenerate the config:
|
||||
@@ -143,13 +143,13 @@ With `traefik_file` set, these commands auto-regenerate the config:
|
||||
- `cf update`
|
||||
- `cf apply`
|
||||
|
||||
### traefik_service Option
|
||||
### traefik_stack Option
|
||||
|
||||
When set, services on the **same host as Traefik** are skipped in file-provider output. Traefik's docker provider handles them directly.
|
||||
When set, stacks on the **same host as Traefik** are skipped in file-provider output. Traefik's docker provider handles them directly.
|
||||
|
||||
```yaml
|
||||
traefik_service: traefik # traefik runs on nuc
|
||||
services:
|
||||
traefik_stack: traefik # traefik runs on nuc
|
||||
stacks:
|
||||
traefik: nuc # NOT in file-provider (docker provider)
|
||||
portainer: nuc # NOT in file-provider (docker provider)
|
||||
plex: hp # IN file-provider (cross-host)
|
||||
@@ -215,7 +215,7 @@ labels:
|
||||
```
|
||||
|
||||
Compose Farm resolves variables from:
|
||||
1. Service's `.env` file
|
||||
1. Stack's `.env` file
|
||||
2. Current environment
|
||||
|
||||
```bash
|
||||
@@ -242,7 +242,7 @@ If no suitable port is found, a warning is shown.
|
||||
```yaml
|
||||
compose_dir: /opt/compose
|
||||
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
|
||||
hosts:
|
||||
nuc:
|
||||
@@ -252,12 +252,12 @@ hosts:
|
||||
nas:
|
||||
address: 192.168.1.100
|
||||
|
||||
services:
|
||||
stacks:
|
||||
traefik: nuc
|
||||
plex: hp
|
||||
jellyfin: nas
|
||||
sonarr: nuc
|
||||
radarr: nuc
|
||||
grafana: nuc
|
||||
nextcloud: nuc
|
||||
```
|
||||
|
||||
### /opt/compose/plex/docker-compose.yml
|
||||
@@ -309,7 +309,7 @@ http:
|
||||
- url: http://192.168.1.100:8096
|
||||
```
|
||||
|
||||
Note: `sonarr` and `radarr` are NOT in the file because they're on the same host as Traefik (`nuc`).
|
||||
Note: `grafana` and `nextcloud` are NOT in the file because they're on the same host as Traefik (`nuc`).
|
||||
|
||||
## Combining with Existing Config
|
||||
|
||||
@@ -331,7 +331,7 @@ Traefik merges all YAML files in the directory.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Service Not Accessible
|
||||
### Stack Not Accessible
|
||||
|
||||
1. **Check port is published:**
|
||||
```yaml
|
||||
@@ -341,12 +341,12 @@ Traefik merges all YAML files in the directory.
|
||||
|
||||
2. **Check label syntax:**
|
||||
```bash
|
||||
cf check myservice
|
||||
cf check mystack
|
||||
```
|
||||
|
||||
3. **Verify generated config:**
|
||||
```bash
|
||||
cf traefik-file myservice
|
||||
cf traefik-file mystack
|
||||
```
|
||||
|
||||
4. **Check Traefik logs:**
|
||||
|
||||
130
docs/web-ui.md
Normal file
130
docs/web-ui.md
Normal file
@@ -0,0 +1,130 @@
|
||||
---
|
||||
icon: lucide/layout-dashboard
|
||||
---
|
||||
|
||||
# Web UI
|
||||
|
||||
Compose Farm includes a web interface for managing stacks from your browser. Start it with:
|
||||
|
||||
```bash
|
||||
cf web
|
||||
```
|
||||
|
||||
Then open [http://localhost:8000](http://localhost:8000).
|
||||
|
||||
## Features
|
||||
|
||||
### Full Workflow
|
||||
|
||||
Console terminal, config editor, stack navigation, actions (up, logs, update), dashboard overview, and theme switching - all in one flow.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-workflow.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
### Stack Actions
|
||||
|
||||
Navigate to any stack and use the command palette to trigger actions like restart, pull, update, or view logs. Output streams in real-time via WebSocket.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-stack.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
### Theme Switching
|
||||
|
||||
35 themes available via the command palette. Type `theme:` to filter, then use arrow keys to preview themes live before selecting.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-themes.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
### Command Palette
|
||||
|
||||
Press `Ctrl+K` (or `Cmd+K` on macOS) to open the command palette. Use fuzzy search to quickly navigate, trigger actions, or change themes.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-navigation.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
## Pages
|
||||
|
||||
### Dashboard (`/`)
|
||||
|
||||
- Stack overview with status indicators
|
||||
- Host statistics
|
||||
- Pending operations (migrations, orphaned stacks)
|
||||
- Quick actions via command palette
|
||||
|
||||
### Stack Detail (`/stack/{name}`)
|
||||
|
||||
- Compose file editor (Monaco)
|
||||
- Environment file editor
|
||||
- Action buttons: Up, Down, Restart, Update, Pull, Logs
|
||||
- Container shell access (exec into running containers)
|
||||
- Terminal output for running commands
|
||||
|
||||
### Console (`/console`)
|
||||
|
||||
- Full shell access to any host
|
||||
- File editor for remote files
|
||||
- Monaco editor with syntax highlighting
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-console.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
### Container Shell
|
||||
|
||||
Click the Shell button on any running container to exec into it directly from the browser.
|
||||
|
||||
<video autoplay loop muted playsinline>
|
||||
<source src="/assets/web-shell.webm" type="video/webm">
|
||||
</video>
|
||||
|
||||
## Keyboard Shortcuts
|
||||
|
||||
| Shortcut | Action |
|
||||
|----------|--------|
|
||||
| `Ctrl+K` / `Cmd+K` | Open command palette |
|
||||
| `Ctrl+S` / `Cmd+S` | Save editors |
|
||||
| `Escape` | Close command palette |
|
||||
| `Arrow keys` | Navigate command list |
|
||||
| `Enter` | Execute selected command |
|
||||
|
||||
## Starting the Server
|
||||
|
||||
```bash
|
||||
# Default: http://0.0.0.0:8000
|
||||
cf web
|
||||
|
||||
# Custom port
|
||||
cf web --port 3000
|
||||
|
||||
# Development mode with auto-reload
|
||||
cf web --reload
|
||||
|
||||
# Bind to specific interface
|
||||
cf web --host 127.0.0.1
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
The web UI requires additional dependencies:
|
||||
|
||||
```bash
|
||||
# If installed via pip
|
||||
pip install compose-farm[web]
|
||||
|
||||
# If installed via uv
|
||||
uv tool install 'compose-farm[web]'
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
The web UI uses:
|
||||
|
||||
- **FastAPI** - Backend API and WebSocket handling
|
||||
- **HTMX** - Dynamic page updates without full reloads
|
||||
- **DaisyUI + Tailwind** - Theming and styling
|
||||
- **Monaco Editor** - Code editing for compose/env files
|
||||
- **xterm.js** - Terminal emulation for logs and shell access
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
Real-world examples demonstrating compose-farm patterns for multi-host Docker deployments.
|
||||
|
||||
## Services
|
||||
## Stacks
|
||||
|
||||
| Service | Type | Demonstrates |
|
||||
| Stack | Type | Demonstrates |
|
||||
|---------|------|--------------|
|
||||
| [traefik](traefik/) | Infrastructure | Reverse proxy, Let's Encrypt, file-provider |
|
||||
| [mealie](mealie/) | Single container | Traefik labels, resource limits, environment vars |
|
||||
@@ -16,7 +16,7 @@ Real-world examples demonstrating compose-farm patterns for multi-host Docker de
|
||||
|
||||
### External Network
|
||||
|
||||
All services connect to a shared external network for inter-service communication:
|
||||
All stacks connect to a shared external network for inter-service communication:
|
||||
|
||||
```yaml
|
||||
networks:
|
||||
@@ -32,12 +32,12 @@ compose-farm init-network --network mynetwork --subnet 172.20.0.0/16
|
||||
|
||||
### Traefik Labels (Dual Routes)
|
||||
|
||||
Services expose two routes for different access patterns:
|
||||
Stacks expose two routes for different access patterns:
|
||||
|
||||
1. **HTTPS route** (`websecure` entrypoint): For your custom domain with Let's Encrypt TLS
|
||||
2. **HTTP route** (`web` entrypoint): For `.local` domains on your LAN (no TLS needed)
|
||||
|
||||
This pattern allows accessing services via:
|
||||
This pattern allows accessing stacks via:
|
||||
- `https://mealie.example.com` - from anywhere, with TLS
|
||||
- `http://mealie.local` - from your local network, no TLS overhead
|
||||
|
||||
@@ -57,7 +57,7 @@ labels:
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Each service has a `.env` file for secrets and domain configuration.
|
||||
Each stack has a `.env` file for secrets and domain configuration.
|
||||
Edit these files to set your domain and credentials:
|
||||
|
||||
```bash
|
||||
@@ -76,15 +76,15 @@ volumes:
|
||||
- /mnt/data/myapp:/app/data
|
||||
```
|
||||
|
||||
This allows services to migrate between hosts without data loss.
|
||||
This allows stacks to migrate between hosts without data loss.
|
||||
|
||||
### Multi-Host Services
|
||||
### Multi-Host Stacks
|
||||
|
||||
Services that need to run on every host (e.g., monitoring agents):
|
||||
Stacks that need to run on every host (e.g., monitoring agents):
|
||||
|
||||
```yaml
|
||||
# In compose-farm.yaml
|
||||
services:
|
||||
stacks:
|
||||
autokuma: all # Runs on every configured host
|
||||
```
|
||||
|
||||
@@ -107,7 +107,7 @@ services:
|
||||
|
||||
### AutoKuma Labels (Optional)
|
||||
|
||||
The autokuma example demonstrates compose-farm's **multi-host feature** - running the same service on all hosts using the `all` keyword. AutoKuma itself is not part of compose-farm; it's just a good example because it needs to run on every host to monitor local Docker containers.
|
||||
The autokuma example demonstrates compose-farm's **multi-host feature** - running the same stack on all hosts using the `all` keyword. AutoKuma itself is not part of compose-farm; it's just a good example because it needs to run on every host to monitor local Docker containers.
|
||||
|
||||
[AutoKuma](https://github.com/BigBoot/AutoKuma) automatically creates Uptime Kuma monitors from Docker labels:
|
||||
|
||||
@@ -128,7 +128,7 @@ compose-farm init-network
|
||||
# 2. Start Traefik first (the reverse proxy)
|
||||
compose-farm up traefik
|
||||
|
||||
# 3. Start other services
|
||||
# 3. Start other stacks
|
||||
compose-farm up mealie uptime-kuma
|
||||
|
||||
# 4. Check status
|
||||
@@ -148,24 +148,24 @@ compose-farm down --all
|
||||
|
||||
The `compose-farm.yaml` shows a multi-host setup:
|
||||
|
||||
- **primary** (192.168.1.10): Runs Traefik and heavy services
|
||||
- **secondary** (192.168.1.11): Runs lighter services
|
||||
- **primary** (192.168.1.10): Runs Traefik and heavy stacks
|
||||
- **secondary** (192.168.1.11): Runs lighter stacks
|
||||
- **autokuma**: Runs on ALL hosts to monitor local containers
|
||||
|
||||
When Traefik runs on `primary` and a service runs on `secondary`, compose-farm
|
||||
When Traefik runs on `primary` and a stack runs on `secondary`, compose-farm
|
||||
automatically generates file-provider config so Traefik can route to it.
|
||||
|
||||
## Traefik File-Provider
|
||||
|
||||
When services run on different hosts than Traefik, use `traefik-file` to generate routing config:
|
||||
When stacks run on different hosts than Traefik, use `traefik-file` to generate routing config:
|
||||
|
||||
```bash
|
||||
# Generate config for all services
|
||||
# Generate config for all stacks
|
||||
compose-farm traefik-file --all -o traefik/dynamic.d/compose-farm.yml
|
||||
|
||||
# Or configure auto-generation in compose-farm.yaml:
|
||||
traefik_file: /opt/stacks/traefik/dynamic.d/compose-farm.yml
|
||||
traefik_service: traefik
|
||||
traefik_stack: traefik
|
||||
```
|
||||
|
||||
With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, `restart`, and `update` commands.
|
||||
|
||||
@@ -7,34 +7,34 @@ compose_dir: /opt/stacks/compose-farm/examples
|
||||
|
||||
# Auto-regenerate Traefik file-provider config after up/down/restart/update
|
||||
traefik_file: /opt/stacks/compose-farm/examples/traefik/dynamic.d/compose-farm.yml
|
||||
traefik_service: traefik # Skip Traefik's host in file-provider (docker provider handles it)
|
||||
traefik_stack: traefik # Skip Traefik's host in file-provider (docker provider handles it)
|
||||
|
||||
hosts:
|
||||
# Primary server - runs Traefik and most services
|
||||
# Primary server - runs Traefik and most stacks
|
||||
# Full form with all options
|
||||
primary:
|
||||
address: 192.168.1.10
|
||||
user: deploy
|
||||
port: 22
|
||||
|
||||
# Secondary server - runs some services for load distribution
|
||||
# Secondary server - runs some stacks for load distribution
|
||||
# Short form (user defaults to current user, port defaults to 22)
|
||||
secondary: 192.168.1.11
|
||||
|
||||
# Local execution (no SSH) - for testing or when running on the host itself
|
||||
local: localhost
|
||||
|
||||
services:
|
||||
stacks:
|
||||
# Infrastructure (runs on primary where Traefik is)
|
||||
traefik: primary
|
||||
|
||||
# Multi-host services (runs on ALL hosts)
|
||||
# Multi-host stacks (runs on ALL hosts)
|
||||
# AutoKuma monitors Docker containers on each host
|
||||
autokuma: all
|
||||
|
||||
# Primary server services
|
||||
# Primary server stacks
|
||||
paperless-ngx: primary
|
||||
|
||||
# Secondary server services (distributed for performance)
|
||||
# Secondary server stacks (distributed for performance)
|
||||
mealie: secondary
|
||||
uptime-kuma: secondary
|
||||
|
||||
52
justfile
Normal file
52
justfile
Normal file
@@ -0,0 +1,52 @@
|
||||
# Compose Farm Development Commands
|
||||
# Run `just` to see available commands
|
||||
|
||||
# Default: list available commands
|
||||
default:
|
||||
@just --list
|
||||
|
||||
# Install development dependencies
|
||||
install:
|
||||
uv sync --all-extras --dev
|
||||
|
||||
# Run all tests (parallel)
|
||||
test:
|
||||
uv run pytest -n auto
|
||||
|
||||
# Run CLI tests only (parallel, with coverage)
|
||||
test-cli:
|
||||
uv run pytest -m "not browser" -n auto
|
||||
|
||||
# Run web UI tests only (parallel)
|
||||
test-web:
|
||||
uv run pytest -m browser -n auto
|
||||
|
||||
# Lint, format, and type check
|
||||
lint:
|
||||
uv run ruff check --fix .
|
||||
uv run ruff format .
|
||||
uv run mypy src
|
||||
uv run ty check src
|
||||
|
||||
# Start web UI in development mode with auto-reload
|
||||
web:
|
||||
uv run cf web --reload --port 9001
|
||||
|
||||
# Kill the web server
|
||||
kill-web:
|
||||
lsof -ti :9001 | xargs kill -9 2>/dev/null || true
|
||||
|
||||
# Build docs and serve locally
|
||||
doc:
|
||||
uvx zensical build
|
||||
python -m http.server -d site 9002
|
||||
|
||||
# Kill the docs server
|
||||
kill-doc:
|
||||
lsof -ti :9002 | xargs kill -9 2>/dev/null || true
|
||||
|
||||
# Clean up build artifacts and caches
|
||||
clean:
|
||||
rm -rf .pytest_cache .mypy_cache .ruff_cache .coverage htmlcov dist build
|
||||
find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
|
||||
find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
|
||||
@@ -133,6 +133,10 @@ disallow_untyped_decorators = false
|
||||
module = "compose_farm.web.*"
|
||||
disallow_untyped_decorators = false
|
||||
|
||||
[[tool.mypy.overrides]]
|
||||
module = "docs.demos.web.*"
|
||||
disallow_untyped_decorators = false
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
asyncio_mode = "auto"
|
||||
testpaths = ["tests"]
|
||||
@@ -160,9 +164,19 @@ exclude_lines = [
|
||||
'if __name__ == "__main__":',
|
||||
]
|
||||
|
||||
[tool.ty.environment]
|
||||
python-version = "3.11"
|
||||
|
||||
[tool.ty.src]
|
||||
exclude = [
|
||||
"hatch_build.py", # Build-time only, hatchling not in dev deps
|
||||
"docs/demos/**", # Demo scripts with local conftest imports
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
dev = [
|
||||
"mypy>=1.19.0",
|
||||
"ty>=0.0.1a13",
|
||||
"pre-commit>=4.5.0",
|
||||
"pytest>=9.0.2",
|
||||
"pytest-asyncio>=1.3.0",
|
||||
|
||||
@@ -23,6 +23,7 @@ app = typer.Typer(
|
||||
help="Compose Farm - run docker compose commands across multiple hosts",
|
||||
no_args_is_help=True,
|
||||
context_settings={"help_option_names": ["-h", "--help"]},
|
||||
rich_markup_mode="rich",
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ from rich.progress import (
|
||||
|
||||
from compose_farm.console import (
|
||||
MSG_HOST_NOT_FOUND,
|
||||
MSG_SERVICE_NOT_FOUND,
|
||||
MSG_STACK_NOT_FOUND,
|
||||
console,
|
||||
print_error,
|
||||
print_hint,
|
||||
@@ -39,13 +39,13 @@ _R = TypeVar("_R")
|
||||
|
||||
|
||||
# --- Shared CLI Options ---
|
||||
ServicesArg = Annotated[
|
||||
StacksArg = Annotated[
|
||||
list[str] | None,
|
||||
typer.Argument(help="Services to operate on"),
|
||||
typer.Argument(help="Stacks to operate on"),
|
||||
]
|
||||
AllOption = Annotated[
|
||||
bool,
|
||||
typer.Option("--all", "-a", help="Run on all services"),
|
||||
typer.Option("--all", "-a", help="Run on all stacks"),
|
||||
]
|
||||
ConfigOption = Annotated[
|
||||
Path | None,
|
||||
@@ -57,7 +57,11 @@ LogPathOption = Annotated[
|
||||
]
|
||||
HostOption = Annotated[
|
||||
str | None,
|
||||
typer.Option("--host", "-H", help="Filter to services on this host"),
|
||||
typer.Option("--host", "-H", help="Filter to stacks on this host"),
|
||||
]
|
||||
ServiceOption = Annotated[
|
||||
str | None,
|
||||
typer.Option("--service", "-s", help="Target a specific service within the stack"),
|
||||
]
|
||||
|
||||
# --- Constants (internal) ---
|
||||
@@ -140,57 +144,57 @@ def load_config_or_exit(config_path: Path | None) -> Config:
|
||||
raise typer.Exit(1) from e
|
||||
|
||||
|
||||
def get_services(
|
||||
services: list[str],
|
||||
all_services: bool,
|
||||
def get_stacks(
|
||||
stacks: list[str],
|
||||
all_stacks: bool,
|
||||
config_path: Path | None,
|
||||
*,
|
||||
host: str | None = None,
|
||||
default_all: bool = False,
|
||||
) -> tuple[list[str], Config]:
|
||||
"""Resolve service list and load config.
|
||||
"""Resolve stack list and load config.
|
||||
|
||||
Handles three mutually exclusive selection methods:
|
||||
- Explicit service names
|
||||
- Explicit stack names
|
||||
- --all flag
|
||||
- --host filter
|
||||
|
||||
Args:
|
||||
services: Explicit service names
|
||||
all_services: Whether --all was specified
|
||||
stacks: Explicit stack names
|
||||
all_stacks: Whether --all was specified
|
||||
config_path: Path to config file
|
||||
host: Filter to services on this host
|
||||
default_all: If True, default to all services when nothing specified (for ps)
|
||||
host: Filter to stacks on this host
|
||||
default_all: If True, default to all stacks when nothing specified (for ps)
|
||||
|
||||
Supports "." as shorthand for the current directory name.
|
||||
|
||||
"""
|
||||
validate_service_selection(services, all_services, host)
|
||||
validate_stack_selection(stacks, all_stacks, host)
|
||||
config = load_config_or_exit(config_path)
|
||||
|
||||
if host is not None:
|
||||
validate_hosts(config, host)
|
||||
svc_list = [s for s in config.services if host in config.get_hosts(s)]
|
||||
if not svc_list:
|
||||
print_warning(f"No services configured for host [magenta]{host}[/]")
|
||||
stack_list = [s for s in config.stacks if host in config.get_hosts(s)]
|
||||
if not stack_list:
|
||||
print_warning(f"No stacks configured for host [magenta]{host}[/]")
|
||||
raise typer.Exit(0)
|
||||
return svc_list, config
|
||||
return stack_list, config
|
||||
|
||||
if all_services:
|
||||
return list(config.services.keys()), config
|
||||
if all_stacks:
|
||||
return list(config.stacks.keys()), config
|
||||
|
||||
if not services:
|
||||
if not stacks:
|
||||
if default_all:
|
||||
return list(config.services.keys()), config
|
||||
print_error("Specify services or use [bold]--all[/] / [bold]--host[/]")
|
||||
return list(config.stacks.keys()), config
|
||||
print_error("Specify stacks or use [bold]--all[/] / [bold]--host[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Resolve "." to current directory name
|
||||
resolved = [Path.cwd().name if svc == "." else svc for svc in services]
|
||||
resolved = [Path.cwd().name if stack == "." else stack for stack in stacks]
|
||||
|
||||
# Validate all services exist in config
|
||||
validate_services(
|
||||
config, resolved, hint="Add the service to compose-farm.yaml or use [bold]--all[/]"
|
||||
# Validate all stacks exist in config
|
||||
validate_stacks(
|
||||
config, resolved, hint="Add the stack to compose-farm.yaml or use [bold]--all[/]"
|
||||
)
|
||||
|
||||
return resolved, config
|
||||
@@ -215,19 +219,19 @@ def report_results(results: list[CommandResult]) -> None:
|
||||
console.print() # Blank line before summary
|
||||
if failed:
|
||||
for r in failed:
|
||||
print_error(f"[cyan]{r.service}[/] failed with exit code {r.exit_code}")
|
||||
print_error(f"[cyan]{r.stack}[/] failed with exit code {r.exit_code}")
|
||||
console.print()
|
||||
console.print(
|
||||
f"[green]✓[/] {len(succeeded)}/{len(results)} services succeeded, "
|
||||
f"[green]✓[/] {len(succeeded)}/{len(results)} stacks succeeded, "
|
||||
f"[red]✗[/] {len(failed)} failed"
|
||||
)
|
||||
else:
|
||||
print_success(f"All {len(results)} services succeeded")
|
||||
print_success(f"All {len(results)} stacks succeeded")
|
||||
|
||||
elif failed:
|
||||
# Single service failed
|
||||
# Single stack failed
|
||||
r = failed[0]
|
||||
print_error(f"[cyan]{r.service}[/] failed with exit code {r.exit_code}")
|
||||
print_error(f"[cyan]{r.stack}[/] failed with exit code {r.exit_code}")
|
||||
|
||||
if failed:
|
||||
raise typer.Exit(1)
|
||||
@@ -239,12 +243,12 @@ def maybe_regenerate_traefik(
|
||||
) -> None:
|
||||
"""Regenerate traefik config if traefik_file is configured.
|
||||
|
||||
If results are provided, skips regeneration if all services failed.
|
||||
If results are provided, skips regeneration if all stacks failed.
|
||||
"""
|
||||
if cfg.traefik_file is None:
|
||||
return
|
||||
|
||||
# Skip if all services failed
|
||||
# Skip if all stacks failed
|
||||
if results and not any(r.success for r in results):
|
||||
return
|
||||
|
||||
@@ -255,7 +259,7 @@ def maybe_regenerate_traefik(
|
||||
)
|
||||
|
||||
try:
|
||||
dynamic, warnings = generate_traefik_config(cfg, list(cfg.services.keys()))
|
||||
dynamic, warnings = generate_traefik_config(cfg, list(cfg.stacks.keys()))
|
||||
new_content = render_traefik_config(dynamic)
|
||||
|
||||
# Check if content changed
|
||||
@@ -275,12 +279,12 @@ def maybe_regenerate_traefik(
|
||||
print_warning(f"Failed to update traefik config: {exc}")
|
||||
|
||||
|
||||
def validate_services(cfg: Config, services: list[str], *, hint: str | None = None) -> None:
|
||||
"""Validate that all services exist in config. Exits with error if any not found."""
|
||||
invalid = [s for s in services if s not in cfg.services]
|
||||
def validate_stacks(cfg: Config, stacks: list[str], *, hint: str | None = None) -> None:
|
||||
"""Validate that all stacks exist in config. Exits with error if any not found."""
|
||||
invalid = [s for s in stacks if s not in cfg.stacks]
|
||||
if invalid:
|
||||
for svc in invalid:
|
||||
print_error(MSG_SERVICE_NOT_FOUND.format(name=svc))
|
||||
print_error(MSG_STACK_NOT_FOUND.format(name=svc))
|
||||
if hint:
|
||||
print_hint(hint)
|
||||
raise typer.Exit(1)
|
||||
@@ -296,29 +300,29 @@ def validate_hosts(cfg: Config, hosts: str | list[str]) -> None:
|
||||
raise typer.Exit(1)
|
||||
|
||||
|
||||
def validate_host_for_service(cfg: Config, service: str, host: str) -> None:
|
||||
"""Validate that a host is valid for a service."""
|
||||
def validate_host_for_stack(cfg: Config, stack: str, host: str) -> None:
|
||||
"""Validate that a host is valid for a stack."""
|
||||
validate_hosts(cfg, host)
|
||||
allowed_hosts = cfg.get_hosts(service)
|
||||
allowed_hosts = cfg.get_hosts(stack)
|
||||
if host not in allowed_hosts:
|
||||
print_error(
|
||||
f"Service [cyan]{service}[/] is not configured for host [magenta]{host}[/] "
|
||||
f"Stack [cyan]{stack}[/] is not configured for host [magenta]{host}[/] "
|
||||
f"(configured: {', '.join(allowed_hosts)})"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
|
||||
|
||||
def validate_service_selection(
|
||||
services: list[str] | None,
|
||||
all_services: bool,
|
||||
def validate_stack_selection(
|
||||
stacks: list[str] | None,
|
||||
all_stacks: bool,
|
||||
host: str | None,
|
||||
) -> None:
|
||||
"""Validate that only one service selection method is used.
|
||||
"""Validate that only one stack selection method is used.
|
||||
|
||||
The three selection methods (explicit services, --all, --host) are mutually
|
||||
The three selection methods (explicit stacks, --all, --host) are mutually
|
||||
exclusive. This ensures consistent behavior across all commands.
|
||||
"""
|
||||
methods = sum([bool(services), all_services, host is not None])
|
||||
methods = sum([bool(stacks), all_stacks, host is not None])
|
||||
if methods > 1:
|
||||
print_error("Use only one of: service names, [bold]--all[/], or [bold]--host[/]")
|
||||
print_error("Use only one of: stack names, [bold]--all[/], or [bold]--host[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
@@ -227,7 +227,7 @@ def config_validate(
|
||||
|
||||
print_success(f"Valid config: {config_file}")
|
||||
console.print(f" Hosts: {len(cfg.hosts)}")
|
||||
console.print(f" Services: {len(cfg.services)}")
|
||||
console.print(f" Stacks: {len(cfg.stacks)}")
|
||||
|
||||
|
||||
@config_app.command("symlink")
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Annotated
|
||||
|
||||
import typer
|
||||
@@ -11,136 +12,198 @@ from compose_farm.cli.common import (
|
||||
AllOption,
|
||||
ConfigOption,
|
||||
HostOption,
|
||||
ServicesArg,
|
||||
ServiceOption,
|
||||
StacksArg,
|
||||
format_host,
|
||||
get_services,
|
||||
get_stacks,
|
||||
load_config_or_exit,
|
||||
maybe_regenerate_traefik,
|
||||
report_results,
|
||||
run_async,
|
||||
validate_host_for_stack,
|
||||
validate_stacks,
|
||||
)
|
||||
from compose_farm.console import MSG_DRY_RUN, console, print_error, print_success
|
||||
from compose_farm.executor import run_on_services, run_sequential_on_services
|
||||
from compose_farm.operations import stop_orphaned_services, up_services
|
||||
from compose_farm.executor import run_compose_on_host, run_on_stacks, run_sequential_on_stacks
|
||||
from compose_farm.operations import stop_orphaned_stacks, up_stacks
|
||||
from compose_farm.state import (
|
||||
get_orphaned_services,
|
||||
get_service_host,
|
||||
get_services_needing_migration,
|
||||
get_services_not_in_state,
|
||||
remove_service,
|
||||
get_orphaned_stacks,
|
||||
get_stack_host,
|
||||
get_stacks_needing_migration,
|
||||
get_stacks_not_in_state,
|
||||
remove_stack,
|
||||
)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def up(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
host: HostOption = None,
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Start services (docker compose up -d). Auto-migrates if host changed."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config, host=host)
|
||||
results = run_async(up_services(cfg, svc_list, raw=True))
|
||||
"""Start stacks (docker compose up -d). Auto-migrates if host changed."""
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host)
|
||||
if service:
|
||||
if len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
# For service-level up, use run_on_stacks directly (no migration logic)
|
||||
results = run_async(run_on_stacks(cfg, stack_list, f"up -d {service}", raw=True))
|
||||
else:
|
||||
results = run_async(up_stacks(cfg, stack_list, raw=True))
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def down(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
orphaned: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
"--orphaned", help="Stop orphaned services (in state but removed from config)"
|
||||
),
|
||||
typer.Option("--orphaned", help="Stop orphaned stacks (in state but removed from config)"),
|
||||
] = False,
|
||||
host: HostOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Stop services (docker compose down)."""
|
||||
"""Stop stacks (docker compose down)."""
|
||||
# Handle --orphaned flag (mutually exclusive with other selection methods)
|
||||
if orphaned:
|
||||
if services or all_services or host:
|
||||
if stacks or all_stacks or host:
|
||||
print_error(
|
||||
"Cannot combine [bold]--orphaned[/] with services, [bold]--all[/], or [bold]--host[/]"
|
||||
"Cannot combine [bold]--orphaned[/] with stacks, [bold]--all[/], or [bold]--host[/]"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
|
||||
cfg = load_config_or_exit(config)
|
||||
orphaned_services = get_orphaned_services(cfg)
|
||||
orphaned_stacks = get_orphaned_stacks(cfg)
|
||||
|
||||
if not orphaned_services:
|
||||
print_success("No orphaned services to stop")
|
||||
if not orphaned_stacks:
|
||||
print_success("No orphaned stacks to stop")
|
||||
return
|
||||
|
||||
console.print(
|
||||
f"[yellow]Stopping {len(orphaned_services)} orphaned service(s):[/] "
|
||||
f"{', '.join(orphaned_services.keys())}"
|
||||
f"[yellow]Stopping {len(orphaned_stacks)} orphaned stack(s):[/] "
|
||||
f"{', '.join(orphaned_stacks.keys())}"
|
||||
)
|
||||
results = run_async(stop_orphaned_services(cfg))
|
||||
results = run_async(stop_orphaned_stacks(cfg))
|
||||
report_results(results)
|
||||
return
|
||||
|
||||
svc_list, cfg = get_services(services or [], all_services, config, host=host)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(run_on_services(cfg, svc_list, "down", raw=raw))
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host)
|
||||
raw = len(stack_list) == 1
|
||||
results = run_async(run_on_stacks(cfg, stack_list, "down", raw=raw))
|
||||
|
||||
# Remove from state on success
|
||||
# For multi-host services, result.service is "svc@host", extract base name
|
||||
removed_services: set[str] = set()
|
||||
# For multi-host stacks, result.stack is "stack@host", extract base name
|
||||
removed_stacks: set[str] = set()
|
||||
for result in results:
|
||||
if result.success:
|
||||
base_service = result.service.split("@")[0]
|
||||
if base_service not in removed_services:
|
||||
remove_service(cfg, base_service)
|
||||
removed_services.add(base_service)
|
||||
base_stack = result.stack.split("@")[0]
|
||||
if base_stack not in removed_stacks:
|
||||
remove_stack(cfg, base_stack)
|
||||
removed_stacks.add(base_stack)
|
||||
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def stop(
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Stop services without removing containers (docker compose stop)."""
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
if service and len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
cmd = f"stop {service}" if service else "stop"
|
||||
raw = len(stack_list) == 1
|
||||
results = run_async(run_on_stacks(cfg, stack_list, cmd, raw=raw))
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def pull(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Pull latest images (docker compose pull)."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(run_on_services(cfg, svc_list, "pull", raw=raw))
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
if service and len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
cmd = f"pull {service}" if service else "pull"
|
||||
raw = len(stack_list) == 1
|
||||
results = run_async(run_on_stacks(cfg, stack_list, cmd, raw=raw))
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def restart(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Restart services (down + up)."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(run_sequential_on_services(cfg, svc_list, ["down", "up -d"], raw=raw))
|
||||
"""Restart stacks (down + up). With --service, restarts just that service."""
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
if service:
|
||||
if len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
# For service-level restart, use docker compose restart (more efficient)
|
||||
raw = True
|
||||
results = run_async(run_on_stacks(cfg, stack_list, f"restart {service}", raw=raw))
|
||||
else:
|
||||
raw = len(stack_list) == 1
|
||||
results = run_async(run_sequential_on_stacks(cfg, stack_list, ["down", "up -d"], raw=raw))
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Lifecycle")
|
||||
def update(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Update services (pull + build + down + up)."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
raw = len(svc_list) == 1
|
||||
results = run_async(
|
||||
run_sequential_on_services(
|
||||
cfg, svc_list, ["pull --ignore-buildable", "build", "down", "up -d"], raw=raw
|
||||
"""Update stacks (pull + build + down + up). With --service, updates just that service."""
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
if service:
|
||||
if len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
# For service-level update: pull + build + stop + up (stop instead of down)
|
||||
raw = True
|
||||
results = run_async(
|
||||
run_sequential_on_stacks(
|
||||
cfg,
|
||||
stack_list,
|
||||
[
|
||||
f"pull --ignore-buildable {service}",
|
||||
f"build {service}",
|
||||
f"stop {service}",
|
||||
f"up -d {service}",
|
||||
],
|
||||
raw=raw,
|
||||
)
|
||||
)
|
||||
else:
|
||||
raw = len(stack_list) == 1
|
||||
results = run_async(
|
||||
run_sequential_on_stacks(
|
||||
cfg, stack_list, ["pull --ignore-buildable", "build", "down", "up -d"], raw=raw
|
||||
)
|
||||
)
|
||||
)
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
|
||||
@@ -153,35 +216,35 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
|
||||
] = False,
|
||||
no_orphans: Annotated[
|
||||
bool,
|
||||
typer.Option("--no-orphans", help="Only migrate, don't stop orphaned services"),
|
||||
typer.Option("--no-orphans", help="Only migrate, don't stop orphaned stacks"),
|
||||
] = False,
|
||||
full: Annotated[
|
||||
bool,
|
||||
typer.Option("--full", "-f", help="Also run up on all services to apply config changes"),
|
||||
typer.Option("--full", "-f", help="Also run up on all stacks to apply config changes"),
|
||||
] = False,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Make reality match config (start, migrate, stop as needed).
|
||||
|
||||
This is the "reconcile" command that ensures running services match your
|
||||
This is the "reconcile" command that ensures running stacks match your
|
||||
config file. It will:
|
||||
|
||||
1. Stop orphaned services (in state but removed from config)
|
||||
2. Migrate services on wrong host (host in state ≠ host in config)
|
||||
3. Start missing services (in config but not in state)
|
||||
1. Stop orphaned stacks (in state but removed from config)
|
||||
2. Migrate stacks on wrong host (host in state ≠ host in config)
|
||||
3. Start missing stacks (in config but not in state)
|
||||
|
||||
Use --dry-run to preview changes before applying.
|
||||
Use --no-orphans to only migrate/start without stopping orphaned services.
|
||||
Use --full to also run 'up' on all services (picks up compose/env changes).
|
||||
Use --no-orphans to only migrate/start without stopping orphaned stacks.
|
||||
Use --full to also run 'up' on all stacks (picks up compose/env changes).
|
||||
"""
|
||||
cfg = load_config_or_exit(config)
|
||||
orphaned = get_orphaned_services(cfg)
|
||||
migrations = get_services_needing_migration(cfg)
|
||||
missing = get_services_not_in_state(cfg)
|
||||
orphaned = get_orphaned_stacks(cfg)
|
||||
migrations = get_stacks_needing_migration(cfg)
|
||||
missing = get_stacks_not_in_state(cfg)
|
||||
|
||||
# For --full: refresh all services not already being started/migrated
|
||||
# For --full: refresh all stacks not already being started/migrated
|
||||
handled = set(migrations) | set(missing)
|
||||
to_refresh = [svc for svc in cfg.services if svc not in handled] if full else []
|
||||
to_refresh = [stack for stack in cfg.stacks if stack not in handled] if full else []
|
||||
|
||||
has_orphans = bool(orphaned) and not no_orphans
|
||||
has_migrations = bool(migrations)
|
||||
@@ -194,23 +257,23 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
|
||||
|
||||
# Report what will be done
|
||||
if has_orphans:
|
||||
console.print(f"[yellow]Orphaned services to stop ({len(orphaned)}):[/]")
|
||||
console.print(f"[yellow]Orphaned stacks to stop ({len(orphaned)}):[/]")
|
||||
for svc, hosts in orphaned.items():
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{format_host(hosts)}[/]")
|
||||
if has_migrations:
|
||||
console.print(f"[cyan]Services to migrate ({len(migrations)}):[/]")
|
||||
for svc in migrations:
|
||||
current = get_service_host(cfg, svc)
|
||||
target = cfg.get_hosts(svc)[0]
|
||||
console.print(f" [cyan]{svc}[/]: [magenta]{current}[/] → [magenta]{target}[/]")
|
||||
console.print(f"[cyan]Stacks to migrate ({len(migrations)}):[/]")
|
||||
for stack in migrations:
|
||||
current = get_stack_host(cfg, stack)
|
||||
target = cfg.get_hosts(stack)[0]
|
||||
console.print(f" [cyan]{stack}[/]: [magenta]{current}[/] → [magenta]{target}[/]")
|
||||
if has_missing:
|
||||
console.print(f"[green]Services to start ({len(missing)}):[/]")
|
||||
for svc in missing:
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{format_host(cfg.get_hosts(svc))}[/]")
|
||||
console.print(f"[green]Stacks to start ({len(missing)}):[/]")
|
||||
for stack in missing:
|
||||
console.print(f" [cyan]{stack}[/] on [magenta]{format_host(cfg.get_hosts(stack))}[/]")
|
||||
if has_refresh:
|
||||
console.print(f"[blue]Services to refresh ({len(to_refresh)}):[/]")
|
||||
for svc in to_refresh:
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{format_host(cfg.get_hosts(svc))}[/]")
|
||||
console.print(f"[blue]Stacks to refresh ({len(to_refresh)}):[/]")
|
||||
for stack in to_refresh:
|
||||
console.print(f" [cyan]{stack}[/] on [magenta]{format_host(cfg.get_hosts(stack))}[/]")
|
||||
|
||||
if dry_run:
|
||||
console.print(f"\n{MSG_DRY_RUN}")
|
||||
@@ -220,34 +283,91 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
|
||||
console.print()
|
||||
all_results = []
|
||||
|
||||
# 1. Stop orphaned services first
|
||||
# 1. Stop orphaned stacks first
|
||||
if has_orphans:
|
||||
console.print("[yellow]Stopping orphaned services...[/]")
|
||||
all_results.extend(run_async(stop_orphaned_services(cfg)))
|
||||
console.print("[yellow]Stopping orphaned stacks...[/]")
|
||||
all_results.extend(run_async(stop_orphaned_stacks(cfg)))
|
||||
|
||||
# 2. Migrate services on wrong host
|
||||
# 2. Migrate stacks on wrong host
|
||||
if has_migrations:
|
||||
console.print("[cyan]Migrating services...[/]")
|
||||
migrate_results = run_async(up_services(cfg, migrations, raw=True))
|
||||
console.print("[cyan]Migrating stacks...[/]")
|
||||
migrate_results = run_async(up_stacks(cfg, migrations, raw=True))
|
||||
all_results.extend(migrate_results)
|
||||
maybe_regenerate_traefik(cfg, migrate_results)
|
||||
|
||||
# 3. Start missing services (reuse up_services which handles state updates)
|
||||
# 3. Start missing stacks (reuse up_stacks which handles state updates)
|
||||
if has_missing:
|
||||
console.print("[green]Starting missing services...[/]")
|
||||
start_results = run_async(up_services(cfg, missing, raw=True))
|
||||
console.print("[green]Starting missing stacks...[/]")
|
||||
start_results = run_async(up_stacks(cfg, missing, raw=True))
|
||||
all_results.extend(start_results)
|
||||
maybe_regenerate_traefik(cfg, start_results)
|
||||
|
||||
# 4. Refresh remaining services (--full: run up to apply config changes)
|
||||
# 4. Refresh remaining stacks (--full: run up to apply config changes)
|
||||
if has_refresh:
|
||||
console.print("[blue]Refreshing services...[/]")
|
||||
refresh_results = run_async(up_services(cfg, to_refresh, raw=True))
|
||||
console.print("[blue]Refreshing stacks...[/]")
|
||||
refresh_results = run_async(up_stacks(cfg, to_refresh, raw=True))
|
||||
all_results.extend(refresh_results)
|
||||
maybe_regenerate_traefik(cfg, refresh_results)
|
||||
|
||||
report_results(all_results)
|
||||
|
||||
|
||||
@app.command(
|
||||
rich_help_panel="Lifecycle",
|
||||
context_settings={"allow_interspersed_args": False},
|
||||
)
|
||||
def compose(
|
||||
stack: Annotated[str, typer.Argument(help="Stack to operate on (use '.' for current dir)")],
|
||||
command: Annotated[str, typer.Argument(help="Docker compose command")],
|
||||
args: Annotated[list[str] | None, typer.Argument(help="Additional arguments")] = None,
|
||||
host: HostOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Run any docker compose command on a stack.
|
||||
|
||||
Passthrough to docker compose for commands not wrapped by cf.
|
||||
Options after COMMAND are passed to docker compose, not cf.
|
||||
|
||||
Examples:
|
||||
cf compose mystack --help - show docker compose help
|
||||
cf compose mystack top - view running processes
|
||||
cf compose mystack images - list images
|
||||
cf compose mystack exec web bash - interactive shell
|
||||
cf compose mystack config - view parsed config
|
||||
|
||||
"""
|
||||
cfg = load_config_or_exit(config)
|
||||
|
||||
# Resolve "." to current directory name
|
||||
resolved_stack = Path.cwd().name if stack == "." else stack
|
||||
validate_stacks(cfg, [resolved_stack])
|
||||
|
||||
# Handle multi-host stacks
|
||||
hosts = cfg.get_hosts(resolved_stack)
|
||||
if len(hosts) > 1:
|
||||
if host is None:
|
||||
print_error(
|
||||
f"Stack [cyan]{resolved_stack}[/] runs on multiple hosts: {', '.join(hosts)}\n"
|
||||
f"Use [bold]--host[/] to specify which host"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
validate_host_for_stack(cfg, resolved_stack, host)
|
||||
target_host = host
|
||||
else:
|
||||
target_host = hosts[0]
|
||||
|
||||
# Build the full compose command
|
||||
full_cmd = command
|
||||
if args:
|
||||
full_cmd += " " + " ".join(args)
|
||||
|
||||
# Run with raw=True for proper TTY handling (progress bars, interactive)
|
||||
result = run_async(run_compose_on_host(cfg, resolved_stack, target_host, full_cmd, raw=True))
|
||||
print() # Ensure newline after raw output
|
||||
|
||||
if not result.success:
|
||||
raise typer.Exit(result.exit_code)
|
||||
|
||||
|
||||
# Alias: cf a = cf apply
|
||||
app.command("a", hidden=True)(apply)
|
||||
|
||||
@@ -15,14 +15,14 @@ from compose_farm.cli.common import (
|
||||
AllOption,
|
||||
ConfigOption,
|
||||
LogPathOption,
|
||||
ServicesArg,
|
||||
StacksArg,
|
||||
format_host,
|
||||
get_services,
|
||||
get_stacks,
|
||||
load_config_or_exit,
|
||||
run_async,
|
||||
run_parallel_with_progress,
|
||||
validate_hosts,
|
||||
validate_services,
|
||||
validate_stacks,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -43,7 +43,7 @@ from compose_farm.executor import (
|
||||
from compose_farm.logs import (
|
||||
DEFAULT_LOG_PATH,
|
||||
SnapshotEntry,
|
||||
collect_service_entries,
|
||||
collect_stack_entries,
|
||||
isoformat,
|
||||
load_existing_entries,
|
||||
merge_entries,
|
||||
@@ -51,28 +51,29 @@ from compose_farm.logs import (
|
||||
)
|
||||
from compose_farm.operations import (
|
||||
check_host_compatibility,
|
||||
check_service_requirements,
|
||||
discover_service_host,
|
||||
check_stack_requirements,
|
||||
discover_stack_host,
|
||||
)
|
||||
from compose_farm.state import get_orphaned_services, load_state, save_state
|
||||
from compose_farm.state import get_orphaned_stacks, load_state, save_state
|
||||
from compose_farm.traefik import generate_traefik_config, render_traefik_config
|
||||
|
||||
# --- Sync helpers ---
|
||||
|
||||
|
||||
def _discover_services(cfg: Config) -> dict[str, str | list[str]]:
|
||||
"""Discover running services with a progress bar."""
|
||||
def _discover_stacks(cfg: Config, stacks: list[str] | None = None) -> dict[str, str | list[str]]:
|
||||
"""Discover running stacks with a progress bar."""
|
||||
stack_list = stacks if stacks is not None else list(cfg.stacks)
|
||||
results = run_parallel_with_progress(
|
||||
"Discovering",
|
||||
list(cfg.services),
|
||||
lambda s: discover_service_host(cfg, s),
|
||||
stack_list,
|
||||
lambda s: discover_stack_host(cfg, s),
|
||||
)
|
||||
return {svc: host for svc, host in results if host is not None}
|
||||
|
||||
|
||||
def _snapshot_services(
|
||||
def _snapshot_stacks(
|
||||
cfg: Config,
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
log_path: Path | None,
|
||||
) -> Path:
|
||||
"""Capture image digests with a progress bar."""
|
||||
@@ -80,16 +81,16 @@ def _snapshot_services(
|
||||
now_dt = datetime.now(UTC)
|
||||
now_iso = isoformat(now_dt)
|
||||
|
||||
async def collect_service(service: str) -> tuple[str, list[SnapshotEntry]]:
|
||||
async def collect_stack(stack: str) -> tuple[str, list[SnapshotEntry]]:
|
||||
try:
|
||||
return service, await collect_service_entries(cfg, service, now=now_dt)
|
||||
return stack, await collect_stack_entries(cfg, stack, now=now_dt)
|
||||
except RuntimeError:
|
||||
return service, []
|
||||
return stack, []
|
||||
|
||||
results = run_parallel_with_progress(
|
||||
"Capturing",
|
||||
services,
|
||||
collect_service,
|
||||
stacks,
|
||||
collect_stack,
|
||||
)
|
||||
snapshot_entries = [entry for _, entries in results for entry in entries]
|
||||
|
||||
@@ -104,6 +105,18 @@ def _snapshot_services(
|
||||
return effective_log_path
|
||||
|
||||
|
||||
def _merge_state(
|
||||
current_state: dict[str, str | list[str]],
|
||||
discovered: dict[str, str | list[str]],
|
||||
removed: list[str],
|
||||
) -> dict[str, str | list[str]]:
|
||||
"""Merge discovered stacks into existing state for partial refresh."""
|
||||
new_state = {**current_state, **discovered}
|
||||
for svc in removed:
|
||||
new_state.pop(svc, None)
|
||||
return new_state
|
||||
|
||||
|
||||
def _report_sync_changes(
|
||||
added: list[str],
|
||||
removed: list[str],
|
||||
@@ -113,25 +126,25 @@ def _report_sync_changes(
|
||||
) -> None:
|
||||
"""Report sync changes to the user."""
|
||||
if added:
|
||||
console.print(f"\nNew services found ({len(added)}):")
|
||||
for service in sorted(added):
|
||||
host_str = format_host(discovered[service])
|
||||
console.print(f" [green]+[/] [cyan]{service}[/] on [magenta]{host_str}[/]")
|
||||
console.print(f"\nNew stacks found ({len(added)}):")
|
||||
for stack in sorted(added):
|
||||
host_str = format_host(discovered[stack])
|
||||
console.print(f" [green]+[/] [cyan]{stack}[/] on [magenta]{host_str}[/]")
|
||||
|
||||
if changed:
|
||||
console.print(f"\nServices on different hosts ({len(changed)}):")
|
||||
for service, old_host, new_host in sorted(changed):
|
||||
console.print(f"\nStacks on different hosts ({len(changed)}):")
|
||||
for stack, old_host, new_host in sorted(changed):
|
||||
old_str = format_host(old_host)
|
||||
new_str = format_host(new_host)
|
||||
console.print(
|
||||
f" [yellow]~[/] [cyan]{service}[/]: [magenta]{old_str}[/] → [magenta]{new_str}[/]"
|
||||
f" [yellow]~[/] [cyan]{stack}[/]: [magenta]{old_str}[/] → [magenta]{new_str}[/]"
|
||||
)
|
||||
|
||||
if removed:
|
||||
console.print(f"\nServices no longer running ({len(removed)}):")
|
||||
for service in sorted(removed):
|
||||
host_str = format_host(current_state[service])
|
||||
console.print(f" [red]-[/] [cyan]{service}[/] (was on [magenta]{host_str}[/])")
|
||||
console.print(f"\nStacks no longer running ({len(removed)}):")
|
||||
for stack in sorted(removed):
|
||||
host_str = format_host(current_state[stack])
|
||||
console.print(f" [red]-[/] [cyan]{stack}[/] (was on [magenta]{host_str}[/])")
|
||||
|
||||
|
||||
# --- Check helpers ---
|
||||
@@ -166,44 +179,44 @@ def _check_ssh_connectivity(cfg: Config) -> list[str]:
|
||||
return [host for host, success in results if not success]
|
||||
|
||||
|
||||
def _check_service_requirements(
|
||||
def _check_stack_requirements(
|
||||
cfg: Config,
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]], list[tuple[str, str, str]]]:
|
||||
"""Check mounts, networks, and devices for all services with a progress bar.
|
||||
"""Check mounts, networks, and devices for all stacks with a progress bar.
|
||||
|
||||
Returns (mount_errors, network_errors, device_errors) where each is a list of
|
||||
(service, host, missing_item) tuples.
|
||||
(stack, host, missing_item) tuples.
|
||||
"""
|
||||
|
||||
async def check_service(
|
||||
service: str,
|
||||
async def check_stack(
|
||||
stack: str,
|
||||
) -> tuple[
|
||||
str,
|
||||
list[tuple[str, str, str]],
|
||||
list[tuple[str, str, str]],
|
||||
list[tuple[str, str, str]],
|
||||
]:
|
||||
"""Check requirements for a single service on all its hosts."""
|
||||
host_names = cfg.get_hosts(service)
|
||||
"""Check requirements for a single stack on all its hosts."""
|
||||
host_names = cfg.get_hosts(stack)
|
||||
mount_errors: list[tuple[str, str, str]] = []
|
||||
network_errors: list[tuple[str, str, str]] = []
|
||||
device_errors: list[tuple[str, str, str]] = []
|
||||
|
||||
for host_name in host_names:
|
||||
missing_paths, missing_nets, missing_devs = await check_service_requirements(
|
||||
cfg, service, host_name
|
||||
missing_paths, missing_nets, missing_devs = await check_stack_requirements(
|
||||
cfg, stack, host_name
|
||||
)
|
||||
mount_errors.extend((service, host_name, p) for p in missing_paths)
|
||||
network_errors.extend((service, host_name, n) for n in missing_nets)
|
||||
device_errors.extend((service, host_name, d) for d in missing_devs)
|
||||
mount_errors.extend((stack, host_name, p) for p in missing_paths)
|
||||
network_errors.extend((stack, host_name, n) for n in missing_nets)
|
||||
device_errors.extend((stack, host_name, d) for d in missing_devs)
|
||||
|
||||
return service, mount_errors, network_errors, device_errors
|
||||
return stack, mount_errors, network_errors, device_errors
|
||||
|
||||
results = run_parallel_with_progress(
|
||||
"Checking requirements",
|
||||
services,
|
||||
check_service,
|
||||
stacks,
|
||||
check_stack,
|
||||
)
|
||||
|
||||
all_mount_errors: list[tuple[str, str, str]] = []
|
||||
@@ -219,7 +232,7 @@ def _check_service_requirements(
|
||||
|
||||
def _report_config_status(cfg: Config) -> bool:
|
||||
"""Check and report config vs disk status. Returns True if errors found."""
|
||||
configured = set(cfg.services.keys())
|
||||
configured = set(cfg.stacks.keys())
|
||||
on_disk = cfg.discover_compose_dirs()
|
||||
unmanaged = sorted(on_disk - configured)
|
||||
missing_from_disk = sorted(configured - on_disk)
|
||||
@@ -240,12 +253,12 @@ def _report_config_status(cfg: Config) -> bool:
|
||||
return bool(missing_from_disk)
|
||||
|
||||
|
||||
def _report_orphaned_services(cfg: Config) -> bool:
|
||||
"""Check for services in state but not in config. Returns True if orphans found."""
|
||||
orphaned = get_orphaned_services(cfg)
|
||||
def _report_orphaned_stacks(cfg: Config) -> bool:
|
||||
"""Check for stacks in state but not in config. Returns True if orphans found."""
|
||||
orphaned = get_orphaned_stacks(cfg)
|
||||
|
||||
if orphaned:
|
||||
console.print("\n[yellow]Orphaned services[/] (in state but not in config):")
|
||||
console.print("\n[yellow]Orphaned stacks[/] (in state but not in config):")
|
||||
console.print(
|
||||
"[dim]Run [bold]cf apply[/bold] to stop them, or [bold]cf down --orphaned[/bold] for just orphans.[/]"
|
||||
)
|
||||
@@ -256,10 +269,10 @@ def _report_orphaned_services(cfg: Config) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def _report_traefik_status(cfg: Config, services: list[str]) -> None:
|
||||
def _report_traefik_status(cfg: Config, stacks: list[str]) -> None:
|
||||
"""Check and report traefik label status."""
|
||||
try:
|
||||
_, warnings = generate_traefik_config(cfg, services, check_all=True)
|
||||
_, warnings = generate_traefik_config(cfg, stacks, check_all=True)
|
||||
except (FileNotFoundError, ValueError):
|
||||
return
|
||||
|
||||
@@ -272,16 +285,16 @@ def _report_traefik_status(cfg: Config, services: list[str]) -> None:
|
||||
|
||||
|
||||
def _report_requirement_errors(errors: list[tuple[str, str, str]], category: str) -> None:
|
||||
"""Report requirement errors (mounts, networks, devices) grouped by service."""
|
||||
by_service: dict[str, list[tuple[str, str]]] = {}
|
||||
for svc, host, item in errors:
|
||||
by_service.setdefault(svc, []).append((host, item))
|
||||
"""Report requirement errors (mounts, networks, devices) grouped by stack."""
|
||||
by_stack: dict[str, list[tuple[str, str]]] = {}
|
||||
for stack, host, item in errors:
|
||||
by_stack.setdefault(stack, []).append((host, item))
|
||||
|
||||
console.print(f"[red]Missing {category}[/] ({len(errors)}):")
|
||||
for svc, items in sorted(by_service.items()):
|
||||
for stack, items in sorted(by_stack.items()):
|
||||
host = items[0][0]
|
||||
missing = [i for _, i in items]
|
||||
console.print(f" [cyan]{svc}[/] on [magenta]{host}[/]:")
|
||||
console.print(f" [cyan]{stack}[/] on [magenta]{host}[/]:")
|
||||
for item in missing:
|
||||
console.print(f" [red]✗[/] {item}")
|
||||
|
||||
@@ -301,7 +314,7 @@ def _report_host_compatibility(
|
||||
compat: dict[str, tuple[int, int, list[str]]],
|
||||
assigned_hosts: list[str],
|
||||
) -> None:
|
||||
"""Report host compatibility for a service."""
|
||||
"""Report host compatibility for a stack."""
|
||||
for host_name, (found, total, missing) in sorted(compat.items()):
|
||||
is_assigned = host_name in assigned_hosts
|
||||
marker = " [dim](assigned)[/]" if is_assigned else ""
|
||||
@@ -332,7 +345,7 @@ def _run_remote_checks(cfg: Config, svc_list: list[str], *, show_host_compat: bo
|
||||
console.print() # Spacing before mounts/networks check
|
||||
|
||||
# Check mounts, networks, and devices
|
||||
mount_errors, network_errors, device_errors = _check_service_requirements(cfg, svc_list)
|
||||
mount_errors, network_errors, device_errors = _check_stack_requirements(cfg, svc_list)
|
||||
|
||||
if mount_errors:
|
||||
_report_requirement_errors(mount_errors, "mounts")
|
||||
@@ -347,10 +360,10 @@ def _run_remote_checks(cfg: Config, svc_list: list[str], *, show_host_compat: bo
|
||||
print_success("All mounts, networks, and devices exist")
|
||||
|
||||
if show_host_compat:
|
||||
for service in svc_list:
|
||||
console.print(f"\n[bold]Host compatibility for[/] [cyan]{service}[/]:")
|
||||
compat = run_async(check_host_compatibility(cfg, service))
|
||||
assigned_hosts = cfg.get_hosts(service)
|
||||
for stack in svc_list:
|
||||
console.print(f"\n[bold]Host compatibility for[/] [cyan]{stack}[/]:")
|
||||
compat = run_async(check_host_compatibility(cfg, stack))
|
||||
assigned_hosts = cfg.get_hosts(stack)
|
||||
_report_host_compatibility(compat, assigned_hosts)
|
||||
|
||||
return has_errors
|
||||
@@ -364,8 +377,8 @@ _DEFAULT_NETWORK_GATEWAY = "172.20.0.1"
|
||||
|
||||
@app.command("traefik-file", rich_help_panel="Configuration")
|
||||
def traefik_file(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
output: Annotated[
|
||||
Path | None,
|
||||
typer.Option(
|
||||
@@ -377,9 +390,9 @@ def traefik_file(
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Generate a Traefik file-provider fragment from compose Traefik labels."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config)
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
try:
|
||||
dynamic, warnings = generate_traefik_config(cfg, svc_list)
|
||||
dynamic, warnings = generate_traefik_config(cfg, stack_list)
|
||||
except (FileNotFoundError, ValueError) as exc:
|
||||
print_error(str(exc))
|
||||
raise typer.Exit(1) from exc
|
||||
@@ -399,6 +412,8 @@ def traefik_file(
|
||||
|
||||
@app.command(rich_help_panel="Configuration")
|
||||
def refresh(
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
config: ConfigOption = None,
|
||||
log_path: LogPathOption = None,
|
||||
dry_run: Annotated[
|
||||
@@ -406,22 +421,35 @@ def refresh(
|
||||
typer.Option("--dry-run", "-n", help="Show what would change without writing"),
|
||||
] = False,
|
||||
) -> None:
|
||||
"""Update local state from running services.
|
||||
"""Update local state from running stacks.
|
||||
|
||||
Discovers which services are running on which hosts, updates the state
|
||||
Discovers which stacks are running on which hosts, updates the state
|
||||
file, and captures image digests. This is a read operation - it updates
|
||||
your local state to match reality, not the other way around.
|
||||
|
||||
Without arguments: refreshes all stacks (same as --all).
|
||||
With stack names: refreshes only those stacks.
|
||||
|
||||
Use 'cf apply' to make reality match your config (stop orphans, migrate).
|
||||
"""
|
||||
cfg = load_config_or_exit(config)
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config, default_all=True)
|
||||
|
||||
# Partial refresh merges with existing state; full refresh replaces it
|
||||
# Partial = specific stacks provided (not --all, not default)
|
||||
partial_refresh = bool(stacks) and not all_stacks
|
||||
|
||||
current_state = load_state(cfg)
|
||||
|
||||
discovered = _discover_services(cfg)
|
||||
discovered = _discover_stacks(cfg, stack_list)
|
||||
|
||||
# Calculate changes
|
||||
# Calculate changes (only for the stacks we're refreshing)
|
||||
added = [s for s in discovered if s not in current_state]
|
||||
removed = [s for s in current_state if s not in discovered]
|
||||
# Only mark as "removed" if we're doing a full refresh
|
||||
if partial_refresh:
|
||||
# In partial refresh, a stack not running is just "not found"
|
||||
removed = [s for s in stack_list if s in current_state and s not in discovered]
|
||||
else:
|
||||
removed = [s for s in current_state if s not in discovered]
|
||||
changed = [
|
||||
(s, current_state[s], discovered[s])
|
||||
for s in discovered
|
||||
@@ -441,13 +469,16 @@ def refresh(
|
||||
|
||||
# Update state file
|
||||
if state_changed:
|
||||
save_state(cfg, discovered)
|
||||
print_success(f"State updated: {len(discovered)} services tracked.")
|
||||
new_state = (
|
||||
_merge_state(current_state, discovered, removed) if partial_refresh else discovered
|
||||
)
|
||||
save_state(cfg, new_state)
|
||||
print_success(f"State updated: {len(new_state)} stacks tracked.")
|
||||
|
||||
# Capture image digests for running services
|
||||
# Capture image digests for running stacks
|
||||
if discovered:
|
||||
try:
|
||||
path = _snapshot_services(cfg, list(discovered.keys()), log_path)
|
||||
path = _snapshot_stacks(cfg, list(discovered.keys()), log_path)
|
||||
print_success(f"Digests written to {path}")
|
||||
except RuntimeError as exc:
|
||||
print_warning(str(exc))
|
||||
@@ -455,7 +486,7 @@ def refresh(
|
||||
|
||||
@app.command(rich_help_panel="Configuration")
|
||||
def check(
|
||||
services: ServicesArg = None,
|
||||
stacks: StacksArg = None,
|
||||
local: Annotated[
|
||||
bool,
|
||||
typer.Option("--local", help="Skip SSH-based checks (faster)"),
|
||||
@@ -464,31 +495,31 @@ def check(
|
||||
) -> None:
|
||||
"""Validate configuration, traefik labels, mounts, and networks.
|
||||
|
||||
Without arguments: validates all services against configured hosts.
|
||||
With service arguments: validates specific services and shows host compatibility.
|
||||
Without arguments: validates all stacks against configured hosts.
|
||||
With stack arguments: validates specific stacks and shows host compatibility.
|
||||
|
||||
Use --local to skip SSH-based checks for faster validation.
|
||||
"""
|
||||
cfg = load_config_or_exit(config)
|
||||
|
||||
# Determine which services to check and whether to show host compatibility
|
||||
if services:
|
||||
svc_list = list(services)
|
||||
validate_services(cfg, svc_list)
|
||||
# Determine which stacks to check and whether to show host compatibility
|
||||
if stacks:
|
||||
stack_list = list(stacks)
|
||||
validate_stacks(cfg, stack_list)
|
||||
show_host_compat = True
|
||||
else:
|
||||
svc_list = list(cfg.services.keys())
|
||||
stack_list = list(cfg.stacks.keys())
|
||||
show_host_compat = False
|
||||
|
||||
# Run checks
|
||||
has_errors = _report_config_status(cfg)
|
||||
_report_traefik_status(cfg, svc_list)
|
||||
_report_traefik_status(cfg, stack_list)
|
||||
|
||||
if not local and _run_remote_checks(cfg, svc_list, show_host_compat=show_host_compat):
|
||||
if not local and _run_remote_checks(cfg, stack_list, show_host_compat=show_host_compat):
|
||||
has_errors = True
|
||||
|
||||
# Check for orphaned services (in state but removed from config)
|
||||
if _report_orphaned_services(cfg):
|
||||
# Check for orphaned stacks (in state but removed from config)
|
||||
if _report_orphaned_stacks(cfg):
|
||||
has_errors = True
|
||||
|
||||
if has_errors:
|
||||
@@ -517,7 +548,7 @@ def init_network(
|
||||
) -> None:
|
||||
"""Create Docker network on hosts with consistent settings.
|
||||
|
||||
Creates an external Docker network that services can use for cross-host
|
||||
Creates an external Docker network that stacks can use for cross-host
|
||||
communication. Uses the same subnet/gateway on all hosts to ensure
|
||||
consistent networking.
|
||||
"""
|
||||
@@ -534,7 +565,7 @@ def init_network(
|
||||
|
||||
if check_result.success:
|
||||
console.print(f"[cyan]\\[{host_name}][/] Network '{network}' already exists")
|
||||
return CommandResult(service=host_name, exit_code=0, success=True)
|
||||
return CommandResult(stack=host_name, exit_code=0, success=True)
|
||||
|
||||
# Create the network
|
||||
create_cmd = (
|
||||
|
||||
@@ -14,16 +14,17 @@ from compose_farm.cli.common import (
|
||||
AllOption,
|
||||
ConfigOption,
|
||||
HostOption,
|
||||
ServicesArg,
|
||||
get_services,
|
||||
ServiceOption,
|
||||
StacksArg,
|
||||
get_stacks,
|
||||
load_config_or_exit,
|
||||
report_results,
|
||||
run_async,
|
||||
run_parallel_with_progress,
|
||||
)
|
||||
from compose_farm.console import console
|
||||
from compose_farm.executor import run_command, run_on_services
|
||||
from compose_farm.state import get_services_needing_migration, group_services_by_host, load_state
|
||||
from compose_farm.console import console, print_error
|
||||
from compose_farm.executor import run_command, run_on_stacks
|
||||
from compose_farm.state import get_stacks_needing_migration, group_stacks_by_host, load_state
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from compose_farm.config import Config
|
||||
@@ -51,7 +52,7 @@ def _get_container_counts(cfg: Config) -> dict[str, int]:
|
||||
|
||||
def _build_host_table(
|
||||
cfg: Config,
|
||||
services_by_host: dict[str, list[str]],
|
||||
stacks_by_host: dict[str, list[str]],
|
||||
running_by_host: dict[str, list[str]],
|
||||
container_counts: dict[str, int],
|
||||
*,
|
||||
@@ -68,7 +69,7 @@ def _build_host_table(
|
||||
|
||||
for host_name in sorted(cfg.hosts.keys()):
|
||||
host = cfg.hosts[host_name]
|
||||
configured = len(services_by_host[host_name])
|
||||
configured = len(stacks_by_host[host_name])
|
||||
running = len(running_by_host[host_name])
|
||||
|
||||
row = [
|
||||
@@ -96,8 +97,8 @@ def _build_summary_table(
|
||||
table.add_column("Value", style="bold")
|
||||
|
||||
table.add_row("Total hosts", str(len(cfg.hosts)))
|
||||
table.add_row("Services (configured)", str(len(cfg.services)))
|
||||
table.add_row("Services (tracked)", str(len(state)))
|
||||
table.add_row("Stacks (configured)", str(len(cfg.stacks)))
|
||||
table.add_row("Stacks (tracked)", str(len(state)))
|
||||
table.add_row("Compose files on disk", str(len(on_disk)))
|
||||
|
||||
if pending:
|
||||
@@ -115,9 +116,10 @@ def _build_summary_table(
|
||||
|
||||
@app.command(rich_help_panel="Monitoring")
|
||||
def logs(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
host: HostOption = None,
|
||||
service: ServiceOption = None,
|
||||
follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False,
|
||||
tail: Annotated[
|
||||
int | None,
|
||||
@@ -125,34 +127,45 @@ def logs(
|
||||
] = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Show service logs."""
|
||||
svc_list, cfg = get_services(services or [], all_services, config, host=host)
|
||||
"""Show stack logs. With --service, shows logs for just that service."""
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host)
|
||||
if service and len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Default to fewer lines when showing multiple services
|
||||
many_services = all_services or host is not None or len(svc_list) > 1
|
||||
effective_tail = tail if tail is not None else (20 if many_services else 100)
|
||||
# Default to fewer lines when showing multiple stacks
|
||||
many_stacks = all_stacks or host is not None or len(stack_list) > 1
|
||||
effective_tail = tail if tail is not None else (20 if many_stacks else 100)
|
||||
cmd = f"logs --tail {effective_tail}"
|
||||
if follow:
|
||||
cmd += " -f"
|
||||
results = run_async(run_on_services(cfg, svc_list, cmd))
|
||||
if service:
|
||||
cmd += f" {service}"
|
||||
results = run_async(run_on_stacks(cfg, stack_list, cmd))
|
||||
report_results(results)
|
||||
|
||||
|
||||
@app.command(rich_help_panel="Monitoring")
|
||||
def ps(
|
||||
services: ServicesArg = None,
|
||||
all_services: AllOption = False,
|
||||
stacks: StacksArg = None,
|
||||
all_stacks: AllOption = False,
|
||||
host: HostOption = None,
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Show status of services.
|
||||
"""Show status of stacks.
|
||||
|
||||
Without arguments: shows all services (same as --all).
|
||||
With service names: shows only those services.
|
||||
With --host: shows services on that host.
|
||||
Without arguments: shows all stacks (same as --all).
|
||||
With stack names: shows only those stacks.
|
||||
With --host: shows stacks on that host.
|
||||
With --service: filters to a specific service within the stack.
|
||||
"""
|
||||
svc_list, cfg = get_services(services or [], all_services, config, host=host, default_all=True)
|
||||
results = run_async(run_on_services(cfg, svc_list, "ps"))
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host, default_all=True)
|
||||
if service and len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
cmd = f"ps {service}" if service else "ps"
|
||||
results = run_async(run_on_stacks(cfg, stack_list, cmd))
|
||||
report_results(results)
|
||||
|
||||
|
||||
@@ -164,25 +177,25 @@ def stats(
|
||||
] = False,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Show overview statistics for hosts and services.
|
||||
"""Show overview statistics for hosts and stacks.
|
||||
|
||||
Without --live: Shows config/state info (hosts, services, pending migrations).
|
||||
Without --live: Shows config/state info (hosts, stacks, pending migrations).
|
||||
With --live: Also queries Docker on each host for container counts.
|
||||
"""
|
||||
cfg = load_config_or_exit(config)
|
||||
state = load_state(cfg)
|
||||
pending = get_services_needing_migration(cfg)
|
||||
pending = get_stacks_needing_migration(cfg)
|
||||
|
||||
all_hosts = list(cfg.hosts.keys())
|
||||
services_by_host = group_services_by_host(cfg.services, cfg.hosts, all_hosts)
|
||||
running_by_host = group_services_by_host(state, cfg.hosts, all_hosts)
|
||||
stacks_by_host = group_stacks_by_host(cfg.stacks, cfg.hosts, all_hosts)
|
||||
running_by_host = group_stacks_by_host(state, cfg.hosts, all_hosts)
|
||||
|
||||
container_counts: dict[str, int] = {}
|
||||
if live:
|
||||
container_counts = _get_container_counts(cfg)
|
||||
|
||||
host_table = _build_host_table(
|
||||
cfg, services_by_host, running_by_host, container_counts, show_containers=live
|
||||
cfg, stacks_by_host, running_by_host, container_counts, show_containers=live
|
||||
)
|
||||
console.print(host_table)
|
||||
|
||||
|
||||
@@ -179,13 +179,13 @@ def _parse_volume_item(
|
||||
return host_path
|
||||
|
||||
|
||||
def parse_host_volumes(config: Config, service: str) -> list[str]:
|
||||
"""Extract host bind mount paths from a service's compose file.
|
||||
def parse_host_volumes(config: Config, stack: str) -> list[str]:
|
||||
"""Extract host bind mount paths from a stack's compose file.
|
||||
|
||||
Returns a list of absolute host paths used as volume mounts.
|
||||
Skips named volumes and resolves relative paths.
|
||||
"""
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
if not compose_path.exists():
|
||||
return []
|
||||
|
||||
@@ -216,12 +216,12 @@ def parse_host_volumes(config: Config, service: str) -> list[str]:
|
||||
return list(dict.fromkeys(paths))
|
||||
|
||||
|
||||
def parse_devices(config: Config, service: str) -> list[str]:
|
||||
"""Extract host device paths from a service's compose file.
|
||||
def parse_devices(config: Config, stack: str) -> list[str]:
|
||||
"""Extract host device paths from a stack's compose file.
|
||||
|
||||
Returns a list of host device paths (e.g., /dev/dri, /dev/dri/renderD128).
|
||||
"""
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
if not compose_path.exists():
|
||||
return []
|
||||
|
||||
@@ -255,12 +255,12 @@ def parse_devices(config: Config, service: str) -> list[str]:
|
||||
return list(dict.fromkeys(devices))
|
||||
|
||||
|
||||
def parse_external_networks(config: Config, service: str) -> list[str]:
|
||||
"""Extract external network names from a service's compose file.
|
||||
def parse_external_networks(config: Config, stack: str) -> list[str]:
|
||||
"""Extract external network names from a stack's compose file.
|
||||
|
||||
Returns a list of network names marked as external: true.
|
||||
"""
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
if not compose_path.exists():
|
||||
return []
|
||||
|
||||
@@ -336,3 +336,18 @@ def get_ports_for_service(
|
||||
if isinstance(ref_def, dict):
|
||||
return _parse_ports(ref_def.get("ports"), env)
|
||||
return _parse_ports(definition.get("ports"), env)
|
||||
|
||||
|
||||
def get_container_name(
|
||||
service_name: str,
|
||||
service_def: dict[str, Any] | None,
|
||||
project_name: str,
|
||||
) -> str:
|
||||
"""Get the container name for a service.
|
||||
|
||||
Uses container_name from compose if set, otherwise defaults to {project}-{service}-1.
|
||||
This matches Docker Compose's default naming convention.
|
||||
"""
|
||||
if isinstance(service_def, dict) and service_def.get("container_name"):
|
||||
return str(service_def["container_name"])
|
||||
return f"{project_name}-{service_name}-1"
|
||||
|
||||
@@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
|
||||
import getpass
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
@@ -14,7 +15,7 @@ from .paths import config_search_paths, find_config_path
|
||||
COMPOSE_FILENAMES = ("compose.yaml", "compose.yml", "docker-compose.yml", "docker-compose.yaml")
|
||||
|
||||
|
||||
class Host(BaseModel):
|
||||
class Host(BaseModel, extra="forbid"):
|
||||
"""SSH host configuration."""
|
||||
|
||||
address: str
|
||||
@@ -22,14 +23,14 @@ class Host(BaseModel):
|
||||
port: int = 22
|
||||
|
||||
|
||||
class Config(BaseModel):
|
||||
class Config(BaseModel, extra="forbid"):
|
||||
"""Main configuration."""
|
||||
|
||||
compose_dir: Path = Path("/opt/compose")
|
||||
hosts: dict[str, Host]
|
||||
services: dict[str, str | list[str]] # service_name -> host_name or list of hosts
|
||||
stacks: dict[str, str | list[str]] # stack_name -> host_name or list of hosts
|
||||
traefik_file: Path | None = None # Auto-regenerate traefik config after up/down
|
||||
traefik_service: str | None = None # Service name for Traefik (skip its host in file-provider)
|
||||
traefik_stack: str | None = None # Stack name for Traefik (skip its host in file-provider)
|
||||
config_path: Path = Path() # Set by load_config()
|
||||
|
||||
def get_state_path(self) -> Path:
|
||||
@@ -37,70 +38,70 @@ class Config(BaseModel):
|
||||
return self.config_path.parent / "compose-farm-state.yaml"
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_hosts_and_services(self) -> Config:
|
||||
"""Validate host names and service configurations."""
|
||||
def validate_hosts_and_stacks(self) -> Config:
|
||||
"""Validate host names and stack configurations."""
|
||||
# "all" is reserved keyword, cannot be used as host name
|
||||
if "all" in self.hosts:
|
||||
msg = "'all' is a reserved keyword and cannot be used as a host name"
|
||||
raise ValueError(msg)
|
||||
|
||||
for service, host_value in self.services.items():
|
||||
for stack, host_value in self.stacks.items():
|
||||
# Validate list configurations
|
||||
if isinstance(host_value, list):
|
||||
if not host_value:
|
||||
msg = f"Service '{service}' has empty host list"
|
||||
msg = f"Stack '{stack}' has empty host list"
|
||||
raise ValueError(msg)
|
||||
if len(host_value) != len(set(host_value)):
|
||||
msg = f"Service '{service}' has duplicate hosts in list"
|
||||
msg = f"Stack '{stack}' has duplicate hosts in list"
|
||||
raise ValueError(msg)
|
||||
|
||||
# Validate all referenced hosts exist
|
||||
host_names = self.get_hosts(service)
|
||||
host_names = self.get_hosts(stack)
|
||||
for host_name in host_names:
|
||||
if host_name not in self.hosts:
|
||||
msg = f"Service '{service}' references unknown host '{host_name}'"
|
||||
msg = f"Stack '{stack}' references unknown host '{host_name}'"
|
||||
raise ValueError(msg)
|
||||
return self
|
||||
|
||||
def get_hosts(self, service: str) -> list[str]:
|
||||
"""Get list of host names for a service.
|
||||
def get_hosts(self, stack: str) -> list[str]:
|
||||
"""Get list of host names for a stack.
|
||||
|
||||
Supports:
|
||||
- Single host: "truenas-debian" -> ["truenas-debian"]
|
||||
- All hosts: "all" -> list of all configured hosts
|
||||
- Explicit list: ["host1", "host2"] -> ["host1", "host2"]
|
||||
"""
|
||||
if service not in self.services:
|
||||
msg = f"Unknown service: {service}"
|
||||
if stack not in self.stacks:
|
||||
msg = f"Unknown stack: {stack}"
|
||||
raise ValueError(msg)
|
||||
host_value = self.services[service]
|
||||
host_value = self.stacks[stack]
|
||||
if isinstance(host_value, list):
|
||||
return host_value
|
||||
if host_value == "all":
|
||||
return list(self.hosts.keys())
|
||||
return [host_value]
|
||||
|
||||
def is_multi_host(self, service: str) -> bool:
|
||||
"""Check if a service runs on multiple hosts."""
|
||||
return len(self.get_hosts(service)) > 1
|
||||
def is_multi_host(self, stack: str) -> bool:
|
||||
"""Check if a stack runs on multiple hosts."""
|
||||
return len(self.get_hosts(stack)) > 1
|
||||
|
||||
def get_host(self, service: str) -> Host:
|
||||
"""Get host config for a service (first host if multi-host)."""
|
||||
if service not in self.services:
|
||||
msg = f"Unknown service: {service}"
|
||||
def get_host(self, stack: str) -> Host:
|
||||
"""Get host config for a stack (first host if multi-host)."""
|
||||
if stack not in self.stacks:
|
||||
msg = f"Unknown stack: {stack}"
|
||||
raise ValueError(msg)
|
||||
host_names = self.get_hosts(service)
|
||||
host_names = self.get_hosts(stack)
|
||||
return self.hosts[host_names[0]]
|
||||
|
||||
def get_compose_path(self, service: str) -> Path:
|
||||
"""Get compose file path for a service (tries compose.yaml first)."""
|
||||
service_dir = self.compose_dir / service
|
||||
def get_compose_path(self, stack: str) -> Path:
|
||||
"""Get compose file path for a stack (tries compose.yaml first)."""
|
||||
stack_dir = self.compose_dir / stack
|
||||
for filename in COMPOSE_FILENAMES:
|
||||
candidate = service_dir / filename
|
||||
candidate = stack_dir / filename
|
||||
if candidate.exists():
|
||||
return candidate
|
||||
# Default to compose.yaml if none exist (will error later)
|
||||
return service_dir / "compose.yaml"
|
||||
return stack_dir / "compose.yaml"
|
||||
|
||||
def discover_compose_dirs(self) -> set[str]:
|
||||
"""Find all directories in compose_dir that contain a compose file."""
|
||||
@@ -113,7 +114,7 @@ class Config(BaseModel):
|
||||
return found
|
||||
|
||||
|
||||
def _parse_hosts(raw_hosts: dict[str, str | dict[str, str | int]]) -> dict[str, Host]:
|
||||
def _parse_hosts(raw_hosts: dict[str, Any]) -> dict[str, Host]:
|
||||
"""Parse hosts from config, handling both simple and full forms."""
|
||||
hosts = {}
|
||||
for name, value in raw_hosts.items():
|
||||
|
||||
@@ -9,7 +9,7 @@ err_console = Console(stderr=True, highlight=False)
|
||||
# --- Message Constants ---
|
||||
# Standardized message templates for consistent user-facing output
|
||||
|
||||
MSG_SERVICE_NOT_FOUND = "Service [cyan]{name}[/] not found in config"
|
||||
MSG_STACK_NOT_FOUND = "Stack [cyan]{name}[/] not found in config"
|
||||
MSG_HOST_NOT_FOUND = "Host [magenta]{name}[/] not found in config"
|
||||
MSG_CONFIG_NOT_FOUND = "Config file not found"
|
||||
MSG_DRY_RUN = "[dim](dry-run: no changes made)[/]"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Compose Farm configuration
|
||||
# Documentation: https://github.com/basnijholt/compose-farm
|
||||
#
|
||||
# This file configures compose-farm to manage Docker Compose services
|
||||
# This file configures compose-farm to manage Docker Compose stacks
|
||||
# across multiple hosts via SSH.
|
||||
#
|
||||
# Place this file at:
|
||||
@@ -11,7 +11,7 @@
|
||||
# - Or set CF_CONFIG environment variable
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# compose_dir: Directory containing service subdirectories with compose files
|
||||
# compose_dir: Directory containing stack subdirectories with compose files
|
||||
# ------------------------------------------------------------------------------
|
||||
# Each subdirectory should contain a compose.yaml (or docker-compose.yml).
|
||||
# This path must be the same on all hosts (NFS mount recommended).
|
||||
@@ -48,28 +48,28 @@ hosts:
|
||||
port: 2222
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# services: Map service names to their target host(s)
|
||||
# stacks: Map stack names to their target host(s)
|
||||
# ------------------------------------------------------------------------------
|
||||
# Each service name must match a subdirectory in compose_dir.
|
||||
# Each stack name must match a subdirectory in compose_dir.
|
||||
#
|
||||
# Single host:
|
||||
# service-name: hostname
|
||||
# stack-name: hostname
|
||||
#
|
||||
# Multiple hosts (explicit list):
|
||||
# service-name: [host1, host2]
|
||||
# stack-name: [host1, host2]
|
||||
#
|
||||
# All hosts:
|
||||
# service-name: all
|
||||
# stack-name: all
|
||||
#
|
||||
services:
|
||||
# Example: service runs on a single host
|
||||
stacks:
|
||||
# Example: stack runs on a single host
|
||||
nginx: server1
|
||||
postgres: server2
|
||||
|
||||
# Example: service runs on multiple specific hosts
|
||||
# Example: stack runs on multiple specific hosts
|
||||
# prometheus: [server1, server2]
|
||||
|
||||
# Example: service runs on ALL hosts (e.g., monitoring agents)
|
||||
# Example: stack runs on ALL hosts (e.g., monitoring agents)
|
||||
# node-exporter: all
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
@@ -81,9 +81,9 @@ services:
|
||||
# traefik_file: /opt/compose/traefik/dynamic.d/compose-farm.yml
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# traefik_service: (optional) Service name running Traefik
|
||||
# traefik_stack: (optional) Stack name running Traefik
|
||||
# ------------------------------------------------------------------------------
|
||||
# When generating traefik_file, services on the same host as Traefik are
|
||||
# When generating traefik_file, stacks on the same host as Traefik are
|
||||
# skipped (they're handled by Traefik's Docker provider directly).
|
||||
#
|
||||
# traefik_service: traefik
|
||||
# traefik_stack: traefik
|
||||
|
||||
@@ -50,7 +50,7 @@ async def _stream_output_lines(
|
||||
*,
|
||||
is_stderr: bool = False,
|
||||
) -> None:
|
||||
"""Stream lines from a reader to console with a service prefix.
|
||||
"""Stream lines from a reader to console with a stack prefix.
|
||||
|
||||
Works with both asyncio.StreamReader (bytes) and asyncssh readers (str).
|
||||
If prefix is empty, output is printed without a prefix.
|
||||
@@ -126,7 +126,7 @@ def _get_local_ips() -> frozenset[str]:
|
||||
class CommandResult:
|
||||
"""Result of a command execution."""
|
||||
|
||||
service: str
|
||||
stack: str
|
||||
exit_code: int
|
||||
success: bool
|
||||
stdout: str = ""
|
||||
@@ -172,7 +172,7 @@ def ssh_connect_kwargs(host: Host) -> dict[str, Any]:
|
||||
|
||||
async def _run_local_command(
|
||||
command: str,
|
||||
service: str,
|
||||
stack: str,
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
@@ -189,7 +189,7 @@ async def _run_local_command(
|
||||
)
|
||||
await proc.wait()
|
||||
return CommandResult(
|
||||
service=service,
|
||||
stack=stack,
|
||||
exit_code=proc.returncode or 0,
|
||||
success=proc.returncode == 0,
|
||||
)
|
||||
@@ -214,21 +214,21 @@ async def _run_local_command(
|
||||
await proc.wait()
|
||||
|
||||
return CommandResult(
|
||||
service=service,
|
||||
stack=stack,
|
||||
exit_code=proc.returncode or 0,
|
||||
success=proc.returncode == 0,
|
||||
stdout=stdout_data.decode() if stdout_data else "",
|
||||
stderr=stderr_data.decode() if stderr_data else "",
|
||||
)
|
||||
except OSError as e:
|
||||
err_console.print(f"[cyan]\\[{service}][/] [red]Local error:[/] {e}")
|
||||
return CommandResult(service=service, exit_code=1, success=False)
|
||||
err_console.print(f"[cyan]\\[{stack}][/] [red]Local error:[/] {e}")
|
||||
return CommandResult(stack=stack, exit_code=1, success=False)
|
||||
|
||||
|
||||
async def _run_ssh_command(
|
||||
host: Host,
|
||||
command: str,
|
||||
service: str,
|
||||
stack: str,
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
@@ -238,11 +238,15 @@ async def _run_ssh_command(
|
||||
if raw:
|
||||
# Use native ssh with TTY for proper progress bar rendering
|
||||
ssh_args = build_ssh_command(host, command, tty=True)
|
||||
|
||||
def run_ssh() -> subprocess.CompletedProcess[bytes]:
|
||||
return subprocess.run(ssh_args, check=False, env=get_ssh_env())
|
||||
|
||||
# Run in thread to avoid blocking the event loop
|
||||
# Use get_ssh_env() to auto-detect SSH agent socket
|
||||
result = await asyncio.to_thread(subprocess.run, ssh_args, check=False, env=get_ssh_env())
|
||||
result = await asyncio.to_thread(run_ssh)
|
||||
return CommandResult(
|
||||
service=service,
|
||||
stack=stack,
|
||||
exit_code=result.returncode,
|
||||
success=result.returncode == 0,
|
||||
)
|
||||
@@ -267,21 +271,21 @@ async def _run_ssh_command(
|
||||
|
||||
await proc.wait()
|
||||
return CommandResult(
|
||||
service=service,
|
||||
stack=stack,
|
||||
exit_code=proc.exit_status or 0,
|
||||
success=proc.exit_status == 0,
|
||||
stdout=stdout_data,
|
||||
stderr=stderr_data,
|
||||
)
|
||||
except (OSError, asyncssh.Error) as e:
|
||||
err_console.print(f"[cyan]\\[{service}][/] [red]SSH error:[/] {e}")
|
||||
return CommandResult(service=service, exit_code=1, success=False)
|
||||
err_console.print(f"[cyan]\\[{stack}][/] [red]SSH error:[/] {e}")
|
||||
return CommandResult(stack=stack, exit_code=1, success=False)
|
||||
|
||||
|
||||
async def run_command(
|
||||
host: Host,
|
||||
command: str,
|
||||
service: str,
|
||||
stack: str,
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
@@ -292,45 +296,45 @@ async def run_command(
|
||||
Args:
|
||||
host: Host configuration
|
||||
command: Command to run
|
||||
service: Service name (stored in result)
|
||||
stack: Stack name (stored in result)
|
||||
stream: Whether to stream output (default True)
|
||||
raw: Whether to use raw mode with TTY (default False)
|
||||
prefix: Output prefix. None=use service name, ""=no prefix.
|
||||
prefix: Output prefix. None=use stack name, ""=no prefix.
|
||||
|
||||
"""
|
||||
output_prefix = service if prefix is None else prefix
|
||||
output_prefix = stack if prefix is None else prefix
|
||||
if is_local(host):
|
||||
return await _run_local_command(
|
||||
command, service, stream=stream, raw=raw, prefix=output_prefix
|
||||
command, stack, stream=stream, raw=raw, prefix=output_prefix
|
||||
)
|
||||
return await _run_ssh_command(
|
||||
host, command, service, stream=stream, raw=raw, prefix=output_prefix
|
||||
host, command, stack, stream=stream, raw=raw, prefix=output_prefix
|
||||
)
|
||||
|
||||
|
||||
async def run_compose(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
compose_cmd: str,
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
prefix: str | None = None,
|
||||
) -> CommandResult:
|
||||
"""Run a docker compose command for a service."""
|
||||
host_name = config.get_hosts(service)[0]
|
||||
"""Run a docker compose command for a stack."""
|
||||
host_name = config.get_hosts(stack)[0]
|
||||
host = config.hosts[host_name]
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
|
||||
_print_compose_command(host_name, str(config.compose_dir), str(compose_path), compose_cmd)
|
||||
|
||||
command = f"docker compose -f {compose_path} {compose_cmd}"
|
||||
return await run_command(host, command, service, stream=stream, raw=raw, prefix=prefix)
|
||||
return await run_command(host, command, stack, stream=stream, raw=raw, prefix=prefix)
|
||||
|
||||
|
||||
async def run_compose_on_host(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
host_name: str,
|
||||
compose_cmd: str,
|
||||
*,
|
||||
@@ -338,68 +342,68 @@ async def run_compose_on_host(
|
||||
raw: bool = False,
|
||||
prefix: str | None = None,
|
||||
) -> CommandResult:
|
||||
"""Run a docker compose command for a service on a specific host.
|
||||
"""Run a docker compose command for a stack on a specific host.
|
||||
|
||||
Used for migration - running 'down' on the old host before 'up' on new host.
|
||||
"""
|
||||
host = config.hosts[host_name]
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
|
||||
_print_compose_command(host_name, str(config.compose_dir), str(compose_path), compose_cmd)
|
||||
|
||||
command = f"docker compose -f {compose_path} {compose_cmd}"
|
||||
return await run_command(host, command, service, stream=stream, raw=raw, prefix=prefix)
|
||||
return await run_command(host, command, stack, stream=stream, raw=raw, prefix=prefix)
|
||||
|
||||
|
||||
async def run_on_services(
|
||||
async def run_on_stacks(
|
||||
config: Config,
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
compose_cmd: str,
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
) -> list[CommandResult]:
|
||||
"""Run a docker compose command on multiple services in parallel.
|
||||
"""Run a docker compose command on multiple stacks in parallel.
|
||||
|
||||
For multi-host services, runs on all configured hosts.
|
||||
Note: raw=True only makes sense for single-service operations.
|
||||
For multi-host stacks, runs on all configured hosts.
|
||||
Note: raw=True only makes sense for single-stack operations.
|
||||
"""
|
||||
return await run_sequential_on_services(config, services, [compose_cmd], stream=stream, raw=raw)
|
||||
return await run_sequential_on_stacks(config, stacks, [compose_cmd], stream=stream, raw=raw)
|
||||
|
||||
|
||||
async def _run_sequential_commands(
|
||||
async def _run_sequential_stack_commands(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
commands: list[str],
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
prefix: str | None = None,
|
||||
) -> CommandResult:
|
||||
"""Run multiple compose commands sequentially for a service."""
|
||||
"""Run multiple compose commands sequentially for a stack."""
|
||||
for cmd in commands:
|
||||
result = await run_compose(config, service, cmd, stream=stream, raw=raw, prefix=prefix)
|
||||
result = await run_compose(config, stack, cmd, stream=stream, raw=raw, prefix=prefix)
|
||||
if not result.success:
|
||||
return result
|
||||
return CommandResult(service=service, exit_code=0, success=True)
|
||||
return CommandResult(stack=stack, exit_code=0, success=True)
|
||||
|
||||
|
||||
async def _run_sequential_commands_multi_host(
|
||||
async def _run_sequential_stack_commands_multi_host(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
commands: list[str],
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
prefix: str | None = None,
|
||||
) -> list[CommandResult]:
|
||||
"""Run multiple compose commands sequentially for a multi-host service.
|
||||
"""Run multiple compose commands sequentially for a multi-host stack.
|
||||
|
||||
Commands are run sequentially, but each command runs on all hosts in parallel.
|
||||
For multi-host services, prefix defaults to service@host format.
|
||||
For multi-host stacks, prefix defaults to stack@host format.
|
||||
"""
|
||||
host_names = config.get_hosts(service)
|
||||
compose_path = config.get_compose_path(service)
|
||||
host_names = config.get_hosts(stack)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
final_results: list[CommandResult] = []
|
||||
|
||||
for cmd in commands:
|
||||
@@ -408,10 +412,10 @@ async def _run_sequential_commands_multi_host(
|
||||
for host_name in host_names:
|
||||
_print_compose_command(host_name, str(config.compose_dir), str(compose_path), cmd)
|
||||
host = config.hosts[host_name]
|
||||
# For multi-host services, always use service@host prefix to distinguish output
|
||||
label = f"{service}@{host_name}" if len(host_names) > 1 else service
|
||||
# Multi-host services always need prefixes to distinguish output from different hosts
|
||||
# (ignore empty prefix from single-service batches - we still need to distinguish hosts)
|
||||
# For multi-host stacks, always use stack@host prefix to distinguish output
|
||||
label = f"{stack}@{host_name}" if len(host_names) > 1 else stack
|
||||
# Multi-host stacks always need prefixes to distinguish output from different hosts
|
||||
# (ignore empty prefix from single-stack batches - we still need to distinguish hosts)
|
||||
effective_prefix = label if len(host_names) > 1 else prefix
|
||||
tasks.append(
|
||||
run_command(host, command, label, stream=stream, raw=raw, prefix=effective_prefix)
|
||||
@@ -427,37 +431,37 @@ async def _run_sequential_commands_multi_host(
|
||||
return final_results
|
||||
|
||||
|
||||
async def run_sequential_on_services(
|
||||
async def run_sequential_on_stacks(
|
||||
config: Config,
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
commands: list[str],
|
||||
*,
|
||||
stream: bool = True,
|
||||
raw: bool = False,
|
||||
) -> list[CommandResult]:
|
||||
"""Run sequential commands on multiple services in parallel.
|
||||
"""Run sequential commands on multiple stacks in parallel.
|
||||
|
||||
For multi-host services, runs on all configured hosts.
|
||||
Note: raw=True only makes sense for single-service operations.
|
||||
For multi-host stacks, runs on all configured hosts.
|
||||
Note: raw=True only makes sense for single-stack operations.
|
||||
"""
|
||||
# Skip prefix for single-service operations (command line already shows context)
|
||||
prefix: str | None = "" if len(services) == 1 else None
|
||||
# Skip prefix for single-stack operations (command line already shows context)
|
||||
prefix: str | None = "" if len(stacks) == 1 else None
|
||||
|
||||
# Separate multi-host and single-host services for type-safe gathering
|
||||
# Separate multi-host and single-host stacks for type-safe gathering
|
||||
multi_host_tasks = []
|
||||
single_host_tasks = []
|
||||
|
||||
for service in services:
|
||||
if config.is_multi_host(service):
|
||||
for stack in stacks:
|
||||
if config.is_multi_host(stack):
|
||||
multi_host_tasks.append(
|
||||
_run_sequential_commands_multi_host(
|
||||
config, service, commands, stream=stream, raw=raw, prefix=prefix
|
||||
_run_sequential_stack_commands_multi_host(
|
||||
config, stack, commands, stream=stream, raw=raw, prefix=prefix
|
||||
)
|
||||
)
|
||||
else:
|
||||
single_host_tasks.append(
|
||||
_run_sequential_commands(
|
||||
config, service, commands, stream=stream, raw=raw, prefix=prefix
|
||||
_run_sequential_stack_commands(
|
||||
config, stack, commands, stream=stream, raw=raw, prefix=prefix
|
||||
)
|
||||
)
|
||||
|
||||
@@ -476,18 +480,18 @@ async def run_sequential_on_services(
|
||||
return flat_results
|
||||
|
||||
|
||||
async def check_service_running(
|
||||
async def check_stack_running(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
host_name: str,
|
||||
) -> bool:
|
||||
"""Check if a service has running containers on a specific host."""
|
||||
"""Check if a stack has running containers on a specific host."""
|
||||
host = config.hosts[host_name]
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
|
||||
# Use ps --status running to check for running containers
|
||||
command = f"docker compose -f {compose_path} ps --status running -q"
|
||||
result = await run_command(host, command, service, stream=False)
|
||||
result = await run_command(host, command, stack, stream=False)
|
||||
|
||||
# If command succeeded and has output, containers are running
|
||||
return result.success and bool(result.stdout.strip())
|
||||
|
||||
@@ -25,9 +25,9 @@ _DIGEST_HEX_LENGTH = 64
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class SnapshotEntry:
|
||||
"""Normalized image snapshot for a single service."""
|
||||
"""Normalized image snapshot for a single stack."""
|
||||
|
||||
service: str
|
||||
stack: str
|
||||
host: str
|
||||
compose_file: Path
|
||||
image: str
|
||||
@@ -37,7 +37,7 @@ class SnapshotEntry:
|
||||
def as_dict(self, first_seen: str, last_seen: str) -> dict[str, str]:
|
||||
"""Render snapshot as a TOML-friendly dict."""
|
||||
return {
|
||||
"service": self.service,
|
||||
"stack": self.stack,
|
||||
"host": self.host,
|
||||
"compose_file": str(self.compose_file),
|
||||
"image": self.image,
|
||||
@@ -103,24 +103,24 @@ def _extract_image_fields(record: dict[str, Any]) -> tuple[str, str]:
|
||||
return image, digest
|
||||
|
||||
|
||||
async def collect_service_entries(
|
||||
async def collect_stack_entries(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
*,
|
||||
now: datetime,
|
||||
run_compose_fn: Callable[..., Awaitable[CommandResult]] = run_compose,
|
||||
) -> list[SnapshotEntry]:
|
||||
"""Run `docker compose images` for a service and normalize results."""
|
||||
result = await run_compose_fn(config, service, "images --format json", stream=False)
|
||||
"""Run `docker compose images` for a stack and normalize results."""
|
||||
result = await run_compose_fn(config, stack, "images --format json", stream=False)
|
||||
if not result.success:
|
||||
msg = result.stderr or f"compose images exited with {result.exit_code}"
|
||||
error = f"[{service}] Unable to read images: {msg}"
|
||||
error = f"[{stack}] Unable to read images: {msg}"
|
||||
raise RuntimeError(error)
|
||||
|
||||
records = _parse_images_output(result.stdout)
|
||||
# Use first host for snapshots (multi-host services use same images on all hosts)
|
||||
host_name = config.get_hosts(service)[0]
|
||||
compose_path = config.get_compose_path(service)
|
||||
# Use first host for snapshots (multi-host stacks use same images on all hosts)
|
||||
host_name = config.get_hosts(stack)[0]
|
||||
compose_path = config.get_compose_path(stack)
|
||||
|
||||
entries: list[SnapshotEntry] = []
|
||||
for record in records:
|
||||
@@ -129,7 +129,7 @@ async def collect_service_entries(
|
||||
continue
|
||||
entries.append(
|
||||
SnapshotEntry(
|
||||
service=service,
|
||||
stack=stack,
|
||||
host=host_name,
|
||||
compose_file=compose_path,
|
||||
image=image,
|
||||
@@ -145,7 +145,14 @@ def load_existing_entries(log_path: Path) -> list[dict[str, str]]:
|
||||
if not log_path.exists():
|
||||
return []
|
||||
data = tomllib.loads(log_path.read_text())
|
||||
return list(data.get("entries", []))
|
||||
entries = list(data.get("entries", []))
|
||||
normalized: list[dict[str, str]] = []
|
||||
for entry in entries:
|
||||
normalized_entry = dict(entry)
|
||||
if "stack" not in normalized_entry and "service" in normalized_entry:
|
||||
normalized_entry["stack"] = normalized_entry.pop("service")
|
||||
normalized.append(normalized_entry)
|
||||
return normalized
|
||||
|
||||
|
||||
def merge_entries(
|
||||
@@ -156,11 +163,11 @@ def merge_entries(
|
||||
) -> list[dict[str, str]]:
|
||||
"""Merge new snapshot entries with existing ones, preserving first_seen timestamps."""
|
||||
merged: dict[tuple[str, str, str], dict[str, str]] = {
|
||||
(e["service"], e["host"], e["digest"]): dict(e) for e in existing
|
||||
(e["stack"], e["host"], e["digest"]): dict(e) for e in existing
|
||||
}
|
||||
|
||||
for entry in new_entries:
|
||||
key = (entry.service, entry.host, entry.digest)
|
||||
key = (entry.stack, entry.host, entry.digest)
|
||||
first_seen = merged.get(key, {}).get("first_seen", now_iso)
|
||||
merged[key] = entry.as_dict(first_seen, now_iso)
|
||||
|
||||
@@ -175,10 +182,10 @@ def write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str,
|
||||
if entries:
|
||||
lines.append("")
|
||||
|
||||
for entry in sorted(entries, key=lambda e: (e["service"], e["host"], e["digest"])):
|
||||
for entry in sorted(entries, key=lambda e: (e["stack"], e["host"], e["digest"])):
|
||||
lines.append("[[entries]]")
|
||||
for field in [
|
||||
"service",
|
||||
"stack",
|
||||
"host",
|
||||
"compose_file",
|
||||
"image",
|
||||
|
||||
@@ -15,17 +15,17 @@ from .executor import (
|
||||
CommandResult,
|
||||
check_networks_exist,
|
||||
check_paths_exist,
|
||||
check_service_running,
|
||||
check_stack_running,
|
||||
run_command,
|
||||
run_compose,
|
||||
run_compose_on_host,
|
||||
)
|
||||
from .state import (
|
||||
get_orphaned_services,
|
||||
get_service_host,
|
||||
remove_service,
|
||||
set_multi_host_service,
|
||||
set_service_host,
|
||||
get_orphaned_stacks,
|
||||
get_stack_host,
|
||||
remove_stack,
|
||||
set_multi_host_stack,
|
||||
set_stack_host,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -37,7 +37,7 @@ class OperationInterruptedError(Exception):
|
||||
|
||||
|
||||
class PreflightResult(NamedTuple):
|
||||
"""Result of pre-flight checks for a service on a host."""
|
||||
"""Result of pre-flight checks for a stack on a host."""
|
||||
|
||||
missing_paths: list[str]
|
||||
missing_networks: list[str]
|
||||
@@ -51,7 +51,7 @@ class PreflightResult(NamedTuple):
|
||||
|
||||
async def _run_compose_step(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
command: str,
|
||||
*,
|
||||
raw: bool,
|
||||
@@ -59,9 +59,9 @@ async def _run_compose_step(
|
||||
) -> CommandResult:
|
||||
"""Run a compose command, handle raw output newline, and check for interrupts."""
|
||||
if host:
|
||||
result = await run_compose_on_host(cfg, service, host, command, raw=raw)
|
||||
result = await run_compose_on_host(cfg, stack, host, command, raw=raw)
|
||||
else:
|
||||
result = await run_compose(cfg, service, command, raw=raw)
|
||||
result = await run_compose(cfg, stack, command, raw=raw)
|
||||
if raw:
|
||||
print() # Ensure newline after raw output
|
||||
if result.interrupted:
|
||||
@@ -69,63 +69,61 @@ async def _run_compose_step(
|
||||
return result
|
||||
|
||||
|
||||
def get_service_paths(cfg: Config, service: str) -> list[str]:
|
||||
"""Get all required paths for a service (compose_dir + volumes)."""
|
||||
def get_stack_paths(cfg: Config, stack: str) -> list[str]:
|
||||
"""Get all required paths for a stack (compose_dir + volumes)."""
|
||||
paths = [str(cfg.compose_dir)]
|
||||
paths.extend(parse_host_volumes(cfg, service))
|
||||
paths.extend(parse_host_volumes(cfg, stack))
|
||||
return paths
|
||||
|
||||
|
||||
async def discover_service_host(cfg: Config, service: str) -> tuple[str, str | list[str] | None]:
|
||||
"""Discover where a service is running.
|
||||
async def discover_stack_host(cfg: Config, stack: str) -> tuple[str, str | list[str] | None]:
|
||||
"""Discover where a stack is running.
|
||||
|
||||
For multi-host services, checks all assigned hosts in parallel.
|
||||
For multi-host stacks, checks all assigned hosts in parallel.
|
||||
For single-host, checks assigned host first, then others.
|
||||
|
||||
Returns (service_name, host_or_hosts_or_none).
|
||||
Returns (stack_name, host_or_hosts_or_none).
|
||||
"""
|
||||
assigned_hosts = cfg.get_hosts(service)
|
||||
assigned_hosts = cfg.get_hosts(stack)
|
||||
|
||||
if cfg.is_multi_host(service):
|
||||
if cfg.is_multi_host(stack):
|
||||
# Check all assigned hosts in parallel
|
||||
checks = await asyncio.gather(
|
||||
*[check_service_running(cfg, service, h) for h in assigned_hosts]
|
||||
)
|
||||
checks = await asyncio.gather(*[check_stack_running(cfg, stack, h) for h in assigned_hosts])
|
||||
running = [h for h, is_running in zip(assigned_hosts, checks, strict=True) if is_running]
|
||||
return service, running if running else None
|
||||
return stack, running if running else None
|
||||
|
||||
# Single-host: check assigned host first, then others
|
||||
if await check_service_running(cfg, service, assigned_hosts[0]):
|
||||
return service, assigned_hosts[0]
|
||||
if await check_stack_running(cfg, stack, assigned_hosts[0]):
|
||||
return stack, assigned_hosts[0]
|
||||
for host in cfg.hosts:
|
||||
if host != assigned_hosts[0] and await check_service_running(cfg, service, host):
|
||||
return service, host
|
||||
return service, None
|
||||
if host != assigned_hosts[0] and await check_stack_running(cfg, stack, host):
|
||||
return stack, host
|
||||
return stack, None
|
||||
|
||||
|
||||
async def check_service_requirements(
|
||||
async def check_stack_requirements(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
host_name: str,
|
||||
) -> PreflightResult:
|
||||
"""Check if a service can run on a specific host.
|
||||
"""Check if a stack can run on a specific host.
|
||||
|
||||
Verifies that all required paths (volumes), networks, and devices exist.
|
||||
"""
|
||||
# Check mount paths
|
||||
paths = get_service_paths(cfg, service)
|
||||
paths = get_stack_paths(cfg, stack)
|
||||
path_exists = await check_paths_exist(cfg, host_name, paths)
|
||||
missing_paths = [p for p, found in path_exists.items() if not found]
|
||||
|
||||
# Check external networks
|
||||
networks = parse_external_networks(cfg, service)
|
||||
networks = parse_external_networks(cfg, stack)
|
||||
missing_networks: list[str] = []
|
||||
if networks:
|
||||
net_exists = await check_networks_exist(cfg, host_name, networks)
|
||||
missing_networks = [n for n, found in net_exists.items() if not found]
|
||||
|
||||
# Check devices
|
||||
devices = parse_devices(cfg, service)
|
||||
devices = parse_devices(cfg, stack)
|
||||
missing_devices: list[str] = []
|
||||
if devices:
|
||||
dev_exists = await check_paths_exist(cfg, host_name, devices)
|
||||
@@ -136,7 +134,7 @@ async def check_service_requirements(
|
||||
|
||||
async def _cleanup_and_rollback(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
target_host: str,
|
||||
current_host: str,
|
||||
prefix: str,
|
||||
@@ -146,29 +144,29 @@ async def _cleanup_and_rollback(
|
||||
) -> None:
|
||||
"""Clean up failed start and attempt rollback to old host if it was running."""
|
||||
print_warning(f"{prefix} Cleaning up failed start on [magenta]{target_host}[/]")
|
||||
await run_compose(cfg, service, "down", raw=raw)
|
||||
await run_compose(cfg, stack, "down", raw=raw)
|
||||
|
||||
if not was_running:
|
||||
err_console.print(
|
||||
f"{prefix} [dim]Service was not running on [magenta]{current_host}[/], skipping rollback[/]"
|
||||
f"{prefix} [dim]Stack was not running on [magenta]{current_host}[/], skipping rollback[/]"
|
||||
)
|
||||
return
|
||||
|
||||
print_warning(f"{prefix} Rolling back to [magenta]{current_host}[/]...")
|
||||
rollback_result = await run_compose_on_host(cfg, service, current_host, "up -d", raw=raw)
|
||||
rollback_result = await run_compose_on_host(cfg, stack, current_host, "up -d", raw=raw)
|
||||
if rollback_result.success:
|
||||
print_success(f"{prefix} Rollback succeeded on [magenta]{current_host}[/]")
|
||||
else:
|
||||
print_error(f"{prefix} Rollback failed - service is down")
|
||||
print_error(f"{prefix} Rollback failed - stack is down")
|
||||
|
||||
|
||||
def _report_preflight_failures(
|
||||
service: str,
|
||||
stack: str,
|
||||
target_host: str,
|
||||
preflight: PreflightResult,
|
||||
) -> None:
|
||||
"""Report pre-flight check failures."""
|
||||
print_error(f"[cyan]\\[{service}][/] Cannot start on [magenta]{target_host}[/]:")
|
||||
print_error(f"[cyan]\\[{stack}][/] Cannot start on [magenta]{target_host}[/]:")
|
||||
for path in preflight.missing_paths:
|
||||
print_error(f" missing path: {path}")
|
||||
for net in preflight.missing_networks:
|
||||
@@ -179,27 +177,25 @@ def _report_preflight_failures(
|
||||
print_error(f" missing device: {dev}")
|
||||
|
||||
|
||||
async def _up_multi_host_service(
|
||||
async def _up_multi_host_stack(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
prefix: str,
|
||||
*,
|
||||
raw: bool = False,
|
||||
) -> list[CommandResult]:
|
||||
"""Start a multi-host service on all configured hosts."""
|
||||
host_names = cfg.get_hosts(service)
|
||||
"""Start a multi-host stack on all configured hosts."""
|
||||
host_names = cfg.get_hosts(stack)
|
||||
results: list[CommandResult] = []
|
||||
compose_path = cfg.get_compose_path(service)
|
||||
compose_path = cfg.get_compose_path(stack)
|
||||
command = f"docker compose -f {compose_path} up -d"
|
||||
|
||||
# Pre-flight checks on all hosts
|
||||
for host_name in host_names:
|
||||
preflight = await check_service_requirements(cfg, service, host_name)
|
||||
preflight = await check_stack_requirements(cfg, stack, host_name)
|
||||
if not preflight.ok:
|
||||
_report_preflight_failures(service, host_name, preflight)
|
||||
results.append(
|
||||
CommandResult(service=f"{service}@{host_name}", exit_code=1, success=False)
|
||||
)
|
||||
_report_preflight_failures(stack, host_name, preflight)
|
||||
results.append(CommandResult(stack=f"{stack}@{host_name}", exit_code=1, success=False))
|
||||
return results
|
||||
|
||||
# Start on all hosts
|
||||
@@ -209,7 +205,7 @@ async def _up_multi_host_service(
|
||||
succeeded_hosts: list[str] = []
|
||||
for host_name in host_names:
|
||||
host = cfg.hosts[host_name]
|
||||
label = f"{service}@{host_name}"
|
||||
label = f"{stack}@{host_name}"
|
||||
result = await run_command(host, command, label, stream=not raw, raw=raw)
|
||||
if raw:
|
||||
print() # Ensure newline after raw output
|
||||
@@ -219,72 +215,70 @@ async def _up_multi_host_service(
|
||||
|
||||
# Update state with hosts that succeeded (partial success is tracked)
|
||||
if succeeded_hosts:
|
||||
set_multi_host_service(cfg, service, succeeded_hosts)
|
||||
set_multi_host_stack(cfg, stack, succeeded_hosts)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
async def _migrate_service(
|
||||
async def _migrate_stack(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
current_host: str,
|
||||
target_host: str,
|
||||
prefix: str,
|
||||
*,
|
||||
raw: bool = False,
|
||||
) -> CommandResult | None:
|
||||
"""Migrate a service from current_host to target_host.
|
||||
"""Migrate a stack from current_host to target_host.
|
||||
|
||||
Pre-pulls/builds images on target, then stops service on current host.
|
||||
Pre-pulls/builds images on target, then stops stack on current host.
|
||||
Returns failure result if migration prep fails, None on success.
|
||||
"""
|
||||
console.print(
|
||||
f"{prefix} Migrating from [magenta]{current_host}[/] → [magenta]{target_host}[/]..."
|
||||
)
|
||||
|
||||
# Prepare images on target host before stopping old service to minimize downtime.
|
||||
# Pull handles image-based services; build handles Dockerfile-based services.
|
||||
# Prepare images on target host before stopping old stack to minimize downtime.
|
||||
# Pull handles image-based compose services; build handles Dockerfile-based ones.
|
||||
# --ignore-buildable makes pull skip images that have build: defined.
|
||||
for cmd, label in [("pull --ignore-buildable", "Pull"), ("build", "Build")]:
|
||||
result = await _run_compose_step(cfg, service, cmd, raw=raw)
|
||||
result = await _run_compose_step(cfg, stack, cmd, raw=raw)
|
||||
if not result.success:
|
||||
print_error(
|
||||
f"{prefix} {label} failed on [magenta]{target_host}[/], "
|
||||
"leaving service on current host"
|
||||
"leaving stack on current host"
|
||||
)
|
||||
return result
|
||||
|
||||
# Stop on current host
|
||||
down_result = await _run_compose_step(cfg, service, "down", raw=raw, host=current_host)
|
||||
down_result = await _run_compose_step(cfg, stack, "down", raw=raw, host=current_host)
|
||||
return down_result if not down_result.success else None
|
||||
|
||||
|
||||
async def _up_single_service(
|
||||
async def _up_single_stack(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
prefix: str,
|
||||
*,
|
||||
raw: bool,
|
||||
) -> CommandResult:
|
||||
"""Start a single-host service with migration support."""
|
||||
target_host = cfg.get_hosts(service)[0]
|
||||
current_host = get_service_host(cfg, service)
|
||||
"""Start a single-host stack with migration support."""
|
||||
target_host = cfg.get_hosts(stack)[0]
|
||||
current_host = get_stack_host(cfg, stack)
|
||||
|
||||
# Pre-flight check: verify paths, networks, and devices exist on target
|
||||
preflight = await check_service_requirements(cfg, service, target_host)
|
||||
preflight = await check_stack_requirements(cfg, stack, target_host)
|
||||
if not preflight.ok:
|
||||
_report_preflight_failures(service, target_host, preflight)
|
||||
return CommandResult(service=service, exit_code=1, success=False)
|
||||
_report_preflight_failures(stack, target_host, preflight)
|
||||
return CommandResult(stack=stack, exit_code=1, success=False)
|
||||
|
||||
# If service is deployed elsewhere, migrate it
|
||||
# If stack is deployed elsewhere, migrate it
|
||||
did_migration = False
|
||||
was_running = False
|
||||
if current_host and current_host != target_host:
|
||||
if current_host in cfg.hosts:
|
||||
was_running = await check_service_running(cfg, service, current_host)
|
||||
failure = await _migrate_service(
|
||||
cfg, service, current_host, target_host, prefix, raw=raw
|
||||
)
|
||||
was_running = await check_stack_running(cfg, stack, current_host)
|
||||
failure = await _migrate_stack(cfg, stack, current_host, target_host, prefix, raw=raw)
|
||||
if failure:
|
||||
return failure
|
||||
did_migration = True
|
||||
@@ -295,15 +289,15 @@ async def _up_single_service(
|
||||
|
||||
# Start on target host
|
||||
console.print(f"{prefix} Starting on [magenta]{target_host}[/]...")
|
||||
up_result = await _run_compose_step(cfg, service, "up -d", raw=raw)
|
||||
up_result = await _run_compose_step(cfg, stack, "up -d", raw=raw)
|
||||
|
||||
# Update state on success, or rollback on failure
|
||||
if up_result.success:
|
||||
set_service_host(cfg, service, target_host)
|
||||
set_stack_host(cfg, stack, target_host)
|
||||
elif did_migration and current_host:
|
||||
await _cleanup_and_rollback(
|
||||
cfg,
|
||||
service,
|
||||
stack,
|
||||
target_host,
|
||||
current_host,
|
||||
prefix,
|
||||
@@ -314,24 +308,24 @@ async def _up_single_service(
|
||||
return up_result
|
||||
|
||||
|
||||
async def up_services(
|
||||
async def up_stacks(
|
||||
cfg: Config,
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
*,
|
||||
raw: bool = False,
|
||||
) -> list[CommandResult]:
|
||||
"""Start services with automatic migration if host changed."""
|
||||
"""Start stacks with automatic migration if host changed."""
|
||||
results: list[CommandResult] = []
|
||||
total = len(services)
|
||||
total = len(stacks)
|
||||
|
||||
try:
|
||||
for idx, service in enumerate(services, 1):
|
||||
prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{service}][/]"
|
||||
for idx, stack in enumerate(stacks, 1):
|
||||
prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{stack}][/]"
|
||||
|
||||
if cfg.is_multi_host(service):
|
||||
results.extend(await _up_multi_host_service(cfg, service, prefix, raw=raw))
|
||||
if cfg.is_multi_host(stack):
|
||||
results.extend(await _up_multi_host_stack(cfg, stack, prefix, raw=raw))
|
||||
else:
|
||||
results.append(await _up_single_service(cfg, service, prefix, raw=raw))
|
||||
results.append(await _up_single_stack(cfg, stack, prefix, raw=raw))
|
||||
except OperationInterruptedError:
|
||||
raise KeyboardInterrupt from None
|
||||
|
||||
@@ -340,22 +334,22 @@ async def up_services(
|
||||
|
||||
async def check_host_compatibility(
|
||||
cfg: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
) -> dict[str, tuple[int, int, list[str]]]:
|
||||
"""Check which hosts can run a service based on paths, networks, and devices.
|
||||
"""Check which hosts can run a stack based on paths, networks, and devices.
|
||||
|
||||
Returns dict of host_name -> (found_count, total_count, missing_items).
|
||||
"""
|
||||
# Get total requirements count
|
||||
paths = get_service_paths(cfg, service)
|
||||
networks = parse_external_networks(cfg, service)
|
||||
devices = parse_devices(cfg, service)
|
||||
paths = get_stack_paths(cfg, stack)
|
||||
networks = parse_external_networks(cfg, stack)
|
||||
devices = parse_devices(cfg, stack)
|
||||
total = len(paths) + len(networks) + len(devices)
|
||||
|
||||
results: dict[str, tuple[int, int, list[str]]] = {}
|
||||
|
||||
for host_name in cfg.hosts:
|
||||
preflight = await check_service_requirements(cfg, service, host_name)
|
||||
preflight = await check_stack_requirements(cfg, stack, host_name)
|
||||
all_missing = (
|
||||
preflight.missing_paths + preflight.missing_networks + preflight.missing_devices
|
||||
)
|
||||
@@ -365,70 +359,68 @@ async def check_host_compatibility(
|
||||
return results
|
||||
|
||||
|
||||
async def stop_orphaned_services(cfg: Config) -> list[CommandResult]:
|
||||
"""Stop orphaned services (in state but not in config).
|
||||
async def stop_orphaned_stacks(cfg: Config) -> list[CommandResult]:
|
||||
"""Stop orphaned stacks (in state but not in config).
|
||||
|
||||
Runs docker compose down on each service on its tracked host(s).
|
||||
Runs docker compose down on each stack on its tracked host(s).
|
||||
Only removes from state on successful stop.
|
||||
|
||||
Returns list of CommandResults for each service@host.
|
||||
Returns list of CommandResults for each stack@host.
|
||||
"""
|
||||
orphaned = get_orphaned_services(cfg)
|
||||
orphaned = get_orphaned_stacks(cfg)
|
||||
if not orphaned:
|
||||
return []
|
||||
|
||||
results: list[CommandResult] = []
|
||||
tasks: list[tuple[str, str, asyncio.Task[CommandResult]]] = []
|
||||
|
||||
# Build list of (service, host, task) for all orphaned services
|
||||
for service, hosts in orphaned.items():
|
||||
# Build list of (stack, host, task) for all orphaned stacks
|
||||
for stack, hosts in orphaned.items():
|
||||
host_list = hosts if isinstance(hosts, list) else [hosts]
|
||||
for host in host_list:
|
||||
# Skip hosts no longer in config
|
||||
if host not in cfg.hosts:
|
||||
print_warning(f"{service}@{host}: host no longer in config, skipping")
|
||||
print_warning(f"{stack}@{host}: host no longer in config, skipping")
|
||||
results.append(
|
||||
CommandResult(
|
||||
service=f"{service}@{host}",
|
||||
stack=f"{stack}@{host}",
|
||||
exit_code=1,
|
||||
success=False,
|
||||
stderr="host no longer in config",
|
||||
)
|
||||
)
|
||||
continue
|
||||
coro = run_compose_on_host(cfg, service, host, "down")
|
||||
tasks.append((service, host, asyncio.create_task(coro)))
|
||||
coro = run_compose_on_host(cfg, stack, host, "down")
|
||||
tasks.append((stack, host, asyncio.create_task(coro)))
|
||||
|
||||
# Run all down commands in parallel
|
||||
if tasks:
|
||||
for service, host, task in tasks:
|
||||
for stack, host, task in tasks:
|
||||
try:
|
||||
result = await task
|
||||
results.append(result)
|
||||
if result.success:
|
||||
print_success(f"{service}@{host}: stopped")
|
||||
print_success(f"{stack}@{host}: stopped")
|
||||
else:
|
||||
print_error(f"{service}@{host}: {result.stderr or 'failed'}")
|
||||
print_error(f"{stack}@{host}: {result.stderr or 'failed'}")
|
||||
except Exception as e:
|
||||
print_error(f"{service}@{host}: {e}")
|
||||
print_error(f"{stack}@{host}: {e}")
|
||||
results.append(
|
||||
CommandResult(
|
||||
service=f"{service}@{host}",
|
||||
stack=f"{stack}@{host}",
|
||||
exit_code=1,
|
||||
success=False,
|
||||
stderr=str(e),
|
||||
)
|
||||
)
|
||||
|
||||
# Remove from state only for services where ALL hosts succeeded
|
||||
for service, hosts in orphaned.items():
|
||||
# Remove from state only for stacks where ALL hosts succeeded
|
||||
for stack, hosts in orphaned.items():
|
||||
host_list = hosts if isinstance(hosts, list) else [hosts]
|
||||
all_succeeded = all(
|
||||
r.success
|
||||
for r in results
|
||||
if r.service.startswith(f"{service}@") or r.service == service
|
||||
r.success for r in results if r.stack.startswith(f"{stack}@") or r.stack == stack
|
||||
)
|
||||
if all_succeeded:
|
||||
remove_service(cfg, service)
|
||||
remove_stack(cfg, stack)
|
||||
|
||||
return results
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""State tracking for deployed services."""
|
||||
"""State tracking for deployed stacks."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -13,44 +13,44 @@ if TYPE_CHECKING:
|
||||
from .config import Config
|
||||
|
||||
|
||||
def group_services_by_host(
|
||||
services: dict[str, str | list[str]],
|
||||
def group_stacks_by_host(
|
||||
stacks: dict[str, str | list[str]],
|
||||
hosts: Mapping[str, object],
|
||||
all_hosts: list[str] | None = None,
|
||||
) -> dict[str, list[str]]:
|
||||
"""Group services by their assigned host(s).
|
||||
"""Group stacks by their assigned host(s).
|
||||
|
||||
For multi-host services (list or "all"), the service appears in multiple host lists.
|
||||
For multi-host stacks (list or "all"), the stack appears in multiple host lists.
|
||||
"""
|
||||
by_host: dict[str, list[str]] = {h: [] for h in hosts}
|
||||
for service, host_value in services.items():
|
||||
for stack, host_value in stacks.items():
|
||||
if isinstance(host_value, list):
|
||||
for host_name in host_value:
|
||||
if host_name in by_host:
|
||||
by_host[host_name].append(service)
|
||||
by_host[host_name].append(stack)
|
||||
elif host_value == "all" and all_hosts:
|
||||
for host_name in all_hosts:
|
||||
if host_name in by_host:
|
||||
by_host[host_name].append(service)
|
||||
by_host[host_name].append(stack)
|
||||
elif host_value in by_host:
|
||||
by_host[host_value].append(service)
|
||||
by_host[host_value].append(stack)
|
||||
return by_host
|
||||
|
||||
|
||||
def group_running_services_by_host(
|
||||
def group_running_stacks_by_host(
|
||||
state: dict[str, str | list[str]],
|
||||
hosts: Mapping[str, object],
|
||||
) -> dict[str, list[str]]:
|
||||
"""Group running services by host, filtering out hosts with no services."""
|
||||
by_host = group_services_by_host(state, hosts)
|
||||
"""Group running stacks by host, filtering out hosts with no stacks."""
|
||||
by_host = group_stacks_by_host(state, hosts)
|
||||
return {h: svcs for h, svcs in by_host.items() if svcs}
|
||||
|
||||
|
||||
def load_state(config: Config) -> dict[str, str | list[str]]:
|
||||
"""Load the current deployment state.
|
||||
|
||||
Returns a dict mapping service names to host name(s).
|
||||
Multi-host services store a list of hosts.
|
||||
Returns a dict mapping stack names to host name(s).
|
||||
Multi-host stacks store a list of hosts.
|
||||
"""
|
||||
state_path = config.get_state_path()
|
||||
if not state_path.exists():
|
||||
@@ -83,13 +83,13 @@ def _modify_state(config: Config) -> Generator[dict[str, str | list[str]], None,
|
||||
save_state(config, state)
|
||||
|
||||
|
||||
def get_service_host(config: Config, service: str) -> str | None:
|
||||
"""Get the host where a service is currently deployed.
|
||||
def get_stack_host(config: Config, stack: str) -> str | None:
|
||||
"""Get the host where a stack is currently deployed.
|
||||
|
||||
For multi-host services, returns the first host or None.
|
||||
For multi-host stacks, returns the first host or None.
|
||||
"""
|
||||
state = load_state(config)
|
||||
value = state.get(service)
|
||||
value = state.get(stack)
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, list):
|
||||
@@ -97,59 +97,59 @@ def get_service_host(config: Config, service: str) -> str | None:
|
||||
return value
|
||||
|
||||
|
||||
def set_service_host(config: Config, service: str, host: str) -> None:
|
||||
"""Record that a service is deployed on a host."""
|
||||
def set_stack_host(config: Config, stack: str, host: str) -> None:
|
||||
"""Record that a stack is deployed on a host."""
|
||||
with _modify_state(config) as state:
|
||||
state[service] = host
|
||||
state[stack] = host
|
||||
|
||||
|
||||
def set_multi_host_service(config: Config, service: str, hosts: list[str]) -> None:
|
||||
"""Record that a multi-host service is deployed on multiple hosts."""
|
||||
def set_multi_host_stack(config: Config, stack: str, hosts: list[str]) -> None:
|
||||
"""Record that a multi-host stack is deployed on multiple hosts."""
|
||||
with _modify_state(config) as state:
|
||||
state[service] = hosts
|
||||
state[stack] = hosts
|
||||
|
||||
|
||||
def remove_service(config: Config, service: str) -> None:
|
||||
"""Remove a service from the state (after down)."""
|
||||
def remove_stack(config: Config, stack: str) -> None:
|
||||
"""Remove a stack from the state (after down)."""
|
||||
with _modify_state(config) as state:
|
||||
state.pop(service, None)
|
||||
state.pop(stack, None)
|
||||
|
||||
|
||||
def get_services_needing_migration(config: Config) -> list[str]:
|
||||
"""Get services where current host differs from configured host.
|
||||
def get_stacks_needing_migration(config: Config) -> list[str]:
|
||||
"""Get stacks where current host differs from configured host.
|
||||
|
||||
Multi-host services are never considered for migration.
|
||||
Multi-host stacks are never considered for migration.
|
||||
"""
|
||||
needs_migration = []
|
||||
for service in config.services:
|
||||
# Skip multi-host services
|
||||
if config.is_multi_host(service):
|
||||
for stack in config.stacks:
|
||||
# Skip multi-host stacks
|
||||
if config.is_multi_host(stack):
|
||||
continue
|
||||
|
||||
configured_host = config.get_hosts(service)[0]
|
||||
current_host = get_service_host(config, service)
|
||||
configured_host = config.get_hosts(stack)[0]
|
||||
current_host = get_stack_host(config, stack)
|
||||
if current_host and current_host != configured_host:
|
||||
needs_migration.append(service)
|
||||
needs_migration.append(stack)
|
||||
return needs_migration
|
||||
|
||||
|
||||
def get_orphaned_services(config: Config) -> dict[str, str | list[str]]:
|
||||
"""Get services that are in state but not in config.
|
||||
def get_orphaned_stacks(config: Config) -> dict[str, str | list[str]]:
|
||||
"""Get stacks that are in state but not in config.
|
||||
|
||||
These are services that were previously deployed but have been
|
||||
These are stacks that were previously deployed but have been
|
||||
removed from the config file (e.g., commented out).
|
||||
|
||||
Returns a dict mapping service name to host(s) where it's deployed.
|
||||
Returns a dict mapping stack name to host(s) where it's deployed.
|
||||
"""
|
||||
state = load_state(config)
|
||||
return {service: hosts for service, hosts in state.items() if service not in config.services}
|
||||
return {stack: hosts for stack, hosts in state.items() if stack not in config.stacks}
|
||||
|
||||
|
||||
def get_services_not_in_state(config: Config) -> list[str]:
|
||||
"""Get services that are in config but not in state.
|
||||
def get_stacks_not_in_state(config: Config) -> list[str]:
|
||||
"""Get stacks that are in config but not in state.
|
||||
|
||||
These are services that should be running but aren't tracked
|
||||
These are stacks that should be running but aren't tracked
|
||||
(e.g., newly added to config, or previously stopped as orphans).
|
||||
"""
|
||||
state = load_state(config)
|
||||
return [service for service in config.services if service not in state]
|
||||
return [stack for stack in config.stacks if stack not in state]
|
||||
|
||||
@@ -311,7 +311,7 @@ def _process_service_labels(
|
||||
|
||||
def generate_traefik_config(
|
||||
config: Config,
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
*,
|
||||
check_all: bool = False,
|
||||
) -> tuple[dict[str, Any], list[str]]:
|
||||
@@ -319,8 +319,8 @@ def generate_traefik_config(
|
||||
|
||||
Args:
|
||||
config: The compose-farm config.
|
||||
services: List of service names to process.
|
||||
check_all: If True, check all services for warnings (ignore host filtering).
|
||||
stacks: List of stack names to process.
|
||||
check_all: If True, check all stacks for warnings (ignore host filtering).
|
||||
Used by the check command to validate all traefik labels.
|
||||
|
||||
Returns (config_dict, warnings).
|
||||
@@ -332,14 +332,14 @@ def generate_traefik_config(
|
||||
|
||||
# Determine Traefik's host from service assignment
|
||||
traefik_host = None
|
||||
if config.traefik_service and not check_all:
|
||||
traefik_host = config.services.get(config.traefik_service)
|
||||
if config.traefik_stack and not check_all:
|
||||
traefik_host = config.stacks.get(config.traefik_stack)
|
||||
|
||||
for stack in services:
|
||||
for stack in stacks:
|
||||
raw_services, env, host_address = load_compose_services(config, stack)
|
||||
stack_host = config.services.get(stack)
|
||||
stack_host = config.stacks.get(stack)
|
||||
|
||||
# Skip services on Traefik's host - docker provider handles them directly
|
||||
# Skip stacks on Traefik's host - docker provider handles them directly
|
||||
# (unless check_all is True, for validation purposes)
|
||||
if not check_all:
|
||||
if host_address.lower() in LOCAL_ADDRESSES:
|
||||
@@ -370,7 +370,7 @@ _TRAEFIK_CONFIG_HEADER = """\
|
||||
# Auto-generated by compose-farm
|
||||
# https://github.com/basnijholt/compose-farm
|
||||
#
|
||||
# This file routes traffic to services running on hosts other than Traefik's host.
|
||||
# This file routes traffic to stacks running on hosts other than Traefik's host.
|
||||
# Services on Traefik's host use the Docker provider directly.
|
||||
#
|
||||
# Regenerate with: compose-farm traefik-file --all -o <this-file>
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
from contextlib import asynccontextmanager, suppress
|
||||
from typing import TYPE_CHECKING
|
||||
@@ -10,11 +11,22 @@ from typing import TYPE_CHECKING
|
||||
from fastapi import FastAPI
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from pydantic import ValidationError
|
||||
from rich.logging import RichHandler
|
||||
|
||||
from compose_farm.web.deps import STATIC_DIR, get_config
|
||||
from compose_farm.web.routes import actions, api, pages
|
||||
from compose_farm.web.streaming import TASK_TTL_SECONDS, cleanup_stale_tasks
|
||||
|
||||
# Configure logging with Rich handler for compose_farm.web modules
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(message)s",
|
||||
datefmt="[%X]",
|
||||
handlers=[RichHandler(rich_tracebacks=True, show_path=False)],
|
||||
)
|
||||
# Set our web modules to INFO level (uvicorn handles its own logging)
|
||||
logging.getLogger("compose_farm.web").setLevel(logging.INFO)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import AsyncGenerator
|
||||
|
||||
@@ -48,7 +60,7 @@ def create_app() -> FastAPI:
|
||||
"""Create and configure the FastAPI application."""
|
||||
app = FastAPI(
|
||||
title="Compose Farm",
|
||||
description="Web UI for managing Docker Compose services across multiple hosts",
|
||||
description="Web UI for managing Docker Compose stacks across multiple hosts",
|
||||
lifespan=lifespan,
|
||||
)
|
||||
|
||||
|
||||
108
src/compose_farm/web/cdn.py
Normal file
108
src/compose_farm/web/cdn.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""CDN asset definitions and caching for tests and demo recordings.
|
||||
|
||||
This module provides a single source of truth for CDN asset URLs used in
|
||||
browser tests and demo recordings. Assets are intercepted and served from
|
||||
a local cache to eliminate network variability.
|
||||
|
||||
Note: The canonical list of CDN assets for production is in base.html
|
||||
(with data-vendor attributes). This module includes those plus dynamically
|
||||
loaded assets (like Monaco editor modules loaded by app.js).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
# CDN assets to cache locally for tests/demos
|
||||
# Format: URL -> (local_filename, content_type)
|
||||
#
|
||||
# If tests fail with "Uncached CDN request", add the URL here.
|
||||
CDN_ASSETS: dict[str, tuple[str, str]] = {
|
||||
# From base.html (data-vendor attributes)
|
||||
"https://cdn.jsdelivr.net/npm/daisyui@5/themes.css": ("daisyui-themes.css", "text/css"),
|
||||
"https://cdn.jsdelivr.net/npm/daisyui@5": ("daisyui.css", "text/css"),
|
||||
"https://cdn.jsdelivr.net/npm/@tailwindcss/browser@4": (
|
||||
"tailwind.js",
|
||||
"application/javascript",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/css/xterm.css": ("xterm.css", "text/css"),
|
||||
"https://unpkg.com/htmx.org@2.0.4": ("htmx.js", "application/javascript"),
|
||||
"https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/lib/xterm.js": (
|
||||
"xterm.js",
|
||||
"application/javascript",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/@xterm/addon-fit@0.10.0/lib/addon-fit.js": (
|
||||
"xterm-fit.js",
|
||||
"application/javascript",
|
||||
),
|
||||
# Monaco editor - dynamically loaded by app.js
|
||||
"https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/loader.js": (
|
||||
"monaco-loader.js",
|
||||
"application/javascript",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/editor/editor.main.js": (
|
||||
"monaco-editor-main.js",
|
||||
"application/javascript",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/editor/editor.main.css": (
|
||||
"monaco-editor-main.css",
|
||||
"text/css",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/base/worker/workerMain.js": (
|
||||
"monaco-workerMain.js",
|
||||
"application/javascript",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/basic-languages/yaml/yaml.js": (
|
||||
"monaco-yaml.js",
|
||||
"application/javascript",
|
||||
),
|
||||
"https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/base/browser/ui/codicons/codicon/codicon.ttf": (
|
||||
"monaco-codicon.ttf",
|
||||
"font/ttf",
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def download_url(url: str) -> bytes | None:
|
||||
"""Download URL content using curl."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["curl", "-fsSL", "--max-time", "30", url], # noqa: S607
|
||||
capture_output=True,
|
||||
check=True,
|
||||
)
|
||||
return bytes(result.stdout)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def ensure_vendor_cache(cache_dir: Path) -> Path:
|
||||
"""Download CDN assets to cache directory if not already present.
|
||||
|
||||
Args:
|
||||
cache_dir: Directory to store cached assets.
|
||||
|
||||
Returns:
|
||||
The cache directory path.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If any asset fails to download.
|
||||
|
||||
"""
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
for url, (filename, _content_type) in CDN_ASSETS.items():
|
||||
filepath = cache_dir / filename
|
||||
if filepath.exists():
|
||||
continue
|
||||
content = download_url(url)
|
||||
if not content:
|
||||
msg = f"Failed to download {url} - check network/curl"
|
||||
raise RuntimeError(msg)
|
||||
filepath.write_bytes(content)
|
||||
|
||||
return cache_dir
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Action routes for service operations."""
|
||||
"""Action routes for stack operations."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -32,27 +32,47 @@ def _start_task(coro_factory: Callable[[str], Coroutine[Any, Any, None]]) -> str
|
||||
return task_id
|
||||
|
||||
|
||||
# Allowed service commands
|
||||
ALLOWED_COMMANDS = {"up", "down", "restart", "pull", "update", "logs"}
|
||||
# Allowed stack commands
|
||||
ALLOWED_COMMANDS = {"up", "down", "restart", "pull", "update", "logs", "stop"}
|
||||
|
||||
# Allowed service-level commands (no 'down' - use 'stop' for individual services)
|
||||
ALLOWED_SERVICE_COMMANDS = {"logs", "pull", "restart", "up", "stop"}
|
||||
|
||||
|
||||
@router.post("/service/{name}/{command}")
|
||||
async def service_action(name: str, command: str) -> dict[str, Any]:
|
||||
"""Run a compose command for a service (up, down, restart, pull, update, logs)."""
|
||||
@router.post("/stack/{name}/{command}")
|
||||
async def stack_action(name: str, command: str) -> dict[str, Any]:
|
||||
"""Run a compose command for a stack (up, down, restart, pull, update, logs, stop)."""
|
||||
if command not in ALLOWED_COMMANDS:
|
||||
raise HTTPException(status_code=404, detail=f"Unknown command '{command}'")
|
||||
|
||||
config = get_config()
|
||||
if name not in config.services:
|
||||
raise HTTPException(status_code=404, detail=f"Service '{name}' not found")
|
||||
if name not in config.stacks:
|
||||
raise HTTPException(status_code=404, detail=f"Stack '{name}' not found")
|
||||
|
||||
task_id = _start_task(lambda tid: run_compose_streaming(config, name, command, tid))
|
||||
return {"task_id": task_id, "service": name, "command": command}
|
||||
return {"task_id": task_id, "stack": name, "command": command}
|
||||
|
||||
|
||||
@router.post("/stack/{name}/service/{service}/{command}")
|
||||
async def service_action(name: str, service: str, command: str) -> dict[str, Any]:
|
||||
"""Run a compose command for a specific service within a stack."""
|
||||
if command not in ALLOWED_SERVICE_COMMANDS:
|
||||
raise HTTPException(status_code=404, detail=f"Unknown command '{command}'")
|
||||
|
||||
config = get_config()
|
||||
if name not in config.stacks:
|
||||
raise HTTPException(status_code=404, detail=f"Stack '{name}' not found")
|
||||
|
||||
# Use --service flag to target specific service
|
||||
task_id = _start_task(
|
||||
lambda tid: run_compose_streaming(config, name, f"{command} --service {service}", tid)
|
||||
)
|
||||
return {"task_id": task_id, "stack": name, "service": service, "command": command}
|
||||
|
||||
|
||||
@router.post("/apply")
|
||||
async def apply_all() -> dict[str, Any]:
|
||||
"""Run cf apply to reconcile all services."""
|
||||
"""Run cf apply to reconcile all stacks."""
|
||||
config = get_config()
|
||||
task_id = _start_task(lambda tid: run_cli_streaming(config, ["apply"], tid))
|
||||
return {"task_id": task_id, "command": "apply"}
|
||||
@@ -60,7 +80,23 @@ async def apply_all() -> dict[str, Any]:
|
||||
|
||||
@router.post("/refresh")
|
||||
async def refresh_state() -> dict[str, Any]:
|
||||
"""Refresh state from running services."""
|
||||
"""Refresh state from running stacks."""
|
||||
config = get_config()
|
||||
task_id = _start_task(lambda tid: run_cli_streaming(config, ["refresh"], tid))
|
||||
return {"task_id": task_id, "command": "refresh"}
|
||||
|
||||
|
||||
@router.post("/pull-all")
|
||||
async def pull_all() -> dict[str, Any]:
|
||||
"""Pull latest images for all stacks."""
|
||||
config = get_config()
|
||||
task_id = _start_task(lambda tid: run_cli_streaming(config, ["pull", "--all"], tid))
|
||||
return {"task_id": task_id, "command": "pull --all"}
|
||||
|
||||
|
||||
@router.post("/update-all")
|
||||
async def update_all() -> dict[str, Any]:
|
||||
"""Update all stacks (pull + build + down + up)."""
|
||||
config = get_config()
|
||||
task_id = _start_task(lambda tid: run_cli_streaming(config, ["update", "--all"], tid))
|
||||
return {"task_id": task_id, "command": "update --all"}
|
||||
|
||||
@@ -5,21 +5,28 @@ from __future__ import annotations
|
||||
import asyncio
|
||||
import contextlib
|
||||
import json
|
||||
import logging
|
||||
import shlex
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Annotated, Any
|
||||
from typing import TYPE_CHECKING, Annotated, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
|
||||
import asyncssh
|
||||
import yaml
|
||||
from fastapi import APIRouter, Body, HTTPException, Query
|
||||
from fastapi.responses import HTMLResponse
|
||||
|
||||
from compose_farm.compose import get_container_name
|
||||
from compose_farm.executor import is_local, run_compose_on_host, ssh_connect_kwargs
|
||||
from compose_farm.paths import find_config_path
|
||||
from compose_farm.state import load_state
|
||||
from compose_farm.web.deps import get_config, get_templates
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=["api"])
|
||||
|
||||
|
||||
@@ -76,12 +83,12 @@ def _save_with_backup(file_path: Path, content: str) -> bool:
|
||||
return True
|
||||
|
||||
|
||||
def _get_service_compose_path(name: str) -> Path:
|
||||
"""Get compose path for service, raising HTTPException if not found."""
|
||||
def _get_stack_compose_path(name: str) -> Path:
|
||||
"""Get compose path for stack, raising HTTPException if not found."""
|
||||
config = get_config()
|
||||
|
||||
if name not in config.services:
|
||||
raise HTTPException(status_code=404, detail=f"Service '{name}' not found")
|
||||
if name not in config.stacks:
|
||||
raise HTTPException(status_code=404, detail=f"Stack '{name}' not found")
|
||||
|
||||
compose_path = config.get_compose_path(name)
|
||||
if not compose_path:
|
||||
@@ -90,12 +97,12 @@ def _get_service_compose_path(name: str) -> Path:
|
||||
return compose_path
|
||||
|
||||
|
||||
def _get_compose_services(config: Any, service: str, hosts: list[str]) -> list[dict[str, Any]]:
|
||||
def _get_compose_services(config: Any, stack: str, hosts: list[str]) -> list[dict[str, Any]]:
|
||||
"""Get container info from compose file (fast, local read).
|
||||
|
||||
Returns one entry per container per host for multi-host services.
|
||||
Returns one entry per container per host for multi-host stacks.
|
||||
"""
|
||||
compose_path = config.get_compose_path(service)
|
||||
compose_path = config.get_compose_path(stack)
|
||||
if not compose_path or not compose_path.exists():
|
||||
return []
|
||||
|
||||
@@ -110,14 +117,9 @@ def _get_compose_services(config: Any, service: str, hosts: list[str]) -> list[d
|
||||
containers = []
|
||||
for host in hosts:
|
||||
for svc_name, svc_def in raw_services.items():
|
||||
# Use container_name if set, otherwise default to {project}-{service}-1
|
||||
if isinstance(svc_def, dict) and svc_def.get("container_name"):
|
||||
container_name = svc_def["container_name"]
|
||||
else:
|
||||
container_name = f"{project_name}-{svc_name}-1"
|
||||
containers.append(
|
||||
{
|
||||
"Name": container_name,
|
||||
"Name": get_container_name(svc_name, svc_def, project_name),
|
||||
"Service": svc_name,
|
||||
"Host": host,
|
||||
"State": "unknown", # Status requires Docker query
|
||||
@@ -127,7 +129,7 @@ def _get_compose_services(config: Any, service: str, hosts: list[str]) -> list[d
|
||||
|
||||
|
||||
async def _get_container_states(
|
||||
config: Any, service: str, containers: list[dict[str, Any]]
|
||||
config: Any, stack: str, containers: list[dict[str, Any]]
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Query Docker for actual container states on a single host."""
|
||||
if not containers:
|
||||
@@ -138,9 +140,15 @@ async def _get_container_states(
|
||||
|
||||
# Use -a to include stopped/exited containers
|
||||
result = await run_compose_on_host(
|
||||
config, service, host_name, "ps -a --format json", stream=False
|
||||
config, stack, host_name, "ps -a --format json", stream=False
|
||||
)
|
||||
if not result.success:
|
||||
logger.warning(
|
||||
"Failed to get container states for %s on %s: %s",
|
||||
stack,
|
||||
host_name,
|
||||
result.stderr or result.stdout,
|
||||
)
|
||||
return containers
|
||||
|
||||
# Build state map: name -> (state, exit_code)
|
||||
@@ -169,33 +177,34 @@ async def _get_container_states(
|
||||
|
||||
|
||||
def _render_containers(
|
||||
service: str, host: str, containers: list[dict[str, Any]], *, show_header: bool = False
|
||||
stack: str, host: str, containers: list[dict[str, Any]], *, show_header: bool = False
|
||||
) -> str:
|
||||
"""Render containers HTML using Jinja template."""
|
||||
templates = get_templates()
|
||||
template = templates.env.get_template("partials/containers.html")
|
||||
module = template.make_module()
|
||||
result: str = module.host_containers(service, host, containers, show_header=show_header)
|
||||
return result
|
||||
# TemplateModule exports macros as attributes; getattr keeps type checkers happy
|
||||
host_containers: Callable[..., str] = getattr(module, "host_containers") # noqa: B009
|
||||
return host_containers(stack, host, containers, show_header=show_header)
|
||||
|
||||
|
||||
@router.get("/service/{name}/containers", response_class=HTMLResponse)
|
||||
@router.get("/stack/{name}/containers", response_class=HTMLResponse)
|
||||
async def get_containers(name: str, host: str | None = None) -> HTMLResponse:
|
||||
"""Get containers for a service as HTML buttons.
|
||||
"""Get containers for a stack as HTML buttons.
|
||||
|
||||
If host is specified, queries Docker for that host's status.
|
||||
Otherwise returns all hosts with loading spinners that auto-fetch.
|
||||
"""
|
||||
config = get_config()
|
||||
|
||||
if name not in config.services:
|
||||
raise HTTPException(status_code=404, detail=f"Service '{name}' not found")
|
||||
if name not in config.stacks:
|
||||
raise HTTPException(status_code=404, detail=f"Stack '{name}' not found")
|
||||
|
||||
# Get hosts where service is running from state
|
||||
# Get hosts where stack is running from state
|
||||
state = load_state(config)
|
||||
current_hosts = state.get(name)
|
||||
if not current_hosts:
|
||||
return HTMLResponse('<span class="text-base-content/60">Service not running</span>')
|
||||
return HTMLResponse('<span class="text-base-content/60">Stack not running</span>')
|
||||
|
||||
all_hosts = current_hosts if isinstance(current_hosts, list) else [current_hosts]
|
||||
|
||||
@@ -222,7 +231,7 @@ async def get_containers(name: str, host: str | None = None) -> HTMLResponse:
|
||||
# Container for this host that auto-fetches its own status
|
||||
html_parts.append(f"""
|
||||
<div id="{host_id}"
|
||||
hx-get="/api/service/{name}/containers?host={h}"
|
||||
hx-get="/api/stack/{name}/containers?host={h}"
|
||||
hx-trigger="load"
|
||||
hx-target="this"
|
||||
hx-select="unset"
|
||||
@@ -234,24 +243,24 @@ async def get_containers(name: str, host: str | None = None) -> HTMLResponse:
|
||||
return HTMLResponse("".join(html_parts))
|
||||
|
||||
|
||||
@router.put("/service/{name}/compose")
|
||||
@router.put("/stack/{name}/compose")
|
||||
async def save_compose(
|
||||
name: str, content: Annotated[str, Body(media_type="text/plain")]
|
||||
) -> dict[str, Any]:
|
||||
"""Save compose file content."""
|
||||
compose_path = _get_service_compose_path(name)
|
||||
compose_path = _get_stack_compose_path(name)
|
||||
_validate_yaml(content)
|
||||
saved = _save_with_backup(compose_path, content)
|
||||
msg = "Compose file saved" if saved else "No changes to save"
|
||||
return {"success": True, "message": msg}
|
||||
|
||||
|
||||
@router.put("/service/{name}/env")
|
||||
@router.put("/stack/{name}/env")
|
||||
async def save_env(
|
||||
name: str, content: Annotated[str, Body(media_type="text/plain")]
|
||||
) -> dict[str, Any]:
|
||||
"""Save .env file content."""
|
||||
env_path = _get_service_compose_path(name).parent / ".env"
|
||||
env_path = _get_stack_compose_path(name).parent / ".env"
|
||||
saved = _save_with_backup(env_path, content)
|
||||
msg = ".env file saved" if saved else "No changes to save"
|
||||
return {"success": True, "message": msg}
|
||||
@@ -346,6 +355,7 @@ async def read_console_file(
|
||||
except PermissionError:
|
||||
raise HTTPException(status_code=403, detail=f"Permission denied: {path}") from None
|
||||
except Exception as e:
|
||||
logger.exception("Failed to read file %s from host %s", path, host)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
|
||||
@@ -369,4 +379,5 @@ async def write_console_file(
|
||||
except PermissionError:
|
||||
raise HTTPException(status_code=403, detail=f"Permission denied: {path}") from None
|
||||
except Exception as e:
|
||||
logger.exception("Failed to write file %s to host %s", path, host)
|
||||
raise HTTPException(status_code=500, detail=str(e)) from e
|
||||
|
||||
@@ -7,13 +7,14 @@ from fastapi import APIRouter, Request
|
||||
from fastapi.responses import HTMLResponse
|
||||
from pydantic import ValidationError
|
||||
|
||||
from compose_farm.compose import get_container_name
|
||||
from compose_farm.paths import find_config_path
|
||||
from compose_farm.state import (
|
||||
get_orphaned_services,
|
||||
get_service_host,
|
||||
get_services_needing_migration,
|
||||
get_services_not_in_state,
|
||||
group_running_services_by_host,
|
||||
get_orphaned_stacks,
|
||||
get_stack_host,
|
||||
get_stacks_needing_migration,
|
||||
get_stacks_not_in_state,
|
||||
group_running_stacks_by_host,
|
||||
load_state,
|
||||
)
|
||||
from compose_farm.web.deps import (
|
||||
@@ -74,7 +75,7 @@ async def index(request: Request) -> HTMLResponse:
|
||||
"request": request,
|
||||
"config_error": config_error,
|
||||
"hosts": {},
|
||||
"services": {},
|
||||
"stacks": {},
|
||||
"config_content": config_content,
|
||||
"state_content": "",
|
||||
"running_count": 0,
|
||||
@@ -82,7 +83,7 @@ async def index(request: Request) -> HTMLResponse:
|
||||
"orphaned": [],
|
||||
"migrations": [],
|
||||
"not_started": [],
|
||||
"services_by_host": {},
|
||||
"stacks_by_host": {},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -91,15 +92,15 @@ async def index(request: Request) -> HTMLResponse:
|
||||
|
||||
# Stats
|
||||
running_count = len(deployed)
|
||||
stopped_count = len(config.services) - running_count
|
||||
stopped_count = len(config.stacks) - running_count
|
||||
|
||||
# Pending operations
|
||||
orphaned = get_orphaned_services(config)
|
||||
migrations = get_services_needing_migration(config)
|
||||
not_started = get_services_not_in_state(config)
|
||||
orphaned = get_orphaned_stacks(config)
|
||||
migrations = get_stacks_needing_migration(config)
|
||||
not_started = get_stacks_not_in_state(config)
|
||||
|
||||
# Group services by host (filter out hosts with no running services)
|
||||
services_by_host = group_running_services_by_host(deployed, config.hosts)
|
||||
# Group stacks by host (filter out hosts with no running stacks)
|
||||
stacks_by_host = group_running_stacks_by_host(deployed, config.hosts)
|
||||
|
||||
# Config file content
|
||||
config_content = ""
|
||||
@@ -116,7 +117,7 @@ async def index(request: Request) -> HTMLResponse:
|
||||
"config_error": None,
|
||||
# Config data
|
||||
"hosts": config.hosts,
|
||||
"services": config.services,
|
||||
"stacks": config.stacks,
|
||||
"config_content": config_content,
|
||||
# State data
|
||||
"state_content": state_content,
|
||||
@@ -127,15 +128,15 @@ async def index(request: Request) -> HTMLResponse:
|
||||
"orphaned": orphaned,
|
||||
"migrations": migrations,
|
||||
"not_started": not_started,
|
||||
# Services by host
|
||||
"services_by_host": services_by_host,
|
||||
# Stacks by host
|
||||
"stacks_by_host": stacks_by_host,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/service/{name}", response_class=HTMLResponse)
|
||||
async def service_detail(request: Request, name: str) -> HTMLResponse:
|
||||
"""Service detail page."""
|
||||
@router.get("/stack/{name}", response_class=HTMLResponse)
|
||||
async def stack_detail(request: Request, name: str) -> HTMLResponse:
|
||||
"""Stack detail page."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
@@ -157,10 +158,30 @@ async def service_detail(request: Request, name: str) -> HTMLResponse:
|
||||
hosts = config.get_hosts(name)
|
||||
|
||||
# Get state
|
||||
current_host = get_service_host(config, name)
|
||||
current_host = get_stack_host(config, name)
|
||||
|
||||
# Get service names and container info from compose file
|
||||
services: list[str] = []
|
||||
containers: dict[str, dict[str, str]] = {}
|
||||
shell_host = current_host[0] if isinstance(current_host, list) else current_host
|
||||
if compose_content:
|
||||
compose_data = yaml.safe_load(compose_content) or {}
|
||||
raw_services = compose_data.get("services", {})
|
||||
if isinstance(raw_services, dict):
|
||||
services = list(raw_services.keys())
|
||||
# Build container info for shell access (only if stack is running)
|
||||
if shell_host:
|
||||
project_name = compose_path.parent.name if compose_path else name
|
||||
containers = {
|
||||
svc: {
|
||||
"container": get_container_name(svc, svc_def, project_name),
|
||||
"host": shell_host,
|
||||
}
|
||||
for svc, svc_def in raw_services.items()
|
||||
}
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"service.html",
|
||||
"stack.html",
|
||||
{
|
||||
"request": request,
|
||||
"name": name,
|
||||
@@ -170,30 +191,32 @@ async def service_detail(request: Request, name: str) -> HTMLResponse:
|
||||
"compose_path": str(compose_path) if compose_path else None,
|
||||
"env_content": env_content,
|
||||
"env_path": str(env_path) if env_path else None,
|
||||
"services": services,
|
||||
"containers": containers,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/sidebar", response_class=HTMLResponse)
|
||||
async def sidebar_partial(request: Request) -> HTMLResponse:
|
||||
"""Sidebar service list partial."""
|
||||
"""Sidebar stack list partial."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
state = load_state(config)
|
||||
|
||||
# Build service -> host mapping (empty string for multi-host services)
|
||||
service_hosts = {
|
||||
# Build stack -> host mapping (empty string for multi-host stacks)
|
||||
stack_hosts = {
|
||||
svc: "" if host_val == "all" or isinstance(host_val, list) else host_val
|
||||
for svc, host_val in config.services.items()
|
||||
for svc, host_val in config.stacks.items()
|
||||
}
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/sidebar.html",
|
||||
{
|
||||
"request": request,
|
||||
"services": sorted(config.services.keys()),
|
||||
"service_hosts": service_hosts,
|
||||
"stacks": sorted(config.stacks.keys()),
|
||||
"stack_hosts": stack_hosts,
|
||||
"hosts": sorted(config.hosts.keys()),
|
||||
"local_host": get_local_host(config),
|
||||
"state": state,
|
||||
@@ -223,14 +246,14 @@ async def stats_partial(request: Request) -> HTMLResponse:
|
||||
|
||||
deployed = load_state(config)
|
||||
running_count = len(deployed)
|
||||
stopped_count = len(config.services) - running_count
|
||||
stopped_count = len(config.stacks) - running_count
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/stats.html",
|
||||
{
|
||||
"request": request,
|
||||
"hosts": config.hosts,
|
||||
"services": config.services,
|
||||
"stacks": config.stacks,
|
||||
"running_count": running_count,
|
||||
"stopped_count": stopped_count,
|
||||
},
|
||||
@@ -243,9 +266,9 @@ async def pending_partial(request: Request, expanded: bool = True) -> HTMLRespon
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
orphaned = get_orphaned_services(config)
|
||||
migrations = get_services_needing_migration(config)
|
||||
not_started = get_services_not_in_state(config)
|
||||
orphaned = get_orphaned_stacks(config)
|
||||
migrations = get_stacks_needing_migration(config)
|
||||
not_started = get_stacks_not_in_state(config)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/pending.html",
|
||||
@@ -259,21 +282,21 @@ async def pending_partial(request: Request, expanded: bool = True) -> HTMLRespon
|
||||
)
|
||||
|
||||
|
||||
@router.get("/partials/services-by-host", response_class=HTMLResponse)
|
||||
async def services_by_host_partial(request: Request, expanded: bool = True) -> HTMLResponse:
|
||||
"""Services by host partial."""
|
||||
@router.get("/partials/stacks-by-host", response_class=HTMLResponse)
|
||||
async def stacks_by_host_partial(request: Request, expanded: bool = True) -> HTMLResponse:
|
||||
"""Stacks by host partial."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
deployed = load_state(config)
|
||||
services_by_host = group_running_services_by_host(deployed, config.hosts)
|
||||
stacks_by_host = group_running_stacks_by_host(deployed, config.hosts)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/services_by_host.html",
|
||||
"partials/stacks_by_host.html",
|
||||
{
|
||||
"request": request,
|
||||
"hosts": config.hosts,
|
||||
"services_by_host": services_by_host,
|
||||
"stacks_by_host": stacks_by_host,
|
||||
"expanded": expanded,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
/* Tooltips - ensure they appear above sidebar and other elements */
|
||||
.tooltip::before,
|
||||
.tooltip::after {
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
/* Sidebar inputs - remove focus outline (DaisyUI 5 uses outline + outline-offset) */
|
||||
#sidebar .input:focus,
|
||||
#sidebar .input:focus-within,
|
||||
|
||||
@@ -57,6 +57,10 @@ const LANGUAGE_MAP = {
|
||||
'env': 'plaintext'
|
||||
};
|
||||
|
||||
// Detect Mac for keyboard shortcut display
|
||||
const IS_MAC = navigator.platform.toUpperCase().indexOf('MAC') >= 0;
|
||||
const MOD_KEY = IS_MAC ? '⌘' : 'Ctrl';
|
||||
|
||||
// ============================================================================
|
||||
// STATE
|
||||
// ============================================================================
|
||||
@@ -210,7 +214,7 @@ window.initTerminal = initTerminal;
|
||||
/**
|
||||
* Initialize an interactive exec terminal
|
||||
*/
|
||||
function initExecTerminal(service, container, host) {
|
||||
function initExecTerminal(stack, container, host) {
|
||||
const containerEl = document.getElementById('exec-terminal-container');
|
||||
const terminalEl = document.getElementById('exec-terminal');
|
||||
|
||||
@@ -226,7 +230,7 @@ function initExecTerminal(service, container, host) {
|
||||
if (execTerminalWrapper) { execTerminalWrapper.dispose(); execTerminalWrapper = null; }
|
||||
|
||||
// Create WebSocket first so resize callback can use it
|
||||
execWs = createWebSocket(`/ws/exec/${service}/${container}/${host}`);
|
||||
execWs = createWebSocket(`/ws/exec/${stack}/${container}/${host}`);
|
||||
|
||||
// Resize callback sends size to WebSocket
|
||||
const sendSize = (cols, rows) => {
|
||||
@@ -456,14 +460,14 @@ function refreshDashboard() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter sidebar services by name and host
|
||||
* Filter sidebar stacks by name and host
|
||||
*/
|
||||
function sidebarFilter() {
|
||||
const q = (document.getElementById('sidebar-filter')?.value || '').toLowerCase();
|
||||
const h = document.getElementById('sidebar-host-select')?.value || '';
|
||||
let n = 0;
|
||||
document.querySelectorAll('#sidebar-services li').forEach(li => {
|
||||
const show = (!q || li.dataset.svc.includes(q)) && (!h || !li.dataset.h || li.dataset.h === h);
|
||||
document.querySelectorAll('#sidebar-stacks li').forEach(li => {
|
||||
const show = (!q || li.dataset.stack.includes(q)) && (!h || !li.dataset.h || li.dataset.h === h);
|
||||
li.hidden = !show;
|
||||
if (show) n++;
|
||||
});
|
||||
@@ -512,16 +516,22 @@ function playFabIntro() {
|
||||
const THEMES = ['light', 'dark', 'cupcake', 'bumblebee', 'emerald', 'corporate', 'synthwave', 'retro', 'cyberpunk', 'valentine', 'halloween', 'garden', 'forest', 'aqua', 'lofi', 'pastel', 'fantasy', 'wireframe', 'black', 'luxury', 'dracula', 'cmyk', 'autumn', 'business', 'acid', 'lemonade', 'night', 'coffee', 'winter', 'dim', 'nord', 'sunset', 'caramellatte', 'abyss', 'silk'];
|
||||
const THEME_KEY = 'cf_theme';
|
||||
|
||||
const colors = { service: '#22c55e', action: '#eab308', nav: '#3b82f6', app: '#a855f7', theme: '#ec4899' };
|
||||
const colors = { stack: '#22c55e', action: '#eab308', nav: '#3b82f6', app: '#a855f7', theme: '#ec4899', service: '#14b8a6' };
|
||||
let commands = [];
|
||||
let filtered = [];
|
||||
let selected = 0;
|
||||
let originalTheme = null; // Store theme when palette opens for preview/restore
|
||||
|
||||
const post = (url) => () => htmx.ajax('POST', url, {swap: 'none'});
|
||||
const nav = (url) => () => {
|
||||
const nav = (url, afterNav) => () => {
|
||||
// Set hash before HTMX swap so inline scripts can read it
|
||||
const hashIndex = url.indexOf('#');
|
||||
if (hashIndex !== -1) {
|
||||
window.location.hash = url.substring(hashIndex);
|
||||
}
|
||||
htmx.ajax('GET', url, {target: '#main-content', select: '#main-content', swap: 'outerHTML'}).then(() => {
|
||||
history.pushState({}, '', url);
|
||||
afterNav?.();
|
||||
});
|
||||
};
|
||||
// Navigate to dashboard (if needed) and trigger action
|
||||
@@ -559,33 +569,72 @@ function playFabIntro() {
|
||||
};
|
||||
|
||||
function buildCommands() {
|
||||
const openExternal = (url) => () => window.open(url, '_blank');
|
||||
|
||||
const actions = [
|
||||
cmd('action', 'Apply', 'Make reality match config', dashboardAction('apply'), icons.check),
|
||||
cmd('action', 'Refresh', 'Update state from reality', dashboardAction('refresh'), icons.refresh_cw),
|
||||
cmd('action', 'Pull All', 'Pull latest images for all stacks', dashboardAction('pull-all'), icons.cloud_download),
|
||||
cmd('action', 'Update All', 'Update all stacks', dashboardAction('update-all'), icons.refresh_cw),
|
||||
cmd('app', 'Theme', 'Change color theme', openThemePicker, icons.palette),
|
||||
cmd('app', 'Dashboard', 'Go to dashboard', nav('/'), icons.home),
|
||||
cmd('app', 'Console', 'Go to console', nav('/console'), icons.terminal),
|
||||
cmd('app', 'Edit Config', 'Edit compose-farm.yaml', nav('/console#editor'), icons.file_code),
|
||||
cmd('app', 'Docs', 'Open documentation', openExternal('https://compose-farm.nijho.lt/'), icons.book_open),
|
||||
];
|
||||
|
||||
// Add service-specific actions if on a service page
|
||||
const match = window.location.pathname.match(/^\/service\/(.+)$/);
|
||||
// Add stack-specific actions if on a stack page
|
||||
const match = window.location.pathname.match(/^\/stack\/(.+)$/);
|
||||
if (match) {
|
||||
const svc = decodeURIComponent(match[1]);
|
||||
const svcCmd = (name, desc, endpoint, icon) => cmd('service', name, `${desc} ${svc}`, post(`/api/service/${svc}/${endpoint}`), icon);
|
||||
const stack = decodeURIComponent(match[1]);
|
||||
const stackCmd = (name, desc, endpoint, icon) => cmd('stack', name, `${desc} ${stack}`, post(`/api/stack/${stack}/${endpoint}`), icon);
|
||||
actions.unshift(
|
||||
svcCmd('Up', 'Start', 'up', icons.play),
|
||||
svcCmd('Down', 'Stop', 'down', icons.square),
|
||||
svcCmd('Restart', 'Restart', 'restart', icons.rotate_cw),
|
||||
svcCmd('Pull', 'Pull', 'pull', icons.cloud_download),
|
||||
svcCmd('Update', 'Pull + restart', 'update', icons.refresh_cw),
|
||||
svcCmd('Logs', 'View logs for', 'logs', icons.file_text),
|
||||
stackCmd('Up', 'Start', 'up', icons.play),
|
||||
stackCmd('Down', 'Stop', 'down', icons.square),
|
||||
stackCmd('Restart', 'Restart', 'restart', icons.rotate_cw),
|
||||
stackCmd('Pull', 'Pull', 'pull', icons.cloud_download),
|
||||
stackCmd('Update', 'Pull + restart', 'update', icons.refresh_cw),
|
||||
stackCmd('Logs', 'View logs for', 'logs', icons.file_text),
|
||||
);
|
||||
|
||||
// Add service-specific commands from data-services and data-containers attributes
|
||||
// Grouped by action (all Logs together, all Pull together, etc.) with services sorted alphabetically
|
||||
const servicesAttr = document.querySelector('[data-services]')?.getAttribute('data-services');
|
||||
const containersAttr = document.querySelector('[data-containers]')?.getAttribute('data-containers');
|
||||
if (servicesAttr) {
|
||||
const services = servicesAttr.split(',').filter(s => s).sort();
|
||||
// Parse container info for shell access: {service: {container, host}}
|
||||
const containers = containersAttr ? JSON.parse(containersAttr) : {};
|
||||
|
||||
const svcCmd = (action, service, desc, endpoint, icon) =>
|
||||
cmd('service', `${action}: ${service}`, desc, post(`/api/stack/${stack}/service/${service}/${endpoint}`), icon);
|
||||
const svcActions = [
|
||||
['Logs', 'View logs for service', 'logs', icons.file_text],
|
||||
['Pull', 'Pull image for service', 'pull', icons.cloud_download],
|
||||
['Restart', 'Restart service', 'restart', icons.rotate_cw],
|
||||
['Stop', 'Stop service', 'stop', icons.square],
|
||||
['Up', 'Start service', 'up', icons.play],
|
||||
];
|
||||
for (const [action, desc, endpoint, icon] of svcActions) {
|
||||
for (const service of services) {
|
||||
actions.push(svcCmd(action, service, desc, endpoint, icon));
|
||||
}
|
||||
}
|
||||
// Add Shell commands if container info is available
|
||||
for (const service of services) {
|
||||
const info = containers[service];
|
||||
if (info?.container && info?.host) {
|
||||
actions.push(cmd('service', `Shell: ${service}`, 'Open interactive shell',
|
||||
() => initExecTerminal(stack, info.container, info.host), icons.terminal));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add nav commands for all services from sidebar
|
||||
const services = [...document.querySelectorAll('#sidebar-services li[data-svc] a[href]')].map(a => {
|
||||
const name = a.getAttribute('href').replace('/service/', '');
|
||||
return cmd('nav', name, 'Go to service', nav(`/service/${name}`), icons.box);
|
||||
// Add nav commands for all stacks from sidebar
|
||||
const stacks = [...document.querySelectorAll('#sidebar-stacks li[data-stack] a[href]')].map(a => {
|
||||
const name = a.getAttribute('href').replace('/stack/', '');
|
||||
return cmd('nav', name, 'Go to stack', nav(`/stack/${name}`), icons.box);
|
||||
});
|
||||
|
||||
// Add theme commands with color swatches
|
||||
@@ -594,12 +643,25 @@ function playFabIntro() {
|
||||
cmd('theme', `theme: ${theme}`, theme === currentTheme ? '(current)' : 'Switch theme', setTheme(theme), themeSwatch(theme), theme)
|
||||
);
|
||||
|
||||
commands = [...actions, ...services, ...themeCommands];
|
||||
commands = [...actions, ...stacks, ...themeCommands];
|
||||
}
|
||||
|
||||
function filter() {
|
||||
const q = input.value.toLowerCase();
|
||||
filtered = commands.filter(c => c.name.toLowerCase().includes(q));
|
||||
// Fuzzy matching: all query words must match the START of a word in the command name
|
||||
// Examples: "r ba" matches "Restart: bazarr" but NOT "Logs: bazarr"
|
||||
const q = input.value.toLowerCase().trim();
|
||||
// Split query into words and strip non-alphanumeric chars
|
||||
const queryWords = q.split(/[^a-z0-9]+/).filter(w => w);
|
||||
|
||||
filtered = commands.filter(c => {
|
||||
const name = c.name.toLowerCase();
|
||||
// Split command name into words (split on non-alphanumeric)
|
||||
const nameWords = name.split(/[^a-z0-9]+/).filter(w => w);
|
||||
// Each query word must match the start of some word in the command name
|
||||
return queryWords.every(qw =>
|
||||
nameWords.some(nw => nw.startsWith(qw))
|
||||
);
|
||||
});
|
||||
selected = Math.max(0, Math.min(selected, filtered.length - 1));
|
||||
}
|
||||
|
||||
@@ -631,7 +693,7 @@ function playFabIntro() {
|
||||
input.value = initialFilter;
|
||||
filter();
|
||||
// If opening theme picker, select current theme
|
||||
if (initialFilter === 'theme:') {
|
||||
if (initialFilter.startsWith('theme:')) {
|
||||
const currentIdx = filtered.findIndex(c => c.themeId === originalTheme);
|
||||
if (currentIdx >= 0) selected = currentIdx;
|
||||
}
|
||||
@@ -746,19 +808,38 @@ function initKeyboardShortcuts() {
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Update keyboard shortcut display based on OS
|
||||
* Replaces ⌘ with Ctrl on non-Mac platforms
|
||||
*/
|
||||
function updateShortcutKeys() {
|
||||
// Update elements with class 'shortcut-key' that contain ⌘
|
||||
document.querySelectorAll('.shortcut-key').forEach(el => {
|
||||
if (el.textContent === '⌘') {
|
||||
el.textContent = MOD_KEY;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize page components
|
||||
*/
|
||||
function initPage() {
|
||||
initMonacoEditors();
|
||||
initSaveButton();
|
||||
updateShortcutKeys();
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to reconnect to an active task from localStorage
|
||||
* @param {string} [path] - Optional path to use for task key lookup.
|
||||
* If not provided, uses current window.location.pathname.
|
||||
* This is important for HTMX navigation where pushState
|
||||
* hasn't happened yet when htmx:afterSwap fires.
|
||||
*/
|
||||
function tryReconnectToTask() {
|
||||
const taskId = localStorage.getItem(getTaskKey());
|
||||
function tryReconnectToTask(path) {
|
||||
const taskKey = TASK_KEY_PREFIX + (path || window.location.pathname);
|
||||
const taskId = localStorage.getItem(taskKey);
|
||||
if (!taskId) return;
|
||||
|
||||
whenXtermReady(() => {
|
||||
@@ -781,8 +862,12 @@ document.addEventListener('DOMContentLoaded', function() {
|
||||
document.body.addEventListener('htmx:afterSwap', function(evt) {
|
||||
if (evt.detail.target.id === 'main-content') {
|
||||
initPage();
|
||||
// Try to reconnect when navigating back to dashboard
|
||||
tryReconnectToTask();
|
||||
// Try to reconnect to task for the TARGET page, not current URL.
|
||||
// When using command palette navigation (htmx.ajax + manual pushState),
|
||||
// window.location.pathname still reflects the OLD page at this point.
|
||||
// Use pathInfo.requestPath to get the correct target path.
|
||||
const targetPath = evt.detail.pathInfo?.requestPath?.split('?')[0] || window.location.pathname;
|
||||
tryReconnectToTask(targetPath);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -13,8 +13,8 @@ from compose_farm.ssh_keys import get_ssh_auth_sock
|
||||
if TYPE_CHECKING:
|
||||
from compose_farm.config import Config
|
||||
|
||||
# Environment variable to identify the web service (for self-update detection)
|
||||
CF_WEB_SERVICE = os.environ.get("CF_WEB_SERVICE", "")
|
||||
# Environment variable to identify the web stack (for self-update detection)
|
||||
CF_WEB_STACK = os.environ.get("CF_WEB_STACK", "")
|
||||
|
||||
# ANSI escape codes for terminal output
|
||||
RED = "\x1b[31m"
|
||||
@@ -95,13 +95,13 @@ async def run_cli_streaming(
|
||||
tasks[task_id]["completed_at"] = time.time()
|
||||
|
||||
|
||||
def _is_self_update(service: str, command: str) -> bool:
|
||||
"""Check if this is a self-update (updating the web service itself).
|
||||
def _is_self_update(stack: str, command: str) -> bool:
|
||||
"""Check if this is a self-update (updating the web stack itself).
|
||||
|
||||
Self-updates need special handling because running 'down' on the container
|
||||
we're running in would kill the process before 'up' can execute.
|
||||
"""
|
||||
if not CF_WEB_SERVICE or service != CF_WEB_SERVICE:
|
||||
if not CF_WEB_STACK or stack != CF_WEB_STACK:
|
||||
return False
|
||||
# Commands that involve 'down' need SSH: update, restart, down
|
||||
return command in ("update", "restart", "down")
|
||||
@@ -114,7 +114,7 @@ async def _run_cli_via_ssh(
|
||||
) -> None:
|
||||
"""Run a cf CLI command via SSH for self-updates (survives container restart)."""
|
||||
try:
|
||||
host = config.get_host(CF_WEB_SERVICE)
|
||||
host = config.get_host(CF_WEB_STACK)
|
||||
cf_cmd = f"cf {' '.join(args)} --config={config.config_path}"
|
||||
# Include task_id to prevent collision with concurrent updates
|
||||
log_file = f"/tmp/cf-self-update-{task_id}.log" # noqa: S108
|
||||
@@ -156,7 +156,7 @@ async def _run_cli_via_ssh(
|
||||
|
||||
async def run_compose_streaming(
|
||||
config: Config,
|
||||
service: str,
|
||||
stack: str,
|
||||
command: str,
|
||||
task_id: str,
|
||||
) -> None:
|
||||
@@ -167,10 +167,10 @@ async def run_compose_streaming(
|
||||
extra_args = args[1:] # -d, etc.
|
||||
|
||||
# Build CLI args
|
||||
cli_args = [cli_cmd, service, *extra_args]
|
||||
cli_args = [cli_cmd, stack, *extra_args]
|
||||
|
||||
# Use SSH for self-updates to survive container restart
|
||||
if _is_self_update(service, cli_cmd):
|
||||
if _is_self_update(stack, cli_cmd):
|
||||
await _run_cli_via_ssh(config, cli_args, task_id)
|
||||
else:
|
||||
await run_cli_streaming(config, cli_args, task_id)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{% from "partials/icons.html" import github, hamburger, palette %}
|
||||
{% from "partials/icons.html" import github, book_open, hamburger, palette %}
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" data-theme="dark">
|
||||
<head>
|
||||
@@ -39,7 +39,7 @@
|
||||
<span class="font-semibold rainbow-hover">Compose Farm</span>
|
||||
</header>
|
||||
|
||||
<main id="main-content" class="flex-1 p-6 overflow-y-auto">
|
||||
<main id="main-content" class="flex-1 p-6">
|
||||
{% block content %}{% endblock %}
|
||||
</main>
|
||||
</div>
|
||||
@@ -51,6 +51,9 @@
|
||||
<header class="p-4 border-b border-base-300">
|
||||
<h2 class="text-lg font-semibold flex items-center gap-2">
|
||||
<span class="rainbow-hover">Compose Farm</span>
|
||||
<a href="https://compose-farm.nijho.lt/" target="_blank" title="Docs" class="opacity-50 hover:opacity-100 transition-opacity">
|
||||
{{ book_open() }}
|
||||
</a>
|
||||
<a href="https://github.com/basnijholt/compose-farm" target="_blank" title="GitHub" class="opacity-50 hover:opacity-100 transition-opacity">
|
||||
{{ github() }}
|
||||
</a>
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
<option value="{{ name }}">{{ name }}{% if name == local_host %} (local){% endif %}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
<button id="console-connect-btn" class="btn btn-sm btn-primary" onclick="connectConsole()">Connect</button>
|
||||
<div class="tooltip" data-tip="Connect to host via SSH"><button id="console-connect-btn" class="btn btn-sm btn-primary" onclick="connectConsole()">Connect</button></div>
|
||||
<span id="console-status" class="text-sm opacity-60"></span>
|
||||
</div>
|
||||
|
||||
@@ -29,11 +29,11 @@
|
||||
<div class="flex items-center justify-between mb-2">
|
||||
<div class="flex items-center gap-4">
|
||||
<input type="text" id="console-file-path" class="input input-sm input-bordered w-96" placeholder="Enter file path (e.g., ~/docker-compose.yaml)" value="{{ config_path }}">
|
||||
<button class="btn btn-sm btn-outline" onclick="loadFile()">Open</button>
|
||||
<div class="tooltip" data-tip="Load file from host"><button class="btn btn-sm btn-outline" onclick="loadFile()">Open</button></div>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<span id="editor-status" class="text-sm opacity-60"></span>
|
||||
<button id="console-save-btn" class="btn btn-sm btn-primary" onclick="saveFile()">{{ save() }} Save</button>
|
||||
<div class="tooltip" data-tip="Save file to host (⌘/Ctrl+S)"><button id="console-save-btn" class="btn btn-sm btn-primary" onclick="saveFile()">{{ save() }} Save</button></div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="console-editor" class="resize-y overflow-hidden rounded-lg" style="height: 512px; min-height: 200px;"></div>
|
||||
@@ -97,7 +97,10 @@ function connectConsole() {
|
||||
consoleWs.onopen = () => {
|
||||
statusEl.textContent = `Connected to ${host}`;
|
||||
sendSize(term.cols, term.rows);
|
||||
term.focus();
|
||||
// Focus terminal unless #editor hash is present (command palette Edit Config)
|
||||
if (window.location.hash !== '#editor') {
|
||||
term.focus();
|
||||
}
|
||||
// Auto-load the default file once editor is ready
|
||||
const pathInput = document.getElementById('console-file-path');
|
||||
if (pathInput && pathInput.value) {
|
||||
@@ -133,6 +136,14 @@ function initConsoleEditor() {
|
||||
|
||||
loadMonaco(() => {
|
||||
consoleEditor = createEditor(editorEl, '', 'plaintext', { onSave: saveFile });
|
||||
// Focus editor if #editor hash is present (command palette Edit Config)
|
||||
if (window.location.hash === '#editor') {
|
||||
// Small delay for Monaco to fully initialize before focusing
|
||||
setTimeout(() => {
|
||||
consoleEditor.focus();
|
||||
editorEl.scrollIntoView({ behavior: 'smooth', block: 'center' });
|
||||
}, 100);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{% extends "base.html" %}
|
||||
{% from "partials/components.html" import page_header, collapse, stat_card, table, action_btn %}
|
||||
{% from "partials/icons.html" import check, refresh_cw, save, settings, server, database %}
|
||||
{% from "partials/icons.html" import check, refresh_cw, save, settings, server, database, cloud_download, rotate_cw %}
|
||||
{% block title %}Dashboard - Compose Farm{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
@@ -17,7 +17,9 @@
|
||||
<div class="flex flex-wrap gap-2 mb-6">
|
||||
{{ action_btn("Apply", "/api/apply", "primary", "Make reality match config", check()) }}
|
||||
{{ action_btn("Refresh", "/api/refresh", "outline", "Update state from reality", refresh_cw()) }}
|
||||
<button id="save-config-btn" class="btn btn-outline">{{ save() }} Save Config</button>
|
||||
{{ action_btn("Pull All", "/api/pull-all", "outline", "Pull latest images for all stacks", cloud_download()) }}
|
||||
{{ action_btn("Update All", "/api/update-all", "outline", "Update all stacks (pull + build + down + up)", rotate_cw()) }}
|
||||
<div class="tooltip" data-tip="Save compose-farm.yaml config file"><button id="save-config-btn" class="btn btn-outline">{{ save() }} Save Config</button></div>
|
||||
</div>
|
||||
|
||||
{% include "partials/terminal.html" %}
|
||||
@@ -45,10 +47,10 @@
|
||||
{% include "partials/pending.html" %}
|
||||
</div>
|
||||
|
||||
<!-- Services by Host -->
|
||||
<div id="services-by-host"
|
||||
hx-get="/partials/services-by-host" hx-trigger="cf:refresh from:body" hx-swap="innerHTML">
|
||||
{% include "partials/services_by_host.html" %}
|
||||
<!-- Stacks by Host -->
|
||||
<div id="stacks-by-host"
|
||||
hx-get="/partials/stacks-by-host" hx-trigger="cf:refresh from:body" hx-swap="innerHTML">
|
||||
{% include "partials/stacks_by_host.html" %}
|
||||
</div>
|
||||
|
||||
<!-- Hosts Configuration -->
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{% from "partials/icons.html" import search, play, square, rotate_cw, cloud_download, refresh_cw, file_text, check, home, terminal, box, palette %}
|
||||
{% from "partials/icons.html" import search, play, square, rotate_cw, cloud_download, refresh_cw, file_text, file_code, check, home, terminal, box, palette, book_open %}
|
||||
|
||||
<!-- Icons for command palette (referenced by JS) -->
|
||||
<template id="cmd-icons">
|
||||
@@ -13,6 +13,8 @@
|
||||
<span data-icon="terminal">{{ terminal() }}</span>
|
||||
<span data-icon="box">{{ box() }}</span>
|
||||
<span data-icon="palette">{{ palette() }}</span>
|
||||
<span data-icon="book_open">{{ book_open() }}</span>
|
||||
<span data-icon="file_code">{{ file_code() }}</span>
|
||||
</template>
|
||||
<dialog id="cmd-palette" class="modal">
|
||||
<div class="modal-box max-w-lg p-0">
|
||||
@@ -29,8 +31,8 @@
|
||||
</dialog>
|
||||
|
||||
<!-- Floating button to open command palette -->
|
||||
<button id="cmd-fab" class="fixed bottom-6 right-6 z-50" title="Command Palette (⌘K)">
|
||||
<button id="cmd-fab" class="fixed bottom-6 right-6 z-50" title="Command Palette (⌘/Ctrl+K)">
|
||||
<div class="cmd-fab-inner">
|
||||
<span>⌘ + K</span>
|
||||
<span class="shortcut-key">⌘</span><span class="shortcut-plus"> + </span><span class="shortcut-key">K</span>
|
||||
</div>
|
||||
</button>
|
||||
|
||||
@@ -25,12 +25,13 @@
|
||||
|
||||
{# Action button with htmx #}
|
||||
{% macro action_btn(label, url, style="outline", title=None, icon=None) %}
|
||||
{% if title %}<div class="tooltip" data-tip="{{ title }}">{% endif %}
|
||||
<button hx-post="{{ url }}"
|
||||
hx-swap="none"
|
||||
class="btn btn-{{ style }}"
|
||||
{% if title %}title="{{ title }}"{% endif %}>
|
||||
class="btn btn-{{ style }}">
|
||||
{% if icon %}{{ icon }}{% endif %}{{ label }}
|
||||
</button>
|
||||
{% if title %}</div>{% endif %}
|
||||
{% endmacro %}
|
||||
|
||||
{# Stat card for dashboard #}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{# Container list for a service on a single host #}
|
||||
{% from "partials/icons.html" import terminal %}
|
||||
{% macro container_row(service, container, host) %}
|
||||
{# Container list for a stack on a single host #}
|
||||
{% from "partials/icons.html" import terminal, rotate_ccw, scroll_text, square, play, cloud_download %}
|
||||
{% macro container_row(stack, container, host) %}
|
||||
<div class="flex items-center gap-2 mb-2">
|
||||
{% if container.State == "running" %}
|
||||
<span class="badge badge-success">running</span>
|
||||
@@ -18,18 +18,57 @@
|
||||
<span class="badge badge-warning">{{ container.State }}</span>
|
||||
{% endif %}
|
||||
<code class="text-sm flex-1">{{ container.Name }}</code>
|
||||
<button class="btn btn-sm btn-outline"
|
||||
onclick="initExecTerminal('{{ service }}', '{{ container.Name }}', '{{ host }}')">
|
||||
{{ terminal() }} Shell
|
||||
</button>
|
||||
<div class="join">
|
||||
<div class="tooltip tooltip-top" data-tip="View logs">
|
||||
<button class="btn btn-sm btn-outline join-item"
|
||||
hx-post="/api/stack/{{ stack }}/service/{{ container.Service }}/logs"
|
||||
hx-swap="none">
|
||||
{{ scroll_text() }}
|
||||
</button>
|
||||
</div>
|
||||
<div class="tooltip tooltip-top" data-tip="Restart service">
|
||||
<button class="btn btn-sm btn-outline join-item"
|
||||
hx-post="/api/stack/{{ stack }}/service/{{ container.Service }}/restart"
|
||||
hx-swap="none">
|
||||
{{ rotate_ccw() }}
|
||||
</button>
|
||||
</div>
|
||||
<div class="tooltip tooltip-top" data-tip="Pull image">
|
||||
<button class="btn btn-sm btn-outline join-item"
|
||||
hx-post="/api/stack/{{ stack }}/service/{{ container.Service }}/pull"
|
||||
hx-swap="none">
|
||||
{{ cloud_download() }}
|
||||
</button>
|
||||
</div>
|
||||
<div class="tooltip tooltip-top" data-tip="Start service">
|
||||
<button class="btn btn-sm btn-outline join-item"
|
||||
hx-post="/api/stack/{{ stack }}/service/{{ container.Service }}/up"
|
||||
hx-swap="none">
|
||||
{{ play() }}
|
||||
</button>
|
||||
</div>
|
||||
<div class="tooltip tooltip-top" data-tip="Stop service">
|
||||
<button class="btn btn-sm btn-outline join-item"
|
||||
hx-post="/api/stack/{{ stack }}/service/{{ container.Service }}/stop"
|
||||
hx-swap="none">
|
||||
{{ square() }}
|
||||
</button>
|
||||
</div>
|
||||
<div class="tooltip tooltip-top" data-tip="Open shell">
|
||||
<button class="btn btn-sm btn-outline join-item"
|
||||
onclick="initExecTerminal('{{ stack }}', '{{ container.Name }}', '{{ host }}')">
|
||||
{{ terminal() }}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endmacro %}
|
||||
|
||||
{% macro host_containers(service, host, containers, show_header=False) %}
|
||||
{% macro host_containers(stack, host, containers, show_header=False) %}
|
||||
{% if show_header %}
|
||||
<div class="font-semibold text-sm mt-3 mb-1">{{ host }}</div>
|
||||
{% endif %}
|
||||
{% for container in containers %}
|
||||
{{ container_row(service, container, host) }}
|
||||
{{ container_row(stack, container, host) }}
|
||||
{% endfor %}
|
||||
{% endmacro %}
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
{# Lucide-style icons (https://lucide.dev) - 24x24 viewBox, 2px stroke, round caps #}
|
||||
|
||||
{# Brand icons #}
|
||||
{% macro book_open(size=16) %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path d="M12 7v14"/><path d="M3 18a1 1 0 0 1-1-1V4a1 1 0 0 1 1-1h5a4 4 0 0 1 4 4 4 4 0 0 1 4-4h5a1 1 0 0 1 1 1v13a1 1 0 0 1-1 1h-6a3 3 0 0 0-3 3 3 3 0 0 0-3-3z"/>
|
||||
</svg>
|
||||
{% endmacro %}
|
||||
|
||||
{% macro github(size=16) %}
|
||||
<svg height="{{ size }}" width="{{ size }}" viewBox="0 0 16 16" fill="currentColor"><path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"/></svg>
|
||||
{% endmacro %}
|
||||
@@ -37,6 +43,18 @@
|
||||
</svg>
|
||||
{% endmacro %}
|
||||
|
||||
{% macro rotate_ccw(size=16) %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path d="M3 12a9 9 0 1 0 9-9 9.75 9.75 0 0 0-6.74 2.74L3 8"/><path d="M3 3v5h5"/>
|
||||
</svg>
|
||||
{% endmacro %}
|
||||
|
||||
{% macro scroll_text(size=16) %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path d="M15 12h-5"/><path d="M15 8h-5"/><path d="M19 17V5a2 2 0 0 0-2-2H4"/><path d="M8 21h12a2 2 0 0 0 2-2v-1a1 1 0 0 0-1-1H11a1 1 0 0 0-1 1v1a2 2 0 1 1-4 0V5a2 2 0 1 0-4 0v2a1 1 0 0 0 1 1h3"/>
|
||||
</svg>
|
||||
{% endmacro %}
|
||||
|
||||
{% macro download(size=16) %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"/><polyline points="7 10 12 15 17 10"/><line x1="12" x2="12" y1="15" y2="3"/>
|
||||
|
||||
@@ -3,28 +3,28 @@
|
||||
{% if orphaned or migrations or not_started %}
|
||||
{% call collapse("Pending Operations", id="pending-collapse", checked=expanded|default(true)) %}
|
||||
{% if orphaned %}
|
||||
<h4 class="font-semibold mt-2 mb-1">Orphaned Services (will be stopped)</h4>
|
||||
<h4 class="font-semibold mt-2 mb-1">Orphaned Stacks (will be stopped)</h4>
|
||||
<ul class="list-disc list-inside mb-4">
|
||||
{% for svc, host in orphaned.items() %}
|
||||
<li><a href="/service/{{ svc }}" class="badge badge-warning hover:badge-primary">{{ svc }}</a> on {{ host }}</li>
|
||||
<li><a href="/stack/{{ svc }}" class="badge badge-warning hover:badge-primary">{{ svc }}</a> on {{ host }}</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
|
||||
{% if migrations %}
|
||||
<h4 class="font-semibold mt-2 mb-1">Services Needing Migration</h4>
|
||||
<h4 class="font-semibold mt-2 mb-1">Stacks Needing Migration</h4>
|
||||
<ul class="list-disc list-inside mb-4">
|
||||
{% for svc in migrations %}
|
||||
<li><a href="/service/{{ svc }}" class="badge badge-info hover:badge-primary">{{ svc }}</a></li>
|
||||
<li><a href="/stack/{{ svc }}" class="badge badge-info hover:badge-primary">{{ svc }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
|
||||
{% if not_started %}
|
||||
<h4 class="font-semibold mt-2 mb-1">Services Not Started</h4>
|
||||
<h4 class="font-semibold mt-2 mb-1">Stacks Not Started</h4>
|
||||
<ul class="menu menu-horizontal bg-base-200 rounded-box mb-2">
|
||||
{% for svc in not_started | sort %}
|
||||
<li><a href="/service/{{ svc }}">{{ svc }}</a></li>
|
||||
<li><a href="/stack/{{ svc }}">{{ svc }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
@@ -32,6 +32,6 @@
|
||||
{% else %}
|
||||
<div role="alert" class="alert alert-success mb-4">
|
||||
<span class="shrink-0">{{ circle_check(24) }}</span>
|
||||
<span>All services are in sync with configuration.</span>
|
||||
<span>All stacks are in sync with configuration.</span>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
@@ -7,9 +7,9 @@
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<!-- Services Section -->
|
||||
<!-- Stacks Section -->
|
||||
<div class="mb-4">
|
||||
<h4 class="text-xs uppercase tracking-wide text-base-content/60 px-3 py-1">Services <span class="opacity-50" id="sidebar-count">({{ services | length }})</span></h4>
|
||||
<h4 class="text-xs uppercase tracking-wide text-base-content/60 px-3 py-1">Stacks <span class="opacity-50" id="sidebar-count">({{ stacks | length }})</span></h4>
|
||||
<div class="px-2 mb-2 flex flex-col gap-1">
|
||||
<label class="input input-xs flex items-center gap-2 bg-base-200">
|
||||
{{ search(14) }}<input type="text" id="sidebar-filter" placeholder="Filter..." onkeyup="sidebarFilter()" />
|
||||
@@ -19,13 +19,13 @@
|
||||
{% for h in hosts %}<option value="{{ h }}">{{ h }}{% if h == local_host %} (local){% endif %}</option>{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
<ul class="menu menu-sm" id="sidebar-services" hx-boost="true" hx-target="#main-content" hx-select="#main-content" hx-swap="outerHTML">
|
||||
{% for service in services %}
|
||||
<li data-svc="{{ service | lower }}" data-h="{{ service_hosts.get(service, '') }}">
|
||||
<a href="/service/{{ service }}" class="flex items-center gap-2">
|
||||
{% if service in state %}<span class="status status-success" title="In state file"></span>
|
||||
<ul class="menu menu-sm" id="sidebar-stacks" hx-boost="true" hx-target="#main-content" hx-select="#main-content" hx-swap="outerHTML">
|
||||
{% for stack in stacks %}
|
||||
<li data-stack="{{ stack | lower }}" data-h="{{ stack_hosts.get(stack, '') }}">
|
||||
<a href="/stack/{{ stack }}" class="flex items-center gap-2">
|
||||
{% if stack in state %}<span class="status status-success" title="In state file"></span>
|
||||
{% else %}<span class="status status-neutral" title="Not in state file"></span>{% endif %}
|
||||
{{ service }}
|
||||
{{ stack }}
|
||||
</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
|
||||
@@ -1,24 +1,24 @@
|
||||
{% from "partials/components.html" import collapse %}
|
||||
{% from "partials/icons.html" import layers, search %}
|
||||
{% call collapse("Services by Host", id="services-by-host-collapse", checked=expanded|default(true), icon=layers()) %}
|
||||
{% call collapse("Stacks by Host", id="stacks-by-host-collapse", checked=expanded|default(true), icon=layers()) %}
|
||||
<div class="flex flex-wrap gap-2 mb-4 items-center">
|
||||
<label class="input input-sm input-bordered flex items-center gap-2 bg-base-200">
|
||||
{{ search() }}<input type="text" id="sbh-filter" class="w-32" placeholder="Filter..." onkeyup="sbhFilter()" />
|
||||
</label>
|
||||
<select id="sbh-host-select" class="select select-sm select-bordered bg-base-200" onchange="sbhFilter()">
|
||||
<option value="">All hosts</option>
|
||||
{% for h in services_by_host.keys() | sort %}<option value="{{ h }}">{{ h }}</option>{% endfor %}
|
||||
{% for h in stacks_by_host.keys() | sort %}<option value="{{ h }}">{{ h }}</option>{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
{% for host_name, host_services in services_by_host.items() | sort %}
|
||||
{% for host_name, host_stacks in stacks_by_host.items() | sort %}
|
||||
<div class="sbh-group" data-h="{{ host_name }}">
|
||||
<h4 class="font-semibold mt-3 mb-1">{{ host_name }}{% if host_name in hosts %}<code class="text-xs ml-2 opacity-60">{{ hosts[host_name].address }}</code>{% endif %}</h4>
|
||||
<ul class="menu menu-horizontal bg-base-200 rounded-box mb-2 flex-wrap">
|
||||
{% for svc in host_services | sort %}<li data-s="{{ svc | lower }}"><a href="/service/{{ svc }}">{{ svc }}</a></li>{% endfor %}
|
||||
{% for stack in host_stacks | sort %}<li data-s="{{ stack | lower }}"><a href="/stack/{{ stack }}">{{ stack }}</a></li>{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
{% else %}
|
||||
<p class="text-base-content/60 italic">No services currently running.</p>
|
||||
<p class="text-base-content/60 italic">No stacks currently running.</p>
|
||||
{% endfor %}
|
||||
<script>
|
||||
function sbhFilter() {
|
||||
@@ -1,6 +1,6 @@
|
||||
{% from "partials/components.html" import stat_card %}
|
||||
{% from "partials/icons.html" import server, layers, circle_check, circle_x %}
|
||||
{{ stat_card("Hosts", hosts | length, icon=server()) }}
|
||||
{{ stat_card("Services", services | length, icon=layers()) }}
|
||||
{{ stat_card("Stacks", stacks | length, icon=layers()) }}
|
||||
{{ stat_card("Running", running_count, "success", circle_check()) }}
|
||||
{{ stat_card("Stopped", stopped_count, icon=circle_x()) }}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
{% block title %}{{ name }} - Compose Farm{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="max-w-5xl">
|
||||
<div class="max-w-5xl" data-services="{{ services | join(',') }}" data-containers='{{ containers | tojson }}'>
|
||||
<div class="mb-6">
|
||||
<h1 class="text-3xl font-bold rainbow-hover">{{ name }}</h1>
|
||||
<div class="flex flex-wrap items-center gap-2 mt-2">
|
||||
@@ -20,28 +20,28 @@
|
||||
<!-- Action Buttons -->
|
||||
<div class="flex flex-wrap gap-2 mb-6">
|
||||
<!-- Lifecycle -->
|
||||
{{ action_btn("Up", "/api/service/" ~ name ~ "/up", "primary", "Start service (docker compose up -d)", play()) }}
|
||||
{{ action_btn("Down", "/api/service/" ~ name ~ "/down", "outline", "Stop service (docker compose down)", square()) }}
|
||||
{{ action_btn("Restart", "/api/service/" ~ name ~ "/restart", "secondary", "Restart service (down + up)", rotate_cw()) }}
|
||||
{{ action_btn("Update", "/api/service/" ~ name ~ "/update", "accent", "Update to latest (pull + build + down + up)", download()) }}
|
||||
{{ action_btn("Up", "/api/stack/" ~ name ~ "/up", "primary", "Start stack (docker compose up -d)", play()) }}
|
||||
{{ action_btn("Down", "/api/stack/" ~ name ~ "/down", "outline", "Stop stack (docker compose down)", square()) }}
|
||||
{{ action_btn("Restart", "/api/stack/" ~ name ~ "/restart", "secondary", "Restart stack (down + up)", rotate_cw()) }}
|
||||
{{ action_btn("Update", "/api/stack/" ~ name ~ "/update", "accent", "Update to latest (pull + build + down + up)", download()) }}
|
||||
|
||||
<div class="divider divider-horizontal mx-0"></div>
|
||||
|
||||
<!-- Other -->
|
||||
{{ action_btn("Pull", "/api/service/" ~ name ~ "/pull", "outline", "Pull latest images (no restart)", cloud_download()) }}
|
||||
{{ action_btn("Logs", "/api/service/" ~ name ~ "/logs", "outline", "Show recent logs", file_text()) }}
|
||||
<button id="save-btn" class="btn btn-outline">{{ save() }} Save All</button>
|
||||
{{ action_btn("Pull", "/api/stack/" ~ name ~ "/pull", "outline", "Pull latest images (no restart)", cloud_download()) }}
|
||||
{{ action_btn("Logs", "/api/stack/" ~ name ~ "/logs", "outline", "Show recent logs", file_text()) }}
|
||||
<div class="tooltip" data-tip="Save compose and .env files"><button id="save-btn" class="btn btn-outline">{{ save() }} Save All</button></div>
|
||||
</div>
|
||||
|
||||
{% call collapse("Compose File", badge=compose_path, icon=file_code()) %}
|
||||
<div class="editor-wrapper yaml-wrapper">
|
||||
<div id="compose-editor" class="yaml-editor" data-content="{{ compose_content | e }}" data-save-url="/api/service/{{ name }}/compose"></div>
|
||||
<div id="compose-editor" class="yaml-editor" data-content="{{ compose_content | e }}" data-save-url="/api/stack/{{ name }}/compose"></div>
|
||||
</div>
|
||||
{% endcall %}
|
||||
|
||||
{% call collapse(".env File", badge=env_path, icon=settings()) %}
|
||||
<div class="editor-wrapper env-wrapper">
|
||||
<div id="env-editor" class="env-editor" data-content="{{ env_content | e }}" data-save-url="/api/service/{{ name }}/env"></div>
|
||||
<div id="env-editor" class="env-editor" data-content="{{ env_content | e }}" data-save-url="/api/stack/{{ name }}/env"></div>
|
||||
</div>
|
||||
{% endcall %}
|
||||
|
||||
@@ -51,7 +51,7 @@
|
||||
{% if current_host %}
|
||||
{% call collapse("Container Shell", id="exec-collapse", checked=True, icon=terminal()) %}
|
||||
<div id="containers-list" class="mb-4"
|
||||
hx-get="/api/service/{{ name }}/containers"
|
||||
hx-get="/api/stack/{{ name }}/containers"
|
||||
hx-trigger="load"
|
||||
hx-target="this"
|
||||
hx-select="unset"
|
||||
@@ -6,6 +6,7 @@ import asyncio
|
||||
import contextlib
|
||||
import fcntl
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pty
|
||||
import shlex
|
||||
@@ -21,6 +22,8 @@ from compose_farm.executor import is_local, ssh_connect_kwargs
|
||||
from compose_farm.web.deps import get_config
|
||||
from compose_farm.web.streaming import CRLF, DIM, GREEN, RED, RESET, tasks
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Shell command to prefer bash over sh
|
||||
SHELL_FALLBACK = "command -v bash >/dev/null && exec bash || exec sh"
|
||||
|
||||
@@ -197,10 +200,10 @@ async def _run_exec_session(
|
||||
await _run_remote_exec(websocket, host, exec_cmd)
|
||||
|
||||
|
||||
@router.websocket("/ws/exec/{service}/{container}/{host}")
|
||||
@router.websocket("/ws/exec/{stack}/{container}/{host}")
|
||||
async def exec_websocket(
|
||||
websocket: WebSocket,
|
||||
service: str, # noqa: ARG001
|
||||
stack: str, # noqa: ARG001
|
||||
container: str,
|
||||
host: str,
|
||||
) -> None:
|
||||
@@ -214,6 +217,7 @@ async def exec_websocket(
|
||||
except WebSocketDisconnect:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.exception("WebSocket exec error for %s on %s", container, host)
|
||||
with contextlib.suppress(Exception):
|
||||
await websocket.send_text(f"{RED}Error: {e}{RESET}{CRLF}")
|
||||
finally:
|
||||
@@ -232,8 +236,8 @@ async def _run_shell_session(
|
||||
await websocket.send_text(f"{RED}Host '{host_name}' not found{RESET}{CRLF}")
|
||||
return
|
||||
|
||||
# Start interactive shell in home directory (avoid login shell to prevent job control warnings)
|
||||
shell_cmd = "cd ~ && exec bash -i 2>/dev/null || exec sh -i"
|
||||
# Start interactive shell in home directory
|
||||
shell_cmd = "cd ~ && exec bash -i || exec sh -i"
|
||||
|
||||
if is_local(host):
|
||||
# Local: use argv list with shell -c to interpret the command
|
||||
@@ -258,6 +262,7 @@ async def shell_websocket(
|
||||
except WebSocketDisconnect:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.exception("WebSocket shell error for host %s", host)
|
||||
with contextlib.suppress(Exception):
|
||||
await websocket.send_text(f"{RED}Error: {e}{RESET}{CRLF}")
|
||||
finally:
|
||||
|
||||
@@ -11,16 +11,18 @@ from compose_farm.config import Config, Host
|
||||
from compose_farm.executor import CommandResult
|
||||
|
||||
|
||||
def _make_config(tmp_path: Path, services: dict[str, str] | None = None) -> Config:
|
||||
def _make_config(tmp_path: Path, stacks: dict[str, str] | None = None) -> Config:
|
||||
"""Create a minimal config for testing."""
|
||||
compose_dir = tmp_path / "compose"
|
||||
compose_dir.mkdir()
|
||||
|
||||
svc_dict = services or {"svc1": "host1", "svc2": "host2"}
|
||||
for svc in svc_dict:
|
||||
svc_dir = compose_dir / svc
|
||||
svc_dir.mkdir()
|
||||
(svc_dir / "docker-compose.yml").write_text("services: {}\n")
|
||||
stack_dict: dict[str, str | list[str]] = (
|
||||
dict(stacks) if stacks else {"svc1": "host1", "svc2": "host2"}
|
||||
)
|
||||
for stack in stack_dict:
|
||||
stack_dir = compose_dir / stack
|
||||
stack_dir.mkdir()
|
||||
(stack_dir / "docker-compose.yml").write_text("services: {}\n")
|
||||
|
||||
config_path = tmp_path / "compose-farm.yaml"
|
||||
config_path.write_text("")
|
||||
@@ -28,15 +30,15 @@ def _make_config(tmp_path: Path, services: dict[str, str] | None = None) -> Conf
|
||||
return Config(
|
||||
compose_dir=compose_dir,
|
||||
hosts={"host1": Host(address="localhost"), "host2": Host(address="localhost")},
|
||||
services=svc_dict,
|
||||
stacks=stack_dict,
|
||||
config_path=config_path,
|
||||
)
|
||||
|
||||
|
||||
def _make_result(service: str, success: bool = True) -> CommandResult:
|
||||
def _make_result(stack: str, success: bool = True) -> CommandResult:
|
||||
"""Create a command result."""
|
||||
return CommandResult(
|
||||
service=service,
|
||||
stack=stack,
|
||||
exit_code=0 if success else 1,
|
||||
success=success,
|
||||
stdout="",
|
||||
@@ -48,14 +50,14 @@ class TestApplyCommand:
|
||||
"""Tests for the apply command."""
|
||||
|
||||
def test_apply_nothing_to_do(self, tmp_path: Path, capsys: pytest.CaptureFixture[str]) -> None:
|
||||
"""When no migrations, orphans, or missing services, prints success message."""
|
||||
"""When no migrations, orphans, or missing stacks, prints success message."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=False, config=None)
|
||||
|
||||
@@ -71,24 +73,24 @@ class TestApplyCommand:
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
"compose_farm.cli.lifecycle.get_orphaned_stacks",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
"compose_farm.cli.lifecycle.get_stacks_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host1"),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stack_host", return_value="host1"),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_stacks") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.up_stacks") as mock_up,
|
||||
):
|
||||
apply(dry_run=True, no_orphans=False, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Services to migrate" in captured.out
|
||||
assert "Stacks to migrate" in captured.out
|
||||
assert "svc1" in captured.out
|
||||
assert "Orphaned services to stop" in captured.out
|
||||
assert "Orphaned stacks to stop" in captured.out
|
||||
assert "old-svc" in captured.out
|
||||
assert "dry-run" in captured.out
|
||||
|
||||
@@ -97,24 +99,24 @@ class TestApplyCommand:
|
||||
mock_up.assert_not_called()
|
||||
|
||||
def test_apply_executes_migrations(self, tmp_path: Path) -> None:
|
||||
"""Apply runs migrations when services need migration."""
|
||||
"""Apply runs migrations when stacks need migration."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
"compose_farm.cli.lifecycle.get_stacks_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host1"),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stack_host", return_value="host1"),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.up_stacks") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
@@ -122,26 +124,26 @@ class TestApplyCommand:
|
||||
|
||||
mock_up.assert_called_once()
|
||||
call_args = mock_up.call_args
|
||||
assert call_args[0][1] == ["svc1"] # services list
|
||||
assert call_args[0][1] == ["svc1"] # stacks list
|
||||
|
||||
def test_apply_executes_orphan_cleanup(self, tmp_path: Path) -> None:
|
||||
"""Apply stops orphaned services."""
|
||||
"""Apply stops orphaned stacks."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("old-svc@host1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
"compose_farm.cli.lifecycle.get_orphaned_stacks",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_stacks") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=False, config=None)
|
||||
@@ -158,21 +160,21 @@ class TestApplyCommand:
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
"compose_farm.cli.lifecycle.get_orphaned_stacks",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
"compose_farm.cli.lifecycle.get_stacks_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host1"),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stack_host", return_value="host1"),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.up_stacks") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_stacks") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
@@ -195,35 +197,35 @@ class TestApplyCommand:
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
"compose_farm.cli.lifecycle.get_orphaned_stacks",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=True, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Nothing to apply" in captured.out
|
||||
|
||||
def test_apply_starts_missing_services(self, tmp_path: Path) -> None:
|
||||
"""Apply starts services that are in config but not in state."""
|
||||
def test_apply_starts_missing_stacks(self, tmp_path: Path) -> None:
|
||||
"""Apply starts stacks that are in config but not in state."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_not_in_state",
|
||||
"compose_farm.cli.lifecycle.get_stacks_not_in_state",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.up_stacks") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
@@ -233,43 +235,43 @@ class TestApplyCommand:
|
||||
call_args = mock_up.call_args
|
||||
assert call_args[0][1] == ["svc1"]
|
||||
|
||||
def test_apply_dry_run_shows_missing_services(
|
||||
def test_apply_dry_run_shows_missing_stacks(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""Dry run shows services that would be started."""
|
||||
"""Dry run shows stacks that would be started."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_not_in_state",
|
||||
"compose_farm.cli.lifecycle.get_stacks_not_in_state",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
):
|
||||
apply(dry_run=True, no_orphans=False, full=False, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Services to start" in captured.out
|
||||
assert "Stacks to start" in captured.out
|
||||
assert "svc1" in captured.out
|
||||
assert "dry-run" in captured.out
|
||||
|
||||
def test_apply_full_refreshes_all_services(self, tmp_path: Path) -> None:
|
||||
"""--full runs up on all services to pick up config changes."""
|
||||
def test_apply_full_refreshes_all_stacks(self, tmp_path: Path) -> None:
|
||||
"""--full runs up on all stacks to pick up config changes."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("svc1"), _make_result("svc2")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.up_stacks") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
@@ -277,57 +279,57 @@ class TestApplyCommand:
|
||||
|
||||
mock_up.assert_called_once()
|
||||
call_args = mock_up.call_args
|
||||
# Should refresh all services in config
|
||||
# Should refresh all stacks in config
|
||||
assert set(call_args[0][1]) == {"svc1", "svc2"}
|
||||
|
||||
def test_apply_full_dry_run_shows_refresh(
|
||||
self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
|
||||
) -> None:
|
||||
"""--full --dry-run shows services that would be refreshed."""
|
||||
"""--full --dry-run shows stacks that would be refreshed."""
|
||||
cfg = _make_config(tmp_path)
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_services_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_services_not_in_state", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_needing_migration", return_value=[]),
|
||||
patch("compose_farm.cli.lifecycle.get_stacks_not_in_state", return_value=[]),
|
||||
):
|
||||
apply(dry_run=True, no_orphans=False, full=True, config=None)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "Services to refresh" in captured.out
|
||||
assert "Stacks to refresh" in captured.out
|
||||
assert "svc1" in captured.out
|
||||
assert "svc2" in captured.out
|
||||
assert "dry-run" in captured.out
|
||||
|
||||
def test_apply_full_excludes_already_handled_services(self, tmp_path: Path) -> None:
|
||||
"""--full doesn't double-process services that are migrating or starting."""
|
||||
def test_apply_full_excludes_already_handled_stacks(self, tmp_path: Path) -> None:
|
||||
"""--full doesn't double-process stacks that are migrating or starting."""
|
||||
cfg = _make_config(tmp_path, {"svc1": "host1", "svc2": "host2", "svc3": "host1"})
|
||||
mock_results = [_make_result("svc1"), _make_result("svc3")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_needing_migration",
|
||||
"compose_farm.cli.lifecycle.get_stacks_needing_migration",
|
||||
return_value=["svc1"],
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_services_not_in_state",
|
||||
"compose_farm.cli.lifecycle.get_stacks_not_in_state",
|
||||
return_value=["svc2"],
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.get_service_host", return_value="host2"),
|
||||
patch("compose_farm.cli.lifecycle.get_stack_host", return_value="host2"),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.up_services") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.up_stacks") as mock_up,
|
||||
patch("compose_farm.cli.lifecycle.maybe_regenerate_traefik"),
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
apply(dry_run=False, no_orphans=False, full=True, config=None)
|
||||
|
||||
# up_services should be called 3 times: migrate, start, refresh
|
||||
# up_stacks should be called 3 times: migrate, start, refresh
|
||||
assert mock_up.call_count == 3
|
||||
# Get the third call (refresh) and check it only has svc3
|
||||
refresh_call = mock_up.call_args_list[2]
|
||||
@@ -345,40 +347,40 @@ class TestDownOrphaned:
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_services", return_value={}),
|
||||
patch("compose_farm.cli.lifecycle.get_orphaned_stacks", return_value={}),
|
||||
):
|
||||
down(
|
||||
services=None,
|
||||
all_services=False,
|
||||
stacks=None,
|
||||
all_stacks=False,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
)
|
||||
|
||||
captured = capsys.readouterr()
|
||||
assert "No orphaned services to stop" in captured.out
|
||||
assert "No orphaned stacks to stop" in captured.out
|
||||
|
||||
def test_down_orphaned_stops_services(self, tmp_path: Path) -> None:
|
||||
"""--orphaned stops orphaned services."""
|
||||
def test_down_orphaned_stops_stacks(self, tmp_path: Path) -> None:
|
||||
"""--orphaned stops orphaned stacks."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_results = [_make_result("old-svc@host1")]
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.lifecycle.load_config_or_exit", return_value=cfg),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.get_orphaned_services",
|
||||
"compose_farm.cli.lifecycle.get_orphaned_stacks",
|
||||
return_value={"old-svc": "host1"},
|
||||
),
|
||||
patch(
|
||||
"compose_farm.cli.lifecycle.run_async",
|
||||
return_value=mock_results,
|
||||
),
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_services") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.stop_orphaned_stacks") as mock_stop,
|
||||
patch("compose_farm.cli.lifecycle.report_results"),
|
||||
):
|
||||
down(
|
||||
services=None,
|
||||
all_services=False,
|
||||
stacks=None,
|
||||
all_stacks=False,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
@@ -386,12 +388,12 @@ class TestDownOrphaned:
|
||||
|
||||
mock_stop.assert_called_once_with(cfg)
|
||||
|
||||
def test_down_orphaned_with_services_errors(self) -> None:
|
||||
"""--orphaned cannot be combined with service arguments."""
|
||||
def test_down_orphaned_with_stacks_errors(self) -> None:
|
||||
"""--orphaned cannot be combined with stack arguments."""
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
down(
|
||||
services=["svc1"],
|
||||
all_services=False,
|
||||
stacks=["svc1"],
|
||||
all_stacks=False,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
@@ -403,8 +405,8 @@ class TestDownOrphaned:
|
||||
"""--orphaned cannot be combined with --all."""
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
down(
|
||||
services=None,
|
||||
all_services=True,
|
||||
stacks=None,
|
||||
all_stacks=True,
|
||||
orphaned=True,
|
||||
host=None,
|
||||
config=None,
|
||||
@@ -416,8 +418,8 @@ class TestDownOrphaned:
|
||||
"""--orphaned cannot be combined with --host."""
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
down(
|
||||
services=None,
|
||||
all_services=False,
|
||||
stacks=None,
|
||||
all_stacks=False,
|
||||
orphaned=True,
|
||||
host="host1",
|
||||
config=None,
|
||||
|
||||
@@ -25,20 +25,20 @@ def _make_config(tmp_path: Path) -> Config:
|
||||
return Config(
|
||||
compose_dir=compose_dir,
|
||||
hosts={"local": Host(address="localhost"), "remote": Host(address="192.168.1.10")},
|
||||
services={"svc1": "local", "svc2": "local", "svc3": "remote"},
|
||||
stacks={"svc1": "local", "svc2": "local", "svc3": "remote"},
|
||||
)
|
||||
|
||||
|
||||
def _make_result(service: str) -> CommandResult:
|
||||
def _make_result(stack: str) -> CommandResult:
|
||||
"""Create a successful command result."""
|
||||
return CommandResult(service=service, exit_code=0, success=True, stdout="", stderr="")
|
||||
return CommandResult(stack=stack, exit_code=0, success=True, stdout="", stderr="")
|
||||
|
||||
|
||||
def _mock_run_async_factory(
|
||||
services: list[str],
|
||||
stacks: list[str],
|
||||
) -> tuple[Any, list[CommandResult]]:
|
||||
"""Create a mock run_async that returns results for given services."""
|
||||
results = [_make_result(s) for s in services]
|
||||
"""Create a mock run_async that returns results for given stacks."""
|
||||
results = [_make_result(s) for s in stacks]
|
||||
|
||||
def mock_run_async(_coro: Coroutine[Any, Any, Any]) -> list[CommandResult]:
|
||||
return results
|
||||
@@ -49,7 +49,7 @@ def _mock_run_async_factory(
|
||||
class TestLogsContextualDefault:
|
||||
"""Tests for logs --tail contextual default behavior."""
|
||||
|
||||
def test_logs_all_services_defaults_to_20(self, tmp_path: Path) -> None:
|
||||
def test_logs_all_stacks_defaults_to_20(self, tmp_path: Path) -> None:
|
||||
"""When --all is specified, default tail should be 20."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
|
||||
@@ -58,18 +58,18 @@ class TestLogsContextualDefault:
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
patch("compose_farm.cli.monitoring.run_on_stacks") as mock_run,
|
||||
):
|
||||
mock_run.return_value = None
|
||||
|
||||
logs(services=None, all_services=True, host=None, follow=False, tail=None, config=None)
|
||||
logs(stacks=None, all_stacks=True, host=None, follow=False, tail=None, config=None)
|
||||
|
||||
mock_run.assert_called_once()
|
||||
call_args = mock_run.call_args
|
||||
assert call_args[0][2] == "logs --tail 20"
|
||||
|
||||
def test_logs_single_service_defaults_to_100(self, tmp_path: Path) -> None:
|
||||
"""When specific services are specified, default tail should be 100."""
|
||||
def test_logs_single_stack_defaults_to_100(self, tmp_path: Path) -> None:
|
||||
"""When specific stacks are specified, default tail should be 100."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1"])
|
||||
|
||||
@@ -77,11 +77,11 @@ class TestLogsContextualDefault:
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
patch("compose_farm.cli.monitoring.run_on_stacks") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=["svc1"],
|
||||
all_services=False,
|
||||
stacks=["svc1"],
|
||||
all_stacks=False,
|
||||
host=None,
|
||||
follow=False,
|
||||
tail=None,
|
||||
@@ -101,11 +101,11 @@ class TestLogsContextualDefault:
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
patch("compose_farm.cli.monitoring.run_on_stacks") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=None,
|
||||
all_services=True,
|
||||
stacks=None,
|
||||
all_stacks=True,
|
||||
host=None,
|
||||
follow=False,
|
||||
tail=50,
|
||||
@@ -125,11 +125,11 @@ class TestLogsContextualDefault:
|
||||
patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
patch("compose_farm.cli.monitoring.run_on_stacks") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=["svc1"],
|
||||
all_services=False,
|
||||
stacks=["svc1"],
|
||||
all_stacks=False,
|
||||
host=None,
|
||||
follow=True,
|
||||
tail=None,
|
||||
@@ -144,19 +144,19 @@ class TestLogsContextualDefault:
|
||||
class TestLogsHostFilter:
|
||||
"""Tests for logs --host filter behavior."""
|
||||
|
||||
def test_logs_host_filter_selects_services_on_host(self, tmp_path: Path) -> None:
|
||||
"""When --host is specified, only services on that host are included."""
|
||||
def test_logs_host_filter_selects_stacks_on_host(self, tmp_path: Path) -> None:
|
||||
"""When --host is specified, only stacks on that host are included."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
patch("compose_farm.cli.monitoring.run_on_stacks") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=None,
|
||||
all_services=False,
|
||||
stacks=None,
|
||||
all_stacks=False,
|
||||
host="local",
|
||||
follow=False,
|
||||
tail=None,
|
||||
@@ -169,18 +169,18 @@ class TestLogsHostFilter:
|
||||
assert set(call_args[0][1]) == {"svc1", "svc2"}
|
||||
|
||||
def test_logs_host_filter_defaults_to_20_lines(self, tmp_path: Path) -> None:
|
||||
"""When --host is specified, default tail should be 20 (multiple services)."""
|
||||
"""When --host is specified, default tail should be 20 (multiple stacks)."""
|
||||
cfg = _make_config(tmp_path)
|
||||
mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
|
||||
|
||||
with (
|
||||
patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg),
|
||||
patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async),
|
||||
patch("compose_farm.cli.monitoring.run_on_services") as mock_run,
|
||||
patch("compose_farm.cli.monitoring.run_on_stacks") as mock_run,
|
||||
):
|
||||
logs(
|
||||
services=None,
|
||||
all_services=False,
|
||||
stacks=None,
|
||||
all_stacks=False,
|
||||
host="local",
|
||||
follow=False,
|
||||
tail=None,
|
||||
@@ -196,8 +196,8 @@ class TestLogsHostFilter:
|
||||
# No config mock needed - error is raised before config is loaded
|
||||
with pytest.raises(typer.Exit) as exc_info:
|
||||
logs(
|
||||
services=None,
|
||||
all_services=True,
|
||||
stacks=None,
|
||||
all_stacks=True,
|
||||
host="local",
|
||||
follow=False,
|
||||
tail=None,
|
||||
|
||||
@@ -53,7 +53,7 @@ class TestSshStatus:
|
||||
hosts:
|
||||
local:
|
||||
address: localhost
|
||||
services:
|
||||
stacks:
|
||||
test: local
|
||||
""")
|
||||
|
||||
@@ -69,7 +69,7 @@ services:
|
||||
hosts:
|
||||
local:
|
||||
address: localhost
|
||||
services:
|
||||
stacks:
|
||||
test: local
|
||||
""")
|
||||
|
||||
@@ -92,7 +92,7 @@ class TestSshSetup:
|
||||
hosts:
|
||||
local:
|
||||
address: localhost
|
||||
services:
|
||||
stacks:
|
||||
test: local
|
||||
""")
|
||||
|
||||
|
||||
@@ -2,11 +2,14 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
# Thresholds in seconds, per OS
|
||||
if sys.platform == "win32":
|
||||
CLI_STARTUP_THRESHOLD = 2.0
|
||||
@@ -16,6 +19,10 @@ else: # Linux
|
||||
CLI_STARTUP_THRESHOLD = 0.25
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
"PYTEST_XDIST_WORKER" in os.environ,
|
||||
reason="Skip in parallel mode due to resource contention",
|
||||
)
|
||||
def test_cli_startup_time() -> None:
|
||||
"""Verify CLI startup time stays within acceptable bounds.
|
||||
|
||||
|
||||
60
tests/test_compose.py
Normal file
60
tests/test_compose.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""Tests for compose file parsing utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
from compose_farm.compose import get_container_name
|
||||
|
||||
|
||||
class TestGetContainerName:
|
||||
"""Test get_container_name helper function."""
|
||||
|
||||
def test_explicit_container_name(self) -> None:
|
||||
"""Uses container_name from service definition when set."""
|
||||
service_def = {"image": "nginx", "container_name": "my-custom-name"}
|
||||
result = get_container_name("web", service_def, "myproject")
|
||||
assert result == "my-custom-name"
|
||||
|
||||
def test_default_naming_pattern(self) -> None:
|
||||
"""Falls back to {project}-{service}-1 pattern."""
|
||||
service_def = {"image": "nginx"}
|
||||
result = get_container_name("web", service_def, "myproject")
|
||||
assert result == "myproject-web-1"
|
||||
|
||||
def test_none_service_def(self) -> None:
|
||||
"""Handles None service definition gracefully."""
|
||||
result = get_container_name("web", None, "myproject")
|
||||
assert result == "myproject-web-1"
|
||||
|
||||
def test_empty_service_def(self) -> None:
|
||||
"""Handles empty service definition."""
|
||||
result = get_container_name("web", {}, "myproject")
|
||||
assert result == "myproject-web-1"
|
||||
|
||||
def test_container_name_none_value(self) -> None:
|
||||
"""Handles container_name set to None."""
|
||||
service_def = {"image": "nginx", "container_name": None}
|
||||
result = get_container_name("web", service_def, "myproject")
|
||||
assert result == "myproject-web-1"
|
||||
|
||||
def test_container_name_empty_string(self) -> None:
|
||||
"""Handles container_name set to empty string."""
|
||||
service_def = {"image": "nginx", "container_name": ""}
|
||||
result = get_container_name("web", service_def, "myproject")
|
||||
assert result == "myproject-web-1"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("service_name", "project_name", "expected"),
|
||||
[
|
||||
("redis", "plex", "plex-redis-1"),
|
||||
("plex-server", "media", "media-plex-server-1"),
|
||||
("db", "my-app", "my-app-db-1"),
|
||||
],
|
||||
)
|
||||
def test_various_naming_combinations(
|
||||
self, service_name: str, project_name: str, expected: str
|
||||
) -> None:
|
||||
"""Test various service/project name combinations."""
|
||||
result = get_container_name(service_name, {"image": "test"}, project_name)
|
||||
assert result == expected
|
||||
@@ -36,43 +36,43 @@ class TestConfig:
|
||||
config = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas01": Host(address="192.168.1.10")},
|
||||
services={"plex": "nas01"},
|
||||
stacks={"plex": "nas01"},
|
||||
)
|
||||
assert config.compose_dir == Path("/opt/compose")
|
||||
assert "nas01" in config.hosts
|
||||
assert config.services["plex"] == "nas01"
|
||||
assert config.stacks["plex"] == "nas01"
|
||||
|
||||
def test_config_invalid_service_host(self) -> None:
|
||||
def test_config_invalid_stack_host(self) -> None:
|
||||
with pytest.raises(ValueError, match="unknown host"):
|
||||
Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas01": Host(address="192.168.1.10")},
|
||||
services={"plex": "nonexistent"},
|
||||
stacks={"plex": "nonexistent"},
|
||||
)
|
||||
|
||||
def test_get_host(self) -> None:
|
||||
config = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas01": Host(address="192.168.1.10")},
|
||||
services={"plex": "nas01"},
|
||||
stacks={"plex": "nas01"},
|
||||
)
|
||||
host = config.get_host("plex")
|
||||
assert host.address == "192.168.1.10"
|
||||
|
||||
def test_get_host_unknown_service(self) -> None:
|
||||
def test_get_host_unknown_stack(self) -> None:
|
||||
config = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas01": Host(address="192.168.1.10")},
|
||||
services={"plex": "nas01"},
|
||||
stacks={"plex": "nas01"},
|
||||
)
|
||||
with pytest.raises(ValueError, match="Unknown service"):
|
||||
with pytest.raises(ValueError, match="Unknown stack"):
|
||||
config.get_host("unknown")
|
||||
|
||||
def test_get_compose_path(self) -> None:
|
||||
config = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas01": Host(address="192.168.1.10")},
|
||||
services={"plex": "nas01"},
|
||||
stacks={"plex": "nas01"},
|
||||
)
|
||||
path = config.get_compose_path("plex")
|
||||
# Defaults to compose.yaml when no file exists
|
||||
@@ -88,7 +88,7 @@ class TestLoadConfig:
|
||||
"hosts": {
|
||||
"nas01": {"address": "192.168.1.10", "user": "docker", "port": 2222},
|
||||
},
|
||||
"services": {"plex": "nas01"},
|
||||
"stacks": {"plex": "nas01"},
|
||||
}
|
||||
config_file = tmp_path / "sdc.yaml"
|
||||
config_file.write_text(yaml.dump(config_data))
|
||||
@@ -102,7 +102,7 @@ class TestLoadConfig:
|
||||
config_data = {
|
||||
"compose_dir": "/opt/compose",
|
||||
"hosts": {"nas01": "192.168.1.10"},
|
||||
"services": {"plex": "nas01"},
|
||||
"stacks": {"plex": "nas01"},
|
||||
}
|
||||
config_file = tmp_path / "sdc.yaml"
|
||||
config_file.write_text(yaml.dump(config_data))
|
||||
@@ -117,7 +117,7 @@ class TestLoadConfig:
|
||||
"nas01": {"address": "192.168.1.10", "user": "docker"},
|
||||
"nas02": "192.168.1.11",
|
||||
},
|
||||
"services": {"plex": "nas01", "jellyfin": "nas02"},
|
||||
"stacks": {"plex": "nas01", "jellyfin": "nas02"},
|
||||
}
|
||||
config_file = tmp_path / "sdc.yaml"
|
||||
config_file.write_text(yaml.dump(config_data))
|
||||
@@ -137,7 +137,7 @@ class TestLoadConfig:
|
||||
config_data = {
|
||||
"compose_dir": "/opt/compose",
|
||||
"hosts": {"local": "localhost"},
|
||||
"services": {"test": "local"},
|
||||
"stacks": {"test": "local"},
|
||||
}
|
||||
config_file = tmp_path / "sdc.yaml"
|
||||
config_file.write_text(yaml.dump(config_data))
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user