Mirror of https://github.com/basnijholt/compose-farm.git, synced 2026-02-03 14:13:26 +00:00

Compare commits: 41 commits on docker-use ... c60b01febe
Commits:
c60b01febe, 4d65702868, 596a05e39d, e1a8ceb9e6, ed450c65e5, 0f84864a06,
9c72e0937a, 74cc2f3245, 940bd9585a, dd60af61a8, 2f3720949b, 1e3b1d71ed,
c159549a9e, d65f4cf7f4, 7ce2067fcb, f32057aa7b, c3e3aeb538, 009f3b1403,
51f74eab42, 4acf797128, d167da9d63, a5eac339db, 9f3813eb72, b9ae0ad4d5,
ca2a4dd6d9, fafdce5736, 6436becff9, 3460d8a3ea, 8dabc27272, 5e08f1d712,
8302f1d97a, eac9338352, 667931dc80, 5890221528, c8fc3c2496, ffb7a32402,
beb1630fcf, 2af48b2642, f69993eac8, 9bdcd143cf, 9230e12eb0
.github/workflows/ci.yml (vendored): 2 changes
@@ -12,7 +12,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-latest]
         python-version: ["3.11", "3.12", "3.13"]

     steps:
.github/workflows/docker.yml (vendored): 21 changes
@@ -68,16 +68,35 @@ jobs:
           echo "✗ Timeout waiting for PyPI"
           exit 1

+      - name: Check if latest release
+        id: latest
+        run: |
+          VERSION="${{ steps.version.outputs.version }}"
+          # Get latest release tag from GitHub (strip 'v' prefix)
+          LATEST=$(gh release view --json tagName -q '.tagName' | sed 's/^v//')
+          echo "Building version: $VERSION"
+          echo "Latest release: $LATEST"
+          if [ "$VERSION" = "$LATEST" ]; then
+            echo "is_latest=true" >> $GITHUB_OUTPUT
+            echo "✓ This is the latest release, will tag as :latest"
+          else
+            echo "is_latest=false" >> $GITHUB_OUTPUT
+            echo "⚠ This is NOT the latest release, skipping :latest tag"
+          fi
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Extract metadata
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          # Only tag as 'latest' if this is the latest release (prevents re-runs of old releases from overwriting)
           tags: |
             type=semver,pattern={{version}},value=v${{ steps.version.outputs.version }}
             type=semver,pattern={{major}}.{{minor}},value=v${{ steps.version.outputs.version }}
             type=semver,pattern={{major}},value=v${{ steps.version.outputs.version }}
-            type=raw,value=latest
+            type=raw,value=latest,enable=${{ steps.latest.outputs.is_latest }}

       - name: Build and push
         uses: docker/build-push-action@v6
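The `is_latest` gate above can be sanity-checked locally before cutting a release. A minimal sketch, assuming `gh` is authenticated for this repo and `VERSION` stands in for the workflow's `steps.version.outputs.version`:

```bash
# Compare the version being built against the newest GitHub release tag
VERSION="1.2.3"  # illustrative stand-in for the workflow's version output
LATEST=$(gh release view --json tagName -q '.tagName' | sed 's/^v//')
if [ "$VERSION" = "$LATEST" ]; then
  echo "would tag :latest"
else
  echo "would skip :latest"
fi
```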
.github/workflows/update-readme.yml (vendored): 4 changes
@@ -26,7 +26,9 @@ jobs:
         env:
           TERM: dumb
           NO_COLOR: 1
-          TERMINAL_WIDTH: 90
+          COLUMNS: 90 # POSIX terminal width for Rich
+          TERMINAL_WIDTH: 90 # Typer MAX_WIDTH for help panels
+          _TYPER_FORCE_DISABLE_TERMINAL: 1 # Prevent Typer forcing terminal mode in CI
         run: |
           uvx --with . markdown-code-runner README.md
           sed -i 's/[[:space:]]*$//' README.md
.gitignore (vendored): 2 changes
@@ -37,6 +37,7 @@ ENV/
 .coverage
 .pytest_cache/
 htmlcov/
+.code/

 # Local config (don't commit real configs)
 compose-farm.yaml
@@ -45,3 +46,4 @@ coverage.xml
 .env
 homepage/
 site/
+.playwright-mcp/
@@ -59,18 +59,20 @@ Check:
 - Config file search order is accurate
 - Example YAML would actually work

-### 4. Verify docs/architecture.md
+### 4. Verify docs/architecture.md and CLAUDE.md

 ```bash
 # What source files actually exist?
 git ls-files "src/**/*.py"
 ```

-Check:
+Check **both** `docs/architecture.md` and `CLAUDE.md` (Architecture section):
 - Listed files exist
 - No files are missing from the list
 - Descriptions match what the code does

+Both files have architecture listings that can drift independently.
+
 ### 5. Check Examples

 For examples in any doc:
.prompts/duplication-audit.md (new file): 79 lines
@@ -0,0 +1,79 @@
+# Duplication audit and generalization prompt
+
+You are a coding agent working inside a repository. Your job is to find duplicated
+functionality (not just identical code) and propose a minimal, safe generalization.
+Keep it simple and avoid adding features.
+
+## First steps
+
+- Read project-specific instructions (AGENTS.md, CONTRIBUTING.md, or similar) and follow them.
+- If instructions mention tooling or style (e.g., preferred search tools), use those.
+- Ask a brief clarification if the request is ambiguous (for example: report only vs refactor).
+
+## Objective
+
+Identify and consolidate duplicated functionality across the codebase. Duplication includes:
+- Multiple functions that parse or validate the same data in slightly different ways
+- Repeated file reads or config parsing
+- Similar command building or subprocess execution paths
+- Near-identical error handling or logging patterns
+- Repeated data transforms that can become a shared helper
+
+The goal is to propose a general, reusable abstraction that reduces duplication while
+preserving behavior. Keep changes minimal and easy to review.
+
+## Search strategy
+
+1) Map the hot paths
+- Scan entry points (CLI, web handlers, tasks, jobs) to see what they do repeatedly.
+- Look for cross-module patterns: same steps, different files.
+
+2) Find duplicate operations
+- Use fast search tools (prefer `rg`) to find repeated keywords and patterns.
+- Check for repeated YAML/JSON parsing, env interpolation, file IO, command building,
+  data validation, or response formatting.
+
+3) Validate duplication is real
+- Confirm the functional intent matches (not just similar code).
+- Note any subtle differences that must be preserved.
+
+4) Propose a minimal generalization
+- Suggest a shared helper, utility, or wrapper.
+- Avoid over-engineering. If only two call sites exist, keep the helper small.
+- Prefer pure functions and centralized IO if that already exists.
+
+## Deliverables
+
+Provide a concise report with:
+
+1) Findings
+- List duplicated behaviors with file references and a short description of the
+  shared functionality.
+- Explain why these are functionally the same (or nearly the same).
+
+2) Proposed generalizations
+- For each duplication, propose a shared helper and where it should live.
+- Outline any behavior differences that need to be parameterized.
+
+3) Impact and risk
+- Note any behavior risks, test needs, or migration steps.
+
+If the user asked you to implement changes:
+- Make only the minimal edits needed to dedupe behavior.
+- Keep the public API stable unless explicitly requested.
+- Add small comments only when the logic is non-obvious.
+- Summarize what changed and why.
+
+## Output format
+
+- Start with a short summary of the top 1-3 duplications.
+- Then provide a list of findings, ordered by impact.
+- Include a small proposed refactor plan (step-by-step, no more than 5 steps).
+- End with any questions or assumptions.
+
+## Guardrails
+
+- Do not add new features or change behavior beyond deduplication.
+- Avoid deep refactors without explicit request.
+- Preserve existing style conventions and import rules.
+- If a duplication is better left alone (e.g., clarity, single usage), say so.
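In practice, step 2 of this prompt's search strategy maps onto a handful of `rg` queries. An illustrative sketch for a Python codebase (the patterns are examples, not a fixed list):

```bash
# Spot repeated config parsing, subprocess wrappers, and scattered file IO
rg -n "yaml.safe_load|json.load" src/
rg -n "subprocess\.(run|Popen)" src/
rg -n "\.read_text\(|open\(" src/
```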
@@ -6,6 +6,7 @@ Review the pull request for:
 - **Organization**: Is everything in the right place?
 - **Consistency**: Is it in the same style as other parts of the codebase?
 - **Simplicity**: Is it not over-engineered? Remember KISS and YAGNI. No dead code paths and NO defensive programming.
+- **No pointless wrappers**: Identify functions/methods that just call another function and return its result. Callers should call the underlying function directly instead of going through unnecessary indirection.
 - **User experience**: Does it provide a good user experience?
 - **PR**: Is the PR description and title clear and informative?
 - **Tests**: Are there tests, and do they cover the changes adequately? Are they testing something meaningful or are they just trivial?
CLAUDE.md: 32 changes
@@ -17,18 +17,20 @@ src/compose_farm/
 │ ├── config.py # Config subcommand (init, show, path, validate, edit, symlink)
 │ ├── lifecycle.py # up, down, stop, pull, restart, update, apply, compose commands
 │ ├── management.py # refresh, check, init-network, traefik-file commands
-│ ├── monitoring.py # logs, ps, stats commands
+│ ├── monitoring.py # logs, ps, stats, list commands
 │ ├── ssh.py # SSH key management (setup, status, keygen)
 │ └── web.py # Web UI server command
-├── config.py # Pydantic models, YAML loading
 ├── compose.py # Compose file parsing (.env, ports, volumes, networks)
+├── config.py # Pydantic models, YAML loading
 ├── console.py # Shared Rich console instances
 ├── executor.py # SSH/local command execution, streaming output
-├── operations.py # Business logic (up, migrate, discover, preflight checks)
-├── state.py # Deployment state tracking (which stack on which host)
 ├── glances.py # Glances API integration for host resource stats
 ├── logs.py # Image digest snapshots (dockerfarm-log.toml)
+├── operations.py # Business logic (up, migrate, discover, preflight checks)
 ├── paths.py # Path utilities, config file discovery
 ├── registry.py # Container registry client for update checking
 ├── ssh_keys.py # SSH key path constants and utilities
+├── state.py # Deployment state tracking (which stack on which host)
 ├── traefik.py # Traefik file-provider config generation from labels
 └── web/ # Web UI (FastAPI + HTMX)
 ```
@@ -100,6 +102,17 @@ Browser tests are marked with `@pytest.mark.browser`. They use Playwright to tes
 - **NEVER merge anything into main.** Always commit directly or use fast-forward/rebase.
 - Never force push.

+## SSH Agent in Remote Sessions
+
+When pushing to GitHub via SSH fails with "Permission denied (publickey)", fix the SSH agent socket:
+
+```bash
+# Find and set the correct SSH agent socket
+SSH_AUTH_SOCK=$(ls -t ~/.ssh/agent/s.*.sshd.* 2>/dev/null | head -1) git push origin branch-name
+```
+
+This is needed because the SSH agent socket path changes between sessions.
+
 ## Pull Requests

 - Never include unchecked checklists (e.g., `- [ ] ...`) in PR descriptions. Either omit the checklist or use checked items.
@@ -110,6 +123,10 @@ Browser tests are marked with `@pytest.mark.browser`. They use Playwright to tes
 Use `gh release create` to create releases. The tag is created automatically.

 ```bash
+# IMPORTANT: Ensure you're on latest origin/main before releasing!
+git fetch origin
+git checkout origin/main
+
 # Check current version
 git tag --sort=-v:refname | head -1

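Putting those steps together, a release session might look like the sketch below (the version number is illustrative, and flags such as `--generate-notes` are optional):

```bash
git fetch origin
git checkout origin/main
git tag --sort=-v:refname | head -1   # e.g. prints v0.9.0
gh release create v0.10.0 --generate-notes
```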
@@ -133,17 +150,18 @@ CLI available as `cf` or `compose-farm`.
 | `down` | Stop stacks (`docker compose down`). Use `--orphaned` to stop stacks removed from config |
 | `stop` | Stop services without removing containers (`docker compose stop`) |
 | `pull` | Pull latest images |
-| `restart` | `down` + `up -d` |
-| `update` | `pull` + `build` + `down` + `up -d` |
+| `restart` | Restart running containers (`docker compose restart`) |
+| `update` | Pull, build, recreate only if changed (`up -d --pull always --build`) |
 | `apply` | Make reality match config: migrate stacks + stop orphans. Use `--dry-run` to preview |
 | `compose` | Run any docker compose command on a stack (passthrough) |
 | `logs` | Show stack logs |
 | `ps` | Show status of all stacks |
 | `stats` | Show overview (hosts, stacks, pending migrations; `--live` for container counts) |
+| `list` | List stacks and hosts (`--simple` for scripting, `--host` to filter) |
 | `refresh` | Update state from reality: discover running stacks, capture image digests |
 | `check` | Validate config, traefik labels, mounts, networks; show host compatibility |
 | `init-network` | Create Docker network on hosts with consistent subnet/gateway |
 | `traefik-file` | Generate Traefik file-provider config from compose labels |
-| `config` | Manage config files (init, show, path, validate, edit, symlink) |
+| `config` | Manage config files (init, init-env, show, path, validate, edit, symlink) |
 | `ssh` | Manage SSH keys (setup, status, keygen) |
 | `web` | Start web UI server |
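Each table row stands in for an SSH round trip. For example, per the new `update` description, `cf update plex` roughly amounts to this on the stack's host (a sketch; host and path are illustrative):

```bash
ssh host "cd /opt/compose/plex && docker compose up -d --pull always --build"
```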
README.md: 583 changes
@@ -43,17 +43,21 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
 - [What Compose Farm doesn't do](#what-compose-farm-doesnt-do)
 - [Installation](#installation)
 - [SSH Authentication](#ssh-authentication)
-  - [SSH Agent (default)](#ssh-agent-default)
-  - [Dedicated SSH Key (recommended for Docker/Web UI)](#dedicated-ssh-key-recommended-for-dockerweb-ui)
+  - [SSH Agent](#ssh-agent)
+  - [Dedicated SSH Key (default for Docker)](#dedicated-ssh-key-default-for-docker)
 - [Configuration](#configuration)
   - [Single-host example](#single-host-example)
   - [Multi-host example](#multi-host-example)
   - [Multi-Host Stacks](#multi-host-stacks)
   - [Config Command](#config-command)
 - [Usage](#usage)
+  - [Docker Compose Commands](#docker-compose-commands)
+  - [Compose Farm Commands](#compose-farm-commands)
+  - [Aliases](#aliases)
   - [CLI `--help` Output](#cli---help-output)
 - [Auto-Migration](#auto-migration)
 - [Traefik Multihost Ingress (File Provider)](#traefik-multihost-ingress-file-provider)
+- [Host Resource Monitoring (Glances)](#host-resource-monitoring-glances)
 - [Comparison with Alternatives](#comparison-with-alternatives)
 - [License](#license)
@@ -208,9 +212,9 @@ cp .envrc.example .envrc && direnv allow

 Compose Farm uses SSH to run commands on remote hosts. There are two authentication methods:

-### SSH Agent (default)
+### SSH Agent

-Works out of the box if you have an SSH agent running with your keys loaded:
+Works out of the box when running locally if you have an SSH agent running with your keys loaded:

 ```bash
 # Verify your agent has keys
@@ -220,9 +224,9 @@ ssh-add -l
 cf up --all
 ```

-### Dedicated SSH Key (recommended for Docker/Web UI)
+### Dedicated SSH Key (default for Docker)

-When running compose-farm in Docker, the SSH agent connection can be lost (e.g., after container restart). The `cf ssh` command sets up a dedicated key that persists:
+When running in Docker, SSH agent sockets are ephemeral and can be lost after container restarts. The `cf ssh` command sets up a dedicated key that persists:

 ```bash
 # Generate key and copy to all configured hosts
@@ -250,6 +254,13 @@ volumes:
   - cf-ssh:${CF_HOME:-/root}/.ssh
 ```

+**Option 3: SSH agent forwarding** - if you prefer using your host's ssh-agent
+```yaml
+volumes:
+  - ${SSH_AUTH_SOCK}:/ssh-agent:ro
+```
+Note: Requires `SSH_AUTH_SOCK` environment variable to be set. The socket path is ephemeral and changes across sessions.
+
 Run setup once after starting the container (while the SSH agent still works):

 ```bash
@@ -355,24 +366,49 @@ Use `cf config init` to get started with a fully documented template.

 The CLI is available as both `compose-farm` and the shorter `cf` alias.

+### Docker Compose Commands
+
+These wrap `docker compose` with multi-host superpowers:
+
+| Command | Wraps | Compose Farm Additions |
+|---------|-------|------------------------|
+| `cf up` | `up -d` | `--all`, `--host`, parallel execution, auto-migration |
+| `cf down` | `down` | `--all`, `--host`, `--orphaned`, state tracking |
+| `cf stop` | `stop` | `--all`, `--service` |
+| `cf restart` | `restart` | `--all`, `--service` |
+| `cf pull` | `pull` | `--all`, `--service`, parallel execution |
+| `cf logs` | `logs` | `--all`, `--host`, multi-stack output |
+| `cf ps` | `ps` | `--all`, `--host`, unified cross-host view |
+| `cf compose` | any | passthrough for commands not listed above |
+
+### Compose Farm Commands
+
+Multi-host orchestration that Docker Compose can't do:
+
 | Command | Description |
 |---------|-------------|
-| **`cf apply`** | **Make reality match config (start + migrate + stop orphans)** |
-| `cf up <stack>` | Start stack (auto-migrates if host changed) |
-| `cf down <stack>` | Stop and remove stack containers |
-| `cf stop <stack>` | Stop stack without removing containers |
-| `cf restart <stack>` | down + up |
-| `cf update <stack>` | pull + build + down + up |
-| `cf pull <stack>` | Pull latest images |
-| `cf logs -f <stack>` | Follow logs |
-| `cf ps` | Show status of all stacks |
-| `cf refresh` | Update state from running stacks |
+| **`cf apply`** | **Reconcile: start missing, migrate moved, stop orphans** |
+| `cf update` | Shorthand for `up --pull --build` |
+| `cf refresh` | Sync state from what's actually running |
 | `cf check` | Validate config, mounts, networks |
-| `cf init-network` | Create Docker network on hosts |
+| `cf init-network` | Create Docker network on all hosts |
 | `cf traefik-file` | Generate Traefik file-provider config |
-| `cf config <cmd>` | Manage config files (init, show, path, validate, edit, symlink) |
+| `cf config` | Manage config files (init, show, validate, edit, symlink) |
 | `cf ssh` | Manage SSH keys (setup, status, keygen) |
+| `cf list` | List all stacks and their assigned hosts |

-All commands support `--all` to operate on all stacks.
+### Aliases
+
+Short aliases for frequently used commands:
+
+| Alias | Command | Alias | Command |
+|-------|---------|-------|---------|
+| `cf a` | `apply` | `cf s` | `stats` |
+| `cf l` | `logs` | `cf ls` | `list` |
+| `cf r` | `restart` | `cf rf` | `refresh` |
+| `cf u` | `update` | `cf ck` | `check` |
+| `cf p` | `pull` | `cf tf` | `traefik-file` |
+| `cf c` | `compose` | | |

 Each command replaces: look up host → SSH → find compose file → run `ssh host "cd /opt/compose/plex && docker compose up -d"`.
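The `--simple` flag on `cf list` exists for exactly this kind of scripting; a small sketch:

```bash
# Roll through every stack one at a time (illustrative)
for stack in $(cf list --simple); do
  cf ps "$stack"
done
```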
@@ -392,10 +428,10 @@ cf down --orphaned # stop stacks removed from config
 # Pull latest images
 cf pull --all

-# Restart (down + up)
+# Restart running containers
 cf restart plex

-# Update (pull + build + down + up) - the end-to-end update command
+# Update (pull + build, only recreates containers if images changed)
 cf update --all

 # Update state from reality (discovers running stacks + captures digests)
@@ -442,46 +478,41 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Compose Farm - run docker compose commands across multiple hosts

-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --version -v Show version and exit │
-│ --install-completion Install completion for the current shell. │
-│ --show-completion Show completion for the current shell, to │
-│ copy it or customize the installation. │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Configuration ──────────────────────────────────────────────────────────────╮
-│ traefik-file Generate a Traefik file-provider fragment from compose │
-│ Traefik labels. │
-│ refresh Update local state from running stacks. │
-│ check Validate configuration, traefik labels, mounts, and networks. │
-│ init-network Create Docker network on hosts with consistent settings. │
-│ config Manage compose-farm configuration files. │
-│ ssh Manage SSH keys for passwordless authentication. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Lifecycle ──────────────────────────────────────────────────────────────────╮
-│ up Start stacks (docker compose up -d). Auto-migrates if host │
-│ changed. │
-│ down Stop stacks (docker compose down). │
-│ stop Stop services without removing containers (docker compose │
-│ stop). │
-│ pull Pull latest images (docker compose pull). │
-│ restart Restart stacks (down + up). With --service, restarts just │
-│ that service. │
-│ update Update stacks (pull + build + down + up). With --service, │
-│ updates just that service. │
-│ apply Make reality match config (start, migrate, stop │
-│ strays/orphans as needed). │
-│ compose Run any docker compose command on a stack. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Monitoring ─────────────────────────────────────────────────────────────────╮
-│ logs Show stack logs. With --service, shows logs for just that │
-│ service. │
-│ ps Show status of stacks. │
-│ stats Show overview statistics for hosts and stacks. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Server ─────────────────────────────────────────────────────────────────────╮
-│ web Start the web UI server. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --version -v Show version and exit │
+│ --install-completion Install completion for the current shell. │
+│ --show-completion Show completion for the current shell, to copy it or │
+│ customize the installation. │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Configuration ────────────────────────────────────────────────────────────────────────╮
+│ traefik-file Generate a Traefik file-provider fragment from compose Traefik labels. │
+│ refresh Update local state from running stacks. │
+│ check Validate configuration, traefik labels, mounts, and networks. │
+│ init-network Create Docker network on hosts with consistent settings. │
+│ config Manage compose-farm configuration files. │
+│ ssh Manage SSH keys for passwordless authentication. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Lifecycle ────────────────────────────────────────────────────────────────────────────╮
+│ up Start stacks (docker compose up -d). Auto-migrates if host changed. │
+│ down Stop stacks (docker compose down). │
+│ stop Stop services without removing containers (docker compose stop). │
+│ pull Pull latest images (docker compose pull). │
+│ restart Restart running containers (docker compose restart). │
+│ update Update stacks (pull + build + up). Shorthand for 'up --pull --build'. │
+│ apply Make reality match config (start, migrate, stop strays/orphans as │
+│ needed). │
+│ compose Run any docker compose command on a stack. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Monitoring ───────────────────────────────────────────────────────────────────────────╮
+│ logs Show stack logs. With --service, shows logs for just that service. │
+│ ps Show status of stacks. │
+│ stats Show overview statistics for hosts and stacks. │
+│ list List all stacks and their assigned hosts. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Server ───────────────────────────────────────────────────────────────────────────────╮
+│ web Start the web UI server. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -510,16 +541,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Start stacks (docker compose up -d). Auto-migrates if host changed.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --host -H TEXT Filter to stacks on this host │
-│ --service -s TEXT Target a specific service within the stack │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --host -H TEXT Filter to stacks on this host │
+│ --service -s TEXT Target a specific service within the stack │
+│ --pull Pull images before starting (--pull always) │
+│ --build Build images before starting │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -546,17 +579,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Stop stacks (docker compose down).

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --orphaned Stop orphaned stacks (in state but removed from │
-│ config) │
-│ --host -H TEXT Filter to stacks on this host │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --orphaned Stop orphaned stacks (in state but removed from config) │
+│ --host -H TEXT Filter to stacks on this host │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -583,15 +615,15 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Stop services without removing containers (docker compose stop).

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --service -s TEXT Target a specific service within the stack │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -618,15 +650,15 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Pull latest images (docker compose pull).

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --service -s TEXT Target a specific service within the stack │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -651,17 +683,17 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Usage: cf restart [OPTIONS] [STACKS]...

-Restart stacks (down + up). With --service, restarts just that service.
+Restart running containers (docker compose restart).

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --service -s TEXT Target a specific service within the stack │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -686,18 +718,17 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Usage: cf update [OPTIONS] [STACKS]...

-Update stacks (pull + build + down + up). With --service, updates just that
-service.
+Update stacks (pull + build + up). Shorthand for 'up --pull --build'.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --service -s TEXT Target a specific service within the stack │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
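Given the 'Shorthand' note in the help text above, these two invocations should be equivalent (sketch):

```bash
cf update plex
cf up plex --pull --build   # same effect, per the shorthand note
```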
@@ -737,15 +768,14 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 Use --no-strays to skip stopping stray stacks.
 Use --full to also run 'up' on all stacks (picks up compose/env changes).

-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --dry-run -n Show what would change without executing │
-│ --no-orphans Only migrate, don't stop orphaned stacks │
-│ --no-strays Don't stop stray stacks (running on wrong host) │
-│ --full -f Also run up on all stacks to apply config │
-│ changes │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --dry-run -n Show what would change without executing │
+│ --no-orphans Only migrate, don't stop orphaned stacks │
+│ --no-strays Don't stop stray stacks (running on wrong host) │
+│ --full -f Also run up on all stacks to apply config changes │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
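A cautious reconcile workflow suggested by these flags (sketch):

```bash
cf apply --dry-run   # preview migrations and orphan stops
cf apply             # then make reality match the config
```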
@@ -782,17 +812,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 cf compose mystack exec web bash - interactive shell
 cf compose mystack config - view parsed config

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ * stack TEXT Stack to operate on (use '.' for current dir) │
-│ [required] │
-│ * command TEXT Docker compose command [required] │
-│ args [ARGS]... Additional arguments │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --host -H TEXT Filter to stacks on this host │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ * stack TEXT Stack to operate on (use '.' for current dir) [required] │
+│ * command TEXT Docker compose command [required] │
+│ args [ARGS]... Additional arguments │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --host -H TEXT Filter to stacks on this host │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -821,16 +850,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Generate a Traefik file-provider fragment from compose Traefik labels.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --output -o PATH Write Traefik file-provider YAML to this path │
-│ (stdout if omitted) │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --output -o PATH Write Traefik file-provider YAML to this path (stdout if │
+│ omitted) │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -866,16 +895,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Use 'cf apply' to make reality match your config (stop orphans, migrate).

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --config -c PATH Path to config file │
-│ --log-path -l PATH Path to Dockerfarm TOML log │
-│ --dry-run -n Show what would change without writing │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --config -c PATH Path to config file │
+│ --log-path -l PATH Path to Dockerfarm TOML log │
+│ --dry-run -n Show what would change without writing │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -908,14 +937,14 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Use --local to skip SSH-based checks for faster validation.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --local Skip SSH-based checks (faster) │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --local Skip SSH-based checks (faster) │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -947,16 +976,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 communication. Uses the same subnet/gateway on all hosts to ensure
 consistent networking.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ hosts [HOSTS]... Hosts to create network on (default: all) │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --network -n TEXT Network name [default: mynetwork] │
-│ --subnet -s TEXT Network subnet [default: 172.20.0.0/16] │
-│ --gateway -g TEXT Network gateway [default: 172.20.0.1] │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ hosts [HOSTS]... Hosts to create network on (default: all) │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --network -n TEXT Network name [default: mynetwork] │
+│ --subnet -s TEXT Network subnet [default: 172.20.0.0/16] │
+│ --gateway -g TEXT Network gateway [default: 172.20.0.1] │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -984,18 +1013,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Manage compose-farm configuration files.

-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Commands ───────────────────────────────────────────────────────────────────╮
-│ init Create a new config file with documented example. │
-│ edit Open the config file in your default editor. │
-│ show Display the config file location and contents. │
-│ path Print the config file path (useful for scripting). │
-│ validate Validate the config file syntax and schema. │
-│ symlink Create a symlink from the default config location to a config │
-│ file. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Commands ─────────────────────────────────────────────────────────────────────────────╮
+│ init Create a new config file with documented example. │
+│ edit Open the config file in your default editor. │
+│ show Display the config file location and contents. │
+│ path Print the config file path (useful for scripting). │
+│ validate Validate the config file syntax and schema. │
+│ symlink Create a symlink from the default config location to a config file. │
+│ init-env Generate a .env file for Docker deployment. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1023,14 +1052,14 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Manage SSH keys for passwordless authentication.

-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Commands ───────────────────────────────────────────────────────────────────╮
-│ keygen Generate SSH key (does not distribute to hosts). │
-│ setup Generate SSH key and distribute to all configured hosts. │
-│ status Show SSH key status and host connectivity. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Commands ─────────────────────────────────────────────────────────────────────────────╮
+│ keygen Generate SSH key (does not distribute to hosts). │
+│ setup Generate SSH key and distribute to all configured hosts. │
+│ status Show SSH key status and host connectivity. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1059,19 +1088,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Show stack logs. With --service, shows logs for just that service.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --host -H TEXT Filter to stacks on this host │
-│ --service -s TEXT Target a specific service within the stack │
-│ --follow -f Follow logs │
-│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 │
-│ otherwise) │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --host -H TEXT Filter to stacks on this host │
+│ --service -s TEXT Target a specific service within the stack │
+│ --follow -f Follow logs │
+│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 otherwise) │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1104,16 +1132,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 With --host: shows stacks on that host.
 With --service: filters to a specific service within the stack.

-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --host -H TEXT Filter to stacks on this host │
-│ --service -s TEXT Target a specific service within the stack │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --host -H TEXT Filter to stacks on this host │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1141,14 +1169,49 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Show overview statistics for hosts and stacks.

-Without --live: Shows config/state info (hosts, stacks, pending migrations).
+Without flags: Shows config/state info (hosts, stacks, pending migrations).
 With --live: Also queries Docker on each host for container counts.
+With --containers: Shows per-container resource stats (requires Glances).

-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --live -l Query Docker for live container stats │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --live -l Query Docker for live container stats │
+│ --containers -C Show per-container resource stats (requires Glances) │
+│ --host -H TEXT Filter to stacks on this host │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```

 <!-- OUTPUT:END -->

 </details>

+<details>
+<summary>See the output of <code>cf list --help</code></summary>
+
+<!-- CODE:BASH:START -->
+<!-- echo '```yaml' -->
+<!-- export NO_COLOR=1 -->
+<!-- export TERM=dumb -->
+<!-- export TERMINAL_WIDTH=90 -->
+<!-- cf list --help -->
+<!-- echo '```' -->
+<!-- CODE:END -->
+<!-- OUTPUT:START -->
+<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
+```yaml
+
+ Usage: cf list [OPTIONS]
+
+ List all stacks and their assigned hosts.
+
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --host -H TEXT Filter to stacks on this host │
+│ --simple -s Plain output (one stack per line, for scripting) │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+
+```
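Combining the new flags, a quick health pass over one host might look like this (sketch; the host name is illustrative):

```bash
cf stats --live                    # container counts per host
cf stats --containers --host nas   # per-container CPU/memory via Glances
```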
@@ -1177,12 +1240,12 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Start the web UI server.

-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --host -H TEXT Host to bind to [default: 0.0.0.0] │
-│ --port -p INTEGER Port to listen on [default: 8000] │
-│ --reload -r Enable auto-reload for development │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --host -H TEXT Host to bind to [default: 0.0.0.0] │
+│ --port -p INTEGER Port to listen on [default: 8000] │
+│ --reload -r Enable auto-reload for development │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯

 ```
@@ -1274,12 +1337,12 @@ published ports.

 **Auto-regeneration**

-To automatically regenerate the Traefik config after `up`, `down`, `restart`, or `update`,
+To automatically regenerate the Traefik config after `up`, `down`, or `update`,
 add `traefik_file` to your config:

 ```yaml
 compose_dir: /opt/compose
-traefik_file: /opt/traefik/dynamic.d/compose-farm.yml # auto-regenerate on up/down/restart/update
+traefik_file: /opt/traefik/dynamic.d/compose-farm.yml # auto-regenerate on up/down/update
 traefik_stack: traefik # skip stacks on same host (docker provider handles them)

 hosts:
@@ -1318,6 +1381,54 @@ Update your Traefik config to use directory watching instead of a single file:
|
||||
- --providers.file.watch=true
|
||||
```
|
||||
|
||||
## Host Resource Monitoring (Glances)

The web UI can display real-time CPU, memory, and load stats for all configured hosts. This uses [Glances](https://nicolargo.github.io/glances/), a cross-platform system monitoring tool with a REST API.

**Setup**

1. Deploy a Glances stack that runs on all hosts:

```yaml
# glances/compose.yaml
name: glances
services:
  glances:
    image: nicolargo/glances:latest
    container_name: glances
    restart: unless-stopped
    pid: host
    ports:
      - "61208:61208"
    environment:
      - GLANCES_OPT=-w  # Enable web server mode
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
```

2. Add it to your config as a multi-host stack:

```yaml
# compose-farm.yaml
stacks:
  glances: all  # Runs on every host

glances_stack: glances  # Enables resource stats in web UI
```

3. Deploy: `cf up glances`

4. **(Docker web UI only)** The web UI container infers the local host from `CF_WEB_STACK` and reaches Glances via the container name to avoid Docker network isolation issues.

The web UI dashboard will now show a "Host Resources" section with live stats from all hosts. Hosts where Glances is unreachable show an error indicator.

**Live Stats Page**

With Glances configured, a Live Stats page (`/live-stats`) shows all running containers across all hosts:

- **Columns**: Stack, Service, Host, Image, Status, Uptime, CPU, Memory, Net I/O
- **Features**: Sorting, filtering, live updates (no SSH required; uses the Glances REST API)
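For a sense of what these pages consume, here is a minimal sketch of polling one host's Glances REST API directly. The `/api/4/...` endpoint paths are an assumption based on Glances v4; Compose Farm's own client lives in `src/compose_farm/glances.py` and may differ:

```python
# Minimal sketch: poll one host's Glances REST API (assumed v4 endpoints).
import json
from urllib.request import urlopen

def fetch_host_stats(address: str, port: int = 61208) -> dict:
    """Fetch CPU, memory, and load summaries from a Glances instance."""
    stats = {}
    for plugin in ("cpu", "mem", "load"):
        with urlopen(f"http://{address}:{port}/api/4/{plugin}", timeout=10) as resp:
            stats[plugin] = json.load(resp)
    return stats

print(fetch_host_stats("192.168.1.10"))  # hypothetical host address
```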
## Comparison with Alternatives

There are many ways to run containers on multiple hosts. Here is where Compose Farm sits:
@@ -3,7 +3,7 @@
compose_dir: /opt/compose

# Optional: Auto-regenerate Traefik file-provider config after up/down/restart/update
# Optional: Auto-regenerate Traefik file-provider config after up/down/update
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
traefik_stack: traefik  # Skip stacks on same host (docker provider handles them)
@@ -6,7 +6,6 @@ services:
    # Defaults to root (0:0) for backwards compatibility
    user: "${CF_UID:-0}:${CF_GID:-0}"
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      # Compose directory (contains compose files AND compose-farm.yaml config)
      - ${CF_COMPOSE_DIR:-/opt/stacks}:${CF_COMPOSE_DIR:-/opt/stacks}
      # SSH keys for passwordless auth (generated by `cf ssh setup`)
@@ -15,6 +14,8 @@ services:
      - ${CF_SSH_DIR:-~/.ssh/compose-farm}:${CF_HOME:-/root}/.ssh/compose-farm
      # Option 2: Named volume - managed by Docker, shared between services
      # - cf-ssh:${CF_HOME:-/root}/.ssh
      # Option 3: SSH agent forwarding (uncomment if using ssh-agent)
      # - ${SSH_AUTH_SOCK}:/ssh-agent:ro
    environment:
      - SSH_AUTH_SOCK=/ssh-agent
      # Config file path (state stored alongside it)
@@ -31,13 +32,14 @@ services:
    # Run as current user to preserve file ownership on mounted volumes
    user: "${CF_UID:-0}:${CF_GID:-0}"
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      - ${CF_COMPOSE_DIR:-/opt/stacks}:${CF_COMPOSE_DIR:-/opt/stacks}
      # SSH keys - use the SAME option as cf service above
      # Option 1: Host path (default)
      - ${CF_SSH_DIR:-~/.ssh/compose-farm}:${CF_HOME:-/root}/.ssh/compose-farm
      # Option 2: Named volume
      # - cf-ssh:${CF_HOME:-/root}/.ssh
      # Option 3: SSH agent forwarding (uncomment if using ssh-agent)
      # - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      # XDG config dir for backups and image digest logs (persists across restarts)
      - ${CF_XDG_CONFIG:-~/.config/compose-farm}:${CF_HOME:-/root}/.config/compose-farm
    environment:
@@ -96,7 +96,7 @@ Typer-based CLI with subcommand modules:
cli/
├── app.py         # Shared Typer app, version callback
├── common.py      # Shared helpers, options, progress utilities
├── config.py      # config subcommand (init, show, path, validate, edit, symlink)
├── config.py      # config subcommand (init, init-env, show, path, validate, edit, symlink)
├── lifecycle.py   # up, down, stop, pull, restart, update, apply, compose
├── management.py  # refresh, check, init-network, traefik-file
├── monitoring.py  # logs, ps, stats
@@ -343,3 +343,19 @@ For repeated connections to the same host, SSH reuses connections.
```

Icons use [Lucide](https://lucide.dev/). Add new icons as macros in `web/templates/partials/icons.html`.

### Host Resource Monitoring (`src/compose_farm/glances.py`)

Integration with [Glances](https://nicolargo.github.io/glances/) for real-time host stats:

- Fetches CPU, memory, and load from Glances REST API on each host
- Used by web UI dashboard to display host resource usage
- Requires `glances_stack` config option pointing to a Glances stack running on all hosts

### Container Registry Client (`src/compose_farm/registry.py`)

OCI Distribution API client for checking image updates:

- Parses image references (registry, namespace, name, tag, digest)
- Fetches available tags from Docker Hub, GHCR, and other registries
- Compares semantic versions to find newer releases
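As a rough illustration of the version-comparison step (a hypothetical helper written for this document, not `registry.py`'s actual API), assuming tags follow a `v1.2.3`-style scheme:

```python
# Sketch: pick the newest semantic-version tag from a registry's tag list
# (hypothetical helper; not registry.py's actual API).
import re

def parse_version(tag: str) -> tuple[int, ...] | None:
    """Turn tags like 'v3.6' or '1.2.3' into comparable tuples; None if not semver-like."""
    match = re.fullmatch(r"v?(\d+(?:\.\d+)*)", tag)
    return tuple(int(part) for part in match.group(1).split(".")) if match else None

def newest_tag(tags: list[str]) -> str | None:
    """Return the highest semver-like tag, ignoring tags such as 'latest'."""
    versioned = [(v, t) for t in tags if (v := parse_version(t)) is not None]
    return max(versioned)[1] if versioned else None

assert newest_tag(["v3.2", "v3.6", "latest"]) == "v3.6"
```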
3
docs/assets/web-live_stats.gif
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4135888689a10c5ae2904825d98f2a6d215c174a4bd823e25761f619590f04ff
size 3990104
3
docs/assets/web-live_stats.webm
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:87739cd6f6576a81100392d8d1e59d3e776fecc8f0721a31332df89e7fc8593d
size 5814274
@@ -8,19 +8,22 @@ The Compose Farm CLI is available as both `compose-farm` and the shorter alias `

## Command Overview

Commands are either **Docker Compose wrappers** (`up`, `down`, `stop`, `restart`, `pull`, `logs`, `ps`, `compose`) with multi-host superpowers, or **Compose Farm originals** (`apply`, `update`, `refresh`, `check`) for orchestration Docker Compose can't do.

| Category | Command | Description |
|----------|---------|-------------|
| **Lifecycle** | `apply` | Make reality match config |
| | `up` | Start stacks |
| | `down` | Stop stacks |
| | `stop` | Stop services without removing containers |
| | `restart` | Restart stacks (down + up) |
| | `update` | Update stacks (pull + build + down + up) |
| | `restart` | Restart running containers |
| | `update` | Shorthand for `up --pull --build` |
| | `pull` | Pull latest images |
| | `compose` | Run any docker compose command |
| **Monitoring** | `ps` | Show stack status |
| | `logs` | Show stack logs |
| | `stats` | Show overview statistics |
| | `list` | List stacks and hosts |
| **Configuration** | `check` | Validate config and mounts |
| | `refresh` | Sync state from reality |
| | `init-network` | Create Docker network |
@@ -36,6 +39,19 @@ cf --version, -v # Show version
cf --help, -h     # Show help
```

## Command Aliases

Short aliases for frequently used commands:

| Alias | Command | Alias | Command |
|-------|---------|-------|---------|
| `cf a` | `apply` | `cf s` | `stats` |
| `cf l` | `logs` | `cf ls` | `list` |
| `cf r` | `restart` | `cf rf` | `refresh` |
| `cf u` | `update` | `cf ck` | `check` |
| `cf p` | `pull` | `cf tf` | `traefik-file` |
| `cf c` | `compose` | | |

---

## Lifecycle Commands
@@ -58,14 +74,16 @@ cf apply [OPTIONS]
|--------|-------------|
| `--dry-run, -n` | Preview changes without executing |
| `--no-orphans` | Skip stopping orphaned stacks |
| `--full, -f` | Also refresh running stacks |
| `--no-strays` | Skip stopping stray stacks (running on wrong host) |
| `--full, -f` | Also run up on all stacks (applies compose/env changes, triggers migrations) |
| `--config, -c PATH` | Path to config file |

**What it does:**

1. Stops orphaned stacks (in state but removed from config)
2. Migrates stacks on wrong host
3. Starts missing stacks (in config but not running)
2. Stops stray stacks (running on unauthorized hosts)
3. Migrates stacks on wrong host
4. Starts missing stacks (in config but not running)

**Examples:**
@@ -79,7 +97,10 @@ cf apply
# Only start/migrate, don't stop orphans
cf apply --no-orphans

# Also refresh all running stacks
# Don't stop stray stacks
cf apply --no-strays

# Also run up on all stacks (applies compose/env changes, triggers migrations)
cf apply --full
```
@@ -100,6 +121,8 @@ cf up [OPTIONS] [STACKS]...
| `--all, -a` | Start all stacks |
| `--host, -H TEXT` | Filter to stacks on this host |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--pull` | Pull images before starting (`--pull always`) |
| `--build` | Build images before starting |
| `--config, -c PATH` | Path to config file |

**Examples:**
@@ -197,7 +220,7 @@ cf stop immich --service database

### cf restart

Restart stacks (down + up). With `--service`, restarts just that service.
Restart running containers (`docker compose restart`). With `--service`, restarts just that service.

```bash
cf restart [OPTIONS] [STACKS]...
@@ -225,7 +248,7 @@ cf restart immich --service database

### cf update

Update stacks (pull + build + down + up). With `--service`, updates just that service.
Update stacks (pull + build + up). Shorthand for `up --pull --build`. With `--service`, updates just that service.
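Conceptually the shorthand just extends the underlying `docker compose up` invocation. A sketch of how such a command string could be assembled (illustrative only; the real helper is `build_up_cmd` in `operations.py`, whose exact output may differ):

```python
# Sketch: assemble the compose arguments behind `cf update`
# (illustrative; mirrors the documented flags, not the exact helper).
def build_up_cmd(*, pull: bool = False, build: bool = False, service: str | None = None) -> str:
    cmd = "up -d"
    if pull:
        cmd += " --pull always"  # matches the --pull flag's help text
    if build:
        cmd += " --build"
    if service:
        cmd += f" {service}"
    return cmd

assert build_up_cmd(pull=True, build=True) == "up -d --pull always --build"
```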

<video autoplay loop muted playsinline>
  <source src="/assets/update.webm" type="video/webm">
@@ -445,6 +468,40 @@ cf stats --live

---

### cf list

List all stacks and their assigned hosts.

```bash
cf list [OPTIONS]
```

**Options:**

| Option | Description |
|--------|-------------|
| `--host, -H TEXT` | Filter to stacks on this host |
| `--simple, -s` | Plain output for scripting (one stack per line) |
| `--config, -c PATH` | Path to config file |

**Examples:**

```bash
# List all stacks
cf list

# Filter by host
cf list --host nas

# Plain output for scripting
cf list --simple

# Combine: list stack names on a specific host
cf list --host nuc --simple
```

---

## Configuration Commands

### cf check
@@ -587,6 +644,7 @@ cf config COMMAND
| Command | Description |
|---------|-------------|
| `init` | Create new config with examples |
| `init-env` | Generate .env file for Docker deployment |
| `show` | Display config with highlighting |
| `path` | Print config file path |
| `validate` | Validate syntax and schema |
@@ -598,6 +656,7 @@ cf config COMMAND
| Subcommand | Options |
|------------|---------|
| `init` | `--path/-p PATH`, `--force/-f` |
| `init-env` | `--path/-p PATH`, `--output/-o PATH`, `--force/-f` |
| `show` | `--path/-p PATH`, `--raw/-r` |
| `edit` | `--path/-p PATH` |
| `path` | `--path/-p PATH` |
@@ -633,6 +692,12 @@ cf config symlink

# Create symlink to specific file
cf config symlink /opt/compose-farm/config.yaml

# Generate .env file in current directory
cf config init-env

# Generate .env at specific path
cf config init-env -o /opt/stacks/.env
```

---
@@ -107,7 +107,7 @@ Supported compose file names (checked in order):

### traefik_file

Path to auto-generated Traefik file-provider config. When set, Compose Farm regenerates this file after `up`, `down`, `restart`, and `update` commands.
Path to auto-generated Traefik file-provider config. When set, Compose Farm regenerates this file after `up`, `down`, and `update` commands.

```yaml
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
@@ -121,6 +121,16 @@ Stack name running Traefik. Stacks on the same host are skipped in file-provider
traefik_stack: traefik
```

### glances_stack

Stack name running [Glances](https://nicolargo.github.io/glances/) for host resource monitoring. When set, the CLI (`cf stats --containers`) and web UI display CPU, memory, and container stats for all hosts.

```yaml
glances_stack: glances
```

The Glances stack should run on all hosts and expose port 61208. See the README for full setup instructions.

## Hosts Configuration

### Basic Host
@@ -257,6 +267,25 @@ When generating Traefik config, Compose Farm resolves `${VAR}` and `${VAR:-defau
1. The stack's `.env` file
2. Current environment
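A minimal sketch of that lookup order (illustrative only; not the actual resolver in the traefik module):

```python
# Sketch: resolve ${VAR} / ${VAR:-default}, with the stack's .env file
# taking priority over the process environment (illustrative only).
import os
import re

def resolve(value: str, dotenv: dict[str, str]) -> str:
    def lookup(match: re.Match[str]) -> str:
        name, _, default = match.group(1).partition(":-")
        return dotenv.get(name) or os.environ.get(name) or default
    return re.sub(r"\$\{([^}]+)\}", lookup, value)

# `DOMAIN` comes from the .env mapping, not the process environment:
print(resolve("Host(`app.${DOMAIN:-example.com}`)", {"DOMAIN": "lab.example.org"}))
```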
### Compose Farm Environment Variables

These environment variables configure Compose Farm itself:

| Variable | Description |
|----------|-------------|
| `CF_CONFIG` | Path to config file |
| `CF_WEB_STACK` | Web UI stack name (Docker only, enables self-update detection and local host inference) |

**Docker deployment variables** (used in docker-compose.yml):

| Variable | Description | Generated by |
|----------|-------------|--------------|
| `CF_COMPOSE_DIR` | Compose files directory | `cf config init-env` |
| `CF_UID` / `CF_GID` | User/group ID for containers | `cf config init-env` |
| `CF_HOME` / `CF_USER` | Home directory and username | `cf config init-env` |
| `CF_SSH_DIR` | SSH keys volume mount | Manual |
| `CF_XDG_CONFIG` | Config backup volume mount | Manual |

## Config Commands

### Initialize Config
@@ -1,5 +1,5 @@
# Update Demo
# Shows updating stacks (pull + build + down + up)
# Shows updating stacks (only recreates containers if images changed)

Output docs/assets/update.gif
Output docs/assets/update.webm
@@ -21,24 +21,37 @@ import uvicorn

from compose_farm.config import Config as CFConfig
from compose_farm.config import load_config
from compose_farm.executor import (
    get_container_compose_labels as _original_get_compose_labels,
)
from compose_farm.glances import ContainerStats
from compose_farm.glances import fetch_container_stats as _original_fetch_container_stats
from compose_farm.state import load_state as _original_load_state
from compose_farm.web.app import create_app
from compose_farm.web.cdn import CDN_ASSETS, ensure_vendor_cache

# NOTE: Do NOT import create_app here - it must be imported AFTER patches are applied
# to ensure the patched get_config is used by all route modules

if TYPE_CHECKING:
    from collections.abc import Generator

    from playwright.sync_api import BrowserContext, Page, Route

# Stacks to exclude from demo recordings (exact match)
DEMO_EXCLUDE_STACKS = {"arr"}
# Substrings to exclude from demo recordings (case-insensitive)
DEMO_EXCLUDE_PATTERNS = {"arr", "vpn", "tash"}


def _should_exclude(name: str) -> bool:
    """Check if a stack/container name should be excluded from demo."""
    name_lower = name.lower()
    return any(pattern in name_lower for pattern in DEMO_EXCLUDE_PATTERNS)


def _get_filtered_config() -> CFConfig:
    """Load config but filter out excluded stacks."""
    config = load_config()
    filtered_stacks = {
        name: host for name, host in config.stacks.items() if name not in DEMO_EXCLUDE_STACKS
        name: host for name, host in config.stacks.items() if not _should_exclude(name)
    }
    return CFConfig(
        compose_dir=config.compose_dir,
@@ -46,6 +59,7 @@ def _get_filtered_config() -> CFConfig:
        stacks=filtered_stacks,
        traefik_file=config.traefik_file,
        traefik_stack=config.traefik_stack,
        glances_stack=config.glances_stack,
        config_path=config.config_path,
    )
@@ -53,7 +67,37 @@ def _get_filtered_config() -> CFConfig:
def _get_filtered_state(config: CFConfig) -> dict[str, str | list[str]]:
    """Load state but filter out excluded stacks."""
    state = _original_load_state(config)
    return {name: host for name, host in state.items() if name not in DEMO_EXCLUDE_STACKS}
    return {name: host for name, host in state.items() if not _should_exclude(name)}


async def _filtered_fetch_container_stats(
    host_name: str,
    host_address: str,
    port: int = 61208,
    request_timeout: float = 10.0,
) -> tuple[list[ContainerStats] | None, str | None]:
    """Fetch container stats but filter out excluded containers."""
    containers, error = await _original_fetch_container_stats(
        host_name, host_address, port, request_timeout
    )
    if containers:
        # Filter by container name (stack is empty at this point)
        containers = [c for c in containers if not _should_exclude(c.name)]
    return containers, error


async def _filtered_get_compose_labels(
    config: CFConfig,
    host_name: str,
) -> dict[str, tuple[str, str]]:
    """Get compose labels but filter out excluded stacks."""
    labels = await _original_get_compose_labels(config, host_name)
    # Filter out containers whose stack (project) name should be excluded
    return {
        name: (stack, service)
        for name, (stack, service) in labels.items()
        if not _should_exclude(stack)
    }


@pytest.fixture(scope="session")
@@ -84,19 +128,23 @@ def server_url() -> Generator[str, None, None]:

    # Patch at source module level so all callers get filtered versions
    patches = [
        # Patch load_state at source - all functions calling it get filtered state
        # Patch load_config at source - get_config() calls this internally
        patch("compose_farm.config.load_config", _get_filtered_config),
        # Patch load_state at source and where imported
        patch("compose_farm.state.load_state", _get_filtered_state),
        # Patch get_config where imported
        patch("compose_farm.web.routes.pages.get_config", _get_filtered_config),
        patch("compose_farm.web.routes.api.get_config", _get_filtered_config),
        patch("compose_farm.web.routes.actions.get_config", _get_filtered_config),
        patch("compose_farm.web.app.get_config", _get_filtered_config),
        patch("compose_farm.web.ws.get_config", _get_filtered_config),
        patch("compose_farm.web.routes.pages.load_state", _get_filtered_state),
        # Patch container fetch to filter out excluded containers (Live Stats page)
        patch("compose_farm.glances.fetch_container_stats", _filtered_fetch_container_stats),
        # Patch compose labels to filter out excluded stacks
        patch("compose_farm.executor.get_container_compose_labels", _filtered_get_compose_labels),
    ]

    for p in patches:
        p.start()

    # Import create_app AFTER patches are started so route modules see patched get_config
    from compose_farm.web.app import create_app  # noqa: PLC0415

    with socket.socket() as s:
        s.bind(("127.0.0.1", 0))
        port = s.getsockname()[1]
@@ -160,6 +208,7 @@ def recording_context(
        if url.startswith(url_prefix):
            route.fulfill(status=200, content_type=content_type, body=filepath.read_bytes())
            return
        print(f"UNCACHED CDN request: {url}")
        route.abort("failed")

    context.route(re.compile(r"https://(cdn\.jsdelivr\.net|unpkg\.com)/.*"), handle_cdn)
@@ -176,6 +225,35 @@ def recording_page(recording_context: BrowserContext) -> Generator[Page, None, N
    page.close()


@pytest.fixture
def wide_recording_context(
    browser: Any,  # pytest-playwright's browser fixture
    recording_output_dir: Path,
) -> Generator[BrowserContext, None, None]:
    """Browser context with wider viewport for demos needing more horizontal space.

    NOTE: This fixture does NOT use CDN interception (unlike recording_context).
    CDN interception was causing inline scripts from containers.html to be
    removed from the DOM, likely due to Tailwind's browser plugin behavior.
    """
    context = browser.new_context(
        viewport={"width": 1920, "height": 1080},
        record_video_dir=str(recording_output_dir),
        record_video_size={"width": 1920, "height": 1080},
    )

    yield context
    context.close()


@pytest.fixture
def wide_recording_page(wide_recording_context: BrowserContext) -> Generator[Page, None, None]:
    """Page with wider viewport for demos needing more horizontal space."""
    page = wide_recording_context.new_page()
    yield page
    page.close()


# Demo helper functions
85
docs/demos/web/demo_live_stats.py
Normal file
@@ -0,0 +1,85 @@
"""Demo: Live Stats page.

Records a ~20 second demo showing:
- Navigating to Live Stats via command palette
- Container table with real-time stats
- Filtering containers
- Sorting by different columns
- Auto-refresh countdown

Run: pytest docs/demos/web/demo_live_stats.py -v --no-cov
"""

from __future__ import annotations

from typing import TYPE_CHECKING

import pytest
from conftest import (
    open_command_palette,
    pause,
    slow_type,
    wait_for_sidebar,
)

if TYPE_CHECKING:
    from playwright.sync_api import Page


@pytest.mark.browser  # type: ignore[misc]
def test_demo_live_stats(wide_recording_page: Page, server_url: str) -> None:
    """Record Live Stats page demo."""
    page = wide_recording_page

    # Start on dashboard
    page.goto(server_url)
    wait_for_sidebar(page)
    pause(page, 1000)

    # Navigate to Live Stats via command palette
    open_command_palette(page)
    pause(page, 400)
    slow_type(page, "#cmd-input", "live", delay=100)
    pause(page, 500)
    page.keyboard.press("Enter")
    page.wait_for_url("**/live-stats", timeout=5000)

    # Wait for containers to load (may take ~10s on first load due to SSH)
    page.wait_for_selector("#container-rows tr:not(:has(.loading))", timeout=30000)
    pause(page, 2000)  # Let viewer see the full table with timer

    # Demonstrate filtering
    slow_type(page, "#filter-input", "grocy", delay=100)
    pause(page, 1500)  # Show filtered results

    # Clear filter
    page.fill("#filter-input", "")
    pause(page, 1000)

    # Sort by memory (click header)
    page.click("th:has-text('Mem')")
    pause(page, 1500)

    # Sort by CPU
    page.click("th:has-text('CPU')")
    pause(page, 1500)

    # Sort by host
    page.click("th:has-text('Host')")
    pause(page, 1500)

    # Watch auto-refresh timer count down
    pause(page, 3500)  # Wait for refresh to happen

    # Hover on action menu to show pause behavior
    action_btn = page.locator('button[onclick^="openActionMenu"]').first
    action_btn.scroll_into_view_if_needed()
    action_btn.hover()
    pause(page, 2000)  # Show paused state (timer shows ⏸) and action menu

    # Move away to close menu and resume refresh
    page.locator("h2").first.hover()  # Move to header
    pause(page, 3500)  # Watch countdown resume and refresh happen

    # Final pause
    pause(page, 1000)
@@ -37,6 +37,7 @@ DEMOS = [
    "workflow",
    "console",
    "shell",
    "live_stats",
]

# High-quality ffmpeg settings for VP8 encoding
101
docs/docker-deployment.md
Normal file
@@ -0,0 +1,101 @@
---
icon: lucide/container
---

# Docker Deployment

Run the Compose Farm web UI in Docker.

## Quick Start

**1. Get the compose file:**

```bash
curl -O https://raw.githubusercontent.com/basnijholt/compose-farm/main/docker-compose.yml
```

**2. Generate `.env` file:**

```bash
cf config init-env
```

This auto-detects settings from your `compose-farm.yaml`:

- `DOMAIN` from existing traefik labels
- `CF_COMPOSE_DIR` from config
- `CF_UID/GID/HOME/USER` from current user

Review the output and edit if needed.

**3. Set up SSH keys:**

```bash
docker compose run --rm cf ssh setup
```

**4. Start the web UI:**

```bash
docker compose up -d web
```

Open `http://localhost:9000` (or `https://compose-farm.example.com` if using Traefik).
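The generated file is plain `KEY=value` pairs; a representative example is shown below (the values here are placeholders, such as the config path and username, and yours come from auto-detection):

```
# Generated by: cf config init-env
# From config: /opt/stacks/compose-farm.yaml

# Domain for Traefik labels
DOMAIN=example.com

# Compose files location
CF_COMPOSE_DIR=/opt/stacks

# Run as current user (recommended for NFS)
CF_UID=1000
CF_GID=1000
CF_HOME=/home/alice
CF_USER=alice
```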
---

## Configuration

The `cf config init-env` command auto-detects most settings. After running it, review the generated `.env` file and edit if needed:

```bash
$EDITOR .env
```

### What init-env detects

| Variable | How it's detected |
|----------|-------------------|
| `DOMAIN` | Extracted from traefik labels in your stacks |
| `CF_COMPOSE_DIR` | From `compose_dir` in your config |
| `CF_UID/GID/HOME/USER` | From current user (for NFS compatibility) |

If auto-detection fails for any value, edit the `.env` file manually.

### Glances Monitoring

To show host CPU/memory stats in the dashboard, deploy [Glances](https://nicolargo.github.io/glances/) on your hosts. When running the web UI container, Compose Farm infers the local host from `CF_WEB_STACK` and uses the Glances container name for that host.

See [Host Resource Monitoring](https://github.com/basnijholt/compose-farm#host-resource-monitoring-glances) in the README.

---

## Troubleshooting

### SSH "Permission denied" or "Host key verification failed"

Regenerate keys:

```bash
docker compose run --rm cf ssh setup
```

### Files created as root

Set the non-root `CF_UID`/`CF_GID` variables described above and restart.

---

## All Environment Variables

For advanced users, here's the complete reference:

| Variable | Description | Default |
|----------|-------------|---------|
| `DOMAIN` | Domain for Traefik labels | *(required)* |
| `CF_COMPOSE_DIR` | Compose files directory | `/opt/stacks` |
| `CF_UID` / `CF_GID` | User/group ID | `0` (root) |
| `CF_HOME` | Home directory | `/root` |
| `CF_USER` | Username for SSH | `root` |
| `CF_WEB_STACK` | Web UI stack name (enables self-update, local host inference) | *(none)* |
| `CF_SSH_DIR` | SSH keys directory | `~/.ssh/compose-farm` |
| `CF_XDG_CONFIG` | Config/backup directory | `~/.config/compose-farm` |
@@ -329,7 +329,7 @@ cf apply

```bash
cf update --all
# Runs: pull + build + down + up for each stack
# Only recreates containers if images changed
```

## Next Steps
@@ -0,0 +1,6 @@
<!-- Privacy-friendly analytics by Plausible -->
<script async src="https://plausible.nijho.lt/js/pa-NRX7MolONWKTUREJpAjkB.js"></script>
<script>
window.plausible=window.plausible||function(){(plausible.q=plausible.q||[]).push(arguments)},plausible.init=plausible.init||function(i){plausible.o=i||{}};
plausible.init()
</script>
@@ -139,7 +139,6 @@ stacks:
With `traefik_file` set, these commands auto-regenerate the config:

- `cf up`
- `cf down`
- `cf restart`
- `cf update`
- `cf apply`
@@ -51,10 +51,32 @@ Press `Ctrl+K` (or `Cmd+K` on macOS) to open the command palette. Use fuzzy sear
### Dashboard (`/`)

- Stack overview with status indicators
- Host statistics
- Host statistics (CPU, memory, disk, load via Glances)
- Pending operations (migrations, orphaned stacks)
- Quick actions via command palette

### Live Stats (`/live-stats`)

Real-time container monitoring across all hosts, powered by [Glances](https://nicolargo.github.io/glances/).

- **Live metrics**: CPU, memory, network I/O for every container
- **Auto-refresh**: Updates every 3 seconds (pauses when dropdown menus are open)
- **Filtering**: Type to filter containers by name, stack, host, or image
- **Sorting**: Click column headers to sort by any metric
- **Update detection**: Shows when container images have updates available

<video autoplay loop muted playsinline>
  <source src="/assets/web-live_stats.webm" type="video/webm">
</video>

#### Requirements

Live Stats requires Glances to be deployed on all hosts:

1. Add `glances_stack: glances` to your `compose-farm.yaml`
2. Deploy a Glances stack that runs on all hosts (see [example](https://github.com/basnijholt/compose-farm/tree/main/examples/glances))
3. Glances must expose its REST API on port 61208

### Stack Detail (`/stack/{name}`)

- Compose file editor (Monaco)
@@ -7,9 +7,10 @@ Real-world examples demonstrating compose-farm patterns for multi-host Docker de
| Stack | Type | Demonstrates |
|---------|------|--------------|
| [traefik](traefik/) | Infrastructure | Reverse proxy, Let's Encrypt, file-provider |
| [coredns](coredns/) | Infrastructure | Wildcard DNS for `*.local` domains |
| [mealie](mealie/) | Single container | Traefik labels, resource limits, environment vars |
| [uptime-kuma](uptime-kuma/) | Single container | Docker socket, user mapping, custom DNS |
| [paperless-ngx](paperless-ngx/) | Multi-container | Redis + App stack (SQLite) |
| [paperless-ngx](paperless-ngx/) | Multi-container | Redis + PostgreSQL + App stack |
| [autokuma](autokuma/) | Multi-host | Demonstrates `all` keyword (runs on every host) |

## Key Patterns
@@ -53,7 +54,8 @@ labels:
  - traefik.http.routers.myapp-local.entrypoints=web
```

> **Note:** `.local` domains require local DNS (e.g., Pi-hole, Technitium) to resolve to your Traefik host.
> **Note:** `.local` domains require local DNS to resolve to your Traefik host.
> The [coredns](coredns/) example provides this - edit `Corefile` to set your Traefik IP.

### Environment Variables
@@ -88,23 +90,6 @@ stacks:
  autokuma: all  # Runs on every configured host
```

### Multi-Container Stacks

Database-backed apps with multiple services:

```yaml
services:
  redis:
    image: redis:7
  app:
    depends_on:
      - redis
```

> **NFS + PostgreSQL Warning:** PostgreSQL should NOT run on NFS storage due to
> fsync and file locking issues. Use SQLite (safe for single-writer on NFS) or
> keep PostgreSQL data on local volumes (non-migratable).

### AutoKuma Labels (Optional)

The autokuma example demonstrates compose-farm's **multi-host feature** - running the same stack on all hosts using the `all` keyword. AutoKuma itself is not part of compose-farm; it's just a good example because it needs to run on every host to monitor local Docker containers.
@@ -125,8 +110,8 @@ cd examples
# 1. Create the shared network on all hosts
compose-farm init-network

# 2. Start Traefik first (the reverse proxy)
compose-farm up traefik
# 2. Start infrastructure (reverse proxy + DNS)
compose-farm up traefik coredns

# 3. Start other stacks
compose-farm up mealie uptime-kuma
@@ -168,4 +153,4 @@ traefik_file: /opt/stacks/traefik/dynamic.d/compose-farm.yml
traefik_stack: traefik
```

With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, `restart`, and `update` commands.
With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, and `update` commands.
@@ -3,6 +3,7 @@ deployed:
    - primary
    - secondary
    - local
  coredns: primary
  mealie: secondary
  paperless-ngx: primary
  traefik: primary
@@ -5,7 +5,7 @@
compose_dir: /opt/stacks/compose-farm/examples

# Auto-regenerate Traefik file-provider config after up/down/restart/update
# Auto-regenerate Traefik file-provider config after up/down/update
traefik_file: /opt/stacks/compose-farm/examples/traefik/dynamic.d/compose-farm.yml
traefik_stack: traefik  # Skip Traefik's host in file-provider (docker provider handles it)
@@ -27,6 +27,7 @@ hosts:
stacks:
  # Infrastructure (runs on primary where Traefik is)
  traefik: primary
  coredns: primary  # DNS for *.local resolution

  # Multi-host stacks (runs on ALL hosts)
  # AutoKuma monitors Docker containers on each host
2
examples/coredns/.env
Normal file
@@ -0,0 +1,2 @@
# CoreDNS doesn't need environment variables
# The Traefik IP is configured in the Corefile
22
examples/coredns/Corefile
Normal file
@@ -0,0 +1,22 @@
# CoreDNS configuration for .local domain resolution
#
# Resolves *.local to the Traefik host IP (where your reverse proxy runs).
# All other queries are forwarded to upstream DNS.

# Handle .local domains - resolve everything to Traefik's host
local {
    template IN A {
        answer "{{ .Name }} 60 IN A 192.168.1.10"
    }
    template IN AAAA {
        # Return empty for AAAA to avoid delays on IPv4-only networks
        rcode NOERROR
    }
}

# Forward everything else to upstream DNS
. {
    forward . 1.1.1.1 8.8.8.8
    cache 300
    errors
}
27
examples/coredns/compose.yaml
Normal file
@@ -0,0 +1,27 @@
# CoreDNS - DNS server for .local domain resolution
#
# Demonstrates:
# - Wildcard DNS for *.local domains
# - Config file mounting from stack directory
# - UDP/TCP port exposure
#
# This enables all the .local routes in the examples to work.
# Point your devices/router DNS to this server's IP.
name: coredns
services:
  coredns:
    image: coredns/coredns:latest
    container_name: coredns
    restart: unless-stopped
    networks:
      - mynetwork
    ports:
      - "53:53/udp"
      - "53:53/tcp"
    volumes:
      - ./Corefile:/root/Corefile:ro
    command: -conf /root/Corefile

networks:
  mynetwork:
    external: true
@@ -1,3 +1,4 @@
# Copy to .env and fill in your values
DOMAIN=example.com
PAPERLESS_SECRET_KEY=change-me-to-a-random-string
POSTGRES_PASSWORD=change-me-to-a-secure-password
PAPERLESS_SECRET_KEY=change-me-to-a-long-random-string
@@ -1,44 +1,57 @@
# Paperless-ngx - Document management system
#
# Demonstrates:
# - HTTPS route: paperless.${DOMAIN} (e.g., paperless.example.com) with Let's Encrypt
# - HTTP route: paperless.local for LAN access without TLS
# - Multi-container stack (Redis + App with SQLite)
#
# NOTE: This example uses SQLite (the default) instead of PostgreSQL.
# PostgreSQL should NOT be used with NFS storage due to fsync/locking issues.
# If you need PostgreSQL, use local volumes for the database.
# - HTTPS route: paperless.${DOMAIN} with Let's Encrypt
# - HTTP route: paperless.local for LAN access
# - Multi-container stack (Redis + PostgreSQL + App)
# - Separate env_file for app-specific settings
name: paperless-ngx
services:
  redis:
    image: redis:8
  broker:
    image: redis:7
    container_name: paperless-redis
    restart: unless-stopped
    networks:
      - mynetwork
    volumes:
      - /mnt/data/paperless/redis:/data
      - /mnt/data/paperless/redisdata:/data

  db:
    image: postgres:16
    container_name: paperless-db
    restart: unless-stopped
    networks:
      - mynetwork
    volumes:
      - /mnt/data/paperless/pgdata:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

  paperless:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    container_name: paperless
    restart: unless-stopped
    depends_on:
      - redis
      - db
      - broker
    networks:
      - mynetwork
    ports:
      - "8000:8000"
    volumes:
      # SQLite database stored here (safe on NFS for single-writer)
      - /mnt/data/paperless/data:/usr/src/paperless/data
      - /mnt/data/paperless/media:/usr/src/paperless/media
      - /mnt/data/paperless/export:/usr/src/paperless/export
      - /mnt/data/paperless/consume:/usr/src/paperless/consume
    environment:
      PAPERLESS_REDIS: redis://redis:6379
      PAPERLESS_REDIS: redis://broker:6379
      PAPERLESS_DBHOST: db
      PAPERLESS_URL: https://paperless.${DOMAIN}
      PAPERLESS_SECRET_KEY: ${PAPERLESS_SECRET_KEY}
      PAPERLESS_TIME_ZONE: America/Los_Angeles
      PAPERLESS_OCR_LANGUAGE: eng
      USERMAP_UID: 1000
      USERMAP_GID: 1000
    labels:
@@ -7,7 +7,7 @@
name: traefik
services:
  traefik:
    image: traefik:v3.2
    image: traefik:v3.6
    container_name: traefik
    command:
      - --api.dashboard=true
@@ -1,7 +1,7 @@
"""Hatch build hook to vendor CDN assets for offline use.

During wheel builds, this hook:
1. Parses base.html to find elements with data-vendor attributes
1. Reads vendor-assets.json to find assets marked for vendoring
2. Downloads each CDN asset to a temporary vendor directory
3. Rewrites base.html to use local /static/vendor/ paths
4. Fetches and bundles license information
@@ -13,6 +13,7 @@ distributed wheel has vendored assets.
from __future__ import annotations

import json
import re
import shutil
import subprocess
@@ -23,22 +24,6 @@ from urllib.request import Request, urlopen
from hatchling.builders.hooks.plugin.interface import BuildHookInterface

# Matches elements with data-vendor attribute: extracts URL and target filename
# Example: <script src="https://..." data-vendor="htmx.js">
# Captures: (1) src/href, (2) URL, (3) attributes between, (4) vendor filename
VENDOR_PATTERN = re.compile(r'(src|href)="(https://[^"]+)"([^>]*?)data-vendor="([^"]+)"')

# License URLs for each package (GitHub raw URLs)
LICENSE_URLS: dict[str, tuple[str, str]] = {
    "htmx": ("MIT", "https://raw.githubusercontent.com/bigskysoftware/htmx/master/LICENSE"),
    "xterm": ("MIT", "https://raw.githubusercontent.com/xtermjs/xterm.js/master/LICENSE"),
    "daisyui": ("MIT", "https://raw.githubusercontent.com/saadeghi/daisyui/master/LICENSE"),
    "tailwindcss": (
        "MIT",
        "https://raw.githubusercontent.com/tailwindlabs/tailwindcss/master/LICENSE",
    ),
}


def _download(url: str) -> bytes:
    """Download a URL, trying urllib first then curl as fallback."""
@@ -61,7 +46,14 @@ def _download(url: str) -> bytes:
    return bytes(result.stdout)


def _generate_licenses_file(temp_dir: Path) -> None:
def _load_vendor_assets(root: Path) -> dict[str, Any]:
    """Load vendor-assets.json from the web module."""
    json_path = root / "src" / "compose_farm" / "web" / "vendor-assets.json"
    with json_path.open() as f:
        return json.load(f)


def _generate_licenses_file(temp_dir: Path, licenses: dict[str, dict[str, str]]) -> None:
    """Download and combine license files into LICENSES.txt."""
    lines = [
        "# Vendored Dependencies - License Information",
@@ -73,7 +65,9 @@ def _generate_licenses_file(temp_dir: Path) -> None:
        "",
    ]

    for pkg_name, (license_type, license_url) in LICENSE_URLS.items():
    for pkg_name, license_info in licenses.items():
        license_type = license_info["type"]
        license_url = license_info["url"]
        lines.append(f"## {pkg_name} ({license_type})")
        lines.append(f"Source: {license_url}")
        lines.append("")
@@ -107,44 +101,57 @@ class VendorAssetsHook(BuildHookInterface): # type: ignore[misc]
        if not base_html_path.exists():
            return

        # Load vendor assets configuration
        vendor_config = _load_vendor_assets(Path(self.root))
        assets_to_vendor = vendor_config["assets"]

        if not assets_to_vendor:
            return

        # Create temp directory for vendored assets
        temp_dir = Path(tempfile.mkdtemp(prefix="compose_farm_vendor_"))
        vendor_dir = temp_dir / "vendor"
        vendor_dir.mkdir()

        # Read and parse base.html
        # Read base.html
        html_content = base_html_path.read_text()

        # Build URL to filename mapping and download assets
        url_to_filename: dict[str, str] = {}

        # Find all elements with data-vendor attribute and download them
        for match in VENDOR_PATTERN.finditer(html_content):
            url = match.group(2)
            filename = match.group(4)

            if url in url_to_filename:
                continue

        for asset in assets_to_vendor:
            url = asset["url"]
            filename = asset["filename"]
            url_to_filename[url] = filename
            filepath = vendor_dir / filename
            filepath.parent.mkdir(parents=True, exist_ok=True)
            content = _download(url)
            (vendor_dir / filename).write_bytes(content)
            filepath.write_bytes(content)

        if not url_to_filename:
            return
        # Generate LICENSES.txt from the JSON config
        _generate_licenses_file(vendor_dir, vendor_config["licenses"])

        # Generate LICENSES.txt
        _generate_licenses_file(vendor_dir)
        # Rewrite HTML: replace CDN URLs with local paths and remove data-vendor attributes
        # Pattern matches: src="URL" ... data-vendor="filename" or href="URL" ... data-vendor="filename"
        vendor_pattern = re.compile(r'(src|href)="(https://[^"]+)"([^>]*?)data-vendor="([^"]+)"')

        # Rewrite HTML to use local paths (remove data-vendor, update URL)
        def replace_vendor_tag(match: re.Match[str]) -> str:
            attr = match.group(1)  # src or href
            url = match.group(2)
            between = match.group(3)  # attributes between URL and data-vendor
            filename = match.group(4)
            if url in url_to_filename:
                filename = url_to_filename[url]
                return f'{attr}="/static/vendor/{filename}"{between}'
            return match.group(0)

        modified_html = VENDOR_PATTERN.sub(replace_vendor_tag, html_content)
        modified_html = vendor_pattern.sub(replace_vendor_tag, html_content)

        # Inject vendored mode flag for JavaScript to detect
        # Insert right after <head> tag so it's available early
        modified_html = modified_html.replace(
            "<head>",
            "<head>\n <script>window.CF_VENDORED=true;</script>",
            1,  # Only replace first occurrence
        )

        # Write modified base.html to temp
        templates_dir = temp_dir / "templates"
@@ -30,7 +30,8 @@ classifiers = [
    "Intended Audience :: Developers",
    "Intended Audience :: System Administrators",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Operating System :: MacOS",
    "Operating System :: POSIX :: Linux",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
@@ -46,6 +47,7 @@ dependencies = [
    "asyncssh>=2.14.0",
    "pyyaml>=6.0",
    "rich>=13.0.0",
    "python-dotenv>=1.0.0",
]

[project.optional-dependencies]
@@ -53,6 +55,7 @@ web = [
    "fastapi[standard]>=0.109.0",
    "jinja2>=3.1.0",
    "websockets>=12.0",
    "humanize>=4.0.0",
]

[project.urls]
@@ -121,6 +124,10 @@ python_version = "3.11"
strict = true
plugins = ["pydantic.mypy"]

[[tool.mypy.overrides]]
module = "compose_farm._version"
ignore_missing_imports = true

[[tool.mypy.overrides]]
module = "asyncssh.*"
ignore_missing_imports = true
@@ -171,8 +178,12 @@ python-version = "3.11"
exclude = [
    "hatch_build.py",  # Build-time only, hatchling not in dev deps
    "docs/demos/**",  # Demo scripts with local conftest imports
    "src/compose_farm/_version.py",  # Generated at build time
]

[tool.ty.rules]
unresolved-import = "ignore"  # _version.py is generated at build time

[dependency-groups]
dev = [
    "mypy>=1.19.0",
@@ -142,6 +142,9 @@ def load_config_or_exit(config_path: Path | None) -> Config:
    except FileNotFoundError as e:
        print_error(str(e))
        raise typer.Exit(1) from e
    except Exception as e:
        print_error(f"Invalid config: {e}")
        raise typer.Exit(1) from e


def get_stacks(
@@ -3,13 +3,12 @@
from __future__ import annotations

import os
import platform
import shlex
import shutil
import subprocess
from importlib import resources
from pathlib import Path
from typing import Annotated
from typing import TYPE_CHECKING, Annotated

import typer
@@ -17,6 +16,9 @@ from compose_farm.cli.app import app
from compose_farm.console import MSG_CONFIG_NOT_FOUND, console, print_error, print_success
from compose_farm.paths import config_search_paths, default_config_path, find_config_path

if TYPE_CHECKING:
    from compose_farm.config import Config

config_app = typer.Typer(
    name="config",
    help="Manage compose-farm configuration files.",
@@ -43,8 +45,6 @@ def _get_editor() -> str:
    """Get the user's preferred editor ($EDITOR > $VISUAL > platform default)."""
    if editor := os.environ.get("EDITOR") or os.environ.get("VISUAL"):
        return editor
    if platform.system() == "Windows":
        return "notepad"
    return next((e for e in ("nano", "vim", "vi") if shutil.which(e)), "vi")
@@ -68,6 +68,22 @@ def _get_config_file(path: Path | None) -> Path | None:
    return config_path.resolve() if config_path else None


def _load_config_with_path(path: Path | None) -> tuple[Path, Config]:
    """Load config and return both the resolved path and Config object.

    Exits with error if config not found or invalid.
    """
    from compose_farm.cli.common import load_config_or_exit  # noqa: PLC0415

    config_file = _get_config_file(path)
    if config_file is None:
        print_error(MSG_CONFIG_NOT_FOUND)
        raise typer.Exit(1)

    cfg = load_config_or_exit(config_file)
    return config_file, cfg


def _report_missing_config(explicit_path: Path | None = None) -> None:
    """Report that a config file was not found."""
    console.print("[yellow]Config file not found.[/yellow]")
@@ -135,7 +151,7 @@ def config_edit(
    console.print(f"[dim]Opening {config_file} with {editor}...[/dim]")

    try:
        editor_cmd = shlex.split(editor, posix=os.name != "nt")
        editor_cmd = shlex.split(editor)
    except ValueError as e:
        print_error("Invalid editor command. Check [bold]$EDITOR[/]/[bold]$VISUAL[/]")
        raise typer.Exit(1) from e
@@ -207,23 +223,7 @@ def config_validate(
    path: _PathOption = None,
) -> None:
    """Validate the config file syntax and schema."""
    config_file = _get_config_file(path)

    if config_file is None:
        print_error(MSG_CONFIG_NOT_FOUND)
        raise typer.Exit(1)

    # Lazy import: pydantic adds ~50ms to startup, only load when actually needed
    from compose_farm.config import load_config  # noqa: PLC0415

    try:
        cfg = load_config(config_file)
    except FileNotFoundError as e:
        print_error(str(e))
        raise typer.Exit(1) from e
    except Exception as e:
        print_error(f"Invalid config: {e}")
        raise typer.Exit(1) from e
    config_file, cfg = _load_config_with_path(path)

    print_success(f"Valid config: {config_file}")
    console.print(f"  Hosts: {len(cfg.hosts)}")
@@ -293,5 +293,114 @@ def config_symlink(
    console.print(f"  -> {target_path}")


def _detect_domain(cfg: Config) -> str | None:
    """Try to detect DOMAIN from traefik Host() rules in existing stacks.

    Uses extract_website_urls from traefik module to get interpolated
    URLs, then extracts the domain from the first valid URL.
    Skips local domains (.local, localhost, etc.).
    """
    from urllib.parse import urlparse  # noqa: PLC0415

    from compose_farm.traefik import extract_website_urls  # noqa: PLC0415

    max_stacks_to_check = 10
    min_domain_parts = 2
    subdomain_parts = 4
    skip_tlds = {"local", "localhost", "internal", "lan", "home"}

    for stack_name in list(cfg.stacks.keys())[:max_stacks_to_check]:
        urls = extract_website_urls(cfg, stack_name)
        for url in urls:
            host = urlparse(url).netloc
            parts = host.split(".")
            # Skip local/internal domains
            if parts[-1].lower() in skip_tlds:
                continue
            if len(parts) >= subdomain_parts:
                # e.g., "app.lab.nijho.lt" -> "lab.nijho.lt"
                return ".".join(parts[-3:])
            if len(parts) >= min_domain_parts:
                # e.g., "app.example.com" -> "example.com"
                return ".".join(parts[-2:])
    return None


@config_app.command("init-env")
def config_init_env(
    path: _PathOption = None,
    output: Annotated[
        Path | None,
        typer.Option(
            "--output", "-o", help="Output .env file path. Defaults to .env in current directory."
        ),
    ] = None,
    force: _ForceOption = False,
) -> None:
    """Generate a .env file for Docker deployment.

    Reads the compose-farm.yaml config and auto-detects settings:

    - CF_COMPOSE_DIR from compose_dir
    - CF_UID/GID/HOME/USER from current user
    - DOMAIN from traefik labels in stacks (if found)

    Example::

        cf config init-env                    # Create .env in current directory
        cf config init-env -o /path/to/.env   # Create .env at specific path

    """
    config_file, cfg = _load_config_with_path(path)

    # Determine output path (default: current directory)
    env_path = output.expanduser().resolve() if output else Path.cwd() / ".env"

    if env_path.exists() and not force:
        console.print(f"[yellow].env file already exists:[/] {env_path}")
        if not typer.confirm("Overwrite?"):
            console.print("[dim]Aborted.[/dim]")
            raise typer.Exit(0)

    # Auto-detect values
    uid = os.getuid()
    gid = os.getgid()
    home = os.environ.get("HOME", "/root")
    user = os.environ.get("USER", "root")
    compose_dir = str(cfg.compose_dir)
    domain = _detect_domain(cfg)

    # Generate .env content
    lines = [
        "# Generated by: cf config init-env",
        f"# From config: {config_file}",
        "",
        "# Domain for Traefik labels",
        f"DOMAIN={domain or 'example.com'}",
        "",
        "# Compose files location",
        f"CF_COMPOSE_DIR={compose_dir}",
        "",
        "# Run as current user (recommended for NFS)",
        f"CF_UID={uid}",
        f"CF_GID={gid}",
        f"CF_HOME={home}",
        f"CF_USER={user}",
        "",
    ]

    env_path.write_text("\n".join(lines), encoding="utf-8")

    print_success(f"Created .env file: {env_path}")
    console.print()
    console.print("[dim]Detected settings:[/dim]")
    console.print(f"  DOMAIN: {domain or '[yellow]example.com[/] (edit this)'}")
    console.print(f"  CF_COMPOSE_DIR: {compose_dir}")
    console.print(f"  CF_UID/GID: {uid}:{gid}")
    console.print()
    console.print("[dim]Review and edit as needed:[/dim]")
    console.print(f"  [cyan]$EDITOR {env_path}[/cyan]")


# Register config subcommand on the shared app
app.add_typer(config_app, name="config", rich_help_panel="Configuration")
@@ -2,6 +2,7 @@
from __future__ import annotations

import shlex
from pathlib import Path
from typing import TYPE_CHECKING, Annotated
@@ -28,8 +29,9 @@ from compose_farm.cli.common import (
)
from compose_farm.cli.management import _discover_stacks_full
from compose_farm.console import MSG_DRY_RUN, console, print_error, print_success
from compose_farm.executor import run_compose_on_host, run_on_stacks, run_sequential_on_stacks
from compose_farm.executor import run_compose_on_host, run_on_stacks
from compose_farm.operations import (
    build_up_cmd,
    stop_orphaned_stacks,
    stop_stray_stacks,
    up_stacks,
@@ -49,6 +51,14 @@ def up(
    all_stacks: AllOption = False,
    host: HostOption = None,
    service: ServiceOption = None,
    pull: Annotated[
        bool,
        typer.Option("--pull", help="Pull images before starting (--pull always)"),
    ] = False,
    build: Annotated[
        bool,
        typer.Option("--build", help="Build images before starting"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Start stacks (docker compose up -d). Auto-migrates if host changed."""
@@ -58,9 +68,13 @@ def up(
            print_error("--service requires exactly one stack")
            raise typer.Exit(1)
        # For service-level up, use run_on_stacks directly (no migration logic)
        results = run_async(run_on_stacks(cfg, stack_list, f"up -d {service}", raw=True))
        results = run_async(
            run_on_stacks(
                cfg, stack_list, build_up_cmd(pull=pull, build=build, service=service), raw=True
            )
        )
    else:
        results = run_async(up_stacks(cfg, stack_list, raw=True))
        results = run_async(up_stacks(cfg, stack_list, raw=True, pull=pull, build=build))
    maybe_regenerate_traefik(cfg, results)
    report_results(results)
@@ -161,19 +175,17 @@ def restart(
    service: ServiceOption = None,
    config: ConfigOption = None,
) -> None:
    """Restart stacks (down + up). With --service, restarts just that service."""
    """Restart running containers (docker compose restart)."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
    if service:
        if len(stack_list) != 1:
            print_error("--service requires exactly one stack")
            raise typer.Exit(1)
        # For service-level restart, use docker compose restart (more efficient)
        raw = True
        results = run_async(run_on_stacks(cfg, stack_list, f"restart {service}", raw=raw))
        cmd = f"restart {service}"
    else:
        raw = len(stack_list) == 1
        results = run_async(run_sequential_on_stacks(cfg, stack_list, ["down", "up -d"], raw=raw))
    maybe_regenerate_traefik(cfg, results)
        cmd = "restart"
    raw = len(stack_list) == 1
    results = run_async(run_on_stacks(cfg, stack_list, cmd, raw=raw))
    report_results(results)
@@ -184,36 +196,8 @@ def update(
|
||||
service: ServiceOption = None,
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Update stacks (pull + build + down + up). With --service, updates just that service."""
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
if service:
|
||||
if len(stack_list) != 1:
|
||||
print_error("--service requires exactly one stack")
|
||||
raise typer.Exit(1)
|
||||
# For service-level update: pull + build + stop + up (stop instead of down)
|
||||
raw = True
|
||||
results = run_async(
|
||||
run_sequential_on_stacks(
|
||||
cfg,
|
||||
stack_list,
|
||||
[
|
||||
f"pull --ignore-buildable {service}",
|
||||
f"build {service}",
|
||||
f"stop {service}",
|
||||
f"up -d {service}",
|
||||
],
|
||||
raw=raw,
|
||||
)
|
||||
)
|
||||
else:
|
||||
raw = len(stack_list) == 1
|
||||
results = run_async(
|
||||
run_sequential_on_stacks(
|
||||
cfg, stack_list, ["pull --ignore-buildable", "build", "down", "up -d"], raw=raw
|
||||
)
|
||||
)
|
||||
maybe_regenerate_traefik(cfg, results)
|
||||
report_results(results)
|
||||
"""Update stacks (pull + build + up). Shorthand for 'up --pull --build'."""
|
||||
up(stacks=stacks, all_stacks=all_stacks, service=service, pull=True, build=True, config=config)
|
||||
|
||||
|
||||
def _discover_strays(cfg: Config) -> dict[str, list[str]]:
|
||||
@@ -410,10 +394,10 @@ def compose(
|
||||
else:
|
||||
target_host = hosts[0]
|
||||
|
||||
# Build the full compose command
|
||||
# Build the full compose command (quote args to preserve spaces)
|
||||
full_cmd = command
|
||||
if args:
|
||||
full_cmd += " " + " ".join(args)
|
||||
full_cmd += " " + " ".join(shlex.quote(arg) for arg in args)
|
||||
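
A quick illustration of what the quoting change buys (standalone sketch, not part of the commit):

# Hypothetical illustration of the shlex.quote fix above.
import shlex

args = ["exec", "web", "echo", "hello world"]
unquoted = " ".join(args)                         # "hello world" splits into two words
quoted = " ".join(shlex.quote(a) for a in args)   # becomes 'hello world', space preserved
print(unquoted)  # exec web echo hello world
print(quoted)    # exec web echo 'hello world'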

# Run with raw=True for proper TTY handling (progress bars, interactive)
result = run_async(run_compose_on_host(cfg, resolved_stack, target_host, full_cmd, raw=True))
@@ -423,5 +407,9 @@ def compose(
raise typer.Exit(result.exit_code)


# Alias: cf a = cf apply
app.command("a", hidden=True)(apply)
# Aliases (hidden from help, shown in --help as "Aliases: ...")
app.command("a", hidden=True)(apply) # cf a = cf apply
app.command("r", hidden=True)(restart) # cf r = cf restart
app.command("u", hidden=True)(update) # cf u = cf update
app.command("p", hidden=True)(pull) # cf p = cf pull
app.command("c", hidden=True)(compose) # cf c = cf compose

@@ -56,7 +56,6 @@ from compose_farm.operations import (
check_stack_requirements,
)
from compose_farm.state import get_orphaned_stacks, load_state, save_state
from compose_farm.traefik import generate_traefik_config, render_traefik_config

# --- Sync helpers ---

@@ -328,6 +327,8 @@ def _report_orphaned_stacks(cfg: Config) -> bool:

def _report_traefik_status(cfg: Config, stacks: list[str]) -> None:
"""Check and report traefik label status."""
from compose_farm.traefik import generate_traefik_config # noqa: PLC0415

try:
_, warnings = generate_traefik_config(cfg, stacks, check_all=True)
except (FileNotFoundError, ValueError):
@@ -447,6 +448,11 @@ def traefik_file(
config: ConfigOption = None,
) -> None:
"""Generate a Traefik file-provider fragment from compose Traefik labels."""
from compose_farm.traefik import ( # noqa: PLC0415
generate_traefik_config,
render_traefik_config,
)

stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
try:
dynamic, warnings = generate_traefik_config(cfg, stack_list)
@@ -653,3 +659,9 @@ def init_network(
failed = [r for r in results if not r.success]
if failed:
raise typer.Exit(1)


# Aliases (hidden from help)
app.command("rf", hidden=True)(refresh) # cf rf = cf refresh
app.command("ck", hidden=True)(check) # cf ck = cf check
app.command("tf", hidden=True)(traefik_file) # cf tf = cf traefik-file

@@ -21,17 +21,22 @@ from compose_farm.cli.common import (
report_results,
run_async,
run_parallel_with_progress,
validate_hosts,
)
from compose_farm.console import console, print_error
from compose_farm.console import console, print_error, print_warning
from compose_farm.executor import run_command, run_on_stacks
from compose_farm.state import get_stacks_needing_migration, group_stacks_by_host, load_state

if TYPE_CHECKING:
from collections.abc import Callable

from compose_farm.config import Config
from compose_farm.glances import ContainerStats


def _get_container_counts(cfg: Config) -> dict[str, int]:
"""Get container counts from all hosts with a progress bar."""
def _get_container_counts(cfg: Config, hosts: list[str] | None = None) -> dict[str, int]:
"""Get container counts from hosts with a progress bar."""
host_list = hosts if hosts is not None else list(cfg.hosts.keys())

async def get_count(host_name: str) -> tuple[str, int]:
host = cfg.hosts[host_name]
@@ -44,7 +49,7 @@ def _get_container_counts(cfg: Config) -> dict[str, int]:

results = run_parallel_with_progress(
"Querying hosts",
list(cfg.hosts.keys()),
host_list,
get_count,
)
return dict(results)
@@ -67,7 +72,7 @@ def _build_host_table(
if show_containers:
table.add_column("Containers", justify="right")

for host_name in sorted(cfg.hosts.keys()):
for host_name in sorted(stacks_by_host.keys()):
host = cfg.hosts[host_name]
configured = len(stacks_by_host[host_name])
running = len(running_by_host[host_name])
@@ -86,19 +91,46 @@ def _build_host_table(
return table


def _state_includes_host(host_value: str | list[str], host_name: str) -> bool:
"""Check whether a state entry includes the given host."""
if isinstance(host_value, list):
return host_name in host_value
return host_value == host_name


def _build_summary_table(
cfg: Config, state: dict[str, str | list[str]], pending: list[str]
cfg: Config,
state: dict[str, str | list[str]],
pending: list[str],
*,
host_filter: str | None = None,
) -> Table:
"""Build the summary table."""
on_disk = cfg.discover_compose_dirs()
if host_filter:
stacks_configured = [stack for stack in cfg.stacks if host_filter in cfg.get_hosts(stack)]
stacks_configured_set = set(stacks_configured)
state = {
stack: hosts
for stack, hosts in state.items()
if _state_includes_host(hosts, host_filter)
}
on_disk = {stack for stack in on_disk if stack in stacks_configured_set}
total_hosts = 1
stacks_configured_count = len(stacks_configured)
stacks_tracked_count = len(state)
else:
total_hosts = len(cfg.hosts)
stacks_configured_count = len(cfg.stacks)
stacks_tracked_count = len(state)

table = Table(title="Summary", show_header=False)
table.add_column("Label", style="dim")
table.add_column("Value", style="bold")

table.add_row("Total hosts", str(len(cfg.hosts)))
table.add_row("Stacks (configured)", str(len(cfg.stacks)))
table.add_row("Stacks (tracked)", str(len(state)))
table.add_row("Total hosts", str(total_hosts))
table.add_row("Stacks (configured)", str(stacks_configured_count))
table.add_row("Stacks (tracked)", str(stacks_tracked_count))
table.add_row("Compose files on disk", str(len(on_disk)))

if pending:
@@ -111,6 +143,81 @@ def _build_summary_table(
return table


def _format_network(rx: int, tx: int, fmt: Callable[[int], str]) -> str:
"""Format network I/O."""
return f"[dim]↓[/]{fmt(rx)} [dim]↑[/]{fmt(tx)}"


def _cpu_style(percent: float) -> str:
"""Rich style for CPU percentage."""
if percent > 80: # noqa: PLR2004
return "red"
if percent > 50: # noqa: PLR2004
return "yellow"
return "green"


def _mem_style(percent: float) -> str:
"""Rich style for memory percentage."""
if percent > 90: # noqa: PLR2004
return "red"
if percent > 70: # noqa: PLR2004
return "yellow"
return "green"


def _status_style(status: str) -> str:
"""Rich style for container status."""
s = status.lower()
if s == "running":
return "green"
if s == "exited":
return "red"
if s == "paused":
return "yellow"
return "dim"


def _build_containers_table(
containers: list[ContainerStats],
host_filter: str | None = None,
) -> Table:
"""Build Rich table for container stats."""
from compose_farm.glances import format_bytes # noqa: PLC0415

table = Table(title="Containers", show_header=True, header_style="bold cyan")
table.add_column("Stack", style="cyan")
table.add_column("Service", style="dim")
table.add_column("Host", style="magenta")
table.add_column("Image")
table.add_column("Status")
table.add_column("Uptime", justify="right")
table.add_column("CPU%", justify="right")
table.add_column("Memory", justify="right")
table.add_column("Net I/O", justify="right")

if host_filter:
containers = [c for c in containers if c.host == host_filter]

# Sort by stack, then service
containers = sorted(containers, key=lambda c: (c.stack.lower(), c.service.lower()))

for c in containers:
table.add_row(
c.stack or c.name,
c.service or c.name,
c.host,
c.image,
f"[{_status_style(c.status)}]{c.status}[/]",
c.uptime or "[dim]-[/]",
f"[{_cpu_style(c.cpu_percent)}]{c.cpu_percent:.1f}%[/]",
f"[{_mem_style(c.memory_percent)}]{format_bytes(c.memory_usage)}[/]",
_format_network(c.network_rx, c.network_tx, format_bytes),
)

return table


# --- Command functions ---


@@ -175,24 +282,66 @@ def stats(
bool,
typer.Option("--live", "-l", help="Query Docker for live container stats"),
] = False,
containers: Annotated[
bool,
typer.Option(
"--containers", "-C", help="Show per-container resource stats (requires Glances)"
),
] = False,
host: HostOption = None,
config: ConfigOption = None,
) -> None:
"""Show overview statistics for hosts and stacks.

Without --live: Shows config/state info (hosts, stacks, pending migrations).
Without flags: Shows config/state info (hosts, stacks, pending migrations).
With --live: Also queries Docker on each host for container counts.
With --containers: Shows per-container resource stats (requires Glances).
"""
cfg = load_config_or_exit(config)

host_filter = None
if host:
validate_hosts(cfg, host)
host_filter = host

# Handle --containers mode
if containers:
if not cfg.glances_stack:
print_error("Glances not configured")
console.print("[dim]Add 'glances_stack: glances' to compose-farm.yaml[/]")
raise typer.Exit(1)

from compose_farm.glances import fetch_all_container_stats # noqa: PLC0415

host_list = [host_filter] if host_filter else None
container_list = run_async(fetch_all_container_stats(cfg, hosts=host_list))

if not container_list:
print_warning("No containers found")
raise typer.Exit(0)

console.print(_build_containers_table(container_list, host_filter=host_filter))
return

# Validate and filter by host if specified
if host_filter:
all_hosts = [host_filter]
selected_hosts = {host_filter: cfg.hosts[host_filter]}
else:
all_hosts = list(cfg.hosts.keys())
selected_hosts = cfg.hosts

state = load_state(cfg)
pending = get_stacks_needing_migration(cfg)

all_hosts = list(cfg.hosts.keys())
stacks_by_host = group_stacks_by_host(cfg.stacks, cfg.hosts, all_hosts)
running_by_host = group_stacks_by_host(state, cfg.hosts, all_hosts)
# Filter pending migrations to selected host(s)
if host_filter:
pending = [stack for stack in pending if host_filter in cfg.get_hosts(stack)]
stacks_by_host = group_stacks_by_host(cfg.stacks, selected_hosts, all_hosts)
running_by_host = group_stacks_by_host(state, selected_hosts, all_hosts)

container_counts: dict[str, int] = {}
if live:
container_counts = _get_container_counts(cfg)
container_counts = _get_container_counts(cfg, all_hosts)

host_table = _build_host_table(
cfg, stacks_by_host, running_by_host, container_counts, show_containers=live
@@ -200,4 +349,46 @@ def stats(
console.print(host_table)

console.print()
console.print(_build_summary_table(cfg, state, pending))
console.print(_build_summary_table(cfg, state, pending, host_filter=host_filter))


@app.command("list", rich_help_panel="Monitoring")
def list_(
host: HostOption = None,
simple: Annotated[
bool,
typer.Option("--simple", "-s", help="Plain output (one stack per line, for scripting)"),
] = False,
config: ConfigOption = None,
) -> None:
"""List all stacks and their assigned hosts."""
cfg = load_config_or_exit(config)

stacks: list[tuple[str, str | list[str]]] = list(cfg.stacks.items())
if host:
stacks = [(s, h) for s, h in stacks if str(h) == host or host in str(h).split(",")]

if simple:
for stack, _ in sorted(stacks):
console.print(stack)
else:
# Assign colors to hosts for visual grouping
host_colors = ["magenta", "cyan", "green", "yellow", "blue", "red"]
unique_hosts = sorted({str(h) for _, h in stacks})
host_color_map = {h: host_colors[i % len(host_colors)] for i, h in enumerate(unique_hosts)}

table = Table(title="Stacks", show_header=True, header_style="bold cyan")
table.add_column("Stack")
table.add_column("Host")

for stack, host_val in sorted(stacks):
color = host_color_map.get(str(host_val), "white")
table.add_row(f"[{color}]{stack}[/]", f"[{color}]{host_val}[/]")

console.print(table)


# Aliases (hidden from help)
app.command("l", hidden=True)(logs) # cf l = cf logs
app.command("ls", hidden=True)(list_) # cf ls = cf list
app.command("s", hidden=True)(stats) # cf s = cf stats

@@ -13,6 +13,7 @@ from pathlib import Path
from typing import TYPE_CHECKING, Any

import yaml
from dotenv import dotenv_values

if TYPE_CHECKING:
from .config import Config
@@ -40,25 +41,37 @@ def _load_env(compose_path: Path) -> dict[str, str]:
Reads from .env file in the same directory as compose file,
then overlays current environment variables.
"""
env: dict[str, str] = {}
env_path = compose_path.parent / ".env"
if env_path.exists():
for line in env_path.read_text().splitlines():
stripped = line.strip()
if not stripped or stripped.startswith("#") or "=" not in stripped:
continue
key, value = stripped.split("=", 1)
key = key.strip()
value = value.strip()
if (value.startswith('"') and value.endswith('"')) or (
value.startswith("'") and value.endswith("'")
):
value = value[1:-1]
env[key] = value
env: dict[str, str] = {k: v for k, v in dotenv_values(env_path).items() if v is not None}
env.update({k: v for k, v in os.environ.items() if isinstance(v, str)})
return env
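
python-dotenv's dotenv_values maps keys with no value to None, which is what the comprehension above filters out. A standalone sketch:

# Sketch of the dotenv_values behavior relied on above (hypothetical .env path).
from dotenv import dotenv_values

# A bare "EMPTY" line in .env maps to None, hence the None filter.
values = dotenv_values(".env")  # e.g. {"DOMAIN": "example.com", "EMPTY": None}
env = {k: v for k, v in values.items() if v is not None}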


def parse_compose_data(content: str) -> dict[str, Any]:
"""Parse compose YAML content into a dict."""
compose_data = yaml.safe_load(content) or {}
return compose_data if isinstance(compose_data, dict) else {}


def load_compose_data(compose_path: Path) -> dict[str, Any]:
"""Load compose YAML from a file path."""
return parse_compose_data(compose_path.read_text())


def load_compose_data_for_stack(config: Config, stack: str) -> tuple[Path, dict[str, Any]]:
"""Load compose YAML for a stack, returning (path, data)."""
compose_path = config.get_compose_path(stack)
if not compose_path.exists():
return compose_path, {}
return compose_path, load_compose_data(compose_path)


def extract_services(compose_data: dict[str, Any]) -> dict[str, Any]:
"""Extract services mapping from compose data."""
raw_services = compose_data.get("services", {})
return raw_services if isinstance(raw_services, dict) else {}


def _interpolate(value: str, env: dict[str, str]) -> str:
"""Perform ${VAR} and ${VAR:-default} interpolation."""
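
Only the signature appears in this hunk; as a rough, illustrative re-implementation of the ${VAR} / ${VAR:-default} semantics it names (the repo's actual body may differ in edge cases):

# Illustrative re-implementation, not the repo's code.
import re

def interpolate_sketch(value: str, env: dict[str, str]) -> str:
    pattern = re.compile(r"\$\{(?P<name>[A-Za-z_][A-Za-z0-9_]*)(?::-(?P<default>[^}]*))?\}")

    def repl(m: re.Match[str]) -> str:
        # Use the env value if set, else the :- default, else empty string.
        return env.get(m.group("name"), m.group("default") or "")

    return pattern.sub(repl, value)

assert interpolate_sketch("${HOME}/data", {"HOME": "/srv"}) == "/srv/data"
assert interpolate_sketch("${PORT:-8080}", {}) == "8080"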
@@ -185,16 +198,15 @@ def parse_host_volumes(config: Config, stack: str) -> list[str]:
Returns a list of absolute host paths used as volume mounts.
Skips named volumes and resolves relative paths.
"""
compose_path = config.get_compose_path(stack)
compose_path, compose_data = load_compose_data_for_stack(config, stack)
if not compose_path.exists():
return []

env = _load_env(compose_path)
compose_data = yaml.safe_load(compose_path.read_text()) or {}
raw_services = compose_data.get("services", {})
if not isinstance(raw_services, dict):
raw_services = extract_services(compose_data)
if not raw_services:
return []

env = _load_env(compose_path)
paths: list[str] = []
compose_dir = compose_path.parent

@@ -221,16 +233,15 @@ def parse_devices(config: Config, stack: str) -> list[str]:

Returns a list of host device paths (e.g., /dev/dri, /dev/dri/renderD128).
"""
compose_path = config.get_compose_path(stack)
compose_path, compose_data = load_compose_data_for_stack(config, stack)
if not compose_path.exists():
return []

env = _load_env(compose_path)
compose_data = yaml.safe_load(compose_path.read_text()) or {}
raw_services = compose_data.get("services", {})
if not isinstance(raw_services, dict):
raw_services = extract_services(compose_data)
if not raw_services:
return []

env = _load_env(compose_path)
devices: list[str] = []
for definition in raw_services.values():
if not isinstance(definition, dict):
@@ -260,18 +271,20 @@ def parse_external_networks(config: Config, stack: str) -> list[str]:

Returns a list of network names marked as external: true.
"""
compose_path = config.get_compose_path(stack)
compose_path, compose_data = load_compose_data_for_stack(config, stack)
if not compose_path.exists():
return []

compose_data = yaml.safe_load(compose_path.read_text()) or {}
networks = compose_data.get("networks", {})
if not isinstance(networks, dict):
return []

external_networks: list[str] = []
for name, definition in networks.items():
for key, definition in networks.items():
if isinstance(definition, dict) and definition.get("external") is True:
# Networks may have a "name" field, which may differ from the key.
# Use it if present, else fall back to the key.
name = str(definition.get("name", key))
external_networks.append(name)

return external_networks
@@ -285,15 +298,14 @@ def load_compose_services(

Returns (services_dict, env_dict, host_address).
"""
compose_path = config.get_compose_path(stack)
compose_path, compose_data = load_compose_data_for_stack(config, stack)
if not compose_path.exists():
message = f"[{stack}] Compose file not found: {compose_path}"
raise FileNotFoundError(message)

env = _load_env(compose_path)
compose_data = yaml.safe_load(compose_path.read_text()) or {}
raw_services = compose_data.get("services", {})
if not isinstance(raw_services, dict):
raw_services = extract_services(compose_data)
if not raw_services:
return {}, env, config.get_host(stack).address
return raw_services, env, config.get_host(stack).address


@@ -3,6 +3,7 @@

from __future__ import annotations

import getpass
import os
from pathlib import Path
from typing import Any

@@ -31,6 +32,9 @@ class Config(BaseModel, extra="forbid"):
stacks: dict[str, str | list[str]] # stack_name -> host_name or list of hosts
traefik_file: Path | None = None # Auto-regenerate traefik config after up/down
traefik_stack: str | None = None # Stack name for Traefik (skip its host in file-provider)
glances_stack: str | None = (
None # Stack name for Glances (enables host resource stats in web UI)
)
config_path: Path = Path() # Set by load_config()

def get_state_path(self) -> Path:
@@ -93,9 +97,17 @@ class Config(BaseModel, extra="forbid"):
host_names = self.get_hosts(stack)
return self.hosts[host_names[0]]

def get_stack_dir(self, stack: str) -> Path:
"""Get stack directory path."""
return self.compose_dir / stack

def get_compose_path(self, stack: str) -> Path:
"""Get compose file path for a stack (tries compose.yaml first)."""
stack_dir = self.compose_dir / stack
"""Get compose file path for a stack (tries compose.yaml first).

Note: This checks local filesystem. For remote execution, use
get_stack_dir() and let docker compose find the file.
"""
stack_dir = self.get_stack_dir(stack)
for filename in COMPOSE_FILENAMES:
candidate = stack_dir / filename
if candidate.exists():
@@ -113,6 +125,31 @@ class Config(BaseModel, extra="forbid"):
found.add(subdir.name)
return found

def get_web_stack(self) -> str:
"""Get web stack name from CF_WEB_STACK environment variable."""
return os.environ.get("CF_WEB_STACK", "")

def get_local_host_from_web_stack(self) -> str | None:
"""Resolve the local host from the web stack configuration (container only).

When running in the web UI container (CF_WEB_STACK is set), this returns
the host that the web stack runs on. This is used for:
- Glances connectivity (use container name instead of IP)
- Container exec (local docker exec vs SSH)
- File read/write (local filesystem vs SSH)

Returns None if not in container mode or web stack is not configured.
"""
if os.environ.get("CF_WEB_STACK") is None:
return None
web_stack = self.get_web_stack()
if not web_stack or web_stack not in self.stacks:
return None
host_names = self.get_hosts(web_stack)
if len(host_names) != 1:
return None
return host_names[0]


def _parse_hosts(raw_hosts: dict[str, Any]) -> dict[str, Host]:
"""Parse hosts from config, handling both simple and full forms."""

@@ -76,7 +76,7 @@ stacks:
# traefik_file: (optional) Auto-generate Traefik file-provider config
# ------------------------------------------------------------------------------
# When set, compose-farm automatically regenerates this file after
# up/down/restart/update commands. Traefik watches this file for changes.
# up/down/update commands. Traefik watches this file for changes.
#
# traefik_file: /opt/compose/traefik/dynamic.d/compose-farm.yml

@@ -87,3 +87,13 @@ stacks:
# skipped (they're handled by Traefik's Docker provider directly).
#
# traefik_stack: traefik

# ------------------------------------------------------------------------------
# glances_stack: (optional) Stack/container name for Glances
# ------------------------------------------------------------------------------
# When set, enables host resource monitoring via the Glances API. Used by:
# - CLI: `cf stats --containers` shows container stats from all hosts
# - Web UI: displays host resource graphs and container metrics
# This should be the container name that runs Glances on the same Docker network.
#
# glances_stack: glances

@@ -5,6 +5,7 @@ from __future__ import annotations
import asyncio
import socket
import subprocess
import time
from dataclasses import dataclass
from functools import lru_cache
from typing import TYPE_CHECKING, Any
@@ -23,24 +24,46 @@ LOCAL_ADDRESSES = frozenset({"local", "localhost", "127.0.0.1", "::1"})
_DEFAULT_SSH_PORT = 22


class TTLCache:
"""Simple TTL cache for async function results."""

def __init__(self, ttl_seconds: float = 30.0) -> None:
"""Initialize cache with default TTL in seconds."""
# Cache stores: key -> (timestamp, value, item_ttl)
self._cache: dict[str, tuple[float, Any, float]] = {}
self._default_ttl = ttl_seconds

def get(self, key: str) -> Any | None:
"""Get value if exists and not expired."""
if key in self._cache:
timestamp, value, item_ttl = self._cache[key]
if time.monotonic() - timestamp < item_ttl:
return value
del self._cache[key]
return None

def set(self, key: str, value: Any, ttl_seconds: float | None = None) -> None:
"""Set value with current timestamp and optional custom TTL."""
ttl = ttl_seconds if ttl_seconds is not None else self._default_ttl
self._cache[key] = (time.monotonic(), value, ttl)

def clear(self) -> None:
"""Clear all cached values."""
self._cache.clear()


# Cache compose labels per host for 30 seconds
_compose_labels_cache = TTLCache(ttl_seconds=30.0)
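
A minimal usage sketch for TTLCache (hypothetical keys and values):

# Hypothetical usage of the cache above.
cache = TTLCache(ttl_seconds=30.0)
cache.set("nas", {"web": ("web", "app")})   # default 30 s TTL
cache.set("vm1", {}, ttl_seconds=5.0)       # per-item TTL override
hit = cache.get("nas")                      # returns the value, or None once expired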


def _print_compose_command(
host_name: str,
compose_dir: str,
compose_path: str,
stack: str,
compose_cmd: str,
) -> None:
"""Print the docker compose command being executed.

Shows the host and a simplified command with relative path from compose_dir.
"""
# Show relative path from compose_dir for cleaner output
if compose_path.startswith(compose_dir):
rel_path = compose_path[len(compose_dir) :].lstrip("/")
else:
rel_path = compose_path

"""Print the docker compose command being executed."""
console.print(
f"[dim][magenta]{host_name}[/magenta]: docker compose -f {rel_path} {compose_cmd}[/dim]"
f"[dim][magenta]{host_name}[/magenta]: ({stack}) docker compose {compose_cmd}[/dim]"
)


@@ -158,15 +181,20 @@ def ssh_connect_kwargs(host: Host) -> dict[str, Any]:
"port": host.port,
"username": host.user,
"known_hosts": None,
"gss_auth": False, # Disable GSSAPI - causes multi-second delays
}
# Add SSH agent path (auto-detect forwarded agent if needed)
agent_path = get_ssh_auth_sock()
if agent_path:
kwargs["agent_path"] = agent_path
# Add key file fallback for when SSH agent is unavailable
# Add key file fallback (prioritized over agent if present)
key_path = get_key_path()
agent_path = get_ssh_auth_sock()

if key_path:
# If dedicated key exists, force use of it and ignore agent
# This avoids issues with stale/broken forwarded agents in Docker
kwargs["client_keys"] = [str(key_path)]
elif agent_path:
# Fallback to agent if no dedicated key
kwargs["agent_path"] = agent_path

return kwargs


@@ -324,11 +352,12 @@ async def run_compose(
"""Run a docker compose command for a stack."""
host_name = config.get_hosts(stack)[0]
host = config.hosts[host_name]
compose_path = config.get_compose_path(stack)
stack_dir = config.get_stack_dir(stack)

_print_compose_command(host_name, str(config.compose_dir), str(compose_path), compose_cmd)
_print_compose_command(host_name, stack, compose_cmd)

command = f"docker compose -f {compose_path} {compose_cmd}"
# Use cd to let docker compose find the compose file on the remote host
command = f'cd "{stack_dir}" && docker compose {compose_cmd}'
return await run_command(host, command, stack, stream=stream, raw=raw, prefix=prefix)


@@ -347,11 +376,12 @@ async def run_compose_on_host(
Used for migration - running 'down' on the old host before 'up' on new host.
"""
host = config.hosts[host_name]
compose_path = config.get_compose_path(stack)
stack_dir = config.get_stack_dir(stack)

_print_compose_command(host_name, str(config.compose_dir), str(compose_path), compose_cmd)
_print_compose_command(host_name, stack, compose_cmd)

command = f"docker compose -f {compose_path} {compose_cmd}"
# Use cd to let docker compose find the compose file on the remote host
command = f'cd "{stack_dir}" && docker compose {compose_cmd}'
return await run_command(host, command, stack, stream=stream, raw=raw, prefix=prefix)


@@ -403,14 +433,15 @@ async def _run_sequential_stack_commands_multi_host(
For multi-host stacks, prefix defaults to stack@host format.
"""
host_names = config.get_hosts(stack)
compose_path = config.get_compose_path(stack)
stack_dir = config.get_stack_dir(stack)
final_results: list[CommandResult] = []

for cmd in commands:
command = f"docker compose -f {compose_path} {cmd}"
# Use cd to let docker compose find the compose file on the remote host
command = f'cd "{stack_dir}" && docker compose {cmd}'
tasks = []
for host_name in host_names:
_print_compose_command(host_name, str(config.compose_dir), str(compose_path), cmd)
_print_compose_command(host_name, stack, cmd)
host = config.hosts[host_name]
# For multi-host stacks, always use stack@host prefix to distinguish output
label = f"{stack}@{host_name}" if len(host_names) > 1 else stack
@@ -487,10 +518,11 @@ async def check_stack_running(
) -> bool:
"""Check if a stack has running containers on a specific host."""
host = config.hosts[host_name]
compose_path = config.get_compose_path(stack)
stack_dir = config.get_stack_dir(stack)

# Use ps --status running to check for running containers
command = f"docker compose -f {compose_path} ps --status running -q"
# Use cd to let docker compose find the compose file on the remote host
command = f'cd "{stack_dir}" && docker compose ps --status running -q'
result = await run_command(host, command, stack, stream=False)

# If command succeeded and has output, containers are running
@@ -519,6 +551,50 @@ async def get_running_stacks_on_host(
return {line.strip() for line in result.stdout.splitlines() if line.strip()}


async def get_container_compose_labels(
config: Config,
host_name: str,
) -> dict[str, tuple[str, str]]:
"""Get compose labels for all containers on a host.

Returns dict of container_name -> (project, service).
Includes all containers (-a flag) since Glances shows stopped containers too.
Falls back to empty dict on timeout/error (5s timeout).
Results are cached for 30 seconds to reduce SSH overhead.
"""
# Check cache first
cached: dict[str, tuple[str, str]] | None = _compose_labels_cache.get(host_name)
if cached is not None:
return cached

host = config.hosts[host_name]
cmd = (
"docker ps -a --format "
'\'{{.Names}}\t{{.Label "com.docker.compose.project"}}\t'
'{{.Label "com.docker.compose.service"}}\''
)

try:
async with asyncio.timeout(5.0):
result = await run_command(host, cmd, stack=host_name, stream=False, prefix="")
except TimeoutError:
return {}
except Exception:
return {}

labels: dict[str, tuple[str, str]] = {}
if result.success:
for line in result.stdout.splitlines():
parts = line.strip().split("\t")
if len(parts) >= 3: # noqa: PLR2004
name, project, service = parts[0], parts[1], parts[2]
labels[name] = (project or "", service or "")

# Cache the result
_compose_labels_cache.set(host_name, labels)
return labels


async def _batch_check_existence(
config: Config,
host_name: str,
@@ -555,18 +631,28 @@ async def check_paths_exist(
host_name: str,
paths: list[str],
) -> dict[str, bool]:
"""Check if multiple paths exist on a specific host.
"""Check if multiple paths exist and are accessible on a specific host.

Returns a dict mapping path -> exists.
Handles permission denied as "exists" (path is there, just not accessible).
Uses timeout to detect stale NFS mounts that would hang.
"""
# Only report missing if stat says "No such file", otherwise assume exists
# (handles permission denied correctly - path exists, just not accessible)
# Use timeout to detect stale NFS mounts (which hang on access)
# - First try ls with timeout to check accessibility
# - If ls succeeds: path exists and is accessible
# - If ls fails/times out: use stat (also with timeout) to distinguish
# "no such file" from "permission denied" or stale NFS
# - Timeout (exit code 124) is treated as inaccessible (stale NFS mount)
return await _batch_check_existence(
config,
host_name,
paths,
lambda esc: f"stat '{esc}' 2>&1 | grep -q 'No such file' && echo 'N:{esc}' || echo 'Y:{esc}'",
lambda esc: (
f"OUT=$(timeout 2 stat '{esc}' 2>&1); RC=$?; "
f"if [ $RC -eq 124 ]; then echo 'N:{esc}'; "
f"elif echo \"$OUT\" | grep -q 'No such file'; then echo 'N:{esc}'; "
f"else echo 'Y:{esc}'; fi"
),
"mount-check",
)
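
For a single path, the new check lambda above expands to a small shell script; evaluating it for a hypothetical path makes the control flow concrete:

# Illustration only: what the generated existence check looks like for one path.
esc = "/mnt/media"  # hypothetical mount point
cmd = (
    f"OUT=$(timeout 2 stat '{esc}' 2>&1); RC=$?; "
    f"if [ $RC -eq 124 ]; then echo 'N:{esc}'; "
    f"elif echo \"$OUT\" | grep -q 'No such file'; then echo 'N:{esc}'; "
    f"else echo 'Y:{esc}'; fi"
)
print(cmd)  # a 2-second stat; exit 124 (timeout) or "No such file" -> N, anything else -> Y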

299
src/compose_farm/glances.py
Normal file
@@ -0,0 +1,299 @@
"""Glances API client for host resource monitoring."""

from __future__ import annotations

import asyncio
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any

from .executor import is_local

if TYPE_CHECKING:
from .config import Config, Host

# Default Glances REST API port
DEFAULT_GLANCES_PORT = 61208


def format_bytes(bytes_val: int) -> str:
"""Format bytes to human readable string (e.g., 1.5 GiB)."""
import humanize # noqa: PLC0415

return humanize.naturalsize(bytes_val, binary=True, format="%.1f")


def _get_glances_address(
host_name: str,
host: Host,
glances_container: str | None,
local_host: str | None = None,
) -> str:
"""Get the address to use for Glances API requests.

When running in a Docker container (CF_WEB_STACK set), the local host's Glances
may not be reachable via its LAN IP due to Docker network isolation. In this
case, we use the Glances container name for the local host.
"""
# CF_WEB_STACK indicates we're running in the web UI container.
in_container = os.environ.get("CF_WEB_STACK") is not None
if not in_container or not glances_container:
return host.address

if local_host and host_name == local_host:
return glances_container

# Fall back to is_local detection (may not work in container)
if is_local(host):
return glances_container

return host.address


@dataclass
class HostStats:
"""Resource statistics for a host."""

host: str
cpu_percent: float
mem_percent: float
swap_percent: float
load: float
disk_percent: float
net_rx_rate: float = 0.0 # bytes/sec
net_tx_rate: float = 0.0 # bytes/sec
error: str | None = None

@classmethod
def from_error(cls, host: str, error: str) -> HostStats:
"""Create a HostStats with an error."""
return cls(
host=host,
cpu_percent=0,
mem_percent=0,
swap_percent=0,
load=0,
disk_percent=0,
net_rx_rate=0,
net_tx_rate=0,
error=error,
)


async def fetch_host_stats(
host_name: str,
host_address: str,
port: int = DEFAULT_GLANCES_PORT,
request_timeout: float = 10.0,
) -> HostStats:
"""Fetch stats from a single host's Glances API."""
import httpx # noqa: PLC0415

base_url = f"http://{host_address}:{port}/api/4"

try:
async with httpx.AsyncClient(timeout=request_timeout) as client:
# Fetch quicklook stats (CPU, mem, load)
response = await client.get(f"{base_url}/quicklook")
if not response.is_success:
return HostStats.from_error(host_name, f"HTTP {response.status_code}")
data = response.json()

# Fetch filesystem stats for disk usage (root fs or max across all)
disk_percent = 0.0
try:
fs_response = await client.get(f"{base_url}/fs")
if fs_response.is_success:
fs_data = fs_response.json()
root = next((fs for fs in fs_data if fs.get("mnt_point") == "/"), None)
disk_percent = (
root.get("percent", 0)
if root
else max((fs.get("percent", 0) for fs in fs_data), default=0)
)
except httpx.HTTPError:
pass # Disk stats are optional

# Fetch network stats for rate (sum across non-loopback interfaces)
net_rx_rate, net_tx_rate = 0.0, 0.0
try:
net_response = await client.get(f"{base_url}/network")
if net_response.is_success:
for iface in net_response.json():
if not iface.get("interface_name", "").startswith("lo"):
net_rx_rate += iface.get("bytes_recv_rate_per_sec") or 0
net_tx_rate += iface.get("bytes_sent_rate_per_sec") or 0
except httpx.HTTPError:
pass # Network stats are optional

return HostStats(
host=host_name,
cpu_percent=data.get("cpu", 0),
mem_percent=data.get("mem", 0),
swap_percent=data.get("swap", 0),
load=data.get("load", 0),
disk_percent=disk_percent,
net_rx_rate=net_rx_rate,
net_tx_rate=net_tx_rate,
)
except httpx.TimeoutException:
return HostStats.from_error(host_name, "timeout")
except httpx.HTTPError as e:
return HostStats.from_error(host_name, str(e))
except Exception as e:
return HostStats.from_error(host_name, str(e))
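
A minimal usage sketch, assuming a host named "nas" reachable at 192.168.1.10 with Glances on the default port:

# Hypothetical host name and address, e.g. from a REPL.
import asyncio
from compose_farm.glances import fetch_host_stats

stats = asyncio.run(fetch_host_stats("nas", "192.168.1.10"))
if stats.error:
    print(f"{stats.host}: {stats.error}")
else:
    print(f"{stats.host}: cpu={stats.cpu_percent}% mem={stats.mem_percent}%")
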
async def fetch_all_host_stats(
config: Config,
port: int = DEFAULT_GLANCES_PORT,
) -> dict[str, HostStats]:
"""Fetch stats from all hosts in parallel."""
glances_container = config.glances_stack
local_host = config.get_local_host_from_web_stack()
tasks = [
fetch_host_stats(
name,
_get_glances_address(name, host, glances_container, local_host),
port,
)
for name, host in config.hosts.items()
]
results = await asyncio.gather(*tasks)
return {stats.host: stats for stats in results}


@dataclass
class ContainerStats:
"""Container statistics from Glances."""

name: str
host: str
status: str
image: str
cpu_percent: float
memory_usage: int # bytes
memory_limit: int # bytes
memory_percent: float
network_rx: int # cumulative bytes received
network_tx: int # cumulative bytes sent
uptime: str
ports: str
engine: str # docker, podman, etc.
stack: str = "" # compose project name (from docker labels)
service: str = "" # compose service name (from docker labels)


def _parse_container(data: dict[str, Any], host_name: str) -> ContainerStats:
"""Parse container data from Glances API response."""
# Image can be a list or string
image = data.get("image", ["unknown"])
if isinstance(image, list):
image = image[0] if image else "unknown"

# Calculate memory percent
mem_usage = data.get("memory_usage", 0) or 0
mem_limit = data.get("memory_limit", 1) or 1 # Avoid division by zero
mem_percent = (mem_usage / mem_limit) * 100 if mem_limit > 0 else 0

# Network stats
network = data.get("network", {}) or {}
network_rx = network.get("cumulative_rx", 0) or 0
network_tx = network.get("cumulative_tx", 0) or 0

return ContainerStats(
name=data.get("name", "unknown"),
host=host_name,
status=data.get("status", "unknown"),
image=image,
cpu_percent=data.get("cpu_percent", 0) or 0,
memory_usage=mem_usage,
memory_limit=mem_limit,
memory_percent=mem_percent,
network_rx=network_rx,
network_tx=network_tx,
uptime=data.get("uptime", ""),
ports=data.get("ports", "") or "",
engine=data.get("engine", "docker"),
)


async def fetch_container_stats(
host_name: str,
host_address: str,
port: int = DEFAULT_GLANCES_PORT,
request_timeout: float = 10.0,
) -> tuple[list[ContainerStats] | None, str | None]:
"""Fetch container stats from a single host's Glances API.

Returns:
(containers, error_message)
- Success: ([...], None)
- Failure: (None, "error message")

"""
import httpx # noqa: PLC0415

url = f"http://{host_address}:{port}/api/4/containers"

try:
async with httpx.AsyncClient(timeout=request_timeout) as client:
response = await client.get(url)
if not response.is_success:
return None, f"HTTP {response.status_code}: {response.reason_phrase}"
data = response.json()
return [_parse_container(c, host_name) for c in data], None
except httpx.ConnectError:
return None, "Connection refused (Glances offline?)"
except httpx.TimeoutException:
return None, "Connection timed out"
except Exception as e:
return None, str(e)


async def fetch_all_container_stats(
config: Config,
port: int = DEFAULT_GLANCES_PORT,
hosts: list[str] | None = None,
) -> list[ContainerStats]:
"""Fetch container stats from all hosts in parallel, enriched with compose labels."""
from .executor import get_container_compose_labels # noqa: PLC0415

glances_container = config.glances_stack
host_names = hosts if hosts is not None else list(config.hosts.keys())
local_host = config.get_local_host_from_web_stack()

async def fetch_host_data(
host_name: str,
host_address: str,
) -> list[ContainerStats]:
# Fetch Glances stats and compose labels in parallel
stats_task = fetch_container_stats(host_name, host_address, port)
labels_task = get_container_compose_labels(config, host_name)
(containers, _), labels = await asyncio.gather(stats_task, labels_task)

if containers is None:
# Skip failed hosts in aggregate view
return []

# Enrich containers with compose labels (mutate in place)
for c in containers:
c.stack, c.service = labels.get(c.name, ("", ""))
return containers

tasks = [
fetch_host_data(
name,
_get_glances_address(
name,
config.hosts[name],
glances_container,
local_host,
),
)
for name in host_names
if name in config.hosts
]
results = await asyncio.gather(*tasks)
# Flatten list of lists
return [container for host_containers in results for container in host_containers]
@@ -185,18 +185,38 @@ def _report_preflight_failures(
print_error(f" missing device: {dev}")


def build_up_cmd(
*,
pull: bool = False,
build: bool = False,
service: str | None = None,
) -> str:
"""Build compose 'up' subcommand with optional flags."""
parts = ["up", "-d"]
if pull:
parts.append("--pull always")
if build:
parts.append("--build")
if service:
parts.append(service)
return " ".join(parts)
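
Expected outputs of build_up_cmd for a few flag combinations (illustrative assertions):

# Illustrative checks of the helper above; "web" is a hypothetical service name.
assert build_up_cmd() == "up -d"
assert build_up_cmd(pull=True) == "up -d --pull always"
assert build_up_cmd(pull=True, build=True, service="web") == "up -d --pull always --build web"
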
async def _up_multi_host_stack(
cfg: Config,
stack: str,
prefix: str,
*,
raw: bool = False,
pull: bool = False,
build: bool = False,
) -> list[CommandResult]:
"""Start a multi-host stack on all configured hosts."""
host_names = cfg.get_hosts(stack)
results: list[CommandResult] = []
compose_path = cfg.get_compose_path(stack)
command = f"docker compose -f {compose_path} up -d"
stack_dir = cfg.get_stack_dir(stack)
# Use cd to let docker compose find the compose file on the remote host
command = f'cd "{stack_dir}" && docker compose {build_up_cmd(pull=pull, build=build)}'

# Pre-flight checks on all hosts
for host_name in host_names:
@@ -269,6 +289,8 @@ async def _up_single_stack(
prefix: str,
*,
raw: bool,
pull: bool = False,
build: bool = False,
) -> CommandResult:
"""Start a single-host stack with migration support."""
target_host = cfg.get_hosts(stack)[0]
@@ -297,7 +319,7 @@ async def _up_single_stack(

# Start on target host
console.print(f"{prefix} Starting on [magenta]{target_host}[/]...")
up_result = await _run_compose_step(cfg, stack, "up -d", raw=raw)
up_result = await _run_compose_step(cfg, stack, build_up_cmd(pull=pull, build=build), raw=raw)

# Update state on success, or rollback on failure
if up_result.success:
@@ -316,24 +338,101 @@ async def _up_single_stack(
return up_result


async def _up_stack_simple(
cfg: Config,
stack: str,
*,
raw: bool = False,
pull: bool = False,
build: bool = False,
) -> CommandResult:
"""Start a single-host stack without migration (parallel-safe)."""
target_host = cfg.get_hosts(stack)[0]

# Pre-flight check
preflight = await check_stack_requirements(cfg, stack, target_host)
if not preflight.ok:
_report_preflight_failures(stack, target_host, preflight)
return CommandResult(stack=stack, exit_code=1, success=False)

# Run with streaming for parallel output
result = await run_compose(cfg, stack, build_up_cmd(pull=pull, build=build), raw=raw)
if raw:
print()
if result.interrupted:
raise OperationInterruptedError

# Update state on success
if result.success:
set_stack_host(cfg, stack, target_host)

return result


async def up_stacks(
cfg: Config,
stacks: list[str],
*,
raw: bool = False,
pull: bool = False,
build: bool = False,
) -> list[CommandResult]:
"""Start stacks with automatic migration if host changed."""
"""Start stacks with automatic migration if host changed.

Stacks without migration run in parallel. Migration stacks run sequentially.
"""
# Categorize stacks
multi_host: list[str] = []
needs_migration: list[str] = []
simple: list[str] = []

for stack in stacks:
if cfg.is_multi_host(stack):
multi_host.append(stack)
else:
target = cfg.get_hosts(stack)[0]
current = get_stack_host(cfg, stack)
if current and current != target:
needs_migration.append(stack)
else:
simple.append(stack)

results: list[CommandResult] = []
total = len(stacks)

try:
for idx, stack in enumerate(stacks, 1):
prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{stack}][/]"
# Simple stacks: run in parallel (no migration needed)
if simple:
use_raw = raw and len(simple) == 1
simple_results = await asyncio.gather(
*[
_up_stack_simple(cfg, stack, raw=use_raw, pull=pull, build=build)
for stack in simple
]
)
results.extend(simple_results)

# Multi-host stacks: run in parallel
if multi_host:
multi_results = await asyncio.gather(
*[
_up_multi_host_stack(
cfg, stack, f"[cyan]\\[{stack}][/]", raw=raw, pull=pull, build=build
)
for stack in multi_host
]
)
for result_list in multi_results:
results.extend(result_list)

# Migration stacks: run sequentially for clear output and rollback
if needs_migration:
total = len(needs_migration)
for idx, stack in enumerate(needs_migration, 1):
prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{stack}][/]"
results.append(
await _up_single_stack(cfg, stack, prefix, raw=raw, pull=pull, build=build)
)

if cfg.is_multi_host(stack):
results.extend(await _up_multi_host_stack(cfg, stack, prefix, raw=raw))
else:
results.append(await _up_single_stack(cfg, stack, prefix, raw=raw))
except OperationInterruptedError:
raise KeyboardInterrupt from None


220
src/compose_farm/registry.py
Normal file
@@ -0,0 +1,220 @@
"""Container registry API client for tag discovery."""

from __future__ import annotations

import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING

if TYPE_CHECKING:
import httpx

# Image reference pattern: [registry/][namespace/]name[:tag][@digest]
IMAGE_PATTERN = re.compile(
r"^(?:(?P<registry>[^/]+\.[^/]+)/)?(?:(?P<namespace>[^/:@]+)/)?(?P<name>[^/:@]+)(?::(?P<tag>[^@]+))?(?:@(?P<digest>.+))?$"
)

# Docker Hub aliases
DOCKER_HUB_ALIASES = frozenset(
{"docker.io", "index.docker.io", "registry.hub.docker.com", "registry-1.docker.io"}
)

# Token endpoints per registry: (url, extra_params)
TOKEN_ENDPOINTS: dict[str, tuple[str, dict[str, str]]] = {
"docker.io": ("https://auth.docker.io/token", {"service": "registry.docker.io"}),
"ghcr.io": ("https://ghcr.io/token", {}),
}

# Registry URL overrides (Docker Hub uses a different host for API)
REGISTRY_URLS: dict[str, str] = {
"docker.io": "https://registry-1.docker.io",
}

HTTP_OK = 200

MANIFEST_ACCEPT = (
"application/vnd.docker.distribution.manifest.v2+json, "
"application/vnd.oci.image.manifest.v1+json, "
"application/vnd.oci.image.index.v1+json"
)


@dataclass(frozen=True)
class ImageRef:
"""Parsed container image reference."""

registry: str
namespace: str
name: str
tag: str
digest: str | None = None

@property
def full_name(self) -> str:
"""Full image name with namespace."""
return f"{self.namespace}/{self.name}" if self.namespace else self.name

@property
def display_name(self) -> str:
"""Display name (omits docker.io/library for official images)."""
if self.registry in DOCKER_HUB_ALIASES:
if self.namespace == "library":
return self.name
return self.full_name
return f"{self.registry}/{self.full_name}"

@classmethod
def parse(cls, image: str) -> ImageRef:
"""Parse image string into components."""
match = IMAGE_PATTERN.match(image)
if not match:
return cls("docker.io", "library", image.split(":")[0].split("@")[0], "latest")

groups = match.groupdict()
registry = groups.get("registry") or "docker.io"
namespace = groups.get("namespace") or ""
name = groups.get("name") or image
tag = groups.get("tag") or "latest"
digest = groups.get("digest")

# Docker Hub official images have implicit "library" namespace
if registry in DOCKER_HUB_ALIASES and not namespace:
namespace = "library"

return cls(registry, namespace, name, tag, digest)
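
A few parses the regex above should produce (illustrative; ImageRef is a frozen dataclass, so equality compares fields):

# Illustrative parse results; image strings are hypothetical.
assert ImageRef.parse("nginx:1.25") == ImageRef("docker.io", "library", "nginx", "1.25")
assert ImageRef.parse("ghcr.io/user/repo:tag") == ImageRef("ghcr.io", "user", "repo", "tag")
assert ImageRef.parse("redis") == ImageRef("docker.io", "library", "redis", "latest")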


@dataclass
class TagCheckResult:
"""Result of checking tags for an image."""

image: ImageRef
current_digest: str
available_updates: list[str] = field(default_factory=list)
error: str | None = None


class RegistryClient:
"""Unified OCI Distribution API client."""

def __init__(self, registry: str) -> None:
"""Initialize for a specific registry."""
self.registry = registry.lower()
# Normalize Docker Hub aliases
if self.registry in DOCKER_HUB_ALIASES:
self.registry = "docker.io"

self.registry_url = REGISTRY_URLS.get(self.registry, f"https://{self.registry}")
self._token_cache: dict[str, str] = {}

async def _get_token(self, image: ImageRef, client: httpx.AsyncClient) -> str | None:
"""Get auth token for the registry (cached per image)."""
cache_key = image.full_name
if cache_key in self._token_cache:
return self._token_cache[cache_key]

endpoint = TOKEN_ENDPOINTS.get(self.registry)
if not endpoint:
return None # No auth needed or unknown registry

url, extra_params = endpoint
params = {"scope": f"repository:{image.full_name}:pull", **extra_params}
resp = await client.get(url, params=params)

if resp.status_code == HTTP_OK:
token: str | None = resp.json().get("token")
if token:
self._token_cache[cache_key] = token
return token
return None

async def get_tags(self, image: ImageRef, client: httpx.AsyncClient) -> list[str]:
"""Fetch available tags for an image."""
headers = {}
token = await self._get_token(image, client)
if token:
headers["Authorization"] = f"Bearer {token}"

url = f"{self.registry_url}/v2/{image.full_name}/tags/list"
resp = await client.get(url, headers=headers)

if resp.status_code != HTTP_OK:
return []
tags: list[str] = resp.json().get("tags", [])
return tags

async def get_digest(self, image: ImageRef, tag: str, client: httpx.AsyncClient) -> str | None:
"""Get digest for a specific tag."""
headers = {"Accept": MANIFEST_ACCEPT}
token = await self._get_token(image, client)
if token:
headers["Authorization"] = f"Bearer {token}"

url = f"{self.registry_url}/v2/{image.full_name}/manifests/{tag}"
resp = await client.head(url, headers=headers)

if resp.status_code == HTTP_OK:
digest: str | None = resp.headers.get("docker-content-digest")
return digest
return None


def _parse_version(tag: str) -> tuple[int, ...] | None:
"""Parse version string into comparable tuple."""
tag = tag.lstrip("vV")
parts = tag.split(".")
try:
return tuple(int(p) for p in parts)
except ValueError:
return None


def _find_updates(current_tag: str, tags: list[str]) -> list[str]:
"""Find tags newer than current based on version comparison."""
current_version = _parse_version(current_tag)
if current_version is None:
return []

updates = []
for tag in tags:
tag_version = _parse_version(tag)
if tag_version and tag_version > current_version:
updates.append(tag)

updates.sort(key=lambda t: _parse_version(t) or (), reverse=True)
return updates
|
||||
|
||||
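Version-comparison sketch for the two helpers above (illustrative):

    _parse_version("v1.2.3")   # (1, 2, 3)
    _parse_version("latest")   # None -- non-numeric tags are skipped
    _find_updates("1.25", ["1.24", "1.26", "2.0", "alpine"])
    # ["2.0", "1.26"] -- only newer versions, sorted newest first
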
async def check_image_updates(
    image_str: str,
    client: httpx.AsyncClient,
) -> TagCheckResult:
    """Check if newer versions are available for an image.

    Args:
        image_str: Image string like "nginx:1.25" or "ghcr.io/user/repo:tag"
        client: httpx async client

    Returns:
        TagCheckResult with available updates

    """
    image = ImageRef.parse(image_str)
    registry_client = RegistryClient(image.registry)

    try:
        tags = await registry_client.get_tags(image, client)
        updates = _find_updates(image.tag, tags)
        current_digest = await registry_client.get_digest(image, image.tag, client) or ""

        return TagCheckResult(
            image=image,
            current_digest=current_digest,
            available_updates=updates,
        )
    except Exception as e:
        return TagCheckResult(
            image=image,
            current_digest="",
            error=str(e),
        )

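End-to-end usage sketch (illustrative; assumes this module is importable as
compose_farm.registry):

    import asyncio

    import httpx

    from compose_farm.registry import check_image_updates

    async def main() -> None:
        async with httpx.AsyncClient(timeout=10.0) as client:
            result = await check_image_updates("nginx:1.25", client)
            print(result.available_updates or result.error)

    asyncio.run(main())
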
@@ -64,8 +64,11 @@ def load_state(config: Config) -> dict[str, str | list[str]]:


def _sorted_dict(d: dict[str, str | list[str]]) -> dict[str, str | list[str]]:
    """Return a dictionary sorted by keys."""
    return dict(sorted(d.items(), key=lambda item: item[0]))
    """Return a dictionary sorted by keys, with list values also sorted."""
    return {
        k: sorted(v) if isinstance(v, list) else v
        for k, v in sorted(d.items(), key=lambda item: item[0])
    }


def save_state(config: Config, deployed: dict[str, str | list[str]]) -> None:

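Behavior sketch for the revised helper (illustrative):

    _sorted_dict({"b": ["y", "x"], "a": "host1"})
    # {"a": "host1", "b": ["x", "y"]} -- keys sorted, list values sorted too
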
@@ -4,18 +4,19 @@ from __future__ import annotations

import asyncio
import logging
import sys
from contextlib import asynccontextmanager, suppress
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any, cast

from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import ValidationError
from rich.logging import RichHandler

from compose_farm.web.deps import STATIC_DIR, get_config
from compose_farm.web.routes import actions, api, pages
from compose_farm.web.routes import actions, api, containers, pages
from compose_farm.web.streaming import TASK_TTL_SECONDS, cleanup_stale_tasks
from compose_farm.web.ws import router as ws_router

# Configure logging with Rich handler for compose_farm.web modules
logging.basicConfig(
@@ -64,17 +65,17 @@ def create_app() -> FastAPI:
        lifespan=lifespan,
    )

    # Enable Gzip compression for faster transfers over slow networks
    app.add_middleware(cast("Any", GZipMiddleware), minimum_size=1000)

    # Mount static files
    app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")

    app.include_router(pages.router)
    app.include_router(containers.router)
    app.include_router(api.router, prefix="/api")
    app.include_router(actions.router, prefix="/api")

    # WebSocket routes use Unix-only modules (fcntl, pty)
    if sys.platform != "win32":
        from compose_farm.web.ws import router as ws_router  # noqa: PLC0415

        app.include_router(ws_router)
    app.include_router(ws_router)

    return app

@@ -1,70 +1,39 @@
"""CDN asset definitions and caching for tests and demo recordings.

This module provides a single source of truth for CDN asset URLs used in
browser tests and demo recordings. Assets are intercepted and served from
a local cache to eliminate network variability.
This module provides CDN asset URLs used in browser tests and demo recordings.
Assets are intercepted and served from a local cache to eliminate network
variability.

Note: The canonical list of CDN assets for production is in base.html
(with data-vendor attributes). This module includes those plus dynamically
loaded assets (like Monaco editor modules loaded by app.js).
The canonical list of CDN assets is in vendor-assets.json. This module loads
that file and provides the CDN_ASSETS dict for test caching.
"""

from __future__ import annotations

import json
import subprocess
from typing import TYPE_CHECKING
from pathlib import Path


def _load_cdn_assets() -> dict[str, tuple[str, str]]:
    """Load CDN assets from vendor-assets.json.

    Returns:
        Dict mapping URL to (filename, content_type) tuple.

    """
    json_path = Path(__file__).parent / "vendor-assets.json"
    with json_path.open() as f:
        config = json.load(f)

    return {asset["url"]: (asset["filename"], asset["content_type"]) for asset in config["assets"]}

if TYPE_CHECKING:
    from pathlib import Path

# CDN assets to cache locally for tests/demos
# Format: URL -> (local_filename, content_type)
#
# If tests fail with "Uncached CDN request", add the URL here.
CDN_ASSETS: dict[str, tuple[str, str]] = {
    # From base.html (data-vendor attributes)
    "https://cdn.jsdelivr.net/npm/daisyui@5/themes.css": ("daisyui-themes.css", "text/css"),
    "https://cdn.jsdelivr.net/npm/daisyui@5": ("daisyui.css", "text/css"),
    "https://cdn.jsdelivr.net/npm/@tailwindcss/browser@4": (
        "tailwind.js",
        "application/javascript",
    ),
    "https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/css/xterm.css": ("xterm.css", "text/css"),
    "https://unpkg.com/htmx.org@2.0.4": ("htmx.js", "application/javascript"),
    "https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/lib/xterm.js": (
        "xterm.js",
        "application/javascript",
    ),
    "https://cdn.jsdelivr.net/npm/@xterm/addon-fit@0.10.0/lib/addon-fit.js": (
        "xterm-fit.js",
        "application/javascript",
    ),
    # Monaco editor - dynamically loaded by app.js
    "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/loader.js": (
        "monaco-loader.js",
        "application/javascript",
    ),
    "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/editor/editor.main.js": (
        "monaco-editor-main.js",
        "application/javascript",
    ),
    "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/editor/editor.main.css": (
        "monaco-editor-main.css",
        "text/css",
    ),
    "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/base/worker/workerMain.js": (
        "monaco-workerMain.js",
        "application/javascript",
    ),
    "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/basic-languages/yaml/yaml.js": (
        "monaco-yaml.js",
        "application/javascript",
    ),
    "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/base/browser/ui/codicons/codicon/codicon.ttf": (
        "monaco-codicon.ttf",
        "font/ttf",
    ),
}
# If tests fail with "Uncached CDN request", add the URL to vendor-assets.json.
CDN_ASSETS: dict[str, tuple[str, str]] = _load_cdn_assets()

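The loader above implies vendor-assets.json is shaped like this (sketch; shown
as an equivalent Python literal, with only the keys the loader actually reads):

    {
        "assets": [
            {
                "url": "https://unpkg.com/htmx.org@2.0.4",
                "filename": "htmx.js",
                "content_type": "application/javascript",
            },
        ],
    }
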
def download_url(url: str) -> bytes | None:
@@ -99,6 +68,7 @@ def ensure_vendor_cache(cache_dir: Path) -> Path:
        filepath = cache_dir / filename
        if filepath.exists():
            continue
        filepath.parent.mkdir(parents=True, exist_ok=True)
        content = download_url(url)
        if not content:
            msg = f"Failed to download {url} - check network/curl"

@@ -15,7 +15,7 @@ from pydantic import ValidationError
from compose_farm.executor import is_local

if TYPE_CHECKING:
    from compose_farm.config import Config
    from compose_farm.config import Config, Host

# Paths
WEB_DIR = Path(__file__).parent
@@ -52,8 +52,35 @@ def extract_config_error(exc: Exception) -> str:
    return str(exc)


def is_local_host(host_name: str, host: Host, config: Config) -> bool:
    """Check if a host should be treated as local.

    When running in a Docker container, is_local() may not work correctly because
    the container has different network IPs. This function first checks if the
    host matches the web stack host (container only), then falls back to is_local().

    This affects:
    - Container exec (local docker exec vs SSH)
    - File read/write (local filesystem vs SSH)
    - Shell sessions (local shell vs SSH)
    """
    local_host = config.get_local_host_from_web_stack()
    if local_host and host_name == local_host:
        return True
    return is_local(host)


def get_local_host(config: Config) -> str | None:
    """Find the local host name from config, if any."""
    """Find the local host name from config, if any.

    First checks the web stack host (container only), then falls back to is_local()
    detection.
    """
    # Web stack host takes precedence in container mode
    local_host = config.get_local_host_from_web_stack()
    if local_host and local_host in config.hosts:
        return local_host
    # Fall back to auto-detection
    for name, host in config.hosts.items():
        if is_local(host):
            return name

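Precedence sketch for the helpers above (illustrative; the stub config is a
hypothetical stand-in, not the real Config class):

    class StubConfig:
        hosts = {"nas": object()}

        def get_local_host_from_web_stack(self):
            return "nas"  # set when running inside the web stack's container

    # The web stack host wins even if is_local() would fail in-container:
    get_local_host(StubConfig())  # "nas"
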
@@ -1,5 +1,5 @@
"""Web routes."""

from compose_farm.web.routes import actions, api, pages
from compose_farm.web.routes import actions, api, containers, pages

__all__ = ["actions", "api", "pages"]
__all__ = ["actions", "api", "containers", "pages"]

@@ -96,7 +96,16 @@ async def pull_all() -> dict[str, Any]:

@router.post("/update-all")
async def update_all() -> dict[str, Any]:
    """Update all stacks (pull + build + down + up)."""
    """Update all stacks, excluding the web stack. Only recreates if images changed.

    The web stack is excluded to prevent the UI from shutting down mid-operation.
    Use 'cf update <web-stack>' manually to update the web UI.
    """
    config = get_config()
    task_id = _start_task(lambda tid: run_cli_streaming(config, ["update", "--all"], tid))
    return {"task_id": task_id, "command": "update --all"}
    # Get all stacks except the web stack to avoid self-shutdown
    web_stack = config.get_web_stack()
    stacks = [s for s in config.stacks if s != web_stack]
    if not stacks:
        return {"task_id": "", "command": "update (no stacks)", "skipped": True}
    task_id = _start_task(lambda tid: run_cli_streaming(config, ["update", *stacks], tid))
    return {"task_id": task_id, "command": f"update {' '.join(stacks)}"}

@@ -19,11 +19,12 @@ import yaml
from fastapi import APIRouter, Body, HTTPException, Query
from fastapi.responses import HTMLResponse

from compose_farm.compose import get_container_name
from compose_farm.executor import is_local, run_compose_on_host, ssh_connect_kwargs
from compose_farm.compose import extract_services, get_container_name, load_compose_data_for_stack
from compose_farm.executor import run_compose_on_host, ssh_connect_kwargs
from compose_farm.glances import fetch_all_host_stats
from compose_farm.paths import backup_dir, find_config_path
from compose_farm.state import load_state
from compose_farm.web.deps import get_config, get_templates
from compose_farm.web.deps import get_config, get_templates, is_local_host

logger = logging.getLogger(__name__)

@@ -50,7 +51,6 @@ def _backup_file(file_path: Path) -> Path | None:

    # Create backup directory mirroring original path structure
    # e.g., /opt/stacks/plex/compose.yaml -> ~/.config/compose-farm/backups/opt/stacks/plex/
    # On Windows: C:\Users\foo\stacks -> backups/Users/foo/stacks
    resolved = file_path.resolve()
    file_backup_dir = backup_dir() / resolved.parent.relative_to(resolved.anchor)
    file_backup_dir.mkdir(parents=True, exist_ok=True)
@@ -106,13 +106,11 @@ def _get_compose_services(config: Any, stack: str, hosts: list[str]) -> list[dic

    Returns one entry per container per host for multi-host stacks.
    """
    compose_path = config.get_compose_path(stack)
    if not compose_path or not compose_path.exists():
    compose_path, compose_data = load_compose_data_for_stack(config, stack)
    if not compose_path.exists():
        return []

    compose_data = yaml.safe_load(compose_path.read_text()) or {}
    raw_services = compose_data.get("services", {})
    if not isinstance(raw_services, dict):
    raw_services = extract_services(compose_data)
    if not raw_services:
        return []

    # Project name is the directory name (docker compose default)
@@ -346,10 +344,11 @@ async def read_console_file(
    path: Annotated[str, Query(description="File path")],
) -> dict[str, Any]:
    """Read a file from a host for the console editor."""
    config = get_config()
    host_config = _get_console_host(host, path)

    try:
        if is_local(host_config):
        if is_local_host(host, host_config, config):
            content = await _read_file_local(path)
        else:
            content = await _read_file_remote(host_config, path)
@@ -370,10 +369,11 @@ async def write_console_file(
    content: Annotated[str, Body(media_type="text/plain")],
) -> dict[str, Any]:
    """Write a file to a host from the console editor."""
    config = get_config()
    host_config = _get_console_host(host, path)

    try:
        if is_local(host_config):
        if is_local_host(host, host_config, config):
            saved = await _write_file_local(path, content)
            msg = f"Saved: {path}" if saved else "No changes to save"
        else:
@@ -385,3 +385,19 @@ async def write_console_file(
    except Exception as e:
        logger.exception("Failed to write file %s to host %s", path, host)
        raise HTTPException(status_code=500, detail=str(e)) from e


@router.get("/glances", response_class=HTMLResponse)
async def get_glances_stats() -> HTMLResponse:
    """Get resource stats from Glances for all hosts."""
    config = get_config()

    if not config.glances_stack:
        return HTMLResponse("")  # Glances not configured

    stats = await fetch_all_host_stats(config)

    templates = get_templates()
    template = templates.env.get_template("partials/glances.html")
    html = template.render(stats=stats)
    return HTMLResponse(html)

367
src/compose_farm/web/routes/containers.py
Normal file
@@ -0,0 +1,367 @@
"""Container dashboard routes using Glances API."""

from __future__ import annotations

import html
import re
from typing import TYPE_CHECKING
from urllib.parse import quote

from fastapi import APIRouter, Request
from fastapi.responses import HTMLResponse, JSONResponse

from compose_farm.executor import TTLCache
from compose_farm.glances import ContainerStats, fetch_all_container_stats, format_bytes
from compose_farm.registry import DOCKER_HUB_ALIASES, ImageRef
from compose_farm.web.deps import get_config, get_templates

router = APIRouter(tags=["containers"])

if TYPE_CHECKING:
    from compose_farm.registry import TagCheckResult

# Cache registry update checks for 5 minutes (300 seconds)
# Registry calls are slow and often rate-limited
_update_check_cache = TTLCache(ttl_seconds=300.0)

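Cache-interface sketch, inferred from the call sites in this file (an
assumption about compose_farm.executor.TTLCache, not its documented API):

    _update_check_cache.set("nginx:1.25", "<span>...</span>")         # default TTL (300 s)
    _update_check_cache.set("bad:tag", _DASH_HTML, ttl_seconds=60.0)  # shorter TTL for errors
    _update_check_cache.get("nginx:1.25")  # cached value, or None once expired
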
# Minimum parts needed to infer stack/service from container name
MIN_NAME_PARTS = 2

# HTML for "no update info" dash
_DASH_HTML = '<span class="text-xs opacity-50">-</span>'


def _parse_image(image: str) -> tuple[str, str]:
    """Parse image string into (name, tag)."""
    # Handle registry prefix (e.g., ghcr.io/user/repo:tag)
    if ":" in image:
        # Split on the last colon; it is only a tag separator if no slash follows
        parts = image.rsplit(":", 1)
        if "/" in parts[-1]:
            # The "tag" contains a slash, so the colon was a registry port, not a tag
            return image, "latest"
        return parts[0], parts[1]
    return image, "latest"


def _infer_stack_service(name: str) -> tuple[str, str]:
    """Fallback: infer stack and service from container name.

    Used when compose labels are not available.
    Docker Compose naming conventions:
    - Default: {project}_{service}_{instance} or {project}-{service}-{instance}
    - Custom: {container_name} from compose file
    """
    # Try underscore separator first (older compose)
    if "_" in name:
        parts = name.split("_")
        if len(parts) >= MIN_NAME_PARTS:
            return parts[0], parts[1]
    # Try hyphen separator (newer compose)
    if "-" in name:
        parts = name.split("-")
        if len(parts) >= MIN_NAME_PARTS:
            return parts[0], "-".join(parts[1:-1]) if len(parts) > MIN_NAME_PARTS else parts[1]
    # Fallback: use name as both stack and service
    return name, name

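Behavior sketch for the two helpers above (illustrative):

    _parse_image("ghcr.io/user/repo:1.2")     # ("ghcr.io/user/repo", "1.2")
    _parse_image("registry.local:5000/app")   # ("registry.local:5000/app", "latest")
    _infer_stack_service("plex_web_1")        # ("plex", "web")
    _infer_stack_service("plex-web-1")        # ("plex", "web")
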
@router.get("/live-stats", response_class=HTMLResponse)
|
||||
async def containers_page(request: Request) -> HTMLResponse:
|
||||
"""Container dashboard page."""
|
||||
config = get_config()
|
||||
templates = get_templates()
|
||||
|
||||
# Check if Glances is configured
|
||||
glances_enabled = config.glances_stack is not None
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"containers.html",
|
||||
{
|
||||
"request": request,
|
||||
"glances_enabled": glances_enabled,
|
||||
"hosts": sorted(config.hosts.keys()) if glances_enabled else [],
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
_STATUS_CLASSES = {
|
||||
"running": "badge badge-success badge-sm",
|
||||
"exited": "badge badge-error badge-sm",
|
||||
"paused": "badge badge-warning badge-sm",
|
||||
}
|
||||
|
||||
|
||||
def _status_class(status: str) -> str:
|
||||
"""Get CSS class for status badge."""
|
||||
return _STATUS_CLASSES.get(status.lower(), "badge badge-ghost badge-sm")
|
||||
|
||||
|
||||
def _progress_class(percent: float) -> str:
|
||||
"""Get CSS class for progress bar color."""
|
||||
if percent > 80: # noqa: PLR2004
|
||||
return "bg-error"
|
||||
if percent > 50: # noqa: PLR2004
|
||||
return "bg-warning"
|
||||
return "bg-success"
|
||||
|
||||
|
||||
def _render_update_cell(image: str, tag: str) -> str:
|
||||
"""Render update check cell with client-side batch updates."""
|
||||
encoded_image = quote(image, safe="")
|
||||
encoded_tag = quote(tag, safe="")
|
||||
cached_html = _update_check_cache.get(f"{image}:{tag}")
|
||||
inner = cached_html if cached_html is not None else _DASH_HTML
|
||||
return (
|
||||
f"""<td class="update-cell" data-image="{encoded_image}" data-tag="{encoded_tag}">"""
|
||||
f"{inner}</td>"
|
||||
)
|
||||
|
||||
|
||||
def _image_web_url(image: str) -> str | None:
|
||||
"""Return a human-friendly registry URL for an image (without tag)."""
|
||||
ref = ImageRef.parse(image)
|
||||
if ref.registry in DOCKER_HUB_ALIASES:
|
||||
if ref.namespace == "library":
|
||||
return f"https://hub.docker.com/_/{ref.name}"
|
||||
return f"https://hub.docker.com/r/{ref.namespace}/{ref.name}"
|
||||
return f"https://{ref.registry}/{ref.full_name}"
|
||||
|
||||
|
||||
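URL-mapping sketch for _image_web_url (illustrative; assumes IMAGE_PATTERN
parses these forms the way the ImageRef section above suggests):

    _image_web_url("nginx")              # "https://hub.docker.com/_/nginx"
    _image_web_url("linuxserver/plex")   # "https://hub.docker.com/r/linuxserver/plex"
    _image_web_url("ghcr.io/user/repo")  # "https://ghcr.io/user/repo"
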
def _render_row(c: ContainerStats, idx: int | str) -> str:
    """Render a single container as an HTML table row."""
    image_name, tag = _parse_image(c.image)
    stack = c.stack if c.stack else _infer_stack_service(c.name)[0]
    service = c.service if c.service else _infer_stack_service(c.name)[1]

    cpu = c.cpu_percent
    mem = c.memory_percent
    cpu_class = _progress_class(cpu)
    mem_class = _progress_class(mem)

    # Highlight rows with high resource usage
    high_cpu = cpu > 80  # noqa: PLR2004
    high_mem = mem > 90  # noqa: PLR2004
    row_class = "high-usage" if (high_cpu or high_mem) else ""

    uptime_sec = _parse_uptime_seconds(c.uptime)
    actions = _render_actions(stack)
    update_cell = _render_update_cell(image_name, tag)
    image_label = f"{image_name}:{tag}"
    image_url = _image_web_url(image_name)
    if image_url:
        image_html = (
            f'<a href="{image_url}" target="_blank" rel="noopener noreferrer" '
            f'class="link link-hover">'
            f'<code class="text-xs bg-base-200 px-1 rounded">{image_label}</code></a>'
        )
    else:
        image_html = f'<code class="text-xs bg-base-200 px-1 rounded">{image_label}</code>'
    # Render as single line to avoid whitespace nodes in DOM
    row_id = f"c-{c.host}-{c.name}"
    class_attr = f' class="{row_class}"' if row_class else ""
    return (
        f'<tr id="{row_id}" data-host="{c.host}"{class_attr}><td class="text-xs opacity-50">{idx}</td>'
        f'<td data-sort="{stack.lower()}"><a href="/stack/{stack}" class="link link-hover link-primary" hx-boost="true">{stack}</a></td>'
        f'<td data-sort="{service.lower()}" class="text-xs opacity-70">{service}</td>'
        f"<td>{actions}</td>"
        f'<td data-sort="{c.host.lower()}"><span class="badge badge-outline badge-xs">{c.host}</span></td>'
        f'<td data-sort="{c.image.lower()}">{image_html}</td>'
        f"{update_cell}"
        f'<td data-sort="{c.status.lower()}"><span class="{_status_class(c.status)}">{c.status}</span></td>'
        f'<td data-sort="{uptime_sec}" class="text-xs text-right font-mono">{c.uptime or "-"}</td>'
        f'<td data-sort="{cpu}" class="text-right font-mono"><div class="flex flex-col items-end gap-0.5"><div class="w-12 h-2 bg-base-300 rounded-full overflow-hidden"><div class="h-full {cpu_class}" style="width: {min(cpu, 100)}%"></div></div><span class="text-xs">{cpu:.0f}%</span></div></td>'
        f'<td data-sort="{c.memory_usage}" class="text-right font-mono"><div class="flex flex-col items-end gap-0.5"><div class="w-12 h-2 bg-base-300 rounded-full overflow-hidden"><div class="h-full {mem_class}" style="width: {min(mem, 100)}%"></div></div><span class="text-xs">{format_bytes(c.memory_usage)}</span></div></td>'
        f'<td data-sort="{c.network_rx + c.network_tx}" class="text-xs text-right font-mono">↓{format_bytes(c.network_rx)} ↑{format_bytes(c.network_tx)}</td>'
        "</tr>"
    )


def _render_actions(stack: str) -> str:
    """Render actions dropdown for a container row."""
    return f"""<button class="btn btn-circle btn-ghost btn-xs" onclick="openActionMenu(event, '{stack}')" aria-label="Actions for {stack}">
    <svg class="h-4 w-4"><use href="#icon-menu" /></svg>
</button>"""


def _parse_uptime_seconds(uptime: str) -> int:
    """Parse uptime string to seconds for sorting."""
    if not uptime:
        return 0
    uptime = uptime.lower().strip()
    # Handle "a/an" as 1
    uptime = uptime.replace("an ", "1 ").replace("a ", "1 ")

    total = 0
    multipliers = {
        "second": 1,
        "minute": 60,
        "hour": 3600,
        "day": 86400,
        "week": 604800,
        "month": 2592000,
        "year": 31536000,
    }
    for match in re.finditer(r"(\d+)\s*(\w+)", uptime):
        num = int(match.group(1))
        unit = match.group(2).rstrip("s")  # Remove plural 's'
        total += num * multipliers.get(unit, 0)
    return total

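Uptime-parsing sketch (illustrative):

    _parse_uptime_seconds("an hour")          # 3600
    _parse_uptime_seconds("2 days, 3 hours")  # 183600 (2*86400 + 3*3600)
    _parse_uptime_seconds("unknown")          # 0 -- unmatched units contribute nothing
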
@router.get("/api/containers/rows", response_class=HTMLResponse)
|
||||
async def get_containers_rows() -> HTMLResponse:
|
||||
"""Get container table rows as HTML for HTMX.
|
||||
|
||||
Each cell has data-sort attribute for instant client-side sorting.
|
||||
"""
|
||||
config = get_config()
|
||||
|
||||
if not config.glances_stack:
|
||||
return HTMLResponse(
|
||||
'<tr><td colspan="12" class="text-center text-error">Glances not configured</td></tr>'
|
||||
)
|
||||
|
||||
containers = await fetch_all_container_stats(config)
|
||||
|
||||
if not containers:
|
||||
return HTMLResponse(
|
||||
'<tr><td colspan="12" class="text-center py-4 opacity-60">No containers found</td></tr>'
|
||||
)
|
||||
|
||||
rows = "\n".join(_render_row(c, i + 1) for i, c in enumerate(containers))
|
||||
return HTMLResponse(rows)
|
||||
|
||||
|
||||
@router.get("/api/containers/rows/{host_name}", response_class=HTMLResponse)
|
||||
async def get_containers_rows_by_host(host_name: str) -> HTMLResponse:
|
||||
"""Get container rows for a specific host.
|
||||
|
||||
Returns immediately with Glances data. Stack/service are inferred from
|
||||
container names for instant display (no SSH wait).
|
||||
"""
|
||||
import logging # noqa: PLC0415
|
||||
import time # noqa: PLC0415
|
||||
|
||||
from compose_farm.executor import get_container_compose_labels # noqa: PLC0415
|
||||
from compose_farm.glances import _get_glances_address, fetch_container_stats # noqa: PLC0415
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
config = get_config()
|
||||
|
||||
if host_name not in config.hosts:
|
||||
return HTMLResponse("")
|
||||
|
||||
host = config.hosts[host_name]
|
||||
local_host = config.get_local_host_from_web_stack()
|
||||
glances_address = _get_glances_address(host_name, host, config.glances_stack, local_host)
|
||||
|
||||
t0 = time.monotonic()
|
||||
containers, error = await fetch_container_stats(host_name, glances_address)
|
||||
t1 = time.monotonic()
|
||||
fetch_ms = (t1 - t0) * 1000
|
||||
|
||||
if containers is None:
|
||||
logger.error(
|
||||
"Failed to fetch stats for %s in %.1fms: %s",
|
||||
host_name,
|
||||
fetch_ms,
|
||||
error,
|
||||
)
|
||||
return HTMLResponse(
|
||||
f'<tr id="error-{host_name}" class="text-error" data-host="{host_name}">'
|
||||
f'<td colspan="12" class="text-center py-2">{host_name}: {error}</td></tr>'
|
||||
)
|
||||
|
||||
if not containers:
|
||||
return HTMLResponse("") # No rows for this host
|
||||
|
||||
labels = await get_container_compose_labels(config, host_name)
|
||||
for c in containers:
|
||||
stack, service = labels.get(c.name, ("", ""))
|
||||
if not stack or not service:
|
||||
stack, service = _infer_stack_service(c.name)
|
||||
c.stack, c.service = stack, service
|
||||
|
||||
# Only show containers from stacks in config (filters out orphaned/unknown stacks)
|
||||
containers = [c for c in containers if not c.stack or c.stack in config.stacks]
|
||||
|
||||
# Use placeholder index (will be renumbered by JS after all hosts load)
|
||||
rows = "\n".join(_render_row(c, "-") for c in containers)
|
||||
t2 = time.monotonic()
|
||||
render_ms = (t2 - t1) * 1000
|
||||
|
||||
logger.info(
|
||||
"Loaded %d rows for %s in %.1fms (fetch) + %.1fms (render)",
|
||||
len(containers),
|
||||
host_name,
|
||||
fetch_ms,
|
||||
render_ms,
|
||||
)
|
||||
return HTMLResponse(rows)
|
||||
|
||||
|
||||
def _render_update_badge(result: TagCheckResult) -> str:
|
||||
if result.error:
|
||||
return _DASH_HTML
|
||||
if result.available_updates:
|
||||
updates = result.available_updates
|
||||
count = len(updates)
|
||||
title = f"Newer: {', '.join(updates[:3])}" + ("..." if count > 3 else "") # noqa: PLR2004
|
||||
tip = html.escape(title, quote=True)
|
||||
return (
|
||||
f'<span class="tooltip" data-tip="{tip}">'
|
||||
f'<span class="badge badge-warning badge-xs cursor-help">{count} new</span>'
|
||||
"</span>"
|
||||
)
|
||||
return '<span class="tooltip" data-tip="Up to date"><span class="text-success text-xs">✓</span></span>'
|
||||
|
||||
|
||||
@router.post("/api/containers/check-updates", response_class=JSONResponse)
|
||||
async def check_container_updates_batch(request: Request) -> JSONResponse:
|
||||
"""Batch update checks for a list of images.
|
||||
|
||||
Payload: {"items": [{"image": "...", "tag": "..."}, ...]}
|
||||
Returns: {"results": [{"image": "...", "tag": "...", "html": "..."}, ...]}
|
||||
"""
|
||||
import httpx # noqa: PLC0415
|
||||
|
||||
payload = await request.json()
|
||||
items = payload.get("items", []) if isinstance(payload, dict) else []
|
||||
if not items:
|
||||
return JSONResponse({"results": []})
|
||||
|
||||
results = []
|
||||
|
||||
from compose_farm.registry import check_image_updates # noqa: PLC0415
|
||||
|
||||
async with httpx.AsyncClient(timeout=10.0) as client:
|
||||
for item in items:
|
||||
image = item.get("image", "")
|
||||
tag = item.get("tag", "")
|
||||
full_image = f"{image}:{tag}"
|
||||
if not image or not tag:
|
||||
results.append({"image": image, "tag": tag, "html": _DASH_HTML})
|
||||
continue
|
||||
|
||||
# NOTE: Tag-based checks cannot detect digest changes for moving tags
|
||||
# like "latest". A future improvement could compare remote vs local
|
||||
# digests using dockerfarm-log.toml (from `cf refresh`) or a per-host
|
||||
# digest lookup.
|
||||
|
||||
cached_html: str | None = _update_check_cache.get(full_image)
|
||||
if cached_html is not None:
|
||||
results.append({"image": image, "tag": tag, "html": cached_html})
|
||||
continue
|
||||
|
||||
try:
|
||||
result = await check_image_updates(full_image, client)
|
||||
html = _render_update_badge(result)
|
||||
_update_check_cache.set(full_image, html)
|
||||
except Exception:
|
||||
_update_check_cache.set(full_image, _DASH_HTML, ttl_seconds=60.0)
|
||||
html = _DASH_HTML
|
||||
|
||||
results.append({"image": image, "tag": tag, "html": html})
|
||||
|
||||
return JSONResponse({"results": results})
|
||||
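Client-side contract sketch for the batch endpoint (illustrative; the base URL
is a hypothetical dev address, and the payload mirrors the docstring):

    import httpx

    resp = httpx.post(
        "http://localhost:8000/api/containers/check-updates",
        json={"items": [{"image": "nginx", "tag": "1.25"}]},
    )
    print(resp.json())
    # {"results": [{"image": "nginx", "tag": "1.25", "html": "..."}]}
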
@@ -7,7 +7,7 @@ from fastapi import APIRouter, Request
from fastapi.responses import HTMLResponse
from pydantic import ValidationError

from compose_farm.compose import get_container_name
from compose_farm.compose import extract_services, get_container_name, parse_compose_data
from compose_farm.paths import find_config_path
from compose_farm.state import (
    get_orphaned_stacks,
@@ -166,9 +166,9 @@ async def stack_detail(request: Request, name: str) -> HTMLResponse:
    containers: dict[str, dict[str, str]] = {}
    shell_host = current_host[0] if isinstance(current_host, list) else current_host
    if compose_content:
        compose_data = yaml.safe_load(compose_content) or {}
        raw_services = compose_data.get("services", {})
        if isinstance(raw_services, dict):
        compose_data = parse_compose_data(compose_content)
        raw_services = extract_services(compose_data)
        if raw_services:
            services = list(raw_services.keys())
            # Build container info for shell access (only if stack is running)
            if shell_host:

@@ -9,7 +9,6 @@
|
||||
// ANSI escape codes for terminal output
|
||||
const ANSI = {
|
||||
RED: '\x1b[31m',
|
||||
GREEN: '\x1b[32m',
|
||||
DIM: '\x1b[2m',
|
||||
RESET: '\x1b[0m',
|
||||
CRLF: '\r\n'
|
||||
@@ -122,7 +121,6 @@ function whenXtermReady(callback, maxAttempts = 20) {
|
||||
};
|
||||
tryInit(maxAttempts);
|
||||
}
|
||||
window.whenXtermReady = whenXtermReady;
|
||||
|
||||
// ============================================================================
|
||||
// TERMINAL
|
||||
@@ -196,6 +194,7 @@ function initTerminal(elementId, taskId) {
|
||||
term.write(event.data);
|
||||
if (event.data.includes('[Done]') || event.data.includes('[Failed]')) {
|
||||
localStorage.removeItem(taskKey);
|
||||
refreshDashboard();
|
||||
}
|
||||
};
|
||||
ws.onclose = () => setTerminalLoading(false);
|
||||
@@ -209,8 +208,6 @@ function initTerminal(elementId, taskId) {
|
||||
return { term, ws };
|
||||
}
|
||||
|
||||
window.initTerminal = initTerminal;
|
||||
|
||||
/**
|
||||
* Initialize an interactive exec terminal
|
||||
*/
|
||||
@@ -336,10 +333,14 @@ function loadMonaco(callback) {
|
||||
monacoLoading = true;
|
||||
|
||||
// Load the Monaco loader script
|
||||
// Use local paths when running from vendored wheel, CDN otherwise
|
||||
const monacoBase = window.CF_VENDORED
|
||||
? '/static/vendor/monaco'
|
||||
: 'https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs';
|
||||
const script = document.createElement('script');
|
||||
script.src = 'https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/loader.js';
|
||||
script.src = monacoBase + '/loader.js';
|
||||
script.onload = function() {
|
||||
require.config({ paths: { vs: 'https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs' }});
|
||||
require.config({ paths: { vs: monacoBase }});
|
||||
require(['vs/editor/editor.main'], function() {
|
||||
monacoLoaded = true;
|
||||
monacoLoading = false;
|
||||
@@ -432,7 +433,7 @@ function initMonacoEditors() {
|
||||
* Save all editors
|
||||
*/
|
||||
async function saveAllEditors() {
|
||||
const saveBtn = document.getElementById('save-btn') || document.getElementById('save-config-btn');
|
||||
const saveBtn = getSaveButton();
|
||||
const results = [];
|
||||
|
||||
for (const [id, editor] of Object.entries(editors)) {
|
||||
@@ -468,12 +469,16 @@ async function saveAllEditors() {
|
||||
* Initialize save button handler
|
||||
*/
|
||||
function initSaveButton() {
|
||||
const saveBtn = document.getElementById('save-btn') || document.getElementById('save-config-btn');
|
||||
const saveBtn = getSaveButton();
|
||||
if (!saveBtn) return;
|
||||
|
||||
saveBtn.onclick = saveAllEditors;
|
||||
}
|
||||
|
||||
function getSaveButton() {
|
||||
return document.getElementById('save-btn') || document.getElementById('save-config-btn');
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// UI HELPERS
|
||||
// ============================================================================
|
||||
@@ -490,7 +495,9 @@ function refreshDashboard() {
|
||||
* Filter sidebar stacks by name and host
|
||||
*/
|
||||
function sidebarFilter() {
|
||||
const q = (document.getElementById('sidebar-filter')?.value || '').toLowerCase();
|
||||
const input = document.getElementById('sidebar-filter');
|
||||
const clearBtn = document.getElementById('sidebar-filter-clear');
|
||||
const q = (input?.value || '').toLowerCase();
|
||||
const h = document.getElementById('sidebar-host-select')?.value || '';
|
||||
let n = 0;
|
||||
document.querySelectorAll('#sidebar-stacks li').forEach(li => {
|
||||
@@ -499,9 +506,26 @@ function sidebarFilter() {
|
||||
if (show) n++;
|
||||
});
|
||||
document.getElementById('sidebar-count').textContent = '(' + n + ')';
|
||||
// Show/hide clear button based on input value
|
||||
if (clearBtn) {
|
||||
clearBtn.classList.toggle('hidden', !q);
|
||||
}
|
||||
}
|
||||
window.sidebarFilter = sidebarFilter;
|
||||
|
||||
/**
|
||||
* Clear sidebar filter input and refresh list
|
||||
*/
|
||||
function clearSidebarFilter() {
|
||||
const input = document.getElementById('sidebar-filter');
|
||||
if (input) {
|
||||
input.value = '';
|
||||
input.focus();
|
||||
}
|
||||
sidebarFilter();
|
||||
}
|
||||
window.clearSidebarFilter = clearSidebarFilter;
|
||||
|
||||
// Play intro animation on command palette button
|
||||
function playFabIntro() {
|
||||
const fab = document.getElementById('cmd-fab');
|
||||
@@ -547,7 +571,6 @@ function playFabIntro() {
|
||||
let commands = [];
|
||||
let filtered = [];
|
||||
let selected = 0;
|
||||
let originalTheme = null; // Store theme when palette opens for preview/restore
|
||||
|
||||
const post = (url) => () => htmx.ajax('POST', url, {swap: 'none'});
|
||||
const nav = (url, afterNav) => () => {
|
||||
@@ -571,20 +594,21 @@ function playFabIntro() {
|
||||
}
|
||||
htmx.ajax('POST', `/api/${endpoint}`, {swap: 'none'});
|
||||
};
|
||||
// Get saved theme from localStorage (source of truth)
|
||||
const getSavedTheme = () => localStorage.getItem(THEME_KEY) || 'dark';
|
||||
|
||||
// Apply theme and save to localStorage
|
||||
const setTheme = (theme) => () => {
|
||||
document.documentElement.setAttribute('data-theme', theme);
|
||||
localStorage.setItem(THEME_KEY, theme);
|
||||
};
|
||||
// Preview theme without saving (for hover)
|
||||
// Preview theme without saving (for hover). Guards against undefined/invalid themes.
|
||||
const previewTheme = (theme) => {
|
||||
document.documentElement.setAttribute('data-theme', theme);
|
||||
if (theme) document.documentElement.setAttribute('data-theme', theme);
|
||||
};
|
||||
// Restore original theme (when closing without selection)
|
||||
// Restore theme from localStorage (source of truth)
|
||||
const restoreTheme = () => {
|
||||
if (originalTheme) {
|
||||
document.documentElement.setAttribute('data-theme', originalTheme);
|
||||
}
|
||||
document.documentElement.setAttribute('data-theme', getSavedTheme());
|
||||
};
|
||||
// Generate color swatch HTML for a theme
|
||||
const themeSwatch = (theme) => `<span class="flex gap-0.5" data-theme="${theme}"><span class="w-2 h-4 rounded-l bg-primary"></span><span class="w-2 h-4 bg-secondary"></span><span class="w-2 h-4 bg-accent"></span><span class="w-2 h-4 rounded-r bg-neutral"></span></span>`;
|
||||
@@ -604,13 +628,14 @@ function playFabIntro() {
|
||||
cmd('action', 'Apply', 'Make reality match config', dashboardAction('apply'), icons.check),
|
||||
cmd('action', 'Refresh', 'Update state from reality', dashboardAction('refresh'), icons.refresh_cw),
|
||||
cmd('action', 'Pull All', 'Pull latest images for all stacks', dashboardAction('pull-all'), icons.cloud_download),
|
||||
cmd('action', 'Update All', 'Update all stacks', dashboardAction('update-all'), icons.refresh_cw),
|
||||
cmd('action', 'Update All', 'Update all stacks except web', dashboardAction('update-all'), icons.refresh_cw),
|
||||
cmd('app', 'Theme', 'Change color theme', openThemePicker, icons.palette),
|
||||
cmd('app', 'Dashboard', 'Go to dashboard', nav('/'), icons.home),
|
||||
cmd('app', 'Live Stats', 'View all containers across hosts', nav('/live-stats'), icons.box),
|
||||
cmd('app', 'Console', 'Go to console', nav('/console'), icons.terminal),
|
||||
cmd('app', 'Edit Config', 'Edit compose-farm.yaml', nav('/console#editor'), icons.file_code),
|
||||
cmd('app', 'Docs', 'Open documentation', openExternal('https://compose-farm.nijho.lt/'), icons.book_open),
|
||||
cmd('app', 'Repo', 'Open GitHub repository', openExternal('https://github.com/basnijholt/compose-farm'), icons.external_link),
|
||||
cmd('app', 'GitHub Repo', 'Open GitHub repository', openExternal('https://github.com/basnijholt/compose-farm'), icons.external_link),
|
||||
];
|
||||
|
||||
// Add stack-specific actions if on a stack page
|
||||
@@ -623,7 +648,7 @@ function playFabIntro() {
|
||||
stackCmd('Down', 'Stop', 'down', icons.square),
|
||||
stackCmd('Restart', 'Restart', 'restart', icons.rotate_cw),
|
||||
stackCmd('Pull', 'Pull', 'pull', icons.cloud_download),
|
||||
stackCmd('Update', 'Pull + restart', 'update', icons.refresh_cw),
|
||||
stackCmd('Update', 'Pull + recreate', 'update', icons.refresh_cw),
|
||||
stackCmd('Logs', 'View logs for', 'logs', icons.file_text),
|
||||
);
|
||||
|
||||
@@ -716,26 +741,24 @@ function playFabIntro() {
|
||||
// Scroll selected item into view
|
||||
const sel = list.querySelector(`[data-idx="${selected}"]`);
|
||||
if (sel) sel.scrollIntoView({ block: 'nearest' });
|
||||
// Preview theme if selected item is a theme command
|
||||
// Preview theme if selected item is a theme command, otherwise restore saved
|
||||
const selectedCmd = filtered[selected];
|
||||
if (selectedCmd?.themeId) {
|
||||
previewTheme(selectedCmd.themeId);
|
||||
} else if (originalTheme) {
|
||||
// Restore original when navigating away from theme commands
|
||||
previewTheme(originalTheme);
|
||||
} else {
|
||||
restoreTheme();
|
||||
}
|
||||
}
|
||||
|
||||
function open(initialFilter = '') {
|
||||
// Store original theme for preview/restore
|
||||
originalTheme = document.documentElement.getAttribute('data-theme') || 'dark';
|
||||
buildCommands();
|
||||
selected = 0;
|
||||
input.value = initialFilter;
|
||||
filter();
|
||||
// If opening theme picker, select current theme
|
||||
if (initialFilter.startsWith('theme:')) {
|
||||
const currentIdx = filtered.findIndex(c => c.themeId === originalTheme);
|
||||
const savedTheme = getSavedTheme();
|
||||
const currentIdx = filtered.findIndex(c => c.themeId === savedTheme);
|
||||
if (currentIdx >= 0) selected = currentIdx;
|
||||
}
|
||||
render();
|
||||
@@ -743,18 +766,9 @@ function playFabIntro() {
|
||||
input.focus();
|
||||
}
|
||||
|
||||
function close() {
|
||||
dialog.close();
|
||||
restoreTheme();
|
||||
}
|
||||
|
||||
function exec() {
|
||||
const cmd = filtered[selected];
|
||||
if (cmd) {
|
||||
if (cmd.themeId) {
|
||||
// Theme command commits the previewed choice.
|
||||
originalTheme = null;
|
||||
}
|
||||
dialog.close();
|
||||
cmd.action();
|
||||
}
|
||||
@@ -794,19 +808,14 @@ function playFabIntro() {
|
||||
if (a) previewTheme(a.dataset.themeId);
|
||||
});
|
||||
|
||||
// Mouse leaving list restores to selected item's theme (or original)
|
||||
// Mouse leaving list restores to selected item's theme (or saved)
|
||||
list.addEventListener('mouseleave', () => {
|
||||
const cmd = filtered[selected];
|
||||
previewTheme(cmd?.themeId || originalTheme);
|
||||
previewTheme(cmd?.themeId || getSavedTheme());
|
||||
});
|
||||
|
||||
// Restore theme when dialog closes without selection (Escape, backdrop click)
|
||||
dialog.addEventListener('close', () => {
|
||||
if (originalTheme) {
|
||||
restoreTheme();
|
||||
originalTheme = null;
|
||||
}
|
||||
});
|
||||
// Restore theme from localStorage when dialog closes
|
||||
dialog.addEventListener('close', restoreTheme);
|
||||
|
||||
// FAB click to open
|
||||
if (fab) fab.addEventListener('click', () => open());
|
||||
@@ -869,6 +878,119 @@ function initPage() {
|
||||
initMonacoEditors();
|
||||
initSaveButton();
|
||||
updateShortcutKeys();
|
||||
initLiveStats();
|
||||
initSharedActionMenu();
|
||||
maybeRunStackAction();
|
||||
}
|
||||
|
||||
function navigateToStack(stack, action = null) {
|
||||
const url = action ? `/stack/${stack}?action=${action}` : `/stack/${stack}`;
|
||||
window.location.href = url;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize shared action menu for container rows
|
||||
*/
|
||||
function initSharedActionMenu() {
|
||||
const menuEl = document.getElementById('shared-action-menu');
|
||||
if (!menuEl) return;
|
||||
if (menuEl.dataset.bound === '1') return;
|
||||
menuEl.dataset.bound = '1';
|
||||
|
||||
let hoverTimeout = null;
|
||||
|
||||
function showMenuForButton(btn, stack) {
|
||||
menuEl.dataset.stack = stack;
|
||||
|
||||
// Position menu relative to button
|
||||
const rect = btn.getBoundingClientRect();
|
||||
menuEl.classList.remove('hidden');
|
||||
menuEl.style.visibility = 'hidden';
|
||||
const menuRect = menuEl.getBoundingClientRect();
|
||||
|
||||
const left = rect.right - menuRect.width + window.scrollX;
|
||||
const top = rect.bottom + window.scrollY;
|
||||
|
||||
menuEl.style.top = `${top}px`;
|
||||
menuEl.style.left = `${left}px`;
|
||||
menuEl.style.visibility = '';
|
||||
|
||||
if (typeof liveStats !== 'undefined') liveStats.dropdownOpen = true;
|
||||
}
|
||||
|
||||
function closeMenu() {
|
||||
menuEl.classList.add('hidden');
|
||||
if (typeof liveStats !== 'undefined') liveStats.dropdownOpen = false;
|
||||
menuEl.dataset.stack = '';
|
||||
}
|
||||
|
||||
function scheduleClose() {
|
||||
if (hoverTimeout) clearTimeout(hoverTimeout);
|
||||
hoverTimeout = setTimeout(closeMenu, 100);
|
||||
}
|
||||
|
||||
function cancelClose() {
|
||||
if (hoverTimeout) {
|
||||
clearTimeout(hoverTimeout);
|
||||
hoverTimeout = null;
|
||||
}
|
||||
}
|
||||
|
||||
// Button hover: show menu (event delegation on tbody)
|
||||
const tbody = document.getElementById('container-rows');
|
||||
if (tbody) {
|
||||
tbody.addEventListener('mouseenter', (e) => {
|
||||
const btn = e.target.closest('button[onclick^="openActionMenu"]');
|
||||
if (!btn) return;
|
||||
|
||||
// Extract stack from onclick attribute
|
||||
const match = btn.getAttribute('onclick')?.match(/openActionMenu\(event,\s*'([^']+)'\)/);
|
||||
if (!match) return;
|
||||
|
||||
cancelClose();
|
||||
showMenuForButton(btn, match[1]);
|
||||
}, true);
|
||||
|
||||
tbody.addEventListener('mouseleave', (e) => {
|
||||
const btn = e.target.closest('button[onclick^="openActionMenu"]');
|
||||
if (btn) scheduleClose();
|
||||
}, true);
|
||||
}
|
||||
|
||||
// Keep menu open while hovering over it
|
||||
menuEl.addEventListener('mouseenter', cancelClose);
|
||||
menuEl.addEventListener('mouseleave', scheduleClose);
|
||||
|
||||
// Click action in menu
|
||||
menuEl.addEventListener('click', (e) => {
|
||||
const link = e.target.closest('a[data-action]');
|
||||
const stack = menuEl.dataset.stack;
|
||||
if (!link || !stack) return;
|
||||
|
||||
e.preventDefault();
|
||||
navigateToStack(stack, link.dataset.action);
|
||||
closeMenu();
|
||||
});
|
||||
|
||||
// Also support click on button (for touch/accessibility)
|
||||
window.openActionMenu = function(event, stack) {
|
||||
event.stopPropagation();
|
||||
showMenuForButton(event.currentTarget, stack);
|
||||
};
|
||||
|
||||
// Close on outside click
|
||||
document.body.addEventListener('click', (e) => {
|
||||
if (!menuEl.classList.contains('hidden') &&
|
||||
!menuEl.contains(e.target) &&
|
||||
!e.target.closest('button[onclick^="openActionMenu"]')) {
|
||||
closeMenu();
|
||||
}
|
||||
});
|
||||
|
||||
// Close on Escape
|
||||
document.body.addEventListener('keydown', (e) => {
|
||||
if (e.key === 'Escape') closeMenu();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -889,6 +1011,30 @@ function tryReconnectToTask(path) {
|
||||
});
|
||||
}
|
||||
|
||||
function maybeRunStackAction() {
|
||||
const params = new URLSearchParams(window.location.search);
|
||||
const stackEl = document.querySelector('[data-stack-name]');
|
||||
const stackName = stackEl?.dataset?.stackName;
|
||||
if (!stackName) return;
|
||||
|
||||
const action = params.get('action');
|
||||
if (!action) return;
|
||||
|
||||
const button = document.querySelector(`button[hx-post="/api/stack/${stackName}/${action}"]`);
|
||||
if (!button) return;
|
||||
|
||||
params.delete('action');
|
||||
const newQuery = params.toString();
|
||||
const newUrl = newQuery ? `${window.location.pathname}?${newQuery}` : window.location.pathname;
|
||||
history.replaceState({}, '', newUrl);
|
||||
|
||||
if (window.htmx) {
|
||||
htmx.trigger(button, 'click');
|
||||
} else {
|
||||
button.click();
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize on page load
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
initPage();
|
||||
@@ -930,3 +1076,443 @@ document.body.addEventListener('htmx:afterRequest', function(evt) {
|
||||
// Not valid JSON, ignore
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// LIVE STATS PAGE
|
||||
// ============================================================================
|
||||
|
||||
// State persists across SPA navigation (intervals must be cleared on re-init)
|
||||
let liveStats = {
|
||||
sortCol: 9,
|
||||
sortAsc: false,
|
||||
lastUpdate: 0,
|
||||
dropdownOpen: false,
|
||||
scrolling: false,
|
||||
scrollTimer: null,
|
||||
loadingHosts: new Set(),
|
||||
eventsBound: false,
|
||||
intervals: [],
|
||||
updateCheckTimes: new Map(),
|
||||
autoRefresh: true
|
||||
};
|
||||
|
||||
const REFRESH_INTERVAL = 5000;
|
||||
const UPDATE_CHECK_TTL = 120000;
|
||||
const NUMERIC_COLS = new Set([8, 9, 10, 11]); // uptime, cpu, mem, net
|
||||
|
||||
function filterTable() {
|
||||
const textFilter = document.getElementById('filter-input')?.value.toLowerCase() || '';
|
||||
const hostFilter = document.getElementById('host-filter')?.value || '';
|
||||
const rows = document.querySelectorAll('#container-rows tr');
|
||||
let visible = 0;
|
||||
let total = 0;
|
||||
|
||||
rows.forEach(row => {
|
||||
// Skip loading/empty/error rows (they have colspan)
|
||||
if (row.cells[0]?.colSpan > 1) return;
|
||||
total++;
|
||||
const matchesText = !textFilter || row.textContent.toLowerCase().includes(textFilter);
|
||||
const matchesHost = !hostFilter || row.dataset.host === hostFilter;
|
||||
const show = matchesText && matchesHost;
|
||||
row.style.display = show ? '' : 'none';
|
||||
if (show) visible++;
|
||||
});
|
||||
|
||||
const countEl = document.getElementById('container-count');
|
||||
if (countEl) {
|
||||
const isFiltering = textFilter || hostFilter;
|
||||
countEl.textContent = total > 0
|
||||
? (isFiltering ? `${visible} of ${total} containers` : `${total} containers`)
|
||||
: '';
|
||||
}
|
||||
}
|
||||
window.filterTable = filterTable;
|
||||
|
||||
function sortTable(col) {
|
||||
if (liveStats.sortCol === col) {
|
||||
liveStats.sortAsc = !liveStats.sortAsc;
|
||||
} else {
|
||||
liveStats.sortCol = col;
|
||||
liveStats.sortAsc = false;
|
||||
}
|
||||
updateSortIndicators();
|
||||
doSort();
|
||||
}
|
||||
window.sortTable = sortTable;
|
||||
|
||||
function updateSortIndicators() {
|
||||
document.querySelectorAll('thead th').forEach((th, i) => {
|
||||
const span = th.querySelector('.sort-indicator');
|
||||
if (span) {
|
||||
span.textContent = (i === liveStats.sortCol) ? (liveStats.sortAsc ? '↑' : '↓') : '';
|
||||
span.style.opacity = (i === liveStats.sortCol) ? '1' : '0.3';
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function doSort() {
|
||||
const tbody = document.getElementById('container-rows');
|
||||
if (!tbody) return;
|
||||
|
||||
const rows = Array.from(tbody.querySelectorAll('tr'));
|
||||
if (rows.length === 0) return;
|
||||
if (rows.length === 1 && rows[0].cells[0]?.colSpan > 1) return; // Empty state row
|
||||
|
||||
const isNumeric = NUMERIC_COLS.has(liveStats.sortCol);
|
||||
rows.sort((a, b) => {
|
||||
// Pin placeholders/empty rows to the bottom
|
||||
const aLoading = a.classList.contains('loading-row') || a.classList.contains('host-empty') || a.cells[0]?.colSpan > 1;
|
||||
const bLoading = b.classList.contains('loading-row') || b.classList.contains('host-empty') || b.cells[0]?.colSpan > 1;
|
||||
if (aLoading && !bLoading) return 1;
|
||||
if (!aLoading && bLoading) return -1;
|
||||
if (aLoading && bLoading) return 0;
|
||||
|
||||
const aVal = a.cells[liveStats.sortCol]?.dataset?.sort ?? '';
|
||||
const bVal = b.cells[liveStats.sortCol]?.dataset?.sort ?? '';
|
||||
const cmp = isNumeric ? aVal - bVal : aVal.localeCompare(bVal);
|
||||
return liveStats.sortAsc ? cmp : -cmp;
|
||||
});
|
||||
|
||||
let index = 1;
|
||||
const fragment = document.createDocumentFragment();
|
||||
rows.forEach((row) => {
|
||||
if (row.cells.length > 1) {
|
||||
row.cells[0].textContent = index++;
|
||||
}
|
||||
fragment.appendChild(row);
|
||||
});
|
||||
tbody.appendChild(fragment);
|
||||
}
|
||||
|
||||
function isLoading() {
|
||||
return liveStats.loadingHosts.size > 0;
|
||||
}
|
||||
|
||||
function getLiveStatsHosts() {
|
||||
const tbody = document.getElementById('container-rows');
|
||||
if (!tbody) return [];
|
||||
const dataHosts = tbody.dataset.hosts || '';
|
||||
return dataHosts.split(',').map(h => h.trim()).filter(Boolean);
|
||||
}
|
||||
|
||||
function buildHostRow(host, message, className) {
|
||||
return (
|
||||
`<tr class="${className}" data-host="${host}">` +
|
||||
`<td colspan="12" class="text-center py-2">` +
|
||||
`<span class="text-sm opacity-60">${message}</span>` +
|
||||
`</td></tr>`
|
||||
);
|
||||
}
|
||||
|
||||
async function checkUpdatesForHost(host) {
|
||||
// Update checks always run - they only update small cells, not disruptive
|
||||
const last = liveStats.updateCheckTimes.get(host) || 0;
|
||||
if (Date.now() - last < UPDATE_CHECK_TTL) return;
|
||||
|
||||
const cells = Array.from(
|
||||
document.querySelectorAll(`tr[data-host="${host}"] td.update-cell[data-image][data-tag]`)
|
||||
);
|
||||
if (cells.length === 0) return;
|
||||
|
||||
const items = [];
|
||||
const seen = new Set();
|
||||
cells.forEach(cell => {
|
||||
const image = decodeURIComponent(cell.dataset.image || '');
|
||||
const tag = decodeURIComponent(cell.dataset.tag || '');
|
||||
const key = `${image}:${tag}`;
|
||||
if (!image || seen.has(key)) return;
|
||||
seen.add(key);
|
||||
items.push({ image, tag });
|
||||
});
|
||||
|
||||
if (items.length === 0) return;
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/containers/check-updates', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ items })
|
||||
});
|
||||
if (!response.ok) return;
|
||||
const data = await response.json();
|
||||
const results = Array.isArray(data?.results) ? data.results : [];
|
||||
const htmlMap = new Map();
|
||||
results.forEach(result => {
|
||||
const key = `${result.image}:${result.tag}`;
|
||||
htmlMap.set(key, result.html);
|
||||
});
|
||||
|
||||
cells.forEach(cell => {
|
||||
const image = decodeURIComponent(cell.dataset.image || '');
|
||||
const tag = decodeURIComponent(cell.dataset.tag || '');
|
||||
const key = `${image}:${tag}`;
|
||||
const html = htmlMap.get(key);
|
||||
if (html && cell.innerHTML !== html) {
|
||||
cell.innerHTML = html;
|
||||
}
|
||||
});
|
||||
|
||||
liveStats.updateCheckTimes.set(host, Date.now());
|
||||
} catch (e) {
|
||||
console.error('Update check failed:', e);
|
||||
}
|
||||
}

function replaceHostRows(host, html) {
  const tbody = document.getElementById('container-rows');
  if (!tbody) return;

  // Remove loading indicator for this host if present
  const loadingRow = tbody.querySelector(`tr.loading-row[data-host="${host}"]`);
  if (loadingRow) loadingRow.remove();

  const template = document.createElement('template');
  template.innerHTML = html.trim();
  let newRows = Array.from(template.content.children).filter(el => el.tagName === 'TR');

  if (newRows.length === 0) {
    // Only show the empty message if we don't have any rows for this host
    const existing = tbody.querySelector(`tr[data-host="${host}"]:not(.loading-row)`);
    if (!existing) {
      template.innerHTML = buildHostRow(host, `No containers on ${host}`, 'host-empty');
      newRows = Array.from(template.content.children);
    }
  }

  // Track which IDs we've seen in this update
  const newIds = new Set();

  newRows.forEach(newRow => {
    const id = newRow.id;
    if (id) newIds.add(id);

    if (id) {
      const existing = document.getElementById(id);
      if (existing) {
        // Morph in place if Idiomorph is available, otherwise replace
        if (typeof Idiomorph !== 'undefined') {
          Idiomorph.morph(existing, newRow);
        } else {
          existing.replaceWith(newRow);
        }

        // Re-process HTMX if needed (though inner content usually carries attributes)
        const morphedRow = document.getElementById(id);
        if (window.htmx) htmx.process(morphedRow);

        // Trigger refresh animation
        if (morphedRow) {
          morphedRow.classList.add('row-updated');
          setTimeout(() => morphedRow.classList.remove('row-updated'), 500);
        }
      } else {
        // New row - append (will be sorted later)
        tbody.appendChild(newRow);
        if (window.htmx) htmx.process(newRow);
        // Animate new rows too
        newRow.classList.add('row-updated');
        setTimeout(() => newRow.classList.remove('row-updated'), 500);
      }
    } else {
      // Fallback for rows without an ID (error/empty messages): append them;
      // stale generic rows are cleaned up below
      tbody.appendChild(newRow);
    }
  });

  // Remove orphaned rows for this host (rows that exist in DOM but not in new response).
  // Be careful not to remove rows that were just added (if they lack IDs).
  const currentHostRows = Array.from(tbody.querySelectorAll(`tr[data-host="${host}"]`));
  currentHostRows.forEach(row => {
    // Data rows carry IDs, so orphan detection relies on ID matching; rows we
    // just appended are already in the DOM and present in newIds, so they survive.
    if (row.id && !newIds.has(row.id)) {
      row.remove();
    }
    // Also remove old empty/error messages (no ID) once we have real data rows
    if (!row.id && newRows.length > 0 && newRows[0].id) {
      row.remove();
    }
  });

  liveStats.loadingHosts.delete(host);
  checkUpdatesForHost(host);
  scheduleRowUpdate();
}

async function loadHostRows(host) {
  liveStats.loadingHosts.add(host);
  try {
    const response = await fetch(`/api/containers/rows/${encodeURIComponent(host)}`);
    const html = response.ok ? await response.text() : '';
    replaceHostRows(host, html);
  } catch (e) {
    console.error(`Failed to load ${host}:`, e);
    const msg = e.message || String(e);
    // Fall back to a simpler error display if replaceHostRows fails (e.g. Idiomorph missing)
    try {
      replaceHostRows(host, buildHostRow(host, `Error: ${msg}`, 'text-error'));
    } catch (err2) {
      // Last resort: find the row and force innerHTML
      const tbody = document.getElementById('container-rows');
      const row = tbody?.querySelector(`tr[data-host="${host}"]`);
      if (row) row.innerHTML = `<td colspan="12" class="text-center text-error">Error: ${msg}</td>`;
    }
  } finally {
    liveStats.loadingHosts.delete(host);
  }
}

function refreshLiveStats() {
  if (liveStats.dropdownOpen || liveStats.scrolling) return;
  const hosts = getLiveStatsHosts();
  if (hosts.length === 0) return;
  liveStats.lastUpdate = Date.now();
  hosts.forEach(loadHostRows);
}
window.refreshLiveStats = refreshLiveStats;

function toggleAutoRefresh() {
  liveStats.autoRefresh = !liveStats.autoRefresh;
  const timer = document.getElementById('refresh-timer');
  if (timer) {
    timer.classList.toggle('btn-error', !liveStats.autoRefresh);
    timer.classList.toggle('btn-outline', liveStats.autoRefresh);
  }
  if (liveStats.autoRefresh) {
    // Re-enabling: trigger an immediate refresh
    refreshLiveStats();
  } else {
    // Disabling: ensure update checks run for current data
    const hosts = getLiveStatsHosts();
    hosts.forEach(host => checkUpdatesForHost(host));
  }
}
window.toggleAutoRefresh = toggleAutoRefresh;

function initLiveStats() {
  if (!document.getElementById('refresh-timer')) return;

  // Clear previous intervals (important for SPA navigation)
  liveStats.intervals.forEach(clearInterval);
  liveStats.intervals = [];
  liveStats.lastUpdate = Date.now();
  liveStats.dropdownOpen = false;
  liveStats.scrolling = false;
  if (liveStats.scrollTimer) clearTimeout(liveStats.scrollTimer);
  liveStats.scrollTimer = null;
  liveStats.loadingHosts.clear();
  liveStats.updateCheckTimes = new Map();
  liveStats.autoRefresh = true;

  if (!liveStats.eventsBound) {
    liveStats.eventsBound = true;

    // An open dropdown pauses refresh
    document.body.addEventListener('click', e => {
      liveStats.dropdownOpen = !!e.target.closest('.dropdown');
    });
    document.body.addEventListener('focusin', e => {
      if (e.target.closest('.dropdown')) liveStats.dropdownOpen = true;
    });
    document.body.addEventListener('focusout', () => {
      setTimeout(() => {
        liveStats.dropdownOpen = !!document.activeElement?.closest('.dropdown');
      }, 150);
    });
    document.body.addEventListener('keydown', e => {
      if (e.key === 'Escape') liveStats.dropdownOpen = false;
    });

    // Pause refresh while scrolling (helps on slow mobile browsers)
    window.addEventListener('scroll', () => {
      liveStats.scrolling = true;
      if (liveStats.scrollTimer) clearTimeout(liveStats.scrollTimer);
      liveStats.scrollTimer = setTimeout(() => {
        liveStats.scrolling = false;
      }, 200);
    }, { passive: true });
  }

  // Auto-refresh every 5 seconds (skip if disabled, loading, or dropdown open)
  liveStats.intervals.push(setInterval(() => {
    if (!liveStats.autoRefresh) return;
    if (liveStats.dropdownOpen || liveStats.scrolling || isLoading()) return;
    refreshLiveStats();
  }, REFRESH_INTERVAL));

  // Timer display (updates every 100ms)
  liveStats.intervals.push(setInterval(() => {
    const timer = document.getElementById('refresh-timer');
    if (!timer) {
      liveStats.intervals.forEach(clearInterval);
      return;
    }

    const loading = isLoading();
    const paused = liveStats.dropdownOpen || liveStats.scrolling;
    const elapsed = Date.now() - liveStats.lastUpdate;
    window.refreshPaused = paused || loading || !liveStats.autoRefresh;

    // Update the refresh timer button
    let text;
    if (!liveStats.autoRefresh) {
      text = 'OFF';
    } else if (paused) {
      text = '❚❚';
    } else {
      const remaining = Math.max(0, REFRESH_INTERVAL - elapsed);
      text = loading ? '↻ …' : `↻ ${Math.ceil(remaining / 1000)}s`;
    }
    if (timer.textContent !== text) {
      timer.textContent = text;
    }

    // Update the "last updated" display
    const lastUpdatedEl = document.getElementById('last-updated');
    if (lastUpdatedEl) {
      const secs = Math.floor(elapsed / 1000);
      const updatedText = secs < 5 ? 'Updated just now' : `Updated ${secs}s ago`;
      if (lastUpdatedEl.textContent !== updatedText) {
        lastUpdatedEl.textContent = updatedText;
      }
    }
  }, 100));

  updateSortIndicators();
  refreshLiveStats();
}

function scheduleRowUpdate() {
  // Sort and filter immediately to prevent flicker
  doSort();
  filterTable();
}

// ============================================================================
// STACKS BY HOST FILTER
// ============================================================================

function sbhFilter() {
  const query = (document.getElementById('sbh-filter')?.value || '').toLowerCase();
  const hostFilter = document.getElementById('sbh-host-select')?.value || '';

  document.querySelectorAll('.sbh-group').forEach(group => {
    if (hostFilter && group.dataset.h !== hostFilter) {
      group.hidden = true;
      return;
    }

    let visibleCount = 0;
    group.querySelectorAll('li[data-s]').forEach(li => {
      const show = !query || li.dataset.s.includes(query);
      li.hidden = !show;
      if (show) visibleCount++;
    });
    group.hidden = visibleCount === 0;
  });
}
window.sbhFilter = sbhFilter;

@@ -13,8 +13,6 @@ from compose_farm.ssh_keys import get_ssh_auth_sock
 if TYPE_CHECKING:
     from compose_farm.config import Config

-# Environment variable to identify the web stack (for self-update detection)
-CF_WEB_STACK = os.environ.get("CF_WEB_STACK", "")

 # ANSI escape codes for terminal output
 RED = "\x1b[31m"
@@ -95,16 +93,17 @@ async def run_cli_streaming(
     tasks[task_id]["completed_at"] = time.time()


-def _is_self_update(stack: str, command: str) -> bool:
+def _is_self_update(config: Config, stack: str, command: str) -> bool:
     """Check if this is a self-update (updating the web stack itself).

     Self-updates need special handling because running 'down' on the container
     we're running in would kill the process before 'up' can execute.
     """
-    if not CF_WEB_STACK or stack != CF_WEB_STACK:
+    web_stack = config.get_web_stack()
+    if not web_stack or stack != web_stack:
         return False
-    # Commands that involve 'down' need SSH: update, restart, down
-    return command in ("update", "restart", "down")
+    # Commands that involve 'down' need SSH: update, down
+    return command in ("update", "down")


 async def _run_cli_via_ssh(
@@ -114,7 +113,8 @@ async def _run_cli_via_ssh(
 ) -> None:
     """Run a cf CLI command via SSH for self-updates (survives container restart)."""
     try:
-        host = config.get_host(CF_WEB_STACK)
+        web_stack = config.get_web_stack()
+        host = config.get_host(web_stack)
         cf_cmd = f"cf {' '.join(args)} --config={config.config_path}"
         # Include task_id to prevent collision with concurrent updates
         log_file = f"/tmp/cf-self-update-{task_id}.log"  # noqa: S108
@@ -170,7 +170,7 @@ async def run_compose_streaming(
     cli_args = [cli_cmd, stack, *extra_args]

     # Use SSH for self-updates to survive container restart
-    if _is_self_update(stack, cli_cmd):
+    if _is_self_update(config, stack, cli_cmd):
         await _run_cli_via_ssh(config, cli_args, task_id)
     else:
         await run_cli_streaming(config, cli_args, task_id)
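
These hunks replace the module-level CF_WEB_STACK constant with a Config method. A minimal sketch of what get_web_stack() returns, consistent with the config tests later in this changeset (the real method on Config may differ in detail):

    import os

    def get_web_stack() -> str:
        # Sketch only: the web container sets CF_WEB_STACK so the server can
        # tell when a command targets the stack it is itself running in.
        return os.environ.get("CF_WEB_STACK", "")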

@@ -26,6 +26,23 @@
     </script>
 </head>
 <body class="min-h-screen bg-base-200">
+    <svg style="display: none">
+        <symbol id="icon-menu" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+            <circle cx="12" cy="5" r="1" /><circle cx="12" cy="12" r="1" /><circle cx="12" cy="19" r="1" />
+        </symbol>
+        <symbol id="icon-restart" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+            <path d="M4 4v5h.582m15.356 2A8.001 8.001 0 004.582 9m0 0H9m11 11v-5h-.581m0 0a8.003 8.003 0 01-15.357-2m15.357 2H15" />
+        </symbol>
+        <symbol id="icon-pull" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+            <path d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-4l-4 4m0 0l-4-4m4 4V4" />
+        </symbol>
+        <symbol id="icon-update" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+            <path d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-8l-4-4m0 0L8 8m4-4v12" />
+        </symbol>
+        <symbol id="icon-logs" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+            <path d="M9 12h6m-6 4h6m2 5H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z" />
+        </symbol>
+    </svg>
     <div class="drawer lg:drawer-open">
         <input id="drawer-toggle" type="checkbox" class="drawer-toggle" />

@@ -80,6 +97,8 @@

     <!-- Scripts - HTMX first -->
     <script src="https://unpkg.com/htmx.org@2.0.4" data-vendor="htmx.js"></script>
+    <script src="https://unpkg.com/idiomorph/dist/idiomorph.min.js" data-vendor="idiomorph.js"></script>
+    <script src="https://unpkg.com/idiomorph/dist/idiomorph-ext.min.js" data-vendor="idiomorph-ext.js"></script>
     <script src="https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/lib/xterm.js" data-vendor="xterm.js"></script>
     <script src="https://cdn.jsdelivr.net/npm/@xterm/addon-fit@0.10.0/lib/addon-fit.js" data-vendor="xterm-fit.js"></script>
     <script src="/static/app.js"></script>

97 src/compose_farm/web/templates/containers.html Normal file
@@ -0,0 +1,97 @@
{% extends "base.html" %}
{% from "partials/components.html" import page_header %}
{% from "partials/icons.html" import refresh_cw %}
{% block title %}Live Stats - Compose Farm{% endblock %}

{% block content %}
<div class="max-w-7xl">
    {{ page_header("Live Stats", "All running containers across hosts") }}

    {% if not glances_enabled %}
    <div class="alert alert-warning mb-6">
        <svg xmlns="http://www.w3.org/2000/svg" class="stroke-current shrink-0 h-6 w-6" fill="none" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-3L13.732 4c-.77-1.333-2.694-1.333-3.464 0L3.34 16c-.77 1.333.192 3 1.732 3z" /></svg>
        <div>
            <h3 class="font-bold">Glances not configured</h3>
            <div class="text-xs">Add <code class="bg-base-300 px-1 rounded">glances_stack: glances</code> to your config and deploy Glances on all hosts.</div>
        </div>
    </div>
    {% else %}

    <!-- Action Bar -->
    <div class="flex flex-wrap items-center gap-4 mb-6">
        <div class="tooltip" data-tip="Refresh now">
            <button class="btn btn-outline btn-sm" type="button" onclick="refreshLiveStats()">
                {{ refresh_cw() }} Refresh
            </button>
        </div>
        <div class="tooltip" data-tip="Click to toggle auto-refresh">
            <button class="btn btn-outline btn-sm font-mono w-20 justify-center"
                    id="refresh-timer" onclick="toggleAutoRefresh()">↻</button>
        </div>
        <input type="text" id="filter-input" placeholder="Filter containers..."
               class="input input-bordered input-sm w-64" onkeyup="filterTable()">
        <select id="host-filter" class="select select-bordered select-sm" onchange="filterTable()">
            <option value="">All hosts</option>
            {% for host in hosts %}<option value="{{ host }}">{{ host }}</option>{% endfor %}
        </select>
        <span id="container-count" class="text-sm text-base-content/60"></span>
        <span id="last-updated" class="text-sm text-base-content/40 ml-auto"></span>
    </div>

    <!-- Container Table -->
    <div class="card bg-base-100 shadow overflow-x-auto">
        <table class="table table-zebra table-sm w-full">
            <thead class="sticky top-0 bg-base-200">
                <tr>
                    <th class="w-8">#</th>
                    <th class="cursor-pointer" onclick="sortTable(1)">Stack<span class="sort-indicator"></span></th>
                    <th class="cursor-pointer" onclick="sortTable(2)">Service<span class="sort-indicator"></span></th>
                    <th></th>
                    <th class="cursor-pointer" onclick="sortTable(4)">Host<span class="sort-indicator"></span></th>
                    <th class="cursor-pointer" onclick="sortTable(5)">Image<span class="sort-indicator"></span></th>
                    <th class="w-16">Update</th>
                    <th class="cursor-pointer" onclick="sortTable(7)">Status<span class="sort-indicator"></span></th>
                    <th class="cursor-pointer text-right" onclick="sortTable(8)">Uptime<span class="sort-indicator"></span></th>
                    <th class="cursor-pointer text-right" onclick="sortTable(9)">CPU<span class="sort-indicator"></span></th>
                    <th class="cursor-pointer text-right" onclick="sortTable(10)">Mem<span class="sort-indicator"></span></th>
                    <th class="cursor-pointer text-right" onclick="sortTable(11)">Net I/O<span class="sort-indicator"></span></th>
                </tr>
            </thead>
            <tbody id="container-rows" data-hosts="{{ hosts | join(',') }}">
                {% for host in hosts %}
                <tr class="loading-row" data-host="{{ host }}">
                    <td colspan="12" class="text-center py-2">
                        <span class="loading loading-spinner loading-xs"></span>
                        <span class="text-sm opacity-60">Loading {{ host }}...</span>
                    </td>
                </tr>
                {% endfor %}
            </tbody>
        </table>
    </div>
    {% endif %}

    <!-- Shared Action Menu -->
    <ul id="shared-action-menu" class="menu menu-sm bg-base-200 rounded-box shadow-lg w-36 absolute z-50 p-2 hidden">
        <li><a data-action="restart"><svg class="h-4 w-4"><use href="#icon-restart" /></svg>Restart</a></li>
        <li><a data-action="pull"><svg class="h-4 w-4"><use href="#icon-pull" /></svg>Pull</a></li>
        <li><a data-action="update"><svg class="h-4 w-4"><use href="#icon-update" /></svg>Update</a></li>
        <li><a data-action="logs"><svg class="h-4 w-4"><use href="#icon-logs" /></svg>Logs</a></li>
    </ul>
</div>
{% endblock %}

{% block scripts %}
{% if glances_enabled %}
<style>
    .sort-indicator { display: inline-block; width: 1em; text-align: center; opacity: 0.5; }
    .high-usage { background-color: oklch(var(--er) / 0.15) !important; }
    /* Refresh animation */
    @keyframes row-pulse {
        0% { background-color: oklch(var(--p) / 0.2); }
        100% { background-color: transparent; }
    }
    .row-updated { animation: row-pulse 0.5s ease-out; }
</style>
{% endif %}
{% endblock %}

@@ -18,7 +18,7 @@
     {{ action_btn("Apply", "/api/apply", "primary", "Make reality match config", check()) }}
     {{ action_btn("Refresh", "/api/refresh", "outline", "Update state from reality", refresh_cw()) }}
     {{ action_btn("Pull All", "/api/pull-all", "outline", "Pull latest images for all stacks", cloud_download()) }}
-    {{ action_btn("Update All", "/api/update-all", "outline", "Update all stacks (pull + build + down + up)", rotate_cw()) }}
+    {{ action_btn("Update All", "/api/update-all", "outline", "Update all stacks except web (only recreates if changed)", rotate_cw()) }}
     <div class="tooltip" data-tip="Save compose-farm.yaml config file"><button id="save-config-btn" class="btn btn-outline">{{ save() }} Save Config</button></div>
 </div>

@@ -53,6 +53,13 @@
     {% include "partials/stacks_by_host.html" %}
 </div>

+<!-- Host Resources (Glances) -->
+<div id="glances-stats"
+     hx-get="/api/glances"
+     hx-trigger="load, cf:refresh from:body, every 30s"
+     hx-swap="innerHTML">
+</div>
+
 <!-- Hosts Configuration -->
 {% call collapse("Hosts (" ~ (hosts | length) ~ ")", icon=server()) %}
 {% call table() %}

66 src/compose_farm/web/templates/partials/glances.html Normal file
@@ -0,0 +1,66 @@
{# Glances resource stats display #}
{% from "partials/icons.html" import cpu, memory_stick, gauge, server, activity, hard_drive, arrow_down_up, refresh_cw %}

{% macro progress_bar(percent, color="primary") %}
<div class="flex items-center gap-2 min-w-32">
    <progress class="progress progress-{{ color }} flex-1" value="{{ percent }}" max="100"></progress>
    <span class="text-xs w-10 text-right">{{ "%.1f"|format(percent) }}%</span>
</div>
{% endmacro %}

{% macro format_rate(bytes_per_sec) %}
{%- if bytes_per_sec >= 1048576 -%}
{{ "%.1f"|format(bytes_per_sec / 1048576) }} MB/s
{%- elif bytes_per_sec >= 1024 -%}
{{ "%.1f"|format(bytes_per_sec / 1024) }} KB/s
{%- else -%}
{{ "%.0f"|format(bytes_per_sec) }} B/s
{%- endif -%}
{% endmacro %}

{% macro host_row(host_stats) %}
<tr>
    <td class="font-medium">{{ server(14) }} {{ host_stats.host }}</td>
    {% if host_stats.error %}
    <td colspan="5" class="text-error text-xs">{{ host_stats.error }}</td>
    {% else %}
    <td>{{ progress_bar(host_stats.cpu_percent, "info") }}</td>
    <td>{{ progress_bar(host_stats.mem_percent, "success") }}</td>
    <td>{{ progress_bar(host_stats.disk_percent, "warning") }}</td>
    <td class="text-xs font-mono">↓{{ format_rate(host_stats.net_rx_rate) }} ↑{{ format_rate(host_stats.net_tx_rate) }}</td>
    <td class="text-sm">{{ "%.1f"|format(host_stats.load) }}</td>
    {% endif %}
</tr>
{% endmacro %}

<div class="card bg-base-100 shadow mt-4 mb-4">
    <div class="card-body p-4">
        <div class="flex items-center justify-between">
            <h2 class="card-title text-base gap-2">{{ activity(18) }} Host Resources</h2>
            <button class="btn btn-ghost btn-xs opacity-50 hover:opacity-100"
                    hx-get="/api/glances" hx-target="#glances-stats" hx-swap="innerHTML"
                    title="Refresh">
                {{ refresh_cw(14) }}
            </button>
        </div>
        <div class="overflow-x-auto">
            <table class="table table-sm">
                <thead>
                    <tr>
                        <th>Host</th>
                        <th>{{ cpu(14) }} CPU</th>
                        <th>{{ memory_stick(14) }} Memory</th>
                        <th>{{ hard_drive(14) }} Disk</th>
                        <th>{{ arrow_down_up(14) }} Net</th>
                        <th>{{ gauge(14) }} Load</th>
                    </tr>
                </thead>
                <tbody>
                    {% for host_name, host_stats in stats.items() %}
                    {{ host_row(host_stats) }}
                    {% endfor %}
                </tbody>
            </table>
        </div>
    </div>
</div>
@@ -159,6 +159,12 @@
 </svg>
 {% endmacro %}

+{% macro x(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <path d="M18 6 6 18"/><path d="m6 6 12 12"/>
+</svg>
+{% endmacro %}
+
 {% macro alert_triangle(size=16) %}
 <svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
     <path d="m21.73 18-8-14a2 2 0 0 0-3.48 0l-8 14A2 2 0 0 0 4 21h16a2 2 0 0 0 1.73-3"/><path d="M12 9v4"/><path d="M12 17h.01"/>
@@ -176,3 +182,46 @@
     <path d="M15 3h6v6"/><path d="M10 14 21 3"/><path d="M18 13v6a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h6"/>
 </svg>
 {% endmacro %}
+
+{# Resource monitoring icons #}
+{% macro cpu(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <rect width="16" height="16" x="4" y="4" rx="2"/><rect width="6" height="6" x="9" y="9" rx="1"/><path d="M15 2v2"/><path d="M15 20v2"/><path d="M2 15h2"/><path d="M2 9h2"/><path d="M20 15h2"/><path d="M20 9h2"/><path d="M9 2v2"/><path d="M9 20v2"/>
+</svg>
+{% endmacro %}
+
+{% macro memory_stick(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <path d="M6 19v-3"/><path d="M10 19v-3"/><path d="M14 19v-3"/><path d="M18 19v-3"/><path d="M8 11V9"/><path d="M16 11V9"/><path d="M12 11V9"/><path d="M2 15h20"/><path d="M2 7a2 2 0 0 1 2-2h16a2 2 0 0 1 2 2v1.1a2 2 0 0 0 0 3.837V17a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2v-5.1a2 2 0 0 0 0-3.837z"/>
+</svg>
+{% endmacro %}
+
+{% macro gauge(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <path d="m12 14 4-4"/><path d="M3.34 19a10 10 0 1 1 17.32 0"/>
+</svg>
+{% endmacro %}
+
+{% macro activity(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <path d="M22 12h-2.48a2 2 0 0 0-1.93 1.46l-2.35 8.36a.25.25 0 0 1-.48 0L9.24 2.18a.25.25 0 0 0-.48 0l-2.35 8.36A2 2 0 0 1 4.49 12H2"/>
+</svg>
+{% endmacro %}
+
+{% macro arrow_down_up(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <path d="m3 16 4 4 4-4"/><path d="M7 20V4"/><path d="m21 8-4-4-4 4"/><path d="M17 4v16"/>
+</svg>
+{% endmacro %}
+
+{% macro hard_drive(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <line x1="22" x2="2" y1="12" y2="12"/><path d="M5.45 5.11 2 12v6a2 2 0 0 0 2 2h16a2 2 0 0 0 2-2v-6l-3.45-6.89A2 2 0 0 0 16.76 4H7.24a2 2 0 0 0-1.79 1.11z"/><line x1="6" x2="6.01" y1="16" y2="16"/><line x1="10" x2="10.01" y1="16" y2="16"/>
+</svg>
+{% endmacro %}
+
+{% macro box(size=16) %}
+<svg xmlns="http://www.w3.org/2000/svg" width="{{ size }}" height="{{ size }}" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+    <path d="M21 8a2 2 0 0 0-1-1.73l-7-4a2 2 0 0 0-2 0l-7 4A2 2 0 0 0 3 8v8a2 2 0 0 0 1 1.73l7 4a2 2 0 0 0 2 0l7-4A2 2 0 0 0 21 16Z"/><path d="m3.3 7 8.7 5 8.7-5"/><path d="M12 22V12"/>
+</svg>
+{% endmacro %}

@@ -1,8 +1,9 @@
-{% from "partials/icons.html" import home, search, terminal %}
+{% from "partials/icons.html" import home, search, terminal, box, x %}
 <!-- Navigation Links -->
 <div class="mb-4">
     <ul class="menu" hx-boost="true" hx-target="#main-content" hx-select="#main-content" hx-swap="outerHTML">
         <li><a href="/" class="font-semibold">{{ home() }} Dashboard</a></li>
+        <li><a href="/live-stats" class="font-semibold">{{ box() }} Live Stats</a></li>
         <li><a href="/console" class="font-semibold">{{ terminal() }} Console</a></li>
     </ul>
 </div>
@@ -12,7 +13,7 @@
 <h4 class="text-xs uppercase tracking-wide text-base-content/60 px-3 py-1">Stacks <span class="opacity-50" id="sidebar-count">({{ stacks | length }})</span></h4>
 <div class="px-2 mb-2 flex flex-col gap-1">
     <label class="input input-xs flex items-center gap-2 bg-base-200">
-        {{ search(14) }}<input type="text" id="sidebar-filter" placeholder="Filter..." onkeyup="sidebarFilter()" />
+        {{ search(14) }}<input type="text" id="sidebar-filter" placeholder="Filter..." oninput="sidebarFilter()" /><button type="button" id="sidebar-filter-clear" class="hidden opacity-50 hover:opacity-100 cursor-pointer" onclick="clearSidebarFilter()">{{ x(12) }}</button>
     </label>
     <select id="sidebar-host-select" class="select select-xs bg-base-200 w-full" onchange="sidebarFilter()">
         <option value="">All hosts</option>

@@ -20,20 +20,4 @@
 {% else %}
 <p class="text-base-content/60 italic">No stacks currently running.</p>
 {% endfor %}
-<script>
-    function sbhFilter() {
-        const q = (document.getElementById('sbh-filter')?.value || '').toLowerCase();
-        const h = document.getElementById('sbh-host-select')?.value || '';
-        document.querySelectorAll('.sbh-group').forEach(g => {
-            if (h && g.dataset.h !== h) { g.hidden = true; return; }
-            let n = 0;
-            g.querySelectorAll('li[data-s]').forEach(li => {
-                const show = !q || li.dataset.s.includes(q);
-                li.hidden = !show;
-                if (show) n++;
-            });
-            g.hidden = !n;
-        });
-    }
-</script>
 {% endcall %}

@@ -4,7 +4,7 @@
 {% block title %}{{ name }} - Compose Farm{% endblock %}

 {% block content %}
-<div class="max-w-5xl" data-services="{{ services | join(',') }}" data-containers='{{ containers | tojson }}' data-website-urls='{{ website_urls | tojson }}'>
+<div class="max-w-5xl" data-stack-name="{{ name }}" data-services="{{ services | join(',') }}" data-containers='{{ containers | tojson }}' data-website-urls='{{ website_urls | tojson }}'>
     <div class="mb-6">
         <h1 class="text-3xl font-bold rainbow-hover">{{ name }}</h1>
         <div class="flex flex-wrap items-center gap-2 mt-2">
@@ -22,8 +22,8 @@
     <!-- Lifecycle -->
     {{ action_btn("Up", "/api/stack/" ~ name ~ "/up", "primary", "Start stack (docker compose up -d)", play()) }}
     {{ action_btn("Down", "/api/stack/" ~ name ~ "/down", "outline", "Stop stack (docker compose down)", square()) }}
-    {{ action_btn("Restart", "/api/stack/" ~ name ~ "/restart", "secondary", "Restart stack (down + up)", rotate_cw()) }}
-    {{ action_btn("Update", "/api/stack/" ~ name ~ "/update", "accent", "Update to latest (pull + build + down + up)", download()) }}
+    {{ action_btn("Restart", "/api/stack/" ~ name ~ "/restart", "secondary", "Restart running containers", rotate_cw()) }}
+    {{ action_btn("Update", "/api/stack/" ~ name ~ "/update", "accent", "Update to latest (only recreates if changed)", download()) }}

     <div class="divider divider-horizontal mx-0"></div>

122 src/compose_farm/web/vendor-assets.json Normal file
@@ -0,0 +1,122 @@
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "$comment": "CDN assets vendored into production builds and cached for tests",
  "assets": [
    {
      "url": "https://cdn.jsdelivr.net/npm/daisyui@5",
      "filename": "daisyui.css",
      "content_type": "text/css",
      "package": "daisyui"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/daisyui@5/themes.css",
      "filename": "daisyui-themes.css",
      "content_type": "text/css",
      "package": "daisyui"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/@tailwindcss/browser@4",
      "filename": "tailwind.js",
      "content_type": "application/javascript",
      "package": "tailwindcss"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/css/xterm.css",
      "filename": "xterm.css",
      "content_type": "text/css",
      "package": "xterm"
    },
    {
      "url": "https://unpkg.com/htmx.org@2.0.4",
      "filename": "htmx.js",
      "content_type": "application/javascript",
      "package": "htmx"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/lib/xterm.js",
      "filename": "xterm.js",
      "content_type": "application/javascript",
      "package": "xterm"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/@xterm/addon-fit@0.10.0/lib/addon-fit.js",
      "filename": "xterm-fit.js",
      "content_type": "application/javascript",
      "package": "xterm"
    },
    {
      "url": "https://unpkg.com/idiomorph/dist/idiomorph.min.js",
      "filename": "idiomorph.js",
      "content_type": "application/javascript",
      "package": "idiomorph"
    },
    {
      "url": "https://unpkg.com/idiomorph/dist/idiomorph-ext.min.js",
      "filename": "idiomorph-ext.js",
      "content_type": "application/javascript",
      "package": "idiomorph"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/loader.js",
      "filename": "monaco/loader.js",
      "content_type": "application/javascript",
      "package": "monaco-editor"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/editor/editor.main.js",
      "filename": "monaco/editor/editor.main.js",
      "content_type": "application/javascript",
      "package": "monaco-editor"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/editor/editor.main.css",
      "filename": "monaco/editor/editor.main.css",
      "content_type": "text/css",
      "package": "monaco-editor"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/base/worker/workerMain.js",
      "filename": "monaco/base/worker/workerMain.js",
      "content_type": "application/javascript",
      "package": "monaco-editor"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/basic-languages/yaml/yaml.js",
      "filename": "monaco/basic-languages/yaml/yaml.js",
      "content_type": "application/javascript",
      "package": "monaco-editor"
    },
    {
      "url": "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.2/min/vs/base/browser/ui/codicons/codicon/codicon.ttf",
      "filename": "monaco/base/browser/ui/codicons/codicon/codicon.ttf",
      "content_type": "font/ttf",
      "package": "monaco-editor"
    }
  ],
  "licenses": {
    "htmx": {
      "type": "MIT",
      "url": "https://raw.githubusercontent.com/bigskysoftware/htmx/master/LICENSE"
    },
    "idiomorph": {
      "type": "BSD-2-Clause",
      "url": "https://raw.githubusercontent.com/bigskysoftware/idiomorph/main/LICENSE"
    },
    "xterm": {
      "type": "MIT",
      "url": "https://raw.githubusercontent.com/xtermjs/xterm.js/master/LICENSE"
    },
    "daisyui": {
      "type": "MIT",
      "url": "https://raw.githubusercontent.com/saadeghi/daisyui/master/LICENSE"
    },
    "tailwindcss": {
      "type": "MIT",
      "url": "https://raw.githubusercontent.com/tailwindlabs/tailwindcss/master/LICENSE"
    },
    "monaco-editor": {
      "type": "MIT",
      "url": "https://raw.githubusercontent.com/microsoft/monaco-editor/main/LICENSE.txt"
    }
  }
}
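
A rough sketch of how a manifest like this could be consumed to vendor the CDN assets for offline use; the actual download logic in compose-farm may differ, and the output directory here is an illustrative assumption:

    import json
    import pathlib
    import urllib.request

    manifest = json.loads(pathlib.Path("vendor-assets.json").read_text())
    vendor_dir = pathlib.Path("static/vendor")  # assumed output directory
    for asset in manifest["assets"]:
        # Mirror each asset's filename (which may include subdirectories)
        dest = vendor_dir / asset["filename"]
        dest.parent.mkdir(parents=True, exist_ok=True)
        with urllib.request.urlopen(asset["url"]) as resp:
            dest.write_bytes(resp.read())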

@@ -18,8 +18,8 @@ from typing import TYPE_CHECKING, Any
 import asyncssh
 from fastapi import APIRouter, WebSocket, WebSocketDisconnect

-from compose_farm.executor import is_local, ssh_connect_kwargs
-from compose_farm.web.deps import get_config
+from compose_farm.executor import ssh_connect_kwargs
+from compose_farm.web.deps import get_config, is_local_host
 from compose_farm.web.streaming import CRLF, DIM, GREEN, RED, RESET, tasks

 logger = logging.getLogger(__name__)
@@ -188,7 +188,7 @@ async def _run_exec_session(
         await websocket.send_text(f"{RED}Host '{host_name}' not found{RESET}{CRLF}")
         return

-    if is_local(host):
+    if is_local_host(host_name, host, config):
         # Local: use argv list (no shell interpretation)
         argv = ["docker", "exec", "-it", container, "/bin/sh", "-c", SHELL_FALLBACK]
         await _run_local_exec(websocket, argv)
@@ -239,7 +239,7 @@ async def _run_shell_session(
     # Start interactive shell in home directory
     shell_cmd = "cd ~ && exec bash -i || exec sh -i"

-    if is_local(host):
+    if is_local_host(host_name, host, config):
         # Local: use argv list with shell -c to interpret the command
         argv = ["/bin/sh", "-c", shell_cmd]
         await _run_local_exec(websocket, argv)

168 tests/test_cli_monitoring.py Normal file
@@ -0,0 +1,168 @@
"""Tests for CLI monitoring commands (stats)."""

from pathlib import Path
from unittest.mock import patch

import pytest
import typer

from compose_farm.cli.monitoring import _build_summary_table, stats
from compose_farm.config import Config, Host
from compose_farm.glances import ContainerStats


def _make_config(tmp_path: Path, glances_stack: str | None = None) -> Config:
    """Create a minimal config for testing."""
    config_path = tmp_path / "compose-farm.yaml"
    config_path.write_text("")

    return Config(
        compose_dir=tmp_path / "compose",
        hosts={"host1": Host(address="localhost")},
        stacks={"svc1": "host1"},
        config_path=config_path,
        glances_stack=glances_stack,
    )


class TestStatsCommand:
    """Tests for the stats command."""

    def test_stats_containers_requires_glances_config(
        self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
    ) -> None:
        """--containers fails if glances_stack is not configured."""
        cfg = _make_config(tmp_path, glances_stack=None)

        with (
            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
            pytest.raises(typer.Exit) as exc_info,
        ):
            stats(live=False, containers=True, host=None, config=None)

        assert exc_info.value.exit_code == 1
        captured = capsys.readouterr()
        assert "Glances not configured" in captured.err

    def test_stats_containers_success(
        self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
    ) -> None:
        """--containers fetches and displays container stats."""
        cfg = _make_config(tmp_path, glances_stack="glances")

        mock_containers = [
            ContainerStats(
                name="nginx",
                host="host1",
                status="running",
                image="nginx:latest",
                cpu_percent=10.5,
                memory_usage=100 * 1024 * 1024,
                memory_limit=1024 * 1024 * 1024,
                memory_percent=10.0,
                network_rx=1000,
                network_tx=2000,
                uptime="1h",
                ports="80->80",
                engine="docker",
                stack="web",
                service="nginx",
            )
        ]

        async def mock_fetch_async(
            cfg: Config, hosts: list[str] | None = None
        ) -> list[ContainerStats]:
            return mock_containers

        with (
            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
            patch(
                "compose_farm.glances.fetch_all_container_stats", side_effect=mock_fetch_async
            ) as mock_fetch,
        ):
            stats(live=False, containers=True, host=None, config=None)

        mock_fetch.assert_called_once_with(cfg, hosts=None)

        captured = capsys.readouterr()
        # Verify table output ("running" may be truncated to fit the column width)
        assert "nginx" in captured.out
        assert "host1" in captured.out
        assert "runni" in captured.out
        assert "10.5%" in captured.out

    def test_stats_containers_empty(
        self, tmp_path: Path, capsys: pytest.CaptureFixture[str]
    ) -> None:
        """--containers handles empty result gracefully."""
        cfg = _make_config(tmp_path, glances_stack="glances")

        async def mock_fetch_empty(
            cfg: Config, hosts: list[str] | None = None
        ) -> list[ContainerStats]:
            return []

        with (
            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
            patch("compose_farm.glances.fetch_all_container_stats", side_effect=mock_fetch_empty),
        ):
            with pytest.raises(typer.Exit) as exc_info:
                stats(live=False, containers=True, host=None, config=None)

        assert exc_info.value.exit_code == 0

        captured = capsys.readouterr()
        assert "No containers found" in captured.err

    def test_stats_containers_host_filter(self, tmp_path: Path) -> None:
        """--host limits container queries in --containers mode."""
        cfg = _make_config(tmp_path, glances_stack="glances")

        async def mock_fetch_async(
            cfg: Config, hosts: list[str] | None = None
        ) -> list[ContainerStats]:
            return []

        with (
            patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg),
            patch(
                "compose_farm.glances.fetch_all_container_stats", side_effect=mock_fetch_async
            ) as mock_fetch,
            pytest.raises(typer.Exit),
        ):
            stats(live=False, containers=True, host="host1", config=None)

        mock_fetch.assert_called_once_with(cfg, hosts=["host1"])

    def test_stats_summary_respects_host_filter(self, tmp_path: Path) -> None:
        """--host filters summary counts to the selected host."""
        compose_dir = tmp_path / "compose"
        for name in ("svc1", "svc2", "svc3"):
            stack_dir = compose_dir / name
            stack_dir.mkdir(parents=True)
            (stack_dir / "compose.yaml").write_text("services: {}\n")

        config_path = tmp_path / "compose-farm.yaml"
        config_path.write_text("")

        cfg = Config(
            compose_dir=compose_dir,
            hosts={
                "host1": Host(address="localhost"),
                "host2": Host(address="127.0.0.2"),
            },
            stacks={"svc1": "host1", "svc2": "host2", "svc3": "host1"},
            config_path=config_path,
        )

        state: dict[str, str | list[str]] = {"svc1": "host1", "svc2": "host2"}
        table = _build_summary_table(cfg, state, pending=[], host_filter="host1")
        labels = table.columns[0]._cells
        values = table.columns[1]._cells
        summary = dict(zip(labels, values, strict=True))

        assert summary["Total hosts"] == "1"
        assert summary["Stacks (configured)"] == "2"
        assert summary["Stacks (tracked)"] == "1"
        assert summary["Compose files on disk"] == "2"
@@ -11,9 +11,7 @@ import time
 import pytest

 # Thresholds in seconds, per OS
-if sys.platform == "win32":
-    CLI_STARTUP_THRESHOLD = 2.0
-elif sys.platform == "darwin":
+if sys.platform == "darwin":
     CLI_STARTUP_THRESHOLD = 0.35
 else:  # Linux
     CLI_STARTUP_THRESHOLD = 0.25

@@ -78,6 +78,76 @@
        # Defaults to compose.yaml when no file exists
        assert path == Path("/opt/compose/plex/compose.yaml")

    def test_get_web_stack_returns_env_var(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """get_web_stack returns CF_WEB_STACK env var."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6")},
            stacks={"compose-farm": "nas"},
        )
        assert config.get_web_stack() == "compose-farm"

    def test_get_web_stack_returns_empty_when_not_set(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """get_web_stack returns empty string when env var not set."""
        monkeypatch.delenv("CF_WEB_STACK", raising=False)
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6")},
            stacks={"compose-farm": "nas"},
        )
        assert config.get_web_stack() == ""

    def test_get_local_host_from_web_stack_returns_host(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """get_local_host_from_web_stack returns the web stack host in container."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6"), "nuc": Host(address="192.168.1.2")},
            stacks={"compose-farm": "nas"},
        )
        assert config.get_local_host_from_web_stack() == "nas"

    def test_get_local_host_from_web_stack_returns_none_outside_container(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """get_local_host_from_web_stack returns None when not in container."""
        monkeypatch.delenv("CF_WEB_STACK", raising=False)
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6")},
            stacks={"compose-farm": "nas"},
        )
        assert config.get_local_host_from_web_stack() is None

    def test_get_local_host_from_web_stack_returns_none_for_unknown_stack(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """get_local_host_from_web_stack returns None if web stack not in stacks."""
        monkeypatch.setenv("CF_WEB_STACK", "unknown-stack")
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6")},
            stacks={"plex": "nas"},
        )
        assert config.get_local_host_from_web_stack() is None

    def test_get_local_host_from_web_stack_returns_none_for_multi_host(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """get_local_host_from_web_stack returns None if web stack runs on multiple hosts."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6"), "nuc": Host(address="192.168.1.2")},
            stacks={"compose-farm": ["nas", "nuc"]},
        )
        assert config.get_local_host_from_web_stack() is None
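
# The four tests above pin down the lookup rules; a standalone sketch of the
# same logic (hypothetical name and signature, the real Config method may
# differ):
def _sketch_get_local_host_from_web_stack(
    web_stack: str, stacks: dict[str, str | list[str]]
) -> str | None:
    # No CF_WEB_STACK marker, or it names an unknown stack: not in-container.
    if not web_stack or web_stack not in stacks:
        return None
    host = stacks[web_stack]
    # A web stack mapped to multiple hosts is ambiguous, so return None.
    return host if isinstance(host, str) else None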


class TestLoadConfig:
    """Tests for load_config function."""
@@ -9,10 +9,12 @@ from typer.testing import CliRunner

 from compose_farm.cli import app
 from compose_farm.cli.config import (
     _detect_domain,
     _generate_template,
+    _get_config_file,
     _get_editor,
 )
+from compose_farm.config import Config, Host


 @pytest.fixture
@@ -228,3 +230,138 @@
        # Error goes to stderr
        output = result.stdout + (result.stderr or "")
        assert "Config file not found" in output or "not found" in output.lower()


class TestDetectDomain:
    """Tests for _detect_domain function."""

    def test_returns_none_for_empty_stacks(self) -> None:
        cfg = Config(
            compose_dir=Path("/opt/compose"),
            hosts={"nas": Host(address="192.168.1.6")},
            stacks={},
        )
        result = _detect_domain(cfg)
        assert result is None

    def test_skips_local_domains(self, tmp_path: Path) -> None:
        # Create a minimal compose file with a .local domain
        stack_dir = tmp_path / "test"
        stack_dir.mkdir()
        compose = stack_dir / "compose.yaml"
        compose.write_text(
            """
name: test
services:
  web:
    image: nginx
    labels:
      - "traefik.http.routers.test-local.rule=Host(`test.local`)"
"""
        )
        cfg = Config(
            compose_dir=tmp_path,
            hosts={"nas": Host(address="192.168.1.6")},
            stacks={"test": "nas"},
        )
        result = _detect_domain(cfg)
        # .local should be skipped
        assert result is None


class TestConfigInitEnv:
    """Tests for cf config init-env command."""

    def test_init_env_creates_file(
        self,
        runner: CliRunner,
        tmp_path: Path,
        valid_config_data: dict[str, Any],
        monkeypatch: pytest.MonkeyPatch,
    ) -> None:
        monkeypatch.delenv("CF_CONFIG", raising=False)
        config_file = tmp_path / "compose-farm.yaml"
        config_file.write_text(yaml.dump(valid_config_data))
        env_file = tmp_path / ".env"

        result = runner.invoke(
            app, ["config", "init-env", "-p", str(config_file), "-o", str(env_file)]
        )

        assert result.exit_code == 0
        assert env_file.exists()
        content = env_file.read_text()
        assert "CF_COMPOSE_DIR=/opt/compose" in content
        assert "CF_UID=" in content
        assert "CF_GID=" in content

    def test_init_env_force_overwrites(
        self,
        runner: CliRunner,
        tmp_path: Path,
        valid_config_data: dict[str, Any],
        monkeypatch: pytest.MonkeyPatch,
    ) -> None:
        monkeypatch.delenv("CF_CONFIG", raising=False)
        config_file = tmp_path / "compose-farm.yaml"
        config_file.write_text(yaml.dump(valid_config_data))
        env_file = tmp_path / ".env"
        env_file.write_text("OLD_CONTENT=true")

        result = runner.invoke(
            app, ["config", "init-env", "-p", str(config_file), "-o", str(env_file), "-f"]
        )

        assert result.exit_code == 0
        content = env_file.read_text()
        assert "OLD_CONTENT" not in content
        assert "CF_COMPOSE_DIR" in content

    def test_init_env_prompts_on_existing(
        self,
        runner: CliRunner,
        tmp_path: Path,
        valid_config_data: dict[str, Any],
        monkeypatch: pytest.MonkeyPatch,
    ) -> None:
        monkeypatch.delenv("CF_CONFIG", raising=False)
        config_file = tmp_path / "compose-farm.yaml"
        config_file.write_text(yaml.dump(valid_config_data))
        env_file = tmp_path / ".env"
        env_file.write_text("KEEP_THIS=true")

        result = runner.invoke(
            app,
            ["config", "init-env", "-p", str(config_file), "-o", str(env_file)],
            input="n\n",
        )

        assert result.exit_code == 0
        assert "Aborted" in result.stdout
        assert env_file.read_text() == "KEEP_THIS=true"

    def test_init_env_defaults_to_current_dir(
        self,
        runner: CliRunner,
        tmp_path: Path,
        valid_config_data: dict[str, Any],
        monkeypatch: pytest.MonkeyPatch,
    ) -> None:
        monkeypatch.delenv("CF_CONFIG", raising=False)
        config_dir = tmp_path / "config"
        config_dir.mkdir()
        config_file = config_dir / "compose-farm.yaml"
        config_file.write_text(yaml.dump(valid_config_data))

        # Create a separate working directory
        work_dir = tmp_path / "workdir"
        work_dir.mkdir()
        monkeypatch.chdir(work_dir)

        result = runner.invoke(app, ["config", "init-env", "-p", str(config_file)])

        assert result.exit_code == 0
        # Should create .env in the current directory, not the config directory
        env_file = work_dir / ".env"
        assert env_file.exists()
        assert not (config_dir / ".env").exists()
268 tests/test_containers.py Normal file
@@ -0,0 +1,268 @@
"""Tests for Containers page routes."""

from pathlib import Path
from unittest.mock import AsyncMock, patch

import pytest
from fastapi.testclient import TestClient

from compose_farm.config import Config, Host
from compose_farm.glances import ContainerStats, format_bytes
from compose_farm.web.app import create_app
from compose_farm.web.routes.containers import (
    _infer_stack_service,
    _parse_image,
    _parse_uptime_seconds,
)

# Byte size constants for tests
KB = 1024
MB = KB * 1024
GB = MB * 1024


class TestFormatBytes:
    """Tests for format_bytes function (uses humanize library)."""

    def test_bytes(self) -> None:
        assert format_bytes(500) == "500 Bytes"
        assert format_bytes(0) == "0 Bytes"

    def test_kilobytes(self) -> None:
        assert format_bytes(KB) == "1.0 KiB"
        assert format_bytes(KB * 5) == "5.0 KiB"
        assert format_bytes(KB + 512) == "1.5 KiB"

    def test_megabytes(self) -> None:
        assert format_bytes(MB) == "1.0 MiB"
        assert format_bytes(MB * 100) == "100.0 MiB"
        assert format_bytes(MB * 512) == "512.0 MiB"

    def test_gigabytes(self) -> None:
        assert format_bytes(GB) == "1.0 GiB"
        assert format_bytes(GB * 2) == "2.0 GiB"
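
# The class docstring notes that format_bytes wraps the humanize library; a
# sketch consistent with the expected strings above (hypothetical, the real
# implementation in compose_farm.glances may differ):
import humanize

def _sketch_format_bytes(num_bytes: int) -> str:
    # binary=True yields KiB/MiB/GiB units and plain "Bytes" below 1 KiB
    return humanize.naturalsize(num_bytes, binary=True)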


class TestParseImage:
    """Tests for _parse_image function."""

    def test_simple_image_with_tag(self) -> None:
        assert _parse_image("nginx:latest") == ("nginx", "latest")
        assert _parse_image("redis:7") == ("redis", "7")

    def test_image_without_tag(self) -> None:
        assert _parse_image("nginx") == ("nginx", "latest")

    def test_registry_image(self) -> None:
        assert _parse_image("ghcr.io/user/repo:v1.0") == ("ghcr.io/user/repo", "v1.0")
        assert _parse_image("docker.io/library/nginx:alpine") == (
            "docker.io/library/nginx",
            "alpine",
        )

    def test_image_with_port_in_registry(self) -> None:
        # Registry with port should not be confused with tag
        assert _parse_image("localhost:5000/myimage") == ("localhost:5000/myimage", "latest")


class TestParseUptimeSeconds:
    """Tests for _parse_uptime_seconds function."""

    def test_seconds(self) -> None:
        assert _parse_uptime_seconds("17 seconds") == 17
        assert _parse_uptime_seconds("1 second") == 1

    def test_minutes(self) -> None:
        assert _parse_uptime_seconds("5 minutes") == 300
        assert _parse_uptime_seconds("1 minute") == 60

    def test_hours(self) -> None:
        assert _parse_uptime_seconds("2 hours") == 7200
        assert _parse_uptime_seconds("an hour") == 3600
        assert _parse_uptime_seconds("1 hour") == 3600

    def test_days(self) -> None:
        assert _parse_uptime_seconds("3 days") == 259200
        assert _parse_uptime_seconds("a day") == 86400

    def test_empty(self) -> None:
        assert _parse_uptime_seconds("") == 0
        assert _parse_uptime_seconds("-") == 0


class TestInferStackService:
    """Tests for _infer_stack_service function."""

    def test_underscore_separator(self) -> None:
        assert _infer_stack_service("mystack_web_1") == ("mystack", "web")
        assert _infer_stack_service("app_db_1") == ("app", "db")

    def test_hyphen_separator(self) -> None:
        assert _infer_stack_service("mystack-web-1") == ("mystack", "web")
        assert _infer_stack_service("compose-farm-api-1") == ("compose", "farm-api")

    def test_simple_name(self) -> None:
        # No separator - use name for both
        assert _infer_stack_service("nginx") == ("nginx", "nginx")
        assert _infer_stack_service("traefik") == ("traefik", "traefik")

    def test_single_part_with_separator(self) -> None:
        # Edge case: separator with empty second part
        assert _infer_stack_service("single_") == ("single", "")
|
||||
|
||||
|
||||
class TestContainersPage:
|
||||
"""Tests for containers page endpoint."""
|
||||
|
||||
@pytest.fixture
|
||||
def client(self) -> TestClient:
|
||||
app = create_app()
|
||||
return TestClient(app)
|
||||
|
||||
@pytest.fixture
|
||||
def mock_config(self) -> Config:
|
||||
return Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={
|
||||
"nas": Host(address="192.168.1.6"),
|
||||
"nuc": Host(address="192.168.1.2"),
|
||||
},
|
||||
stacks={"test": "nas"},
|
||||
glances_stack="glances",
|
||||
)
|
||||
|
||||
def test_containers_page_without_glances(self, client: TestClient) -> None:
|
||||
"""Test containers page shows warning when Glances not configured."""
|
||||
with patch("compose_farm.web.routes.containers.get_config") as mock:
|
||||
mock.return_value = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas": Host(address="192.168.1.6")},
|
||||
stacks={"test": "nas"},
|
||||
glances_stack=None,
|
||||
)
|
||||
response = client.get("/live-stats")
|
||||
|
||||
assert response.status_code == 200
|
||||
assert "Glances not configured" in response.text
|
||||
|
||||
def test_containers_page_with_glances(self, client: TestClient, mock_config: Config) -> None:
|
||||
"""Test containers page loads when Glances is configured."""
|
||||
with patch("compose_farm.web.routes.containers.get_config") as mock:
|
||||
mock.return_value = mock_config
|
||||
response = client.get("/live-stats")
|
||||
|
||||
assert response.status_code == 200
|
||||
assert "Live Stats" in response.text
|
||||
assert "container-rows" in response.text
|
||||
|
||||
|
||||
class TestContainersRowsAPI:
|
||||
"""Tests for containers rows HTML endpoint."""
|
||||
|
||||
@pytest.fixture
|
||||
def client(self) -> TestClient:
|
||||
app = create_app()
|
||||
return TestClient(app)
|
||||
|
||||
def test_rows_without_glances(self, client: TestClient) -> None:
|
||||
"""Test rows endpoint returns error when Glances not configured."""
|
||||
with patch("compose_farm.web.routes.containers.get_config") as mock:
|
||||
mock.return_value = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas": Host(address="192.168.1.6")},
|
||||
stacks={"test": "nas"},
|
||||
glances_stack=None,
|
||||
)
|
||||
response = client.get("/api/containers/rows")
|
||||
|
||||
assert response.status_code == 200
|
||||
assert "Glances not configured" in response.text
|
||||
|
||||
def test_rows_returns_html(self, client: TestClient) -> None:
|
||||
"""Test rows endpoint returns HTML table rows."""
|
||||
mock_containers = [
|
||||
ContainerStats(
|
||||
name="nginx",
|
||||
host="nas",
|
||||
status="running",
|
||||
image="nginx:latest",
|
||||
cpu_percent=5.5,
|
||||
memory_usage=104857600,
|
||||
memory_limit=1073741824,
|
||||
memory_percent=9.77,
|
||||
network_rx=1000,
|
||||
network_tx=500,
|
||||
uptime="2 hours",
|
||||
ports="80->80/tcp",
|
||||
engine="docker",
|
||||
stack="web",
|
||||
service="nginx",
|
||||
),
|
||||
]
|
||||
|
||||
with (
|
||||
patch("compose_farm.web.routes.containers.get_config") as mock_config,
|
||||
patch(
|
||||
"compose_farm.web.routes.containers.fetch_all_container_stats",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_fetch,
|
||||
):
|
||||
mock_config.return_value = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas": Host(address="192.168.1.6")},
|
||||
stacks={"test": "nas"},
|
||||
glances_stack="glances",
|
||||
)
|
||||
mock_fetch.return_value = mock_containers
|
||||
|
||||
response = client.get("/api/containers/rows")
|
||||
|
||||
assert response.status_code == 200
|
||||
assert "<tr " in response.text # <tr id="..."> has attributes
|
||||
assert "nginx" in response.text
|
||||
assert "running" in response.text
|
||||
|
||||
def test_rows_have_data_sort_attributes(self, client: TestClient) -> None:
|
||||
"""Test rows have data-sort attributes for client-side sorting."""
|
||||
mock_containers = [
|
||||
ContainerStats(
|
||||
name="alpha",
|
||||
host="nas",
|
||||
status="running",
|
||||
image="nginx:latest",
|
||||
cpu_percent=10.0,
|
||||
memory_usage=100,
|
||||
memory_limit=1000,
|
||||
memory_percent=10.0,
|
||||
network_rx=100,
|
||||
network_tx=100,
|
||||
uptime="1 hour",
|
||||
ports="",
|
||||
engine="docker",
|
||||
stack="alpha",
|
||||
service="web",
|
||||
),
|
||||
]
|
||||
|
||||
with (
|
||||
patch("compose_farm.web.routes.containers.get_config") as mock_config,
|
||||
patch(
|
||||
"compose_farm.web.routes.containers.fetch_all_container_stats",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_fetch,
|
||||
):
|
||||
mock_config.return_value = Config(
|
||||
compose_dir=Path("/opt/compose"),
|
||||
hosts={"nas": Host(address="192.168.1.6")},
|
||||
stacks={"test": "nas"},
|
||||
glances_stack="glances",
|
||||
)
|
||||
mock_fetch.return_value = mock_containers
|
||||
|
||||
response = client.get("/api/containers/rows")
|
||||
assert response.status_code == 200
|
||||
# Check that cells have data-sort attributes
|
||||
assert 'data-sort="alpha"' in response.text # stack
|
||||
assert 'data-sort="web"' in response.text # service
|
||||
assert 'data-sort="3600"' in response.text # uptime (1 hour = 3600s)
|
||||
assert 'data-sort="10' in response.text # cpu

@@ -2,6 +2,7 @@

import sys
from pathlib import Path
from unittest.mock import AsyncMock, patch

import pytest

@@ -11,10 +12,12 @@ from compose_farm.executor import (
    _run_local_command,
    check_networks_exist,
    check_paths_exist,
    check_stack_running,
    get_running_stacks_on_host,
    is_local,
    run_command,
    run_compose,
    run_compose_on_host,
    run_on_stacks,
)

@@ -106,6 +109,108 @@ class TestRunCompose:
        # Command may fail due to no docker, but structure is correct
        assert result.stack == "test-service"

    async def test_run_compose_uses_cd_pattern(self, tmp_path: Path) -> None:
        """Verify run_compose uses 'cd <dir> && docker compose' pattern."""
        config = Config(
            compose_dir=tmp_path,
            hosts={"remote": Host(address="192.168.1.100")},
            stacks={"mystack": "remote"},
        )

        mock_result = CommandResult(stack="mystack", exit_code=0, success=True)
        with patch("compose_farm.executor.run_command", new_callable=AsyncMock) as mock_run:
            mock_run.return_value = mock_result
            await run_compose(config, "mystack", "up -d", stream=False)

        # Verify the command uses cd pattern with quoted path
        mock_run.assert_called_once()
        call_args = mock_run.call_args
        command = call_args[0][1]  # Second positional arg is command
        assert command == f'cd "{tmp_path}/mystack" && docker compose up -d'

    async def test_run_compose_works_without_local_compose_file(self, tmp_path: Path) -> None:
        """Verify compose works even when compose file doesn't exist locally.

        This is the bug from issue #162 - when running cf from a machine without
        NFS mounts, the compose file doesn't exist locally but should still work
        on the remote host.
        """
        config = Config(
            compose_dir=tmp_path,  # No compose files exist here
            hosts={"remote": Host(address="192.168.1.100")},
            stacks={"mystack": "remote"},
        )

        # Verify no compose file exists locally
        assert not (tmp_path / "mystack" / "compose.yaml").exists()
        assert not (tmp_path / "mystack" / "compose.yml").exists()

        mock_result = CommandResult(stack="mystack", exit_code=0, success=True)
        with patch("compose_farm.executor.run_command", new_callable=AsyncMock) as mock_run:
            mock_run.return_value = mock_result
            result = await run_compose(config, "mystack", "ps", stream=False)

        # Should succeed - docker compose on remote will find the file
        assert result.success
        # Command should use cd pattern, not -f with a specific file
        command = mock_run.call_args[0][1]
        assert "cd " in command
        assert " && docker compose " in command
        assert "-f " not in command  # Should NOT use -f flag

    async def test_run_compose_on_host_uses_cd_pattern(self, tmp_path: Path) -> None:
        """Verify run_compose_on_host uses 'cd <dir> && docker compose' pattern."""
        config = Config(
            compose_dir=tmp_path,
            hosts={"host1": Host(address="192.168.1.1")},
            stacks={"mystack": "host1"},
        )

        mock_result = CommandResult(stack="mystack", exit_code=0, success=True)
        with patch("compose_farm.executor.run_command", new_callable=AsyncMock) as mock_run:
            mock_run.return_value = mock_result
            await run_compose_on_host(config, "mystack", "host1", "down", stream=False)

        command = mock_run.call_args[0][1]
        assert command == f'cd "{tmp_path}/mystack" && docker compose down'

    async def test_check_stack_running_uses_cd_pattern(self, tmp_path: Path) -> None:
        """Verify check_stack_running uses 'cd <dir> && docker compose' pattern."""
        config = Config(
            compose_dir=tmp_path,
            hosts={"host1": Host(address="192.168.1.1")},
            stacks={"mystack": "host1"},
        )

        mock_result = CommandResult(stack="mystack", exit_code=0, success=True, stdout="abc123\n")
        with patch("compose_farm.executor.run_command", new_callable=AsyncMock) as mock_run:
            mock_run.return_value = mock_result
            result = await check_stack_running(config, "mystack", "host1")

        assert result is True
        command = mock_run.call_args[0][1]
        assert command == f'cd "{tmp_path}/mystack" && docker compose ps --status running -q'

    async def test_run_compose_quotes_paths_with_spaces(self, tmp_path: Path) -> None:
        """Verify paths with spaces are properly quoted."""
        compose_dir = tmp_path / "my compose dir"
        compose_dir.mkdir()

        config = Config(
            compose_dir=compose_dir,
            hosts={"remote": Host(address="192.168.1.100")},
            stacks={"my-stack": "remote"},
        )

        mock_result = CommandResult(stack="my-stack", exit_code=0, success=True)
        with patch("compose_farm.executor.run_command", new_callable=AsyncMock) as mock_run:
            mock_run.return_value = mock_result
            await run_compose(config, "my-stack", "up -d", stream=False)

        command = mock_run.call_args[0][1]
        # Path should be quoted to handle spaces
        assert f'cd "{compose_dir}/my-stack"' in command


class TestRunOnStacks:
    """Tests for parallel stack execution."""


403	tests/test_glances.py	Normal file
@@ -0,0 +1,403 @@
"""Tests for Glances integration."""

from pathlib import Path
from unittest.mock import AsyncMock, patch

import httpx
import pytest

from compose_farm.config import Config, Host
from compose_farm.glances import (
    DEFAULT_GLANCES_PORT,
    ContainerStats,
    HostStats,
    _get_glances_address,
    fetch_all_container_stats,
    fetch_all_host_stats,
    fetch_container_stats,
    fetch_host_stats,
)


class TestHostStats:
    """Tests for HostStats dataclass."""

    def test_host_stats_creation(self) -> None:
        stats = HostStats(
            host="nas",
            cpu_percent=25.5,
            mem_percent=50.0,
            swap_percent=10.0,
            load=2.5,
            disk_percent=75.0,
        )
        assert stats.host == "nas"
        assert stats.cpu_percent == 25.5
        assert stats.mem_percent == 50.0
        assert stats.disk_percent == 75.0
        assert stats.error is None

    def test_host_stats_from_error(self) -> None:
        stats = HostStats.from_error("nas", "Connection refused")
        assert stats.host == "nas"
        assert stats.cpu_percent == 0
        assert stats.mem_percent == 0
        assert stats.error == "Connection refused"


class TestFetchHostStats:
    """Tests for fetch_host_stats function."""

    @pytest.mark.asyncio
    async def test_fetch_host_stats_success(self) -> None:
        quicklook_response = httpx.Response(
            200,
            json={
                "cpu": 25.5,
                "mem": 50.0,
                "swap": 5.0,
                "load": 2.5,
            },
        )
        fs_response = httpx.Response(
            200,
            json=[
                {"mnt_point": "/", "percent": 65.0},
                {"mnt_point": "/mnt/data", "percent": 80.0},
            ],
        )

        async def mock_get(url: str) -> httpx.Response:
            if "quicklook" in url:
                return quicklook_response
            return fs_response
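        # fetch_host_stats queries two Glances endpoints - a quicklook summary
        # (cpu/mem/swap/load) and a filesystem list - so one side_effect fakes both.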

        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(side_effect=mock_get)

            stats = await fetch_host_stats("nas", "192.168.1.6")

        assert stats.host == "nas"
        assert stats.cpu_percent == 25.5
        assert stats.mem_percent == 50.0
        assert stats.swap_percent == 5.0
        assert stats.load == 2.5
        assert stats.disk_percent == 65.0  # Root filesystem
        assert stats.error is None

    @pytest.mark.asyncio
    async def test_fetch_host_stats_http_error(self) -> None:
        mock_response = httpx.Response(500)

        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(return_value=mock_response)

            stats = await fetch_host_stats("nas", "192.168.1.6")

        assert stats.host == "nas"
        assert stats.error == "HTTP 500"
        assert stats.cpu_percent == 0

    @pytest.mark.asyncio
    async def test_fetch_host_stats_timeout(self) -> None:
        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(side_effect=httpx.TimeoutException("timeout"))

            stats = await fetch_host_stats("nas", "192.168.1.6")

        assert stats.host == "nas"
        assert stats.error == "timeout"

    @pytest.mark.asyncio
    async def test_fetch_host_stats_connection_error(self) -> None:
        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(
                side_effect=httpx.ConnectError("Connection refused")
            )

            stats = await fetch_host_stats("nas", "192.168.1.6")

        assert stats.host == "nas"
        assert stats.error is not None
        assert "Connection refused" in stats.error


class TestFetchAllHostStats:
    """Tests for fetch_all_host_stats function."""

    @pytest.mark.asyncio
    async def test_fetch_all_host_stats(self) -> None:
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={
                "nas": Host(address="192.168.1.6"),
                "nuc": Host(address="192.168.1.2"),
            },
            stacks={"test": "nas"},
        )

        quicklook_response = httpx.Response(
            200,
            json={
                "cpu": 25.5,
                "mem": 50.0,
                "swap": 5.0,
                "load": 2.5,
            },
        )
        fs_response = httpx.Response(
            200,
            json=[{"mnt_point": "/", "percent": 70.0}],
        )

        async def mock_get(url: str) -> httpx.Response:
            if "quicklook" in url:
                return quicklook_response
            return fs_response

        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(side_effect=mock_get)

            stats = await fetch_all_host_stats(config)

        assert "nas" in stats
        assert "nuc" in stats
        assert stats["nas"].cpu_percent == 25.5
        assert stats["nuc"].cpu_percent == 25.5
        assert stats["nas"].disk_percent == 70.0


class TestDefaultPort:
    """Tests for default Glances port constant."""

    def test_default_port(self) -> None:
        assert DEFAULT_GLANCES_PORT == 61208


class TestContainerStats:
    """Tests for ContainerStats dataclass."""

    def test_container_stats_creation(self) -> None:
        stats = ContainerStats(
            name="nginx",
            host="nas",
            status="running",
            image="nginx:latest",
            cpu_percent=5.5,
            memory_usage=104857600,  # 100MB
            memory_limit=1073741824,  # 1GB
            memory_percent=9.77,
            network_rx=1000000,
            network_tx=500000,
            uptime="2 hours",
            ports="80->80/tcp",
            engine="docker",
        )
        assert stats.name == "nginx"
        assert stats.host == "nas"
        assert stats.cpu_percent == 5.5


class TestFetchContainerStats:
    """Tests for fetch_container_stats function."""

    @pytest.mark.asyncio
    async def test_fetch_container_stats_success(self) -> None:
        mock_response = httpx.Response(
            200,
            json=[
                {
                    "name": "nginx",
                    "status": "running",
                    "image": ["nginx:latest"],
                    "cpu_percent": 5.5,
                    "memory_usage": 104857600,
                    "memory_limit": 1073741824,
                    "network": {"cumulative_rx": 1000, "cumulative_tx": 500},
                    "uptime": "2 hours",
                    "ports": "80->80/tcp",
                    "engine": "docker",
                },
                {
                    "name": "redis",
                    "status": "running",
                    "image": ["redis:7"],
                    "cpu_percent": 1.2,
                    "memory_usage": 52428800,
                    "memory_limit": 1073741824,
                    "network": {},
                    "uptime": "3 hours",
                    "ports": "",
                    "engine": "docker",
                },
            ],
        )

        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(return_value=mock_response)

            containers, error = await fetch_container_stats("nas", "192.168.1.6")

        assert error is None
        assert containers is not None
        assert len(containers) == 2
        assert containers[0].name == "nginx"
        assert containers[0].host == "nas"
        assert containers[0].cpu_percent == 5.5
        assert containers[1].name == "redis"

    @pytest.mark.asyncio
    async def test_fetch_container_stats_empty_on_error(self) -> None:
        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(side_effect=httpx.TimeoutException("timeout"))

            containers, error = await fetch_container_stats("nas", "192.168.1.6")

        assert containers is None
        assert error == "Connection timed out"

    @pytest.mark.asyncio
    async def test_fetch_container_stats_handles_string_image(self) -> None:
        """Test that image field works as string (not just list)."""
        mock_response = httpx.Response(
            200,
            json=[
                {
                    "name": "test",
                    "status": "running",
                    "image": "myimage:v1",  # String instead of list
                    "cpu_percent": 0,
                    "memory_usage": 0,
                    "memory_limit": 1,
                    "network": {},
                    "uptime": "",
                    "ports": "",
                    "engine": "docker",
                },
            ],
        )

        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(return_value=mock_response)

            containers, error = await fetch_container_stats("nas", "192.168.1.6")

        assert error is None
        assert containers is not None
        assert len(containers) == 1
        assert containers[0].image == "myimage:v1"


class TestFetchAllContainerStats:
    """Tests for fetch_all_container_stats function."""

    @pytest.mark.asyncio
    async def test_fetch_all_container_stats(self) -> None:
        config = Config(
            compose_dir=Path("/opt/compose"),
            hosts={
                "nas": Host(address="192.168.1.6"),
                "nuc": Host(address="192.168.1.2"),
            },
            stacks={"test": "nas"},
        )

        mock_response = httpx.Response(
            200,
            json=[
                {
                    "name": "nginx",
                    "status": "running",
                    "image": ["nginx:latest"],
                    "cpu_percent": 5.5,
                    "memory_usage": 104857600,
                    "memory_limit": 1073741824,
                    "network": {},
                    "uptime": "2 hours",
                    "ports": "",
                    "engine": "docker",
                },
            ],
        )

        with patch("httpx.AsyncClient") as mock_client:
            mock_client.return_value.__aenter__ = AsyncMock(return_value=mock_client.return_value)
            mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
            mock_client.return_value.get = AsyncMock(return_value=mock_response)

            containers = await fetch_all_container_stats(config)

        # 2 hosts x 1 container each = 2 containers
        assert len(containers) == 2
        hosts = {c.host for c in containers}
        assert "nas" in hosts
        assert "nuc" in hosts


class TestGetGlancesAddress:
    """Tests for _get_glances_address function."""

    def test_returns_host_address_outside_container(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Without CF_WEB_STACK, always return host address."""
        monkeypatch.delenv("CF_WEB_STACK", raising=False)
        host = Host(address="192.168.1.6")
        result = _get_glances_address("nas", host, "glances")
        assert result == "192.168.1.6"

    def test_returns_host_address_without_glances_container(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """In container without glances_stack config, return host address."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        host = Host(address="192.168.1.6")
        result = _get_glances_address("nas", host, None)
        assert result == "192.168.1.6"

    def test_returns_container_name_for_web_stack_host(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Local host uses container name in container mode."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        host = Host(address="192.168.1.6")
        result = _get_glances_address("nas", host, "glances", local_host="nas")
        assert result == "glances"

    def test_returns_host_address_for_non_local_host(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Non-local hosts use their IP address even in container mode."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        host = Host(address="192.168.1.2")
        result = _get_glances_address("nuc", host, "glances", local_host="nas")
        assert result == "192.168.1.2"

    def test_fallback_to_is_local_detection(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Without explicit local host, falls back to is_local detection."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        # Use localhost which should be detected as local
        host = Host(address="localhost")
        result = _get_glances_address("local", host, "glances")
        assert result == "glances"

    def test_remote_host_not_affected_by_container_mode(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Remote hosts always use their IP, even in container mode."""
        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        host = Host(address="192.168.1.100")
        result = _get_glances_address("remote", host, "glances")
        assert result == "192.168.1.100"

@@ -14,6 +14,7 @@ from compose_farm.executor import CommandResult
from compose_farm.operations import (
    _migrate_stack,
    build_discovery_results,
+    build_up_cmd,
)


@@ -95,23 +96,47 @@ class TestMigrationCommands:
        assert pull_idx < build_idx


class TestBuildUpCmd:
    """Tests for build_up_cmd helper."""

    def test_basic(self) -> None:
        """Basic up command without flags."""
        assert build_up_cmd() == "up -d"

    def test_with_pull(self) -> None:
        """Up command with pull flag."""
        assert build_up_cmd(pull=True) == "up -d --pull always"

    def test_with_build(self) -> None:
        """Up command with build flag."""
        assert build_up_cmd(build=True) == "up -d --build"

    def test_with_pull_and_build(self) -> None:
        """Up command with both flags."""
        assert build_up_cmd(pull=True, build=True) == "up -d --pull always --build"

    def test_with_service(self) -> None:
        """Up command targeting a specific service."""
        assert build_up_cmd(service="web") == "up -d web"

    def test_with_all_options(self) -> None:
        """Up command with all options."""
        assert (
            build_up_cmd(pull=True, build=True, service="web") == "up -d --pull always --build web"
        )


class TestUpdateCommandSequence:
    """Tests for update command sequence."""

    def test_update_command_sequence_includes_build(self) -> None:
        """Update command should use pull --ignore-buildable and build."""
        # This is a static check of the command sequence in lifecycle.py
        # The actual command sequence is defined in the update function

    def test_update_delegates_to_up_with_pull_and_build(self) -> None:
        """Update command should delegate to up with pull=True and build=True."""
        source = inspect.getsource(lifecycle.update)

        # Verify the command sequence includes pull --ignore-buildable
        assert "pull --ignore-buildable" in source
        # Verify build is included
        assert '"build"' in source or "'build'" in source
        # Verify the sequence is pull, build, down, up
        assert "down" in source
        assert "up -d" in source
        # Verify update calls up with pull=True and build=True
        assert "up(" in source
        assert "pull=True" in source
        assert "build=True" in source


class TestBuildDiscoveryResults:


182	tests/test_registry.py	Normal file
@@ -0,0 +1,182 @@
"""Tests for registry module."""

from compose_farm.registry import (
    DOCKER_HUB_ALIASES,
    ImageRef,
    RegistryClient,
    TagCheckResult,
    _find_updates,
    _parse_version,
)


class TestImageRef:
    """Tests for ImageRef parsing."""

    def test_parse_simple_image(self) -> None:
        """Test parsing simple image name."""
        ref = ImageRef.parse("nginx")
        assert ref.registry == "docker.io"
        assert ref.namespace == "library"
        assert ref.name == "nginx"
        assert ref.tag == "latest"

    def test_parse_image_with_tag(self) -> None:
        """Test parsing image with tag."""
        ref = ImageRef.parse("nginx:1.25")
        assert ref.registry == "docker.io"
        assert ref.namespace == "library"
        assert ref.name == "nginx"
        assert ref.tag == "1.25"

    def test_parse_image_with_namespace(self) -> None:
        """Test parsing image with namespace."""
        ref = ImageRef.parse("linuxserver/jellyfin:latest")
        assert ref.registry == "docker.io"
        assert ref.namespace == "linuxserver"
        assert ref.name == "jellyfin"
        assert ref.tag == "latest"

    def test_parse_ghcr_image(self) -> None:
        """Test parsing GitHub Container Registry image."""
        ref = ImageRef.parse("ghcr.io/user/repo:v1.0.0")
        assert ref.registry == "ghcr.io"
        assert ref.namespace == "user"
        assert ref.name == "repo"
        assert ref.tag == "v1.0.0"

    def test_parse_image_with_digest(self) -> None:
        """Test parsing image with digest."""
        ref = ImageRef.parse("nginx:latest@sha256:abc123")
        assert ref.registry == "docker.io"
        assert ref.name == "nginx"
        assert ref.tag == "latest"
        assert ref.digest == "sha256:abc123"

    def test_full_name_with_namespace(self) -> None:
        """Test full_name property with namespace."""
        ref = ImageRef.parse("linuxserver/jellyfin")
        assert ref.full_name == "linuxserver/jellyfin"

    def test_full_name_without_namespace(self) -> None:
        """Test full_name property for official images."""
        ref = ImageRef.parse("nginx")
        assert ref.full_name == "library/nginx"

    def test_display_name_official_image(self) -> None:
        """Test display_name for official Docker Hub images."""
        ref = ImageRef.parse("nginx:latest")
        assert ref.display_name == "nginx"

    def test_display_name_hub_with_namespace(self) -> None:
        """Test display_name for Docker Hub images with namespace."""
        ref = ImageRef.parse("linuxserver/jellyfin")
        assert ref.display_name == "linuxserver/jellyfin"

    def test_display_name_other_registry(self) -> None:
        """Test display_name for other registries."""
        ref = ImageRef.parse("ghcr.io/user/repo")
        assert ref.display_name == "ghcr.io/user/repo"


class TestParseVersion:
    """Tests for version parsing."""

    def test_parse_semver(self) -> None:
        """Test parsing semantic version."""
        assert _parse_version("1.2.3") == (1, 2, 3)

    def test_parse_version_with_v_prefix(self) -> None:
        """Test parsing version with v prefix."""
        assert _parse_version("v1.2.3") == (1, 2, 3)
        assert _parse_version("V1.2.3") == (1, 2, 3)

    def test_parse_two_part_version(self) -> None:
        """Test parsing two-part version."""
        assert _parse_version("1.25") == (1, 25)

    def test_parse_single_number(self) -> None:
        """Test parsing single number version."""
        assert _parse_version("7") == (7,)

    def test_parse_invalid_version(self) -> None:
        """Test parsing non-version tags."""
        assert _parse_version("latest") is None
        assert _parse_version("stable") is None
        assert _parse_version("alpine") is None


class TestFindUpdates:
    """Tests for finding available updates."""

    def test_find_updates_with_newer_versions(self) -> None:
        """Test finding newer versions."""
        current = "1.0.0"
        tags = ["0.9.0", "1.0.0", "1.1.0", "2.0.0"]
        updates = _find_updates(current, tags)
        assert updates == ["2.0.0", "1.1.0"]

    def test_find_updates_no_newer(self) -> None:
        """Test when already on latest."""
        current = "2.0.0"
        tags = ["1.0.0", "1.5.0", "2.0.0"]
        updates = _find_updates(current, tags)
        assert updates == []

    def test_find_updates_non_version_tag(self) -> None:
        """Test with non-version current tag."""
        current = "latest"
        tags = ["1.0.0", "2.0.0"]
        updates = _find_updates(current, tags)
        # Can't determine updates for non-version tags
        assert updates == []


class TestRegistryClient:
    """Tests for unified registry client."""

    def test_docker_hub_normalization(self) -> None:
        """Test Docker Hub aliases are normalized."""
        for alias in DOCKER_HUB_ALIASES:
            client = RegistryClient(alias)
            assert client.registry == "docker.io"
            assert client.registry_url == "https://registry-1.docker.io"

    def test_ghcr_client(self) -> None:
        """Test GitHub Container Registry client."""
        client = RegistryClient("ghcr.io")
        assert client.registry == "ghcr.io"
        assert client.registry_url == "https://ghcr.io"

    def test_generic_registry(self) -> None:
        """Test generic registry client."""
        client = RegistryClient("quay.io")
        assert client.registry == "quay.io"
        assert client.registry_url == "https://quay.io"


class TestTagCheckResult:
    """Tests for TagCheckResult."""

    def test_create_result(self) -> None:
        """Test creating a result."""
        ref = ImageRef.parse("nginx:1.25")
        result = TagCheckResult(
            image=ref,
            current_digest="sha256:abc",
            available_updates=["1.26", "1.27"],
        )
        assert result.image.name == "nginx"
        assert result.available_updates == ["1.26", "1.27"]
        assert result.error is None

    def test_result_with_error(self) -> None:
        """Test result with error."""
        ref = ImageRef.parse("nginx")
        result = TagCheckResult(
            image=ref,
            current_digest="",
            error="Connection refused",
        )
        assert result.error == "Connection refused"
        assert result.available_updates == []

@@ -219,7 +219,7 @@ class TestSshConnectKwargs:
        assert result["client_keys"] == [str(key_path)]

    def test_includes_both_agent_and_key(self, tmp_path: Path) -> None:
-        """Include both agent_path and client_keys when both available."""
+        """Prioritize client_keys over agent_path when both available."""
        host = Host(address="example.com")
        key_path = tmp_path / "compose-farm"

@@ -229,7 +229,8 @@ class TestSshConnectKwargs:
    ):
        result = ssh_connect_kwargs(host)

-        assert result["agent_path"] == "/tmp/agent.sock"
+        # Agent should be ignored in favor of the dedicated key
+        assert "agent_path" not in result
        assert result["client_keys"] == [str(key_path)]

    def test_custom_port(self) -> None:

@@ -67,6 +67,16 @@ class TestSaveState:
        assert "plex: nas01" in content
        assert "jellyfin: nas02" in content

    def test_save_state_sorts_host_lists(self, config: Config) -> None:
        """Saves state with sorted host lists for consistent output."""
        # Pass hosts in unsorted order
        save_state(config, {"glances": ["pc", "nas", "hp", "anton"]})

        state_file = config.get_state_path()
        content = state_file.read_text()
        # Hosts should be sorted alphabetically
        assert "- anton\n  - hp\n  - nas\n  - pc" in content


class TestGetStackHost:
    """Tests for get_stack_host function."""

@@ -338,6 +338,26 @@ def test_parse_external_networks_missing_compose(tmp_path: Path) -> None:
    assert networks == []


def test_parse_external_networks_with_name_field(tmp_path: Path) -> None:
    """Network with 'name' field uses actual name, not key."""
    cfg = Config(
        compose_dir=tmp_path,
        hosts={"host1": Host(address="192.168.1.10")},
        stacks={"app": "host1"},
    )
    compose_path = tmp_path / "app" / "compose.yaml"
    _write_compose(
        compose_path,
        {
            "services": {"app": {"image": "nginx"}},
            "networks": {"default": {"name": "compose-net", "external": True}},
        },
    )

    networks = parse_external_networks(cfg, "app")
    assert networks == ["compose-net"]


class TestExtractWebsiteUrls:
    """Test extract_website_urls function."""

@@ -101,6 +101,83 @@ class TestGetStackComposePath:
        assert "not found" in exc_info.value.detail


class TestIsLocalHost:
    """Tests for is_local_host helper."""

    def test_returns_true_when_web_stack_host_matches(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """is_local_host returns True when host matches web stack host."""
        from compose_farm.config import Config, Host
        from compose_farm.web.deps import is_local_host

        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        config = Config(
            hosts={"nas": Host(address="10.99.99.1"), "nuc": Host(address="10.99.99.2")},
            stacks={"compose-farm": "nas"},
        )
        host = config.hosts["nas"]
        assert is_local_host("nas", host, config) is True

    def test_returns_false_when_web_stack_host_differs(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """is_local_host returns False when host does not match web stack host."""
        from compose_farm.config import Config, Host
        from compose_farm.web.deps import is_local_host

        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        config = Config(
            hosts={"nas": Host(address="10.99.99.1"), "nuc": Host(address="10.99.99.2")},
            stacks={"compose-farm": "nas"},
        )
        host = config.hosts["nuc"]
        # nuc is not local, and not matching the web stack host
        assert is_local_host("nuc", host, config) is False


class TestGetLocalHost:
    """Tests for get_local_host helper."""

    def test_returns_web_stack_host(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """get_local_host returns the web stack host when in container."""
        from compose_farm.config import Config, Host
        from compose_farm.web.deps import get_local_host

        monkeypatch.setenv("CF_WEB_STACK", "compose-farm")
        config = Config(
            hosts={"nas": Host(address="10.99.99.1"), "nuc": Host(address="10.99.99.2")},
            stacks={"compose-farm": "nas"},
        )
        assert get_local_host(config) == "nas"

    def test_ignores_unknown_web_stack(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """get_local_host ignores web stack if it's not in stacks."""
        from compose_farm.config import Config, Host
        from compose_farm.web.deps import get_local_host

        monkeypatch.setenv("CF_WEB_STACK", "unknown-stack")
        # Use address that won't match local machine to avoid is_local() fallback
        config = Config(
            hosts={"nas": Host(address="10.99.99.1")},
            stacks={"test": "nas"},
        )
        # Should fall back to auto-detection (which won't match anything here)
        assert get_local_host(config) is None

    def test_returns_none_outside_container(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """get_local_host returns None when CF_WEB_STACK not set."""
        from compose_farm.config import Config, Host
        from compose_farm.web.deps import get_local_host

        monkeypatch.delenv("CF_WEB_STACK", raising=False)
        config = Config(
            hosts={"nas": Host(address="10.99.99.1")},
            stacks={"compose-farm": "nas"},
        )
        assert get_local_host(config) is None


class TestRenderContainers:
    """Tests for container template rendering."""

@@ -4,7 +4,7 @@ Run with: uv run pytest tests/web/test_htmx_browser.py -v --no-cov

CDN assets are cached locally (in .pytest_cache/vendor/) to eliminate network
variability. If a test fails with "Uncached CDN request", add the URL to
-compose_farm.web.cdn.CDN_ASSETS.
+src/compose_farm/web/vendor-assets.json.
"""

from __future__ import annotations

@@ -90,7 +90,7 @@ def page(page: Page, vendor_cache: Path) -> Page:
            return
        # Uncached CDN request - abort with helpful error
        route.abort("failed")
-        msg = f"Uncached CDN request: {url}\n\nAdd this URL to CDN_ASSETS in tests/web/test_htmx_browser.py"
+        msg = f"Uncached CDN request: {url}\n\nAdd this URL to src/compose_farm/web/vendor-assets.json"
        raise RuntimeError(msg)

    page.route(re.compile(r"https://(cdn\.jsdelivr\.net|unpkg\.com)/.*"), handle_cdn)

@@ -134,6 +134,13 @@ def test_config(tmp_path_factory: pytest.TempPathFactory) -> Path:
    else:
        (svc / "compose.yaml").write_text(f"services:\n  {name}:\n    image: test/{name}\n")

+    # Create glances stack (required for containers page)
+    glances_dir = compose_dir / "glances"
+    glances_dir.mkdir()
+    (glances_dir / "compose.yaml").write_text(
+        "services:\n  glances:\n    image: nicolargo/glances\n"
+    )
+
    # Create config with multiple hosts
    config = tmp / "compose-farm.yaml"
    config.write_text(f"""

@@ -151,6 +158,8 @@ stacks:
  nextcloud: server-2
  jellyfin: server-2
  redis: server-1
+  glances: all
+glances_stack: glances
""")

    # Create state (plex and nextcloud running, grafana and jellyfin not started)

@@ -245,7 +254,7 @@ class TestHTMXSidebarLoading:

        # Verify actual stacks from test config appear
        stacks = page.locator("#sidebar-stacks li")
-        assert stacks.count() == 5  # plex, grafana, nextcloud, jellyfin, redis
+        assert stacks.count() == 6  # plex, grafana, nextcloud, jellyfin, redis, glances

        # Check specific stacks are present
        content = page.locator("#sidebar-stacks").inner_text()

@@ -348,7 +357,7 @@ class TestDashboardContent:

        # From test config: 2 hosts, 5 stacks, 2 running (plex, nextcloud)
        assert "2" in stats  # hosts count
-        assert "5" in stats  # stacks count
+        assert "6" in stats  # stacks count

    def test_pending_shows_not_started_stacks(self, page: Page, server_url: str) -> None:
        """Pending operations shows grafana and jellyfin as not started."""

@@ -476,9 +485,9 @@ class TestSidebarFilter:
        page.goto(server_url)
        page.wait_for_selector("#sidebar-stacks", timeout=TIMEOUT)

-        # Initially all 4 stacks visible
+        # Initially all 6 stacks visible
        visible_items = page.locator("#sidebar-stacks li:not([hidden])")
-        assert visible_items.count() == 5
+        assert visible_items.count() == 6

        # Type in filter to match only "plex"
        self._filter_sidebar(page, "plex")

@@ -493,9 +502,9 @@ class TestSidebarFilter:
        page.goto(server_url)
        page.wait_for_selector("#sidebar-stacks", timeout=TIMEOUT)

-        # Initial count should be (5)
+        # Initial count should be (6)
        count_badge = page.locator("#sidebar-count")
-        assert "(5)" in count_badge.inner_text()
+        assert "(6)" in count_badge.inner_text()

        # Filter to show only stacks containing "x" (plex, nextcloud)
        self._filter_sidebar(page, "x")

@@ -524,13 +533,14 @@ class TestSidebarFilter:
        # Select server-1 from dropdown
        page.locator("#sidebar-host-select").select_option("server-1")

-        # Only plex, grafana, and redis (server-1 stacks) should be visible
+        # plex, grafana, redis (server-1), and glances (all) should be visible
        visible = page.locator("#sidebar-stacks li:not([hidden])")
-        assert visible.count() == 3
+        assert visible.count() == 4

        content = visible.all_inner_texts()
        assert any("plex" in s for s in content)
        assert any("grafana" in s for s in content)
+        assert any("glances" in s for s in content)
        assert not any("nextcloud" in s for s in content)
        assert not any("jellyfin" in s for s in content)

@@ -562,7 +572,7 @@ class TestSidebarFilter:
        self._filter_sidebar(page, "")

        # All stacks visible again
-        assert page.locator("#sidebar-stacks li:not([hidden])").count() == 5
+        assert page.locator("#sidebar-stacks li:not([hidden])").count() == 6


class TestCommandPalette:

@@ -884,7 +894,7 @@ class TestContentStability:

        # Remember sidebar state
        initial_count = page.locator("#sidebar-stacks li").count()
-        assert initial_count == 5
+        assert initial_count == 6

        # Navigate away
        page.locator("#sidebar-stacks a", has_text="plex").click()

@@ -2329,3 +2339,227 @@ class TestTerminalNavigationIsolation:
        # Terminal should still be collapsed (no task to reconnect to)
        terminal_toggle = page.locator("#terminal-toggle")
        assert not terminal_toggle.is_checked(), "Terminal should remain collapsed after navigation"


class TestContainersPagePause:
    """Test containers page auto-refresh pause mechanism.

    The containers page auto-refreshes every 3 seconds. When a user opens
    an action dropdown, refresh should pause to prevent the dropdown from
    closing unexpectedly.
    """

    # Mock HTML for container rows with action dropdowns
    MOCK_ROWS_HTML = """
    <tr>
      <td>1</td>
      <td data-sort="plex"><a href="/stack/plex" class="link">plex</a></td>
      <td data-sort="server">server</td>
      <td><div class="dropdown dropdown-end">
        <label tabindex="0" class="btn btn-circle btn-ghost btn-xs"><svg class="h-4 w-4"></svg></label>
        <ul tabindex="0" class="dropdown-content menu menu-sm bg-base-200 rounded-box shadow-lg w-36 z-50 p-2">
          <li><a hx-post="/api/stack/plex/restart">Restart</a></li>
        </ul>
      </div></td>
      <td data-sort="nas"><span class="badge">nas</span></td>
      <td data-sort="nginx:latest"><code>nginx:latest</code></td>
      <td data-sort="running"><span class="badge badge-success">running</span></td>
      <td data-sort="3600">1 hour</td>
      <td data-sort="5"><progress class="progress" value="5" max="100"></progress><span>5%</span></td>
      <td data-sort="104857600"><progress class="progress" value="10" max="100"></progress><span>100MB</span></td>
      <td data-sort="1000">↓1KB ↑1KB</td>
    </tr>
    <tr>
      <td>2</td>
      <td data-sort="redis"><a href="/stack/redis" class="link">redis</a></td>
      <td data-sort="redis">redis</td>
      <td><div class="dropdown dropdown-end">
        <label tabindex="0" class="btn btn-circle btn-ghost btn-xs"><svg class="h-4 w-4"></svg></label>
        <ul tabindex="0" class="dropdown-content menu menu-sm bg-base-200 rounded-box shadow-lg w-36 z-50 p-2">
          <li><a hx-post="/api/stack/redis/restart">Restart</a></li>
        </ul>
      </div></td>
      <td data-sort="nas"><span class="badge">nas</span></td>
      <td data-sort="redis:7"><code>redis:7</code></td>
      <td data-sort="running"><span class="badge badge-success">running</span></td>
      <td data-sort="7200">2 hours</td>
      <td data-sort="1"><progress class="progress" value="1" max="100"></progress><span>1%</span></td>
      <td data-sort="52428800"><progress class="progress" value="5" max="100"></progress><span>50MB</span></td>
      <td data-sort="500">↓500B ↑500B</td>
    </tr>
    """

    def test_dropdown_pauses_refresh(self, page: Page, server_url: str) -> None:
        """Opening an action dropdown pauses auto-refresh.

        Bug: focusin triggers the pause, but focusout fires shortly after
        when focus moves within the dropdown, causing refresh to resume
        while the dropdown is still visually open.
        """
        # Mock container rows and update checks
        page.route(
            "**/api/containers/rows/*",
            lambda route: route.fulfill(
                status=200,
                content_type="text/html",
                body=self.MOCK_ROWS_HTML,
            ),
        )
        page.route(
            "**/api/containers/check-updates",
            lambda route: route.fulfill(
                status=200,
                content_type="application/json",
                body='{"results": []}',
            ),
        )

        page.goto(f"{server_url}/live-stats")

        # Wait for container rows to load
        page.wait_for_function(
            "document.querySelectorAll('#container-rows tr:not(.loading-row)').length > 0",
            timeout=TIMEOUT,
        )

        # Wait for timer to start
        page.wait_for_function(
            "document.getElementById('refresh-timer')?.textContent?.includes('↻')",
            timeout=TIMEOUT,
        )

        # Click on a dropdown to open it
        dropdown_label = page.locator(".dropdown label").first
        dropdown_label.click()

        # Wait a moment for focusin to trigger
        page.wait_for_timeout(200)

        # Verify the pause icon is showing
        timer_text = page.locator("#refresh-timer").inner_text()

        assert timer_text == "❚❚", (
            f"Refresh should be paused after clicking dropdown. timer='{timer_text}'"
        )

    def test_refresh_stays_paused_while_dropdown_open(self, page: Page, server_url: str) -> None:
        """Refresh remains paused for as long as the dropdown is open.

        This is the critical test for the pause bug: refresh should stay
        paused for longer than the refresh interval while the dropdown is open.
        """
        # Mock container rows and update checks
        page.route(
            "**/api/containers/rows/*",
            lambda route: route.fulfill(
                status=200,
                content_type="text/html",
                body=self.MOCK_ROWS_HTML,
            ),
        )
        page.route(
            "**/api/containers/check-updates",
            lambda route: route.fulfill(
                status=200,
                content_type="application/json",
                body='{"results": []}',
            ),
        )

        page.goto(f"{server_url}/live-stats")

        # Wait for container rows to load
        page.wait_for_function(
            "document.querySelectorAll('#container-rows tr:not(.loading-row)').length > 0",
            timeout=TIMEOUT,
        )

        # Wait for timer to start
        page.wait_for_function(
            "document.getElementById('refresh-timer')?.textContent?.includes('↻')",
            timeout=TIMEOUT,
        )

        # Record a marker in the first row to detect if refresh happened
        page.evaluate("""
            const firstRow = document.querySelector('#container-rows tr');
            if (firstRow) firstRow.dataset.testMarker = 'original';
        """)

        # Click dropdown to pause
        dropdown_label = page.locator(".dropdown label").first
        dropdown_label.click()
        page.wait_for_timeout(200)

        # Confirm paused
        assert page.locator("#refresh-timer").inner_text() == "❚❚"

        # Wait longer than the refresh interval
        page.wait_for_timeout(6000)

        # Check if still paused
        timer_text = page.locator("#refresh-timer").inner_text()

        # Check if the row was replaced (marker would be gone)
        marker = page.evaluate("""
            document.querySelector('#container-rows tr')?.dataset?.testMarker
        """)

        assert timer_text == "❚❚", f"Refresh should still be paused after 6s. timer='{timer_text}'"
        assert marker == "original", (
            "Table was refreshed while dropdown was open - pause mechanism failed"
        )

    def test_refresh_resumes_after_dropdown_closes(self, page: Page, server_url: str) -> None:
        """Refresh resumes after dropdown is closed."""
        # Mock container rows and update checks
        page.route(
            "**/api/containers/rows/*",
            lambda route: route.fulfill(
                status=200,
                content_type="text/html",
                body=self.MOCK_ROWS_HTML,
            ),
        )
        page.route(
            "**/api/containers/check-updates",
            lambda route: route.fulfill(
                status=200,
                content_type="application/json",
                body='{"results": []}',
            ),
        )

        page.goto(f"{server_url}/live-stats")

        # Wait for container rows to load
        page.wait_for_function(
            "document.querySelectorAll('#container-rows tr:not(.loading-row)').length > 0",
            timeout=TIMEOUT,
        )

        # Wait for timer to start
        page.wait_for_function(
            "document.getElementById('refresh-timer')?.textContent?.includes('↻')",
            timeout=TIMEOUT,
        )

        # Click dropdown to pause
        dropdown_label = page.locator(".dropdown label").first
        dropdown_label.click()
        page.wait_for_timeout(200)

        assert page.locator("#refresh-timer").inner_text() == "❚❚"

        # Close dropdown by pressing Escape or clicking elsewhere
        page.keyboard.press("Escape")
        page.wait_for_timeout(300)  # Wait for focusout timeout (150ms) + buffer

        # Verify refresh resumed
        timer_text = page.locator("#refresh-timer").inner_text()

        assert timer_text != "❚❚", (
            f"Refresh should resume after closing dropdown. timer='{timer_text}'"
        )
        assert "↻" in timer_text, f"Timer should show countdown, got '{timer_text}'"

13	uv.lock	generated
@@ -234,6 +234,7 @@ source = { editable = "." }
dependencies = [
    { name = "asyncssh" },
    { name = "pydantic" },
    { name = "python-dotenv" },
    { name = "pyyaml" },
    { name = "rich" },
    { name = "typer" },

@@ -242,6 +243,7 @@ dependencies = [
[package.optional-dependencies]
web = [
    { name = "fastapi", extra = ["standard"] },
+    { name = "humanize" },
    { name = "jinja2" },
    { name = "websockets" },
]

@@ -270,8 +272,10 @@ dev = [
requires-dist = [
    { name = "asyncssh", specifier = ">=2.14.0" },
    { name = "fastapi", extras = ["standard"], marker = "extra == 'web'", specifier = ">=0.109.0" },
+    { name = "humanize", marker = "extra == 'web'", specifier = ">=4.0.0" },
    { name = "jinja2", marker = "extra == 'web'", specifier = ">=3.1.0" },
    { name = "pydantic", specifier = ">=2.0.0" },
    { name = "python-dotenv", specifier = ">=1.0.0" },
    { name = "pyyaml", specifier = ">=6.0" },
    { name = "rich", specifier = ">=13.0.0" },
    { name = "typer", specifier = ">=0.9.0" },

@@ -781,6 +785,15 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
]

+[[package]]
+name = "humanize"
+version = "4.15.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ba/66/a3921783d54be8a6870ac4ccffcd15c4dc0dd7fcce51c6d63b8c63935276/humanize-4.15.0.tar.gz", hash = "sha256:1dd098483eb1c7ee8e32eb2e99ad1910baefa4b75c3aff3a82f4d78688993b10", size = 83599, upload-time = "2025-12-20T20:16:13.19Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c5/7b/bca5613a0c3b542420cf92bd5e5fb8ebd5435ce1011a091f66bb7693285e/humanize-4.15.0-py3-none-any.whl", hash = "sha256:b1186eb9f5a9749cd9cb8565aee77919dd7c8d076161cf44d70e59e3301e1769", size = 132203, upload-time = "2025-12-20T20:16:11.67Z" },
+]
+
[[package]]
name = "identify"
version = "2.6.15"

@@ -16,6 +16,7 @@ extra_javascript = ["javascripts/video-fix.js"]
nav = [
    { "Home" = "index.md" },
    { "Getting Started" = "getting-started.md" },
+    { "Docker Deployment" = "docker-deployment.md" },
    { "Configuration" = "configuration.md" },
    { "Commands" = "commands.md" },
    { "Web UI" = "web-ui.md" },

@@ -25,6 +26,7 @@ nav = [
]

[project.theme]
custom_dir = "docs/overrides"
language = "en"

features = [

@@ -80,6 +82,9 @@ repo = "lucide/github"
[project.extra]
generator = false

+[project.extra.analytics]
+provider = "custom"
+
[[project.extra.social]]
icon = "fontawesome/brands/github"
link = "https://github.com/basnijholt/compose-farm"