Mirror of https://github.com/basnijholt/compose-farm.git (synced 2026-02-03 14:13:26 +00:00)

Compare commits (88 commits)

Commit SHA1s:
fd1b04297e, 4d65702868, 596a05e39d, e1a8ceb9e6, ed450c65e5, 0f84864a06, 9c72e0937a, 74cc2f3245, 940bd9585a, dd60af61a8, 2f3720949b, 1e3b1d71ed, c159549a9e, d65f4cf7f4, 7ce2067fcb, f32057aa7b, c3e3aeb538, 009f3b1403, 51f74eab42, 4acf797128, d167da9d63, a5eac339db, 9f3813eb72, b9ae0ad4d5, ca2a4dd6d9, fafdce5736, 6436becff9, 3460d8a3ea, 8dabc27272, 5e08f1d712, 8302f1d97a, eac9338352, 667931dc80, 5890221528, c8fc3c2496, ffb7a32402, beb1630fcf, 2af48b2642, f69993eac8, 9bdcd143cf, 9230e12eb0, 2a923e6e81, 5f2e081298, 6fbc7430cb, 6fdb43e1e9, 620e797671, 031a2af6f3, f69eed7721, 5a1fd4e29f, 26dea691ca, 56d64bfe7a, 5ddbdcdf9e, dd16becad1, df683a223f, fdb00e7655, 90657a025f, 7ae8ea0229, 612242eea9, ea650bff8a, 140bca4fd6, 6dad6be8da, d7f931e301, 471936439e, 36e4bef46d, 2cac0bf263, 3d07cbdff0, 0f67c17281, bd22a1a55e, cc54e89b33, f71e5cffd6, 0e32729763, b0b501fa98, 7e00596046, d1e4d9b05c, 3fbae630f9, 3e3c919714, 59b797a89d, 7caf006e07, 45040b75f1, fa1c5c1044, 67e832f687, da986fab6a, 5dd6e2ca05, 16435065de, 5921b5e405, f0cd85b5f5, fe95443733, 8df9288156
.envrc.example (new file, 6 lines)
@@ -0,0 +1,6 @@
+# Run containers as current user (preserves file ownership on NFS mounts)
+# Copy this file to .envrc and run: direnv allow
+export CF_UID=$(id -u)
+export CF_GID=$(id -g)
+export CF_HOME=$HOME
+export CF_USER=$USER
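To confirm the variables are picked up after copying the file, a minimal check (assumes direnv is installed and hooked into your shell):

```bash
cp .envrc.example .envrc
direnv allow          # approve the new .envrc
env | grep '^CF_'     # expect CF_UID, CF_GID, CF_HOME, CF_USER
```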
.github/workflows/ci.yml (4 changed lines)
@@ -12,7 +12,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-latest]
         python-version: ["3.11", "3.12", "3.13"]

     steps:
@@ -54,7 +54,7 @@ jobs:
         run: uv run playwright install chromium --with-deps

       - name: Run browser tests
-        run: uv run pytest -m browser -v --no-cov
+        run: uv run pytest -m browser -n auto -v

   lint:
     runs-on: ubuntu-latest
.github/workflows/docker.yml (21 changed lines)
@@ -68,16 +68,35 @@ jobs:
             echo "✗ Timeout waiting for PyPI"
             exit 1

+      - name: Check if latest release
+        id: latest
+        run: |
+          VERSION="${{ steps.version.outputs.version }}"
+          # Get latest release tag from GitHub (strip 'v' prefix)
+          LATEST=$(gh release view --json tagName -q '.tagName' | sed 's/^v//')
+          echo "Building version: $VERSION"
+          echo "Latest release: $LATEST"
+          if [ "$VERSION" = "$LATEST" ]; then
+            echo "is_latest=true" >> $GITHUB_OUTPUT
+            echo "✓ This is the latest release, will tag as :latest"
+          else
+            echo "is_latest=false" >> $GITHUB_OUTPUT
+            echo "⚠ This is NOT the latest release, skipping :latest tag"
+          fi
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Extract metadata
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          # Only tag as 'latest' if this is the latest release (prevents re-runs of old releases from overwriting)
           tags: |
             type=semver,pattern={{version}},value=v${{ steps.version.outputs.version }}
             type=semver,pattern={{major}}.{{minor}},value=v${{ steps.version.outputs.version }}
             type=semver,pattern={{major}},value=v${{ steps.version.outputs.version }}
-            type=raw,value=latest
+            type=raw,value=latest,enable=${{ steps.latest.outputs.is_latest }}

       - name: Build and push
         uses: docker/build-push-action@v6
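The guard above can be reproduced outside CI to sanity-check which build would get the `:latest` tag — a sketch, assuming an authenticated `gh` CLI run from a checkout, with `VERSION` standing in for whatever version is about to be built:

```bash
VERSION=1.2.3   # hypothetical version under test
LATEST=$(gh release view --json tagName -q '.tagName' | sed 's/^v//')
if [ "$VERSION" = "$LATEST" ]; then
  echo "would tag :latest"
else
  echo "would skip :latest"
fi
```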
.github/workflows/docs.yml (4 changed lines)
@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
        with:
          lfs: true

@@ -49,7 +49,7 @@ jobs:

      - name: Upload artifact
        if: github.event_name != 'pull_request'
-        uses: actions/upload-pages-artifact@v3
+        uses: actions/upload-pages-artifact@v4
        with:
          path: "./site"
.github/workflows/update-readme.yml (4 changed lines)
@@ -26,7 +26,9 @@ jobs:
        env:
          TERM: dumb
          NO_COLOR: 1
-          TERMINAL_WIDTH: 90
+          COLUMNS: 90  # POSIX terminal width for Rich
+          TERMINAL_WIDTH: 90  # Typer MAX_WIDTH for help panels
+          _TYPER_FORCE_DISABLE_TERMINAL: 1  # Prevent Typer forcing terminal mode in CI
        run: |
          uvx --with . markdown-code-runner README.md
          sed -i 's/[[:space:]]*$//' README.md
.gitignore (3 changed lines)
@@ -37,6 +37,7 @@ ENV/
 .coverage
 .pytest_cache/
 htmlcov/
+.code/

 # Local config (don't commit real configs)
 compose-farm.yaml
@@ -44,3 +45,5 @@ compose-farm.yaml
 coverage.xml
 .env
 homepage/
+site/
+.playwright-mcp/
.pre-commit-config.yaml (2 changed lines)
@@ -21,7 +21,7 @@ repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.14.9
     hooks:
-      - id: ruff
+      - id: ruff-check
        args: [--fix]
      - id: ruff-format
@@ -1,94 +1,119 @@
-Review all documentation in this repository for accuracy, completeness, and consistency. Cross-reference documentation against the actual codebase to identify issues.
+Review documentation for accuracy, completeness, and consistency. Focus on things that require judgment—automated checks handle the rest.
 
-## Scope
+## What's Already Automated
 
-Review all documentation files:
-- docs/*.md (primary documentation)
-- README.md (repository landing page)
-- CLAUDE.md (development guidelines)
-- examples/README.md (example configurations)
+Don't waste time on these—CI and pre-commit hooks handle them:
 
-## Review Checklist
+- **README help output**: `markdown-code-runner` regenerates `cf --help` blocks in CI
+- **README command table**: Pre-commit hook verifies commands are listed
+- **Linting/formatting**: Handled by pre-commit
 
-### 1. Command Documentation
+## What This Review Is For
 
-For each documented command, verify against the CLI source code:
+Focus on things that require judgment:
 
-- Command exists in codebase
-- All options are documented with correct names, types, and defaults
-- Short options (-x) match long options (--xxx)
-- Examples would work as written
-- Check for undocumented commands or options
+1. **Accuracy**: Does the documentation match what the code actually does?
+2. **Completeness**: Are there undocumented features, options, or behaviors?
+3. **Clarity**: Would a new user understand this? Are examples realistic?
+4. **Consistency**: Do different docs contradict each other?
+5. **Freshness**: Has the code changed in ways the docs don't reflect?
 
-Run `--help` for each command to verify.
+## Review Process
 
-### 2. Configuration Documentation
+### 1. Check Recent Changes
 
-Verify against Pydantic models in the config module:
+```bash
+# What changed recently that might need doc updates?
+git log --oneline -20 | grep -iE "feat|fix|add|remove|change|option"
 
-- All config keys are documented
-- Types match Pydantic field types
-- Required vs optional fields are correct
-- Default values are accurate
-- Config file search order matches code
-- Example YAML is valid and uses current schema
+# What code files changed?
+git diff --name-only HEAD~20 | grep "\.py$"
+```
 
-### 3. Architecture Documentation
+Look for new features, changed defaults, renamed options, or removed functionality.
 
-Verify against actual directory structure:
+### 2. Verify docs/commands.md Options Tables
 
-- File paths match actual source code location
-- All modules listed actually exist
-- No modules are missing from the list
-- Component descriptions match code functionality
-- CLI module list includes all command files
+The README auto-updates help output, but `docs/commands.md` has **manually maintained options tables**. These can drift.
 
-### 4. State and Data Files
+For each command's options table, compare against `cf <command> --help`:
+- Are all options listed?
+- Are short flags correct?
+- Are defaults accurate?
+- Are descriptions accurate?
 
-Verify against state and path modules:
+**Pay special attention to subcommands** (`cf config *`, `cf ssh *`)—these have their own options that are easy to miss.
 
-- State file name and location are correct
-- State file format matches actual structure
-- Log file name and location are correct
-- What triggers state/log updates is accurate
+### 3. Verify docs/configuration.md
 
-### 5. Installation Documentation
+Compare against Pydantic models in the source:
 
-Verify against pyproject.toml:
+```bash
+# Find the config models
+grep -r "class.*BaseModel" src/ --include="*.py" -A 15
+```
 
-- Python version requirement matches requires-python
-- Package name is correct
-- Optional dependencies are documented
-- CLI entry points are mentioned
-- Installation methods work as documented
+Check:
+- All config keys documented
+- Types and defaults match code
+- Config file search order is accurate
+- Example YAML would actually work
 
-### 6. Feature Claims
+### 4. Verify docs/architecture.md and CLAUDE.md
 
-For each claimed feature, verify it exists and works as described.
+```bash
+# What source files actually exist?
+git ls-files "src/**/*.py"
+```
 
-### 7. Cross-Reference Consistency
+Check **both** `docs/architecture.md` and `CLAUDE.md` (Architecture section):
+- Listed files exist
+- No files are missing from the list
+- Descriptions match what the code does
 
-Check for conflicts between documentation files:
+Both files have architecture listings that can drift independently.
 
-- README vs docs/index.md (should be consistent)
-- CLAUDE.md vs actual code structure
-- Command tables match across files
-- Config examples are consistent
+### 5. Check Examples
+
+For examples in any doc:
+- Would the YAML/commands actually work?
+- Are service names, paths, and options realistic?
+- Do examples use current syntax (not deprecated options)?
+
+### 6. Cross-Reference Consistency
+
+The same info appears in multiple places. Check for conflicts:
+- README.md vs docs/index.md
+- docs/commands.md vs CLAUDE.md command tables
+- Config examples across different docs
+
+### 7. Self-Check This Prompt
+
+This prompt can become outdated too. If you notice:
+- New automated checks that should be listed above
+- New doc files that need review guidelines
+- Patterns that caused issues
+
+Include prompt updates in your fixes.
 
 ## Output Format
 
-Provide findings in these categories:
+Categorize findings:
 
-1. **Critical Issues**: Incorrect information that would cause user problems
-2. **Inaccuracies**: Technical errors, wrong defaults, incorrect paths
-3. **Missing Documentation**: Features/commands that exist but aren't documented
-4. **Outdated Content**: Information that was once true but no longer is
-5. **Inconsistencies**: Conflicts between different documentation files
-6. **Minor Issues**: Typos, formatting, unclear wording
-7. **Verified Accurate**: Sections confirmed to be correct
+1. **Critical**: Wrong info that would break user workflows
+2. **Inaccuracy**: Technical errors (wrong defaults, paths, types)
+3. **Missing**: Undocumented features or options
+4. **Outdated**: Was true, no longer is
+5. **Inconsistency**: Docs contradict each other
+6. **Minor**: Typos, unclear wording
 
-For each issue, include:
-- File path and line number (if applicable)
-- What the documentation says
-- What the code actually does
-- Suggested fix
+For each issue, provide a ready-to-apply fix:
+
+```
+### Issue: [Brief description]
+
+- **File**: docs/commands.md:652
+- **Problem**: `cf ssh setup` has `--config` option but it's not documented
+- **Fix**: Add `--config, -c PATH` to the options table
+- **Verify**: `cf ssh setup --help`
+```
.prompts/duplication-audit.md (new file, 79 lines)

# Duplication audit and generalization prompt

You are a coding agent working inside a repository. Your job is to find duplicated
functionality (not just identical code) and propose a minimal, safe generalization.
Keep it simple and avoid adding features.

## First steps

- Read project-specific instructions (AGENTS.md, CONTRIBUTING.md, or similar) and follow them.
- If instructions mention tooling or style (e.g., preferred search tools), use those.
- Ask a brief clarification if the request is ambiguous (for example: report only vs refactor).

## Objective

Identify and consolidate duplicated functionality across the codebase. Duplication includes:
- Multiple functions that parse or validate the same data in slightly different ways
- Repeated file reads or config parsing
- Similar command building or subprocess execution paths
- Near-identical error handling or logging patterns
- Repeated data transforms that can become a shared helper

The goal is to propose a general, reusable abstraction that reduces duplication while
preserving behavior. Keep changes minimal and easy to review.

## Search strategy

1) Map the hot paths
- Scan entry points (CLI, web handlers, tasks, jobs) to see what they do repeatedly.
- Look for cross-module patterns: same steps, different files.

2) Find duplicate operations
- Use fast search tools (prefer `rg`) to find repeated keywords and patterns.
- Check for repeated YAML/JSON parsing, env interpolation, file IO, command building,
  data validation, or response formatting.

3) Validate duplication is real
- Confirm the functional intent matches (not just similar code).
- Note any subtle differences that must be preserved.

4) Propose a minimal generalization
- Suggest a shared helper, utility, or wrapper.
- Avoid over-engineering. If only two call sites exist, keep the helper small.
- Prefer pure functions and centralized IO if that already exists.

## Deliverables

Provide a concise report with:

1) Findings
- List duplicated behaviors with file references and a short description of the
  shared functionality.
- Explain why these are functionally the same (or nearly the same).

2) Proposed generalizations
- For each duplication, propose a shared helper and where it should live.
- Outline any behavior differences that need to be parameterized.

3) Impact and risk
- Note any behavior risks, test needs, or migration steps.

If the user asked you to implement changes:
- Make only the minimal edits needed to dedupe behavior.
- Keep the public API stable unless explicitly requested.
- Add small comments only when the logic is non-obvious.
- Summarize what changed and why.

## Output format

- Start with a short summary of the top 1-3 duplications.
- Then provide a list of findings, ordered by impact.
- Include a small proposed refactor plan (step-by-step, no more than 5 steps).
- End with any questions or assumptions.

## Guardrails

- Do not add new features or change behavior beyond deduplication.
- Avoid deep refactors without explicit request.
- Preserve existing style conventions and import rules.
- If a duplication is better left alone (e.g., clarity, single usage), say so.
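For step 2 of the search strategy, concrete searches might look like this (illustrative patterns only; adjust to the codebase at hand):

```bash
rg -n "yaml.safe_load|json.load" src/   # repeated config/data parsing
rg -n "subprocess.run|Popen" src/       # similar command execution paths
rg -c "open\(" src/                     # file-IO call sites, counted per file
```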
.prompts/pr-review.md (new file, 16 lines)

Review the pull request for:

- **Code cleanliness**: Is the implementation clean and well-structured?
- **DRY principle**: Does it avoid duplication?
- **Code reuse**: Are there parts that should be reused from other places?
- **Organization**: Is everything in the right place?
- **Consistency**: Is it in the same style as other parts of the codebase?
- **Simplicity**: Is it not over-engineered? Remember KISS and YAGNI. No dead code paths and NO defensive programming.
- **No pointless wrappers**: Identify functions/methods that just call another function and return its result. Callers should call the underlying function directly instead of going through unnecessary indirection.
- **User experience**: Does it provide a good user experience?
- **PR**: Is the PR description and title clear and informative?
- **Tests**: Are there tests, and do they cover the changes adequately? Are they testing something meaningful or are they just trivial?
- **Live tests**: Test the changes in a REAL live environment to ensure they work as expected; use the config in `/opt/stacks/compose-farm.yaml`.
- **Rules**: Does the code follow the project's coding standards and guidelines as laid out in @CLAUDE.md?

Look at `git diff origin/main..HEAD` for the changes made in this pull request.
.prompts/update-demos.md (new file, 51 lines)

Update demo recordings to match the current compose-farm.yaml configuration.

## Key Gotchas

1. **Never `git checkout` without asking** - check for uncommitted changes first
2. **Prefer `nas` stacks** - demos run locally on nas, SSH adds latency
3. **Terminal captures keyboard** - use `blur()` to release focus before command palette
4. **Clicking sidebar navigates away** - clicking h1 scrolls to top
5. **Buttons have icons, not text** - use `[data-tip="..."]` selectors
6. **`record.py` auto-restores config** - no manual cleanup needed after CLI demos

## Stacks Used in Demos

| Stack | CLI Demos | Web Demos | Notes |
|-------|-----------|-----------|-------|
| `audiobookshelf` | quickstart, migration, apply | - | Migrates nas→anton |
| `grocy` | update | navigation, stack, workflow, console | - |
| `immich` | logs, compose | shell | Multiple containers |
| `dozzle` | - | workflow | - |

## CLI Demos

**Files:** `docs/demos/cli/*.tape`

Check:
- `quickstart.tape`: `bat -r` line ranges match current config structure
- `migration.tape`: nvim keystrokes work, stack exists on nas
- `compose.tape`: exec commands produce meaningful output

Run: `python docs/demos/cli/record.py [demo]`

## Web Demos

**Files:** `docs/demos/web/demo_*.py`

Check:
- Stack names in demos still exist in config
- Selectors match current templates (grep for IDs in `templates/`)
- Shell demo uses command palette for ALL navigation

Run: `python docs/demos/web/record.py [demo]`

## Before Recording

```bash
# Check for uncommitted config changes
git -C /opt/stacks diff compose-farm.yaml

# Verify stacks are running
cf ps audiobookshelf grocy immich dozzle
```
CLAUDE.md (63 changed lines)
@@ -15,20 +15,22 @@ src/compose_farm/
 │ ├── app.py # Shared Typer app instance, version callback
 │ ├── common.py # Shared helpers, options, progress bar utilities
 │ ├── config.py # Config subcommand (init, show, path, validate, edit, symlink)
-│ ├── lifecycle.py # up, down, pull, restart, update, apply commands
+│ ├── lifecycle.py # up, down, stop, pull, restart, update, apply, compose commands
 │ ├── management.py # refresh, check, init-network, traefik-file commands
-│ ├── monitoring.py # logs, ps, stats commands
+│ ├── monitoring.py # logs, ps, stats, list commands
 │ ├── ssh.py # SSH key management (setup, status, keygen)
 │ └── web.py # Web UI server command
-├── config.py # Pydantic models, YAML loading
 ├── compose.py # Compose file parsing (.env, ports, volumes, networks)
+├── config.py # Pydantic models, YAML loading
 ├── console.py # Shared Rich console instances
 ├── executor.py # SSH/local command execution, streaming output
-├── operations.py # Business logic (up, migrate, discover, preflight checks)
-├── state.py # Deployment state tracking (which stack on which host)
 ├── glances.py # Glances API integration for host resource stats
+├── logs.py # Image digest snapshots (dockerfarm-log.toml)
+├── operations.py # Business logic (up, migrate, discover, preflight checks)
 ├── paths.py # Path utilities, config file discovery
+├── registry.py # Container registry client for update checking
 ├── ssh_keys.py # SSH key path constants and utilities
+├── state.py # Deployment state tracking (which stack on which host)
 ├── traefik.py # Traefik file-provider config generation from labels
 └── web/ # Web UI (FastAPI + HTMX)
 ```
@@ -58,22 +60,37 @@ Icons use [Lucide](https://lucide.dev/). Add new icons as macros in `web/templat
 
 - **Imports at top level**: Never add imports inside functions unless they are explicitly marked with `# noqa: PLC0415` and a comment explaining it speeds up CLI startup. Heavy modules like `pydantic`, `yaml`, and `rich.table` are lazily imported to keep `cf --help` fast.
 
+## Development Commands
+
+Use `just` for common tasks. Run `just` to list available commands:
+
+| Command | Description |
+|---------|-------------|
+| `just install` | Install dev dependencies |
+| `just test` | Run all tests |
+| `just test-cli` | Run CLI tests (parallel) |
+| `just test-web` | Run web UI tests (parallel) |
+| `just lint` | Lint, format, and type check |
+| `just web` | Start web UI (port 9001) |
+| `just doc` | Build and serve docs (port 9002) |
+| `just clean` | Clean build artifacts |
+
 ## Testing
 
-Run tests with `uv run pytest`. Browser tests require Chromium (system-installed or via `playwright install chromium`):
+Run tests with `just test` or `uv run pytest`. Browser tests require Chromium (system-installed or via `playwright install chromium`):
 
 ```bash
-# Unit tests only (skip browser tests, can parallelize)
+# Unit tests only (parallel)
 uv run pytest -m "not browser" -n auto
 
-# Browser tests only (run sequentially, no coverage)
-uv run pytest -m browser --no-cov
+# Browser tests only (parallel)
+uv run pytest -m browser -n auto
 
 # All tests
-uv run pytest --no-cov
+uv run pytest
 ```
 
-Browser tests are marked with `@pytest.mark.browser`. They use Playwright to test HTMX behavior, JavaScript functionality (sidebar filter, command palette, terminals), and content stability during navigation. Run sequentially (no `-n`) to avoid resource contention.
+Browser tests are marked with `@pytest.mark.browser`. They use Playwright to test HTMX behavior, JavaScript functionality (sidebar filter, command palette, terminals), and content stability during navigation.
 
 ## Communication Notes
 
@@ -85,6 +102,17 @@ Browser tests are marked with `@pytest.mark.browser`. They use Playwright to tes
 - **NEVER merge anything into main.** Always commit directly or use fast-forward/rebase.
 - Never force push.
 
+## SSH Agent in Remote Sessions
+
+When pushing to GitHub via SSH fails with "Permission denied (publickey)", fix the SSH agent socket:
+
+```bash
+# Find and set the correct SSH agent socket
+SSH_AUTH_SOCK=$(ls -t ~/.ssh/agent/s.*.sshd.* 2>/dev/null | head -1) git push origin branch-name
+```
+
+This is needed because the SSH agent socket path changes between sessions.
+
 ## Pull Requests
 
 - Never include unchecked checklists (e.g., `- [ ] ...`) in PR descriptions. Either omit the checklist or use checked items.
@@ -95,6 +123,10 @@ Browser tests are marked with `@pytest.mark.browser`. They use Playwright to tes
 Use `gh release create` to create releases. The tag is created automatically.
 
 ```bash
+# IMPORTANT: Ensure you're on latest origin/main before releasing!
+git fetch origin
+git checkout origin/main
+
 # Check current version
 git tag --sort=-v:refname | head -1
 
@@ -116,17 +148,20 @@ CLI available as `cf` or `compose-farm`.
 |---------|-------------|
 | `up` | Start stacks (`docker compose up -d`), auto-migrates if host changed |
 | `down` | Stop stacks (`docker compose down`). Use `--orphaned` to stop stacks removed from config |
+| `stop` | Stop services without removing containers (`docker compose stop`) |
 | `pull` | Pull latest images |
-| `restart` | `down` + `up -d` |
-| `update` | `pull` + `build` + `down` + `up -d` |
+| `restart` | Restart running containers (`docker compose restart`) |
+| `update` | Pull, build, recreate only if changed (`up -d --pull always --build`) |
 | `apply` | Make reality match config: migrate stacks + stop orphans. Use `--dry-run` to preview |
+| `compose` | Run any docker compose command on a stack (passthrough) |
 | `logs` | Show stack logs |
 | `ps` | Show status of all stacks |
 | `stats` | Show overview (hosts, stacks, pending migrations; `--live` for container counts) |
+| `list` | List stacks and hosts (`--simple` for scripting, `--host` to filter) |
 | `refresh` | Update state from reality: discover running stacks, capture image digests |
 | `check` | Validate config, traefik labels, mounts, networks; show host compatibility |
 | `init-network` | Create Docker network on hosts with consistent subnet/gateway |
 | `traefik-file` | Generate Traefik file-provider config from compose labels |
-| `config` | Manage config files (init, show, path, validate, edit, symlink) |
+| `config` | Manage config files (init, init-env, show, path, validate, edit, symlink) |
 | `ssh` | Manage SSH keys (setup, status, keygen) |
 | `web` | Start web UI server |
Dockerfile (10 changed lines)
@@ -16,5 +16,13 @@ RUN apk add --no-cache openssh-client
 COPY --from=builder /root/.local/share/uv/tools/compose-farm /root/.local/share/uv/tools/compose-farm
 COPY --from=builder /usr/local/bin/cf /usr/local/bin/compose-farm /usr/local/bin/
 
-ENTRYPOINT ["cf"]
+# Allow non-root users to access the installed tool
+# (required when running with user: "${CF_UID:-0}:${CF_GID:-0}")
+RUN chmod 755 /root
+
+# Allow non-root users to add passwd entries (required for SSH)
+RUN chmod 666 /etc/passwd
+
+# Entrypoint creates /etc/passwd entry for non-root UIDs (required for SSH)
+ENTRYPOINT ["sh", "-c", "[ $(id -u) != 0 ] && echo ${USER:-u}:x:$(id -u):$(id -g)::${HOME:-/}:/bin/sh >> /etc/passwd; exec cf \"$@\"", "--"]
 CMD ["--help"]
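The entrypoint's passwd-entry trick can be exercised by running the image as the current user — a sketch, using the image name from this repo's registry and the caller's UID/GID:

```bash
docker run --rm \
  --user "$(id -u):$(id -g)" \
  -e USER="$USER" -e HOME="$HOME" \
  ghcr.io/basnijholt/compose-farm --version
# For a non-root UID, the entrypoint appends a passwd entry before exec'ing cf
```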
README.md (685 changed lines)
@@ -5,12 +5,31 @@
 [](LICENSE)
 [](https://github.com/basnijholt/compose-farm/stargazers)
 
-<img src="http://files.nijho.lt/compose-farm.png" align="right" style="width: 300px;" />
+<img src="https://files.nijho.lt/compose-farm.png" alt="Compose Farm logo" align="right" style="width: 300px;" />
 
 A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
 
 > [!NOTE]
-> Run `docker compose` commands across multiple hosts via SSH. One YAML maps stacks to hosts. Run `cf apply` and reality matches your config—stacks start, migrate, or stop as needed. No Kubernetes, no Swarm, no magic.
+> Agentless multi-host Docker Compose. CLI-first with a web UI. Your files stay as plain folders—version-controllable, no lock-in. Run `cf apply` and reality matches your config.
+
+**Why Compose Farm?**
+- **Your files, your control** — Plain folders + YAML, not locked in Portainer. Version control everything.
+- **Agentless** — Just SSH, no agents to deploy (unlike [Dockge](https://github.com/louislam/dockge)).
+- **Zero changes required** — Existing compose files work as-is.
+- **Grows with you** — Start single-host, scale to multi-host seamlessly.
+- **Declarative** — Change config, run `cf apply`, reality matches.
+
+## Quick Demo
+
+**CLI:**
+
+
+
+**Web UI:**
+
+
 
 ## Table of Contents
 
 <!-- START doctoc generated TOC please keep comment here to allow auto update -->
 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
@@ -24,17 +43,21 @@ A minimal CLI tool to run Docker Compose commands across multiple hosts via SSH.
 - [What Compose Farm doesn't do](#what-compose-farm-doesnt-do)
 - [Installation](#installation)
 - [SSH Authentication](#ssh-authentication)
-  - [SSH Agent (default)](#ssh-agent-default)
-  - [Dedicated SSH Key (recommended for Docker/Web UI)](#dedicated-ssh-key-recommended-for-dockerweb-ui)
+  - [SSH Agent](#ssh-agent)
+  - [Dedicated SSH Key (default for Docker)](#dedicated-ssh-key-default-for-docker)
 - [Configuration](#configuration)
   - [Single-host example](#single-host-example)
   - [Multi-host example](#multi-host-example)
   - [Multi-Host Stacks](#multi-host-stacks)
   - [Config Command](#config-command)
 - [Usage](#usage)
+  - [Docker Compose Commands](#docker-compose-commands)
+  - [Compose Farm Commands](#compose-farm-commands)
+  - [Aliases](#aliases)
   - [CLI `--help` Output](#cli---help-output)
 - [Auto-Migration](#auto-migration)
 - [Traefik Multihost Ingress (File Provider)](#traefik-multihost-ingress-file-provider)
 - [Host Resource Monitoring (Glances)](#host-resource-monitoring-glances)
 - [Comparison with Alternatives](#comparison-with-alternatives)
 - [License](#license)
@@ -143,7 +166,7 @@ If you need containers on different hosts to communicate seamlessly, you need Do
 
 ```bash
 # One-liner (installs uv if needed)
-curl -fsSL https://raw.githubusercontent.com/basnijholt/compose-farm/main/bootstrap.sh | sh
+curl -fsSL https://compose-farm.nijho.lt/install | sh
 
 # Or if you already have uv/pip
 uv tool install compose-farm
@@ -165,15 +188,33 @@ docker run --rm \
   ghcr.io/basnijholt/compose-farm up --all
 ```
 
+**Running as non-root user** (recommended for NFS mounts):
+
+By default, containers run as root. To preserve file ownership on mounted volumes
+(e.g., `compose-farm-state.yaml`, config edits), set these environment variables:
+
+```bash
+# Add to .env file (one-time setup)
+echo "CF_UID=$(id -u)" >> .env
+echo "CF_GID=$(id -g)" >> .env
+echo "CF_HOME=$HOME" >> .env
+echo "CF_USER=$USER" >> .env
+```
+
+Or use [direnv](https://direnv.net/) (copies `.envrc.example` to `.envrc`):
+```bash
+cp .envrc.example .envrc && direnv allow
+```
+
 </details>
 
 ## SSH Authentication
 
 Compose Farm uses SSH to run commands on remote hosts. There are two authentication methods:
 
-### SSH Agent (default)
+### SSH Agent
 
-Works out of the box if you have an SSH agent running with your keys loaded:
+Works out of the box when running locally if you have an SSH agent running with your keys loaded:
 
 ```bash
 # Verify your agent has keys
@@ -183,9 +224,9 @@ ssh-add -l
 cf up --all
 ```
 
-### Dedicated SSH Key (recommended for Docker/Web UI)
+### Dedicated SSH Key (default for Docker)
 
-When running compose-farm in Docker, the SSH agent connection can be lost (e.g., after container restart). The `cf ssh` command sets up a dedicated key that persists:
+When running in Docker, SSH agent sockets are ephemeral and can be lost after container restarts. The `cf ssh` command sets up a dedicated key that persists:
 
 ```bash
 # Generate key and copy to all configured hosts
@@ -204,15 +245,22 @@ When running in Docker, mount a volume to persist the SSH keys. Choose ONE optio
 **Option 1: Host path (default)** - keys at `~/.ssh/compose-farm/id_ed25519`
 ```yaml
 volumes:
-  - ~/.ssh/compose-farm:/root/.ssh
+  - ~/.ssh/compose-farm:${CF_HOME:-/root}/.ssh
 ```
 
 **Option 2: Named volume** - managed by Docker
 ```yaml
 volumes:
-  - cf-ssh:/root/.ssh
+  - cf-ssh:${CF_HOME:-/root}/.ssh
 ```
 
+**Option 3: SSH agent forwarding** - if you prefer using your host's ssh-agent
+```yaml
+volumes:
+  - ${SSH_AUTH_SOCK}:/ssh-agent:ro
+```
+Note: Requires `SSH_AUTH_SOCK` environment variable to be set. The socket path is ephemeral and changes across sessions.
+
 Run setup once after starting the container (while the SSH agent still works):
 
 ```bash
@@ -221,11 +269,13 @@ docker compose exec web cf ssh setup
 
 The keys will persist across restarts.
 
+**Note:** When running as non-root (with `CF_UID`/`CF_GID`), set `CF_HOME` to your home directory so SSH finds the keys at the correct path.
+
 </details>
 
 ## Configuration
 
-Create `~/.config/compose-farm/compose-farm.yaml` (or `./compose-farm.yaml` in your working directory):
+Create `compose-farm.yaml` in the directory where you'll run commands (e.g., `/opt/stacks`). This keeps config near your stacks. Alternatively, use `~/.config/compose-farm/compose-farm.yaml` for a global config, or symlink from one to the other with `cf config symlink`.
 
 ### Single-host example
@@ -258,7 +308,7 @@ hosts:
 stacks:
   plex: server-1
   jellyfin: server-2
-  sonarr: server-1
+  grafana: server-1
 
   # Multi-host stacks (run on multiple/all hosts)
   autokuma: all  # Runs on ALL configured hosts
@@ -316,23 +366,49 @@ Use `cf config init` to get started with a fully documented template.
 
 The CLI is available as both `compose-farm` and the shorter `cf` alias.
 
+### Docker Compose Commands
+
+These wrap `docker compose` with multi-host superpowers:
+
+| Command | Wraps | Compose Farm Additions |
+|---------|-------|------------------------|
+| `cf up` | `up -d` | `--all`, `--host`, parallel execution, auto-migration |
+| `cf down` | `down` | `--all`, `--host`, `--orphaned`, state tracking |
+| `cf stop` | `stop` | `--all`, `--service` |
+| `cf restart` | `restart` | `--all`, `--service` |
+| `cf pull` | `pull` | `--all`, `--service`, parallel execution |
+| `cf logs` | `logs` | `--all`, `--host`, multi-stack output |
+| `cf ps` | `ps` | `--all`, `--host`, unified cross-host view |
+| `cf compose` | any | passthrough for commands not listed above |
+
+### Compose Farm Commands
+
+Multi-host orchestration that Docker Compose can't do:
+
 | Command | Description |
 |---------|-------------|
-| **`cf apply`** | **Make reality match config (start + migrate + stop orphans)** |
-| `cf up <stack>` | Start stack (auto-migrates if host changed) |
-| `cf down <stack>` | Stop stack |
-| `cf restart <stack>` | down + up |
-| `cf update <stack>` | pull + build + down + up |
-| `cf pull <stack>` | Pull latest images |
-| `cf logs -f <stack>` | Follow logs |
-| `cf ps` | Show status of all stacks |
-| `cf refresh` | Update state from running stacks |
+| **`cf apply`** | **Reconcile: start missing, migrate moved, stop orphans** |
+| `cf update` | Shorthand for `up --pull --build` |
+| `cf refresh` | Sync state from what's actually running |
 | `cf check` | Validate config, mounts, networks |
-| `cf init-network` | Create Docker network on hosts |
+| `cf init-network` | Create Docker network on all hosts |
 | `cf traefik-file` | Generate Traefik file-provider config |
-| `cf config <cmd>` | Manage config files (init, show, path, validate, edit, symlink) |
+| `cf config` | Manage config files (init, show, validate, edit, symlink) |
 | `cf ssh` | Manage SSH keys (setup, status, keygen) |
+| `cf list` | List all stacks and their assigned hosts |
 
-All commands support `--all` to operate on all stacks.
+### Aliases
+
+Short aliases for frequently used commands:
+
+| Alias | Command | Alias | Command |
+|-------|---------|-------|---------|
+| `cf a` | `apply` | `cf s` | `stats` |
+| `cf l` | `logs` | `cf ls` | `list` |
+| `cf r` | `restart` | `cf rf` | `refresh` |
+| `cf u` | `update` | `cf ck` | `check` |
+| `cf p` | `pull` | `cf tf` | `traefik-file` |
+| `cf c` | `compose` | | |
 
 Each command replaces: look up host → SSH → find compose file → run `ssh host "cd /opt/compose/plex && docker compose up -d"`.
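Aliases expand to the full commands, so flags carry over unchanged — a small sketch:

```bash
cf a -n        # same as: cf apply --dry-run
cf u --all     # same as: cf update --all
cf l -f plex   # same as: cf logs -f plex
```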
@@ -352,10 +428,10 @@ cf down --orphaned # stop stacks removed from config
 # Pull latest images
 cf pull --all
 
-# Restart (down + up)
+# Restart running containers
 cf restart plex
 
-# Update (pull + build + down + up) - the end-to-end update command
+# Update (pull + build, only recreates containers if images changed)
 cf update --all
 
 # Update state from reality (discovers running stacks + captures digests)
@@ -402,39 +478,41 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Compose Farm - run docker compose commands across multiple hosts
 
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --version -v Show version and exit │
-│ --install-completion Install completion for the current shell. │
-│ --show-completion Show completion for the current shell, to │
-│ copy it or customize the installation. │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Lifecycle ──────────────────────────────────────────────────────────────────╮
-│ up Start stacks (docker compose up -d). Auto-migrates if host │
-│ changed. │
-│ down Stop stacks (docker compose down). │
-│ pull Pull latest images (docker compose pull). │
-│ restart Restart stacks (down + up). │
-│ update Update stacks (pull + build + down + up). │
-│ apply Make reality match config (start, migrate, stop as needed). │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Configuration ──────────────────────────────────────────────────────────────╮
-│ traefik-file Generate a Traefik file-provider fragment from compose │
-│ Traefik labels. │
-│ refresh Update local state from running stacks. │
-│ check Validate configuration, traefik labels, mounts, and networks. │
-│ init-network Create Docker network on hosts with consistent settings. │
-│ config Manage compose-farm configuration files. │
-│ ssh Manage SSH keys for passwordless authentication. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Monitoring ─────────────────────────────────────────────────────────────────╮
-│ logs Show stack logs. │
-│ ps Show status of stacks. │
-│ stats Show overview statistics for hosts and stacks. │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Server ─────────────────────────────────────────────────────────────────────╮
-│ web Start the web UI server. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --version -v Show version and exit │
+│ --install-completion Install completion for the current shell. │
+│ --show-completion Show completion for the current shell, to copy it or │
+│ customize the installation. │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Configuration ────────────────────────────────────────────────────────────────────────╮
+│ traefik-file Generate a Traefik file-provider fragment from compose Traefik labels. │
+│ refresh Update local state from running stacks. │
+│ check Validate configuration, traefik labels, mounts, and networks. │
+│ init-network Create Docker network on hosts with consistent settings. │
+│ config Manage compose-farm configuration files. │
+│ ssh Manage SSH keys for passwordless authentication. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Lifecycle ────────────────────────────────────────────────────────────────────────────╮
+│ up Start stacks (docker compose up -d). Auto-migrates if host changed. │
+│ down Stop stacks (docker compose down). │
+│ stop Stop services without removing containers (docker compose stop). │
+│ pull Pull latest images (docker compose pull). │
+│ restart Restart running containers (docker compose restart). │
+│ update Update stacks (pull + build + up). Shorthand for 'up --pull --build'. │
+│ apply Make reality match config (start, migrate, stop strays/orphans as │
+│ needed). │
+│ compose Run any docker compose command on a stack. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Monitoring ───────────────────────────────────────────────────────────────────────────╮
+│ logs Show stack logs. With --service, shows logs for just that service. │
+│ ps Show status of stacks. │
+│ stats Show overview statistics for hosts and stacks. │
+│ list List all stacks and their assigned hosts. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Server ───────────────────────────────────────────────────────────────────────────────╮
+│ web Start the web UI server. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -463,15 +541,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Start stacks (docker compose up -d). Auto-migrates if host changed.
 
-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --host -H TEXT Filter to stacks on this host │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --host -H TEXT Filter to stacks on this host │
+│ --service -s TEXT Target a specific service within the stack │
+│ --pull Pull images before starting (--pull always) │
+│ --build Build images before starting │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
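Per the help text above, the new flags make `cf update` pure shorthand — a sketch:

```bash
cf up plex --pull --build   # what `cf update plex` expands to
```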
@@ -498,17 +579,51 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Stop stacks (docker compose down).
 
-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --orphaned Stop orphaned stacks (in state but removed from │
-│ config) │
-│ --host -H TEXT Filter to stacks on this host │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --orphaned Stop orphaned stacks (in state but removed from config) │
+│ --host -H TEXT Filter to stacks on this host │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
 
 <!-- OUTPUT:END -->
 
 </details>
 
+<details>
+<summary>See the output of <code>cf stop --help</code></summary>
+
+<!-- CODE:BASH:START -->
+<!-- echo '```yaml' -->
+<!-- export NO_COLOR=1 -->
+<!-- export TERM=dumb -->
+<!-- export TERMINAL_WIDTH=90 -->
+<!-- cf stop --help -->
+<!-- echo '```' -->
+<!-- CODE:END -->
+<!-- OUTPUT:START -->
+<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
+```yaml
+
+Usage: cf stop [OPTIONS] [STACKS]...
+
+Stop services without removing containers (docker compose stop).
+
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+
+```
@@ -535,14 +650,15 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Pull latest images (docker compose pull).
 
-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -567,16 +683,17 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Usage: cf restart [OPTIONS] [STACKS]...
 
-Restart stacks (down + up).
+Restart running containers (docker compose restart).
 
-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -601,16 +718,17 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Usage: cf update [OPTIONS] [STACKS]...
 
-Update stacks (pull + build + down + up).
+Update stacks (pull + build + up). Shorthand for 'up --pull --build'.
 
-╭─ Arguments ──────────────────────────────────────────────────────────────────╮
-│ stacks [STACKS]... Stacks to operate on │
-╰──────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --all -a Run on all stacks │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ stacks [STACKS]... Stacks to operate on │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --all -a Run on all stacks │
+│ --service -s TEXT Target a specific service within the stack │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
@@ -635,27 +753,75 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 
 Usage: cf apply [OPTIONS]
 
-Make reality match config (start, migrate, stop as needed).
+Make reality match config (start, migrate, stop strays/orphans as needed).
 
 This is the "reconcile" command that ensures running stacks match your
 config file. It will:
 
 1. Stop orphaned stacks (in state but removed from config)
-2. Migrate stacks on wrong host (host in state ≠ host in config)
-3. Start missing stacks (in config but not in state)
+2. Stop stray stacks (running on unauthorized hosts)
+3. Migrate stacks on wrong host (host in state ≠ host in config)
+4. Start missing stacks (in config but not in state)
 
 Use --dry-run to preview changes before applying.
-Use --no-orphans to only migrate/start without stopping orphaned stacks.
+Use --no-orphans to skip stopping orphaned stacks.
+Use --no-strays to skip stopping stray stacks.
+Use --full to also run 'up' on all stacks (picks up compose/env changes).
 
-╭─ Options ────────────────────────────────────────────────────────────────────╮
-│ --dry-run -n Show what would change without executing │
-│ --no-orphans Only migrate, don't stop orphaned stacks │
-│ --full -f Also run up on all stacks to apply config │
-│ changes │
-│ --config -c PATH Path to config file │
-│ --help -h Show this message and exit. │
-╰──────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --dry-run -n Show what would change without executing │
+│ --no-orphans Only migrate, don't stop orphaned stacks │
+│ --no-strays Don't stop stray stacks (running on wrong host) │
+│ --full -f Also run up on all stacks to apply config changes │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
 
 ```
 
 <!-- OUTPUT:END -->
 
 </details>
 
+<details>
+<summary>See the output of <code>cf compose --help</code></summary>
+
+<!-- CODE:BASH:START -->
+<!-- echo '```yaml' -->
+<!-- export NO_COLOR=1 -->
+<!-- export TERM=dumb -->
+<!-- export TERMINAL_WIDTH=90 -->
+<!-- cf compose --help -->
+<!-- echo '```' -->
+<!-- CODE:END -->
+<!-- OUTPUT:START -->
+<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
+```yaml
+
+Usage: cf compose [OPTIONS] STACK COMMAND [ARGS]...
+
+Run any docker compose command on a stack.
+
+Passthrough to docker compose for commands not wrapped by cf.
+Options after COMMAND are passed to docker compose, not cf.
+
+Examples:
+cf compose mystack --help - show docker compose help
+cf compose mystack top - view running processes
+cf compose mystack images - list images
+cf compose mystack exec web bash - interactive shell
+cf compose mystack config - view parsed config
+
+╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
+│ * stack TEXT Stack to operate on (use '.' for current dir) [required] │
+│ * command TEXT Docker compose command [required] │
+│ args [ARGS]... Additional arguments │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
+│ --host -H TEXT Filter to stacks on this host │
+│ --config -c PATH Path to config file │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────────╯
+
+```
@@ -684,16 +850,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Generate a Traefik file-provider fragment from compose Traefik labels.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --output -o PATH Write Traefik file-provider YAML to this path │
│ (stdout if omitted) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --output -o PATH Write Traefik file-provider YAML to this path (stdout if │
│ omitted) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -729,16 +895,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Use 'cf apply' to make reality match your config (stop orphans, migrate).

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --config -c PATH Path to config file │
│ --log-path -l PATH Path to Dockerfarm TOML log │
│ --dry-run -n Show what would change without writing │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --config -c PATH Path to config file │
│ --log-path -l PATH Path to Dockerfarm TOML log │
│ --dry-run -n Show what would change without writing │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -771,14 +937,14 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Use --local to skip SSH-based checks for faster validation.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --local Skip SSH-based checks (faster) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --local Skip SSH-based checks (faster) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -810,16 +976,16 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 communication. Uses the same subnet/gateway on all hosts to ensure
 consistent networking.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ hosts [HOSTS]... Hosts to create network on (default: all) │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --network -n TEXT Network name [default: mynetwork] │
│ --subnet -s TEXT Network subnet [default: 172.20.0.0/16] │
│ --gateway -g TEXT Network gateway [default: 172.20.0.1] │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
│ hosts [HOSTS]... Hosts to create network on (default: all) │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --network -n TEXT Network name [default: mynetwork] │
│ --subnet -s TEXT Network subnet [default: 172.20.0.0/16] │
│ --gateway -g TEXT Network gateway [default: 172.20.0.1] │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -847,18 +1013,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Manage compose-farm configuration files.

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Commands ───────────────────────────────────────────────────────────────────╮
│ init Create a new config file with documented example. │
│ edit Open the config file in your default editor. │
│ show Display the config file location and contents. │
│ path Print the config file path (useful for scripting). │
│ validate Validate the config file syntax and schema. │
│ symlink Create a symlink from the default config location to a config │
│ file. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Commands ─────────────────────────────────────────────────────────────────────────────╮
│ init Create a new config file with documented example. │
│ edit Open the config file in your default editor. │
│ show Display the config file location and contents. │
│ path Print the config file path (useful for scripting). │
│ validate Validate the config file syntax and schema. │
│ symlink Create a symlink from the default config location to a config file. │
│ init-env Generate a .env file for Docker deployment. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -878,6 +1044,26 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
<!-- cf ssh --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

 Usage: cf ssh [OPTIONS] COMMAND [ARGS]...

 Manage SSH keys for passwordless authentication.

╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Commands ─────────────────────────────────────────────────────────────────────────────╮
│ keygen Generate SSH key (does not distribute to hosts). │
│ setup Generate SSH key and distribute to all configured hosts. │
│ status Show SSH key status and host connectivity. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

@@ -900,20 +1086,20 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Usage: cf logs [OPTIONS] [STACKS]...

 Show stack logs.
 Show stack logs. With --service, shows logs for just that service.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --host -H TEXT Filter to stacks on this host │
│ --follow -f Follow logs │
│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 │
│ otherwise) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --host -H TEXT Filter to stacks on this host │
│ --service -s TEXT Target a specific service within the stack │
│ --follow -f Follow logs │
│ --tail -n INTEGER Number of lines (default: 20 for --all, 100 otherwise) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -944,16 +1130,18 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
 Without arguments: shows all stacks (same as --all).
 With stack names: shows only those stacks.
 With --host: shows stacks on that host.
 With --service: filters to a specific service within the stack.

╭─ Arguments ──────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --host -H TEXT Filter to stacks on this host │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Arguments ────────────────────────────────────────────────────────────────────────────╮
│ stacks [STACKS]... Stacks to operate on │
╰────────────────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --all -a Run on all stacks │
│ --host -H TEXT Filter to stacks on this host │
│ --service -s TEXT Target a specific service within the stack │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -981,14 +1169,49 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a

 Show overview statistics for hosts and stacks.

 Without --live: Shows config/state info (hosts, stacks, pending migrations).
 Without flags: Shows config/state info (hosts, stacks, pending migrations).
 With --live: Also queries Docker on each host for container counts.
 With --containers: Shows per-container resource stats (requires Glances).

╭─ Options ────────────────────────────────────────────────────────────────────╮
│ --live -l Query Docker for live container stats │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰──────────────────────────────────────────────────────────────────────────────╯
╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --live -l Query Docker for live container stats │
│ --containers -C Show per-container resource stats (requires Glances) │
│ --host -H TEXT Filter to stacks on this host │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

<details>
<summary>See the output of <code>cf list --help</code></summary>

<!-- CODE:BASH:START -->
<!-- echo '```yaml' -->
<!-- export NO_COLOR=1 -->
<!-- export TERM=dumb -->
<!-- export TERMINAL_WIDTH=90 -->
<!-- cf list --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

 Usage: cf list [OPTIONS]

 List all stacks and their assigned hosts.

╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --host -H TEXT Filter to stacks on this host │
│ --simple -s Plain output (one stack per line, for scripting) │
│ --config -c PATH Path to config file │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

@@ -1009,6 +1232,24 @@ Full `--help` output for each command. See the [Usage](#usage) table above for a
<!-- cf web --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```yaml

 Usage: cf web [OPTIONS]

 Start the web UI server.

╭─ Options ──────────────────────────────────────────────────────────────────────────────╮
│ --host -H TEXT Host to bind to [default: 0.0.0.0] │
│ --port -p INTEGER Port to listen on [default: 8000] │
│ --reload -r Enable auto-reload for development │
│ --help -h Show this message and exit. │
╰────────────────────────────────────────────────────────────────────────────────────────╯

```

<!-- OUTPUT:END -->

</details>

@@ -1096,12 +1337,12 @@ published ports.

**Auto-regeneration**

To automatically regenerate the Traefik config after `up`, `down`, `restart`, or `update`,
To automatically regenerate the Traefik config after `up`, `down`, or `update`,
add `traefik_file` to your config:

```yaml
compose_dir: /opt/compose
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml # auto-regenerate on up/down/restart/update
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml # auto-regenerate on up/down/update
traefik_stack: traefik # skip stacks on same host (docker provider handles them)

hosts:
@@ -1140,6 +1381,54 @@ Update your Traefik config to use directory watching instead of a single file:
  - --providers.file.watch=true
```

## Host Resource Monitoring (Glances)

The web UI can display real-time CPU, memory, and load stats for all configured hosts. This uses [Glances](https://nicolargo.github.io/glances/), a cross-platform system monitoring tool with a REST API.

**Setup**

1. Deploy a Glances stack that runs on all hosts:

```yaml
# glances/compose.yaml
name: glances
services:
  glances:
    image: nicolargo/glances:latest
    container_name: glances
    restart: unless-stopped
    pid: host
    ports:
      - "61208:61208"
    environment:
      - GLANCES_OPT=-w # Enable web server mode
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
```

2. Add it to your config as a multi-host stack:

```yaml
# compose-farm.yaml
stacks:
  glances: all # Runs on every host

glances_stack: glances # Enables resource stats in web UI
```

3. Deploy: `cf up glances`

4. **(Docker web UI only)** The web UI container infers the local host from `CF_WEB_STACK` and reaches Glances via the container name to avoid Docker network isolation issues.

The web UI dashboard will now show a "Host Resources" section with live stats from all hosts. Hosts where Glances is unreachable show an error indicator.

**Live Stats Page**

With Glances configured, a Live Stats page (`/live-stats`) shows all running containers across all hosts:

- **Columns**: Stack, Service, Host, Image, Status, Uptime, CPU, Memory, Net I/O
- **Features**: Sorting, filtering, live updates (no SSH required—uses Glances REST API)
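
If a host shows the error indicator, it can help to probe Glances directly first. A minimal sketch, assuming the default port from the stack above; note the REST prefix is `/api/4` on Glances 4.x, while older 3.x releases use `/api/3` (the hostname `nuc` is a placeholder):

```bash
# Quick reachability check against one host's Glances REST API
curl -s http://nuc:61208/api/4/mem | head -c 200; echo
```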

## Comparison with Alternatives

There are many ways to run containers on multiple hosts. Here is where Compose Farm sits:

@@ -3,7 +3,7 @@

compose_dir: /opt/compose

# Optional: Auto-regenerate Traefik file-provider config after up/down/restart/update
# Optional: Auto-regenerate Traefik file-provider config after up/down/update
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
traefik_stack: traefik # Skip stacks on same host (docker provider handles them)

@@ -26,5 +26,5 @@ stacks:
  traefik: server-1 # Traefik runs here
  plex: server-2 # Stacks on other hosts get file-provider entries
  jellyfin: server-2
  sonarr: server-1
  radarr: local
  grafana: server-1
  nextcloud: local

@@ -1,38 +1,56 @@
services:
  cf:
    image: ghcr.io/basnijholt/compose-farm:latest
    # Run as current user to preserve file ownership on mounted volumes
    # Set CF_UID=$(id -u) CF_GID=$(id -g) in your environment or .env file
    # Defaults to root (0:0) for backwards compatibility
    user: "${CF_UID:-0}:${CF_GID:-0}"
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      # Compose directory (contains compose files AND compose-farm.yaml config)
      - ${CF_COMPOSE_DIR:-/opt/stacks}:${CF_COMPOSE_DIR:-/opt/stacks}
      # SSH keys for passwordless auth (generated by `cf ssh setup`)
      # Choose ONE option below (use the same option for both cf and web services):
      # Option 1: Host path (default) - keys at ~/.ssh/compose-farm/id_ed25519
      - ${CF_SSH_DIR:-~/.ssh/compose-farm}:/root/.ssh
      - ${CF_SSH_DIR:-~/.ssh/compose-farm}:${CF_HOME:-/root}/.ssh/compose-farm
      # Option 2: Named volume - managed by Docker, shared between services
      # - cf-ssh:/root/.ssh
      # - cf-ssh:${CF_HOME:-/root}/.ssh
      # Option 3: SSH agent forwarding (uncomment if using ssh-agent)
      # - ${SSH_AUTH_SOCK}:/ssh-agent:ro
    environment:
      - SSH_AUTH_SOCK=/ssh-agent
      # Config file path (state stored alongside it)
      - CF_CONFIG=${CF_COMPOSE_DIR:-/opt/stacks}/compose-farm.yaml
      # HOME must match the user running the container for SSH to find keys
      - HOME=${CF_HOME:-/root}
      # USER is required for SSH when running as non-root (UID not in /etc/passwd)
      - USER=${CF_USER:-root}

  web:
    image: ghcr.io/basnijholt/compose-farm:latest
    restart: unless-stopped
    command: web --host 0.0.0.0 --port 9000
    # Run as current user to preserve file ownership on mounted volumes
    user: "${CF_UID:-0}:${CF_GID:-0}"
    volumes:
      - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      - ${CF_COMPOSE_DIR:-/opt/stacks}:${CF_COMPOSE_DIR:-/opt/stacks}
      # SSH keys - use the SAME option as cf service above
      # Option 1: Host path (default)
      - ${CF_SSH_DIR:-~/.ssh/compose-farm}:/root/.ssh
      - ${CF_SSH_DIR:-~/.ssh/compose-farm}:${CF_HOME:-/root}/.ssh/compose-farm
      # Option 2: Named volume
      # - cf-ssh:/root/.ssh
      # - cf-ssh:${CF_HOME:-/root}/.ssh
      # Option 3: SSH agent forwarding (uncomment if using ssh-agent)
      # - ${SSH_AUTH_SOCK}:/ssh-agent:ro
      # XDG config dir for backups and image digest logs (persists across restarts)
      - ${CF_XDG_CONFIG:-~/.config/compose-farm}:${CF_HOME:-/root}/.config/compose-farm
    environment:
      - SSH_AUTH_SOCK=/ssh-agent
      - CF_CONFIG=${CF_COMPOSE_DIR:-/opt/stacks}/compose-farm.yaml
      # Used to detect self-updates and run via SSH to survive container restart
      - CF_WEB_STACK=compose-farm
      # HOME must match the user running the container for SSH to find keys
      - HOME=${CF_HOME:-/root}
      # USER is required for SSH when running as non-root (UID not in /etc/passwd)
      - USER=${CF_USER:-root}
    labels:
      - traefik.enable=true
      - traefik.http.routers.compose-farm.rule=Host(`compose-farm.${DOMAIN}`)
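
A minimal way to run this file, assuming `cf` is already installed on the machine that will host the web UI: generate the `.env` the variables above expect, then start both services.

```bash
# Generate CF_UID/CF_GID/CF_HOME/CF_USER and friends, then bring the stack up
cf config init-env -o .env
docker compose up -d
```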

@@ -47,8 +47,7 @@ Compose Farm follows three core principles:
Pydantic models for YAML configuration:

- **Config** - Root configuration with compose_dir, hosts, stacks
- **HostConfig** - Host address and SSH user
- **ServiceConfig** - Service-to-host mappings
- **Host** - Host address, SSH user, and port

Key features:
- Validation with Pydantic
@@ -62,7 +61,7 @@ Tracks deployment state in `compose-farm-state.yaml` (stored alongside the confi
```yaml
deployed:
  plex: nuc
  sonarr: nuc
  grafana: nuc
```

Used for:
@@ -97,8 +96,8 @@ Typer-based CLI with subcommand modules:
cli/
├── app.py # Shared Typer app, version callback
├── common.py # Shared helpers, options, progress utilities
├── config.py # config subcommand (init, show, path, validate, edit, symlink)
├── lifecycle.py # up, down, pull, restart, update, apply
├── config.py # config subcommand (init, init-env, show, path, validate, edit, symlink)
├── lifecycle.py # up, down, stop, pull, restart, update, apply, compose
├── management.py # refresh, check, init-network, traefik-file
├── monitoring.py # logs, ps, stats
├── ssh.py # SSH key management (setup, status, keygen)
@@ -208,7 +207,7 @@ Location: `compose-farm-state.yaml` (stored alongside the config file)
```yaml
deployed:
  plex: nuc
  sonarr: nuc
  grafana: nuc
```

Image digests are stored separately in `dockerfarm-log.toml` (also in the config directory).
@@ -344,3 +343,19 @@ For repeated connections to the same host, SSH reuses connections.
```

Icons use [Lucide](https://lucide.dev/). Add new icons as macros in `web/templates/partials/icons.html`.

### Host Resource Monitoring (`src/compose_farm/glances.py`)

Integration with [Glances](https://nicolargo.github.io/glances/) for real-time host stats:

- Fetches CPU, memory, and load from Glances REST API on each host
- Used by web UI dashboard to display host resource usage
- Requires `glances_stack` config option pointing to a Glances stack running on all hosts

### Container Registry Client (`src/compose_farm/registry.py`)

OCI Distribution API client for checking image updates:

- Parses image references (registry, namespace, name, tag, digest)
- Fetches available tags from Docker Hub, GHCR, and other registries
- Compares semantic versions to find newer releases

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb1372a59a4ed1ac74d3864d7a84dd5311fce4cb6c6a00bf3a574bc2f98d5595
size 895927
oid sha256:01dabdd8f62773823ba2b8dc74f9931f1a1b88215117e6a080004096025491b0
size 901456

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f339a85f3d930db5a020c9f77e106edc5f44ea7dee6f68557106721493c24ef8
size 205907
oid sha256:134c903a6b3acfb933617b33755b0cdb9bac2a59e5e35b64236e248a141d396d
size 206883

3 docs/assets/compose.gif Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8b3cdb3486ec79b3ddb2f7571c13d54ac9aed182edfe708eff76a966a90cfc7
size 1132310

3 docs/assets/compose.webm Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3c4d4a62f062f717df4e6752efced3caea29004dc90fe97fd7633e7f0ded9db
size 341057

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:388aa49a1269145698f9763452aaf6b9c6232ea9229abe1dae304df558e29695
size 403442
oid sha256:6c1bb48cc2f364681515a4d8bd0c586d133f5a32789b7bb64524ad7d9ed0a8e9
size 543135

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9b8bf4dcb8ee67270d4a88124b4dd4abe0dab518e73812ee73f7c66d77f146e2
size 228025
oid sha256:5f82d96137f039f21964c15c1550aa1b1f0bb2d52c04d012d253dbfbd6fad096
size 268086

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16b9a28137dfae25488e2094de85766a039457f5dca20c2d84ac72e3967c10b9
size 164237
oid sha256:2a4045b00d90928f42c7764b3c24751576cfb68a34c6e84d12b4e282d2e67378
size 146467

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e0fbe697a1f8256ce3b9a6a64c7019d42769134df9b5b964e5abe98a29e918fd
size 68242
oid sha256:f1b94416ed3740853f863e19bf45f26241a203fb0d7d187160a537f79aa544fa
size 60353

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:629b8c80b98eb996b75439745676fd99a83f391ca25f778a71bd59173f814c2f
size 1194931
oid sha256:848d9c48fb7511da7996149277c038589fad1ee406ff2f30c28f777fc441d919
size 1183641

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33fd46f2d8538cc43be4cb553b3af9d8b412f282ee354b6373e2793fe41c799b
size 405057
oid sha256:e747ee71bb38b19946005d5a4def4d423dadeaaade452dec875c4cb2d24a5b77
size 407373

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ccd96e33faba5f297999917d89834b29d58bd2a8929eea8d62875e3d8830bd5c
size 3198466
oid sha256:d32c9a3eec06e57df085ad347e6bf61e323f8bd8322d0c540f0b9d4834196dfd
size 3589776

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:979a1a21303bbf284b3510981066ef05c41c1035b34392fecc7bee472116e6db
size 967564
oid sha256:6c54eda599389dac74c24c83527f95cd1399e653d7faf2972c2693d90e590597
size 1085344

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2067f4967a93b7ee3a8db7750c435f41b1fccd2919f3443da4b848c20cc54f23
size 124559
oid sha256:62f9b5ec71496197a3f1c3e3bca8967d603838804279ea7dbf00a70d3391ff6c
size 127123

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5471bd94e6d1b9d415547fa44de6021fdad2e1cc5b8b295680e217104aa749d6
size 98149
oid sha256:ac2b93d3630af87b44a135723c5d10e8287529bed17c28301b2802cd9593e9e8
size 98748

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dac5660cfe6574857ec055fac7822f25b7c5fcb10a836b19c86142515e2fbf75
size 1816075
oid sha256:7b50a7e9836c496c0989363d1440fa0a6ccdaa38ee16aae92b389b3cf3c3732f
size 2385110

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d4efec8ef5a99f2cb31d55cd71cdbf0bb8dd0cd6281571886b7c1f8b41c3f9da
size 1660764
oid sha256:ccbb3d5366c7734377e12f98cca0b361028f5722124f1bb7efa231f6aeffc116
size 2208044

3 docs/assets/web-live_stats.gif Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4135888689a10c5ae2904825d98f2a6d215c174a4bd823e25761f619590f04ff
size 3990104

3 docs/assets/web-live_stats.webm Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:87739cd6f6576a81100392d8d1e59d3e776fecc8f0721a31332df89e7fc8593d
size 5814274

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9348dd36e79192344476d61fbbffdb122a96ecc5829fbece1818590cfc521521
size 3373003
oid sha256:269993b52721ce70674d3aab2a4cd8c58aa621d4ba0739afedae661c90965b26
size 3678371

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bebbf8151434ba37bf5e46566a4e8b57812944281926f579d056bdc835ca26aa
size 2729799
oid sha256:0098b55bb6a52fa39f807a01fa352ce112bcb446e2a2acb963fb02d21b28c934
size 3088813

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3712afff6fcde00eb951264bb24d4301deb085d082b4e95ed4c1893a571938ee
size 1528294
oid sha256:4bf9d8c247d278799d1daea784fc662a22f12b1bd7883f808ef30f35025ebca6
size 4166443

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0b218d400836a50661c9cdcce2d2b1e285cc5fe592cb42f58aae41f3e7d60684
size 1327413
oid sha256:02d5124217a94849bf2971d6d13d28da18c557195a81b9cca121fb7c07f0501b
size 3523244

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a232ddc1b9ddd9bf6b5d99c05153e1094be56f1952f02636ca498eb7484e096
size 3808675
oid sha256:412a0e68f8e52801cafbb9a703ca9577e7c14cc7c0e439160b9185961997f23c
size 4435697

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a7c9f5f6d47074a6af135190fda6d0a1936cd7a0b04b3aa04ea7d99167a9e05
size 3333014
oid sha256:0e600a1d3216b44497a889f91eac94d62ef7207b4ed0471465dcb72408caa28e
size 3764693

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66f4547ed2e83b302d795875588d9a085af76071a480f1096f2bb64344b80c42
size 5428670
oid sha256:3c07a283f4f70c4ab205b0f0acb5d6f55e3ced4c12caa7a8d5914ffe3548233a
size 5768166

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75c8cdeefbbdcab2a240821d3410539f2a2cbe0a015897f4135404c80c3ac32c
size 6578366
oid sha256:562228841de976d70ee80999b930eadf3866a13ff2867d900279993744c44671
size 6667918

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:92ed41854fe852ae54aa5f05de8ceaf35c3ad8ef82b3034e67edf758d1acdf50
size 13593713
oid sha256:845746ac1cb101c3077d420c4f3fda3ca372492582dc123ac8a031a68ae9b6b1
size 12943150

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8507c61df25981dbe7e5bd2f9ed16c9a0befbca218947cad29f6679c77a695a7
size 12451891
oid sha256:189259558b5760c02583885168d7b0b47cf476cba81c7c028ec770f9d6033129
size 12415357

@@ -221,7 +221,7 @@ Keep config and data separate:

/opt/appdata/ # Local: per-host app data
├── plex/
└── sonarr/
└── grafana/
```

## Performance
@@ -235,7 +235,7 @@ Compose Farm runs operations in parallel. For large deployments:
cf up --all

# Avoid: sequential updates when possible
for svc in plex sonarr radarr; do
for svc in plex grafana nextcloud; do
  cf update $svc
done
```
@@ -249,7 +249,7 @@ SSH connections are reused within a command. For many operations:
cf update --all

# Multiple commands, multiple connections (slower)
cf update plex && cf update sonarr && cf update radarr
cf update plex && cf update grafana && cf update nextcloud
```

## Traefik Setup
@@ -297,7 +297,7 @@ http:
|------|----------|--------|
| Compose Farm config | `~/.config/compose-farm/` | Git or copy |
| Compose files | `/opt/compose/` | Git |
| State file | `~/.config/compose-farm/state.yaml` | Optional (can refresh) |
| State file | `~/.config/compose-farm/compose-farm-state.yaml` | Optional (can refresh) |
| App data | `/opt/appdata/` | Backup solution |

### Disaster Recovery

220 docs/commands.md
@@ -8,17 +8,22 @@ The Compose Farm CLI is available as both `compose-farm` and the shorter alias `

## Command Overview

Commands are either **Docker Compose wrappers** (`up`, `down`, `stop`, `restart`, `pull`, `logs`, `ps`, `compose`) with multi-host superpowers, or **Compose Farm originals** (`apply`, `update`, `refresh`, `check`) for orchestration Docker Compose can't do.

| Category | Command | Description |
|----------|---------|-------------|
| **Lifecycle** | `apply` | Make reality match config |
| | `up` | Start stacks |
| | `down` | Stop stacks |
| | `restart` | Restart stacks (down + up) |
| | `update` | Update stacks (pull + build + down + up) |
| | `stop` | Stop services without removing containers |
| | `restart` | Restart running containers |
| | `update` | Shorthand for `up --pull --build` |
| | `pull` | Pull latest images |
| | `compose` | Run any docker compose command |
| **Monitoring** | `ps` | Show stack status |
| | `logs` | Show stack logs |
| | `stats` | Show overview statistics |
| | `list` | List stacks and hosts |
| **Configuration** | `check` | Validate config and mounts |
| | `refresh` | Sync state from reality |
| | `init-network` | Create Docker network |
@@ -34,6 +39,19 @@ cf --version, -v # Show version
cf --help, -h # Show help
```

## Command Aliases

Short aliases for frequently used commands:

| Alias | Command | Alias | Command |
|-------|---------|-------|---------|
| `cf a` | `apply` | `cf s` | `stats` |
| `cf l` | `logs` | `cf ls` | `list` |
| `cf r` | `restart` | `cf rf` | `refresh` |
| `cf u` | `update` | `cf ck` | `check` |
| `cf p` | `pull` | `cf tf` | `traefik-file` |
| `cf c` | `compose` | | |
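
Aliases are plain substitutions for the full command name, so any flags pass through unchanged; for example:

```bash
# Equivalent invocations
cf u --all
cf update --all
```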

---

## Lifecycle Commands
@@ -56,14 +74,16 @@ cf apply [OPTIONS]
|--------|-------------|
| `--dry-run, -n` | Preview changes without executing |
| `--no-orphans` | Skip stopping orphaned stacks |
| `--full, -f` | Also refresh running stacks |
| `--no-strays` | Skip stopping stray stacks (running on wrong host) |
| `--full, -f` | Also run up on all stacks (applies compose/env changes, triggers migrations) |
| `--config, -c PATH` | Path to config file |

**What it does:**

1. Stops orphaned stacks (in state but removed from config)
2. Migrates stacks on wrong host
3. Starts missing stacks (in config but not running)
2. Stops stray stacks (running on unauthorized hosts)
3. Migrates stacks on wrong host
4. Starts missing stacks (in config but not running)

**Examples:**

@@ -77,7 +97,10 @@ cf apply
# Only start/migrate, don't stop orphans
cf apply --no-orphans

# Also refresh all running stacks
# Don't stop stray stacks
cf apply --no-strays

# Also run up on all stacks (applies compose/env changes, triggers migrations)
cf apply --full
```
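
A typical reconcile loop combines the flags above: preview the plan first, then let apply do the work.

```bash
# Preview, then make reality match the config
cf apply --dry-run
cf apply
```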

@@ -97,19 +120,25 @@ cf up [OPTIONS] [STACKS]...
|--------|-------------|
| `--all, -a` | Start all stacks |
| `--host, -H TEXT` | Filter to stacks on this host |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--pull` | Pull images before starting (`--pull always`) |
| `--build` | Build images before starting |
| `--config, -c PATH` | Path to config file |

**Examples:**

```bash
# Start specific stacks
cf up plex sonarr
cf up plex grafana

# Start all stacks
cf up --all

# Start all stacks on a specific host
cf up --all --host nuc

# Start a specific service within a stack
cf up immich --service database
```

**Auto-migration:**
@@ -158,9 +187,40 @@ cf down --all --host nuc

---

### cf stop

Stop services without removing containers.

```bash
cf stop [OPTIONS] [STACKS]...
```

**Options:**

| Option | Description |
|--------|-------------|
| `--all, -a` | Stop all stacks |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--config, -c PATH` | Path to config file |

**Examples:**

```bash
# Stop specific stacks
cf stop plex

# Stop all stacks
cf stop --all

# Stop a specific service within a stack
cf stop immich --service database
```

---

### cf restart

Restart stacks (down + up).
Restart running containers (`docker compose restart`). With `--service`, restarts just that service.

```bash
cf restart [OPTIONS] [STACKS]...
@@ -171,6 +231,7 @@ cf restart [OPTIONS] [STACKS]...
| Option | Description |
|--------|-------------|
| `--all, -a` | Restart all stacks |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--config, -c PATH` | Path to config file |

**Examples:**
@@ -178,13 +239,16 @@ cf restart [OPTIONS] [STACKS]...
```bash
cf restart plex
cf restart --all

# Restart a specific service
cf restart immich --service database
```

---

### cf update

Update stacks (pull + build + down + up).
Update stacks (pull + build + up). Shorthand for `up --pull --build`. With `--service`, updates just that service.

<video autoplay loop muted playsinline>
<source src="/assets/update.webm" type="video/webm">
@@ -199,6 +263,7 @@ cf update [OPTIONS] [STACKS]...
| Option | Description |
|--------|-------------|
| `--all, -a` | Update all stacks |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--config, -c PATH` | Path to config file |

**Examples:**
@@ -209,6 +274,9 @@ cf update plex

# Update all stacks
cf update --all

# Update a specific service
cf update immich --service database
```

---
@@ -226,6 +294,7 @@ cf pull [OPTIONS] [STACKS]...
| Option | Description |
|--------|-------------|
| `--all, -a` | Pull for all stacks |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--config, -c PATH` | Path to config file |

**Examples:**
@@ -233,6 +302,60 @@ cf pull [OPTIONS] [STACKS]...
```bash
cf pull plex
cf pull --all

# Pull a specific service
cf pull immich --service database
```

---

### cf compose

Run any docker compose command on a stack. This is a passthrough to docker compose for commands not wrapped by cf.

<video autoplay loop muted playsinline>
<source src="/assets/compose.webm" type="video/webm">
</video>

```bash
cf compose [OPTIONS] STACK COMMAND [ARGS]...
```

**Arguments:**

| Argument | Description |
|----------|-------------|
| `STACK` | Stack to operate on (use `.` for current dir) |
| `COMMAND` | Docker compose command to run |
| `ARGS` | Additional arguments passed to docker compose |

**Options:**

| Option | Description |
|--------|-------------|
| `--host, -H TEXT` | Filter to stacks on this host (required for multi-host stacks) |
| `--config, -c PATH` | Path to config file |

**Examples:**

```bash
# Show docker compose help
cf compose mystack --help

# View running processes
cf compose mystack top

# List images
cf compose mystack images

# Interactive shell
cf compose mystack exec web bash

# View parsed config
cf compose mystack config

# Use current directory as stack
cf compose . ps
```
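
For a stack mapped to multiple hosts, add `--host` before the compose command (so cf consumes it rather than docker compose) to pick where the passthrough runs; a sketch, assuming a stack like `dozzle` mapped to `all`:

```bash
# Run the compose command on host "nuc" only
cf compose dozzle --host nuc ps
```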

---
@@ -253,6 +376,7 @@ cf ps [OPTIONS] [STACKS]...
|--------|-------------|
| `--all, -a` | Show all stacks (default) |
| `--host, -H TEXT` | Filter to stacks on this host |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--config, -c PATH` | Path to config file |

**Examples:**
@@ -262,10 +386,13 @@ cf ps [OPTIONS] [STACKS]...
cf ps

# Show specific stacks
cf ps plex sonarr
cf ps plex grafana

# Filter by host
cf ps --host nuc

# Show status of a specific service
cf ps immich --service database
```

---
@@ -288,6 +415,7 @@ cf logs [OPTIONS] [STACKS]...
|--------|-------------|
| `--all, -a` | Show logs for all stacks |
| `--host, -H TEXT` | Filter to stacks on this host |
| `--service, -s TEXT` | Target a specific service within the stack |
| `--follow, -f` | Follow logs (live stream) |
| `--tail, -n INTEGER` | Number of lines (default: 20 for --all, 100 otherwise) |
| `--config, -c PATH` | Path to config file |
@@ -302,10 +430,13 @@ cf logs plex
cf logs -f plex

# Show last 50 lines of multiple stacks
cf logs -n 50 plex sonarr
cf logs -n 50 plex grafana

# Show last 20 lines of all stacks
cf logs --all

# Show logs for a specific service
cf logs immich --service database
```

---
@@ -337,6 +468,40 @@ cf stats --live

---

### cf list

List all stacks and their assigned hosts.

```bash
cf list [OPTIONS]
```

**Options:**

| Option | Description |
|--------|-------------|
| `--host, -H TEXT` | Filter to stacks on this host |
| `--simple, -s` | Plain output for scripting (one stack per line) |
| `--config, -c PATH` | Path to config file |

**Examples:**

```bash
# List all stacks
cf list

# Filter by host
cf list --host nas

# Plain output for scripting
cf list --simple

# Combine: list stack names on a specific host
cf list --host nuc --simple
```
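
Because `--simple` prints one bare name per line, it composes well with shell loops; a small sketch:

```bash
# Restart every stack assigned to host "nuc", one at a time
for stack in $(cf list --host nuc --simple); do
  cf restart "$stack"
done
```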

---

## Configuration Commands

### cf check
@@ -374,25 +539,31 @@ cf check jellyfin
Update local state from running stacks.

```bash
cf refresh [OPTIONS]
cf refresh [OPTIONS] [STACKS]...
```

**Options:**

| Option | Description |
|--------|-------------|
| `--all, -a` | Refresh all stacks |
| `--dry-run, -n` | Show what would change |
| `--log-path, -l PATH` | Path to Dockerfarm TOML log |
| `--config, -c PATH` | Path to config file |

Without arguments, refreshes all stacks (same as `--all`). With stack names, refreshes only those stacks.

**Examples:**

```bash
# Sync state with reality
# Sync state with reality (all stacks)
cf refresh

# Preview changes
cf refresh --dry-run

# Refresh specific stacks only
cf refresh plex sonarr
```

---
@@ -473,6 +644,7 @@ cf config COMMAND
| Command | Description |
|---------|-------------|
| `init` | Create new config with examples |
| `init-env` | Generate .env file for Docker deployment |
| `show` | Display config with highlighting |
| `path` | Print config file path |
| `validate` | Validate syntax and schema |
@@ -484,6 +656,7 @@ cf config COMMAND
| Subcommand | Options |
|------------|---------|
| `init` | `--path/-p PATH`, `--force/-f` |
| `init-env` | `--path/-p PATH`, `--output/-o PATH`, `--force/-f` |
| `show` | `--path/-p PATH`, `--raw/-r` |
| `edit` | `--path/-p PATH` |
| `path` | `--path/-p PATH` |
@@ -519,6 +692,12 @@ cf config symlink

# Create symlink to specific file
cf config symlink /opt/compose-farm/config.yaml

# Generate .env file in current directory
cf config init-env

# Generate .env at specific path
cf config init-env -o /opt/stacks/.env
```

---
@@ -539,7 +718,20 @@ cf ssh COMMAND
| `status` | Show SSH key status and host connectivity |
| `keygen` | Generate key without distributing |

**Options for `cf ssh setup` and `cf ssh keygen`:**
**Options for `cf ssh setup`:**

| Option | Description |
|--------|-------------|
| `--config, -c PATH` | Path to config file |
| `--force, -f` | Regenerate key even if it exists |

**Options for `cf ssh status`:**

| Option | Description |
|--------|-------------|
| `--config, -c PATH` | Path to config file |

**Options for `cf ssh keygen`:**

| Option | Description |
|--------|-------------|

@@ -42,8 +42,8 @@ hosts:
# Map stacks to the local host
stacks:
  plex: local
  sonarr: local
  radarr: local
  grafana: local
  nextcloud: local
```

### Multi-host (full example)
@@ -69,8 +69,8 @@ hosts:
stacks:
  # Single-host stacks
  plex: nuc
  sonarr: nuc
  radarr: hp
  grafana: nuc
  nextcloud: hp

  # Multi-host stacks
  dozzle: all # Run on ALL hosts
@@ -94,7 +94,7 @@ compose_dir: /opt/compose
├── plex/
│   ├── docker-compose.yml # or compose.yaml
│   └── .env               # optional environment file
├── sonarr/
├── grafana/
│   └── docker-compose.yml
└── ...
```
@@ -107,7 +107,7 @@ Supported compose file names (checked in order):

### traefik_file

Path to auto-generated Traefik file-provider config. When set, Compose Farm regenerates this file after `up`, `down`, `restart`, and `update` commands.
Path to auto-generated Traefik file-provider config. When set, Compose Farm regenerates this file after `up`, `down`, and `update` commands.

```yaml
traefik_file: /opt/traefik/dynamic.d/compose-farm.yml
@@ -121,6 +121,16 @@ Stack name running Traefik. Stacks on the same host are skipped in file-provider
traefik_stack: traefik
```

### glances_stack

Stack name running [Glances](https://nicolargo.github.io/glances/) for host resource monitoring. When set, the CLI (`cf stats --containers`) and web UI display CPU, memory, and container stats for all hosts.

```yaml
glances_stack: glances
```

The Glances stack should run on all hosts and expose port 61208. See the README for full setup instructions.
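
With `glances_stack` set, the same data is available from the CLI:

```bash
# Per-container resource stats via the Glances REST API (no SSH needed)
cf stats --containers
```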

## Hosts Configuration

### Basic Host
@@ -185,8 +195,8 @@ hosts:
```yaml
stacks:
  plex: nuc
  sonarr: nuc
  radarr: hp
  grafana: nuc
  nextcloud: hp
```

### Multi-Host Stack
@@ -229,7 +239,7 @@ For example, if your config is at `~/.config/compose-farm/compose-farm.yaml`, th
```yaml
deployed:
  plex: nuc
  sonarr: nuc
  grafana: nuc
```

This file records which stacks are deployed and on which host.
@@ -257,6 +267,25 @@ When generating Traefik config, Compose Farm resolves `${VAR}` and `${VAR:-defau
1. The stack's `.env` file
2. Current environment
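
As a sketch of that resolution order (names are placeholders): if a stack's `.env` defines `DOMAIN=example.com`, a label rule written as `Host(`app.${DOMAIN}`)` lands in the generated fragment as `Host(`app.example.com`)`.

```bash
# Generate the fragment for one stack and inspect the resolved rule on stdout
cf traefik-file app
```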

### Compose Farm Environment Variables

These environment variables configure Compose Farm itself:

| Variable | Description |
|----------|-------------|
| `CF_CONFIG` | Path to config file |
| `CF_WEB_STACK` | Web UI stack name (Docker only, enables self-update detection and local host inference) |

**Docker deployment variables** (used in docker-compose.yml):

| Variable | Description | Generated by |
|----------|-------------|--------------|
| `CF_COMPOSE_DIR` | Compose files directory | `cf config init-env` |
| `CF_UID` / `CF_GID` | User/group ID for containers | `cf config init-env` |
| `CF_HOME` / `CF_USER` | Home directory and username | `cf config init-env` |
| `CF_SSH_DIR` | SSH keys volume mount | Manual |
| `CF_XDG_CONFIG` | Config backup volume mount | Manual |
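
`CF_CONFIG` behaves like passing `--config` on every invocation; for example:

```bash
# Point cf at a non-default config for this shell session
export CF_CONFIG=/opt/stacks/compose-farm.yaml
cf ps
```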

## Config Commands

### Initialize Config
@@ -373,8 +402,8 @@ hosts:
stacks:
  # Media
  plex: nuc
  sonarr: nuc
  radarr: nuc
  jellyfin: nuc
  immich: nuc

  # Infrastructure
  traefik: nuc
@@ -388,7 +417,6 @@ stacks:

```yaml
compose_dir: /opt/compose
network: production
traefik_file: /opt/traefik/dynamic.d/cf.yml
traefik_stack: traefik

@@ -10,10 +10,10 @@ VHS-based terminal demo recordings for Compose Farm CLI.

```bash
# Record all demos
./docs/demos/cli/record.sh
python docs/demos/cli/record.py

# Record single demo
cd /opt/stacks && vhs docs/demos/cli/quickstart.tape
# Record specific demos
python docs/demos/cli/record.py quickstart migration
```

## Demos
@@ -23,6 +23,7 @@ cd /opt/stacks && vhs docs/demos/cli/quickstart.tape
| `install.tape` | Installing with `uv tool install` |
| `quickstart.tape` | `cf ps`, `cf up`, `cf logs` |
| `logs.tape` | Viewing logs |
| `compose.tape` | `cf compose` passthrough (--help, images, exec) |
| `update.tape` | `cf update` |
| `migration.tape` | Service migration |
| `apply.tape` | `cf apply` |

50 docs/demos/cli/compose.tape Normal file
@@ -0,0 +1,50 @@
# Compose Demo
# Shows that cf compose passes through ANY docker compose command

Output docs/assets/compose.gif
Output docs/assets/compose.webm

Set Shell "bash"
Set FontSize 14
Set Width 900
Set Height 550
Set Theme "Catppuccin Mocha"
Set TypingSpeed 50ms

Type "# cf compose runs ANY docker compose command on the right host"
Enter
Sleep 500ms

Type "# See ALL available compose commands"
Enter
Sleep 500ms

Type "cf compose immich --help"
Enter
Sleep 4s

Type "# Show images"
Enter
Sleep 500ms

Type "cf compose immich images"
Enter
Wait+Screen /immich/
Sleep 2s

Type "# Open shell in a container"
Enter
Sleep 500ms

Type "cf compose immich exec immich-machine-learning sh"
Enter
Wait+Screen /#/
Sleep 1s

Type "python3 --version"
Enter
Sleep 1s

Type "exit"
Enter
Sleep 500ms
@@ -21,7 +21,7 @@ Type "# First, define your hosts..."
Enter
Sleep 500ms

Type "bat -r 1:11 compose-farm.yaml"
Type "bat -r 1:16 compose-farm.yaml"
Enter
Sleep 3s
Type "q"
@@ -31,7 +31,7 @@ Type "# Then map each stack to a host"
Enter
Sleep 500ms

Type "bat -r 13:30 compose-farm.yaml"
Type "bat -r 17:35 compose-farm.yaml"
Enter
Sleep 3s
Type "q"

134
docs/demos/cli/record.py
Executable file
134
docs/demos/cli/record.py
Executable file
@@ -0,0 +1,134 @@
|
||||
#!/usr/bin/env python3
"""Record CLI demos using VHS."""

import shutil
import subprocess
import sys
from pathlib import Path

from rich.console import Console

from compose_farm.config import load_config
from compose_farm.state import load_state

console = Console()
SCRIPT_DIR = Path(__file__).parent
STACKS_DIR = Path("/opt/stacks")
CONFIG_FILE = STACKS_DIR / "compose-farm.yaml"
OUTPUT_DIR = SCRIPT_DIR.parent.parent / "assets"

DEMOS = ["install", "quickstart", "logs", "compose", "update", "migration", "apply"]


def _run(cmd: list[str], **kw) -> bool:
    return subprocess.run(cmd, check=False, **kw).returncode == 0


def _set_config(host: str) -> None:
    """Set audiobookshelf host in config file."""
    _run(["sed", "-i", f"s/audiobookshelf: .*/audiobookshelf: {host}/", str(CONFIG_FILE)])


def _get_hosts() -> tuple[str | None, str | None]:
    """Return (config_host, state_host) for audiobookshelf."""
    config = load_config()
    state = load_state(config)
    return config.stacks.get("audiobookshelf"), state.get("audiobookshelf")


def _setup_state(demo: str) -> bool:
    """Set up required state for demo. Returns False on failure."""
    if demo not in ("migration", "apply"):
        return True

    config_host, state_host = _get_hosts()

    if demo == "migration":
        # Migration needs audiobookshelf on nas in BOTH config and state
        if config_host != "nas":
            console.print("[yellow]Setting up: config → nas[/yellow]")
            _set_config("nas")
        if state_host != "nas":
            console.print("[yellow]Setting up: state → nas[/yellow]")
            if not _run(["cf", "apply"], cwd=STACKS_DIR):
                return False

    elif demo == "apply":
        # Apply needs config=nas, state=anton (so there's something to apply)
        if config_host != "nas":
            console.print("[yellow]Setting up: config → nas[/yellow]")
            _set_config("nas")
        if state_host == "nas":
            console.print("[yellow]Setting up: state → anton[/yellow]")
            _set_config("anton")
            if not _run(["cf", "apply"], cwd=STACKS_DIR):
                return False
            _set_config("nas")

    return True


def _record(name: str, index: int, total: int) -> bool:
    """Record a single demo."""
    console.print(f"[cyan][{index}/{total}][/cyan] [green]Recording:[/green] {name}")
    if _run(["vhs", str(SCRIPT_DIR / f"{name}.tape")], cwd=STACKS_DIR):
        console.print("[green] ✓ Done[/green]")
        return True
    console.print("[red] ✗ Failed[/red]")
    return False


def _reset_after(demo: str, next_demo: str | None) -> None:
    """Reset state after demos that modify audiobookshelf."""
    if demo not in ("quickstart", "migration"):
        return
    _set_config("nas")
    if next_demo != "apply":  # Let apply demo show the migration
        _run(["cf", "apply"], cwd=STACKS_DIR)


def _restore_config(original: str) -> None:
    """Restore original config and sync state."""
    console.print("[yellow]Restoring original config...[/yellow]")
    CONFIG_FILE.write_text(original)
    _run(["cf", "apply"], cwd=STACKS_DIR)


def _main() -> int:
    if not shutil.which("vhs"):
        console.print("[red]VHS not found. Install: brew install vhs[/red]")
        return 1

    if not _run(["git", "-C", str(STACKS_DIR), "diff", "--quiet", "compose-farm.yaml"]):
        console.print("[red]compose-farm.yaml has uncommitted changes[/red]")
        return 1

    # Validate arguments before falling back to all demos; folding the
    # fallback into the filter (`or DEMOS`) would make the unknown-demo
    # check below unreachable.
    requested = sys.argv[1:]
    demos = [d for d in requested if d in DEMOS]
    if requested and not demos:
        console.print(f"[red]Unknown demo. Available: {', '.join(DEMOS)}[/red]")
        return 1
    if not demos:
        demos = DEMOS

    # Save original config to restore after recording
    original_config = CONFIG_FILE.read_text()

    try:
        for i, demo in enumerate(demos, 1):
            if not _setup_state(demo):
                return 1
            if not _record(demo, i, len(demos)):
                return 1
            _reset_after(demo, demos[i] if i < len(demos) else None)
    finally:
        _restore_config(original_config)

    # Move outputs
    OUTPUT_DIR.mkdir(exist_ok=True)
    for f in (STACKS_DIR / "docs/assets").glob("*.[gw]*"):
        shutil.move(str(f), str(OUTPUT_DIR / f.name))

    console.print(f"\n[green]Done![/green] Saved to {OUTPUT_DIR}")
    return 0


if __name__ == "__main__":
    sys.exit(_main())
@@ -1,89 +0,0 @@
#!/usr/bin/env bash
# Record all VHS demos
# Run this on a Docker host with compose-farm configured

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEMOS_DIR="$(dirname "$SCRIPT_DIR")"
DOCS_DIR="$(dirname "$DEMOS_DIR")"
REPO_DIR="$(dirname "$DOCS_DIR")"
OUTPUT_DIR="$DOCS_DIR/assets"

# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Check for VHS
if ! command -v vhs &> /dev/null; then
    echo "VHS not found. Install with:"
    echo "  brew install vhs"
    echo "  # or"
    echo "  go install github.com/charmbracelet/vhs@latest"
    exit 1
fi

# Ensure output directory exists
mkdir -p "$OUTPUT_DIR"

# Temp output dir (VHS runs from /opt/stacks, so relative paths go here)
TEMP_OUTPUT="/opt/stacks/docs/assets"
mkdir -p "$TEMP_OUTPUT"

# Change to /opt/stacks so cf commands use installed version (not editable install)
cd /opt/stacks

# Ensure compose-farm.yaml has no uncommitted changes (safety check)
if ! git diff --quiet compose-farm.yaml; then
    echo -e "${RED}Error: compose-farm.yaml has uncommitted changes${NC}"
    echo "Commit or stash your changes before recording demos"
    exit 1
fi

echo -e "${BLUE}Recording VHS demos...${NC}"
echo "Output directory: $OUTPUT_DIR"
echo ""

# Function to record a tape
record_tape() {
    local tape=$1
    local name=$(basename "$tape" .tape)
    echo -e "${GREEN}Recording:${NC} $name"
    if vhs "$tape"; then
        echo -e "${GREEN}  ✓ Done${NC}"
    else
        echo -e "${RED}  ✗ Failed${NC}"
        return 1
    fi
}

# Record demos in logical order
echo -e "${YELLOW}=== Phase 1: Basic demos ===${NC}"
record_tape "$SCRIPT_DIR/install.tape"
record_tape "$SCRIPT_DIR/quickstart.tape"
record_tape "$SCRIPT_DIR/logs.tape"

echo -e "${YELLOW}=== Phase 2: Update demo ===${NC}"
record_tape "$SCRIPT_DIR/update.tape"

echo -e "${YELLOW}=== Phase 3: Migration demo ===${NC}"
record_tape "$SCRIPT_DIR/migration.tape"
git -C /opt/stacks checkout compose-farm.yaml  # Reset after migration

echo -e "${YELLOW}=== Phase 4: Apply demo ===${NC}"
record_tape "$SCRIPT_DIR/apply.tape"

# Move GIFs and WebMs from temp location to repo
echo ""
echo -e "${BLUE}Moving recordings to repo...${NC}"
mv "$TEMP_OUTPUT"/*.gif "$OUTPUT_DIR/" 2>/dev/null || true
mv "$TEMP_OUTPUT"/*.webm "$OUTPUT_DIR/" 2>/dev/null || true
rmdir "$TEMP_OUTPUT" 2>/dev/null || true
rmdir "$(dirname "$TEMP_OUTPUT")" 2>/dev/null || true

echo ""
echo -e "${GREEN}Done!${NC} Recordings saved to $OUTPUT_DIR/"
ls -la "$OUTPUT_DIR"/*.gif "$OUTPUT_DIR"/*.webm 2>/dev/null || echo "No recordings found (check for errors above)"
@@ -1,5 +1,5 @@
# Update Demo
# Shows updating stacks (pull + build + down + up)
# Shows updating stacks (only recreates containers if images changed)

Output docs/assets/update.gif
Output docs/assets/update.webm
@@ -21,24 +21,37 @@ import uvicorn

from compose_farm.config import Config as CFConfig
from compose_farm.config import load_config
from compose_farm.executor import (
    get_container_compose_labels as _original_get_compose_labels,
)
from compose_farm.glances import ContainerStats
from compose_farm.glances import fetch_container_stats as _original_fetch_container_stats
from compose_farm.state import load_state as _original_load_state
from compose_farm.web.app import create_app
from compose_farm.web.cdn import CDN_ASSETS, ensure_vendor_cache

# NOTE: Do NOT import create_app here - it must be imported AFTER patches are applied
# to ensure the patched get_config is used by all route modules

if TYPE_CHECKING:
    from collections.abc import Generator

    from playwright.sync_api import BrowserContext, Page, Route

# Stacks to exclude from demo recordings (exact match)
DEMO_EXCLUDE_STACKS = {"arr"}
# Substrings to exclude from demo recordings (case-insensitive)
DEMO_EXCLUDE_PATTERNS = {"arr", "vpn", "tash"}


def _should_exclude(name: str) -> bool:
    """Check if a stack/container name should be excluded from demo."""
    name_lower = name.lower()
    return any(pattern in name_lower for pattern in DEMO_EXCLUDE_PATTERNS)


def _get_filtered_config() -> CFConfig:
    """Load config but filter out excluded stacks."""
    config = load_config()
    filtered_stacks = {
        name: host for name, host in config.stacks.items() if name not in DEMO_EXCLUDE_STACKS
        name: host for name, host in config.stacks.items() if not _should_exclude(name)
    }
    return CFConfig(
        compose_dir=config.compose_dir,
@@ -46,6 +59,7 @@ def _get_filtered_config() -> CFConfig:
        stacks=filtered_stacks,
        traefik_file=config.traefik_file,
        traefik_stack=config.traefik_stack,
        glances_stack=config.glances_stack,
        config_path=config.config_path,
    )

@@ -53,7 +67,37 @@ def _get_filtered_config() -> CFConfig:
def _get_filtered_state(config: CFConfig) -> dict[str, str | list[str]]:
    """Load state but filter out excluded stacks."""
    state = _original_load_state(config)
    return {name: host for name, host in state.items() if name not in DEMO_EXCLUDE_STACKS}
    return {name: host for name, host in state.items() if not _should_exclude(name)}


async def _filtered_fetch_container_stats(
    host_name: str,
    host_address: str,
    port: int = 61208,
    request_timeout: float = 10.0,
) -> tuple[list[ContainerStats] | None, str | None]:
    """Fetch container stats but filter out excluded containers."""
    containers, error = await _original_fetch_container_stats(
        host_name, host_address, port, request_timeout
    )
    if containers:
        # Filter by container name (stack is empty at this point)
        containers = [c for c in containers if not _should_exclude(c.name)]
    return containers, error


async def _filtered_get_compose_labels(
    config: CFConfig,
    host_name: str,
) -> dict[str, tuple[str, str]]:
    """Get compose labels but filter out excluded stacks."""
    labels = await _original_get_compose_labels(config, host_name)
    # Filter out containers whose stack (project) name should be excluded
    return {
        name: (stack, service)
        for name, (stack, service) in labels.items()
        if not _should_exclude(stack)
    }


@pytest.fixture(scope="session")
@@ -84,19 +128,23 @@ def server_url() -> Generator[str, None, None]:

    # Patch at source module level so all callers get filtered versions
    patches = [
        # Patch load_state at source - all functions calling it get filtered state
        # Patch load_config at source - get_config() calls this internally
        patch("compose_farm.config.load_config", _get_filtered_config),
        # Patch load_state at source and where imported
        patch("compose_farm.state.load_state", _get_filtered_state),
        # Patch get_config where imported
        patch("compose_farm.web.routes.pages.get_config", _get_filtered_config),
        patch("compose_farm.web.routes.api.get_config", _get_filtered_config),
        patch("compose_farm.web.routes.actions.get_config", _get_filtered_config),
        patch("compose_farm.web.app.get_config", _get_filtered_config),
        patch("compose_farm.web.ws.get_config", _get_filtered_config),
        patch("compose_farm.web.routes.pages.load_state", _get_filtered_state),
        # Patch container fetch to filter out excluded containers (Live Stats page)
        patch("compose_farm.glances.fetch_container_stats", _filtered_fetch_container_stats),
        # Patch compose labels to filter out excluded stacks
        patch("compose_farm.executor.get_container_compose_labels", _filtered_get_compose_labels),
    ]

    for p in patches:
        p.start()

    # Import create_app AFTER patches are started so route modules see patched get_config
    from compose_farm.web.app import create_app  # noqa: PLC0415

    with socket.socket() as s:
        s.bind(("127.0.0.1", 0))
        port = s.getsockname()[1]
@@ -160,6 +208,7 @@ def recording_context(
        if url.startswith(url_prefix):
            route.fulfill(status=200, content_type=content_type, body=filepath.read_bytes())
            return
        print(f"UNCACHED CDN request: {url}")
        route.abort("failed")

    context.route(re.compile(r"https://(cdn\.jsdelivr\.net|unpkg\.com)/.*"), handle_cdn)
@@ -176,6 +225,35 @@ def recording_page(recording_context: BrowserContext) -> Generator[Page, None, N
    page.close()


@pytest.fixture
def wide_recording_context(
    browser: Any,  # pytest-playwright's browser fixture
    recording_output_dir: Path,
) -> Generator[BrowserContext, None, None]:
    """Browser context with wider viewport for demos needing more horizontal space.

    NOTE: This fixture does NOT use CDN interception (unlike recording_context).
    CDN interception was causing inline scripts from containers.html to be
    removed from the DOM, likely due to Tailwind's browser plugin behavior.
    """
    context = browser.new_context(
        viewport={"width": 1920, "height": 1080},
        record_video_dir=str(recording_output_dir),
        record_video_size={"width": 1920, "height": 1080},
    )

    yield context
    context.close()


@pytest.fixture
def wide_recording_page(wide_recording_context: BrowserContext) -> Generator[Page, None, None]:
    """Page with wider viewport for demos needing more horizontal space."""
    page = wide_recording_context.new_page()
    yield page
    page.close()


# Demo helper functions
@@ -60,10 +60,14 @@ def test_demo_console(recording_page: Page, server_url: str) -> None:
    page.keyboard.press("Enter")
    pause(page, 2500)  # Wait for output

    # Scroll down to show the Editor section with Compose Farm config
    editor_section = page.locator(".collapse", has_text="Editor").first
    editor_section.scroll_into_view_if_needed()
    pause(page, 800)
    # Smoothly scroll down to show the Editor section with Compose Farm config
    page.evaluate("""
        const editor = document.getElementById('console-editor');
        if (editor) {
            editor.scrollIntoView({ behavior: 'smooth', block: 'center' });
        }
    """)
    pause(page, 1200)  # Wait for smooth scroll animation

    # Wait for Monaco editor to load with config content
    page.wait_for_selector("#console-editor .monaco-editor", timeout=10000)
85 docs/demos/web/demo_live_stats.py Normal file
@@ -0,0 +1,85 @@
"""Demo: Live Stats page.
|
||||
|
||||
Records a ~20 second demo showing:
|
||||
- Navigating to Live Stats via command palette
|
||||
- Container table with real-time stats
|
||||
- Filtering containers
|
||||
- Sorting by different columns
|
||||
- Auto-refresh countdown
|
||||
|
||||
Run: pytest docs/demos/web/demo_live_stats.py -v --no-cov
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from conftest import (
|
||||
open_command_palette,
|
||||
pause,
|
||||
slow_type,
|
||||
wait_for_sidebar,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from playwright.sync_api import Page
|
||||
|
||||
|
||||
@pytest.mark.browser # type: ignore[misc]
|
||||
def test_demo_live_stats(wide_recording_page: Page, server_url: str) -> None:
|
||||
"""Record Live Stats page demo."""
|
||||
page = wide_recording_page
|
||||
|
||||
# Start on dashboard
|
||||
page.goto(server_url)
|
||||
wait_for_sidebar(page)
|
||||
pause(page, 1000)
|
||||
|
||||
# Navigate to Live Stats via command palette
|
||||
open_command_palette(page)
|
||||
pause(page, 400)
|
||||
slow_type(page, "#cmd-input", "live", delay=100)
|
||||
pause(page, 500)
|
||||
page.keyboard.press("Enter")
|
||||
page.wait_for_url("**/live-stats", timeout=5000)
|
||||
|
||||
# Wait for containers to load (may take ~10s on first load due to SSH)
|
||||
page.wait_for_selector("#container-rows tr:not(:has(.loading))", timeout=30000)
|
||||
pause(page, 2000) # Let viewer see the full table with timer
|
||||
|
||||
# Demonstrate filtering
|
||||
slow_type(page, "#filter-input", "grocy", delay=100)
|
||||
pause(page, 1500) # Show filtered results
|
||||
|
||||
# Clear filter
|
||||
page.fill("#filter-input", "")
|
||||
pause(page, 1000)
|
||||
|
||||
# Sort by memory (click header)
|
||||
page.click("th:has-text('Mem')")
|
||||
pause(page, 1500)
|
||||
|
||||
# Sort by CPU
|
||||
page.click("th:has-text('CPU')")
|
||||
pause(page, 1500)
|
||||
|
||||
# Sort by host
|
||||
page.click("th:has-text('Host')")
|
||||
pause(page, 1500)
|
||||
|
||||
# Watch auto-refresh timer count down
|
||||
pause(page, 3500) # Wait for refresh to happen
|
||||
|
||||
# Hover on action menu to show pause behavior
|
||||
action_btn = page.locator('button[onclick^="openActionMenu"]').first
|
||||
action_btn.scroll_into_view_if_needed()
|
||||
action_btn.hover()
|
||||
pause(page, 2000) # Show paused state (timer shows ⏸) and action menu
|
||||
|
||||
# Move away to close menu and resume refresh
|
||||
page.locator("h2").first.hover() # Move to header
|
||||
pause(page, 3500) # Watch countdown resume and refresh happen
|
||||
|
||||
# Final pause
|
||||
pause(page, 1000)
|
||||
@@ -1,9 +1,11 @@
"""Demo: Container shell exec.
"""Demo: Container shell exec via command palette.

Records a ~25 second demo showing:
- Navigating to a stack page
- Clicking Shell button on a container
- Running top command inside the container
Records a ~35 second demo showing:
- Navigating to immich stack (multiple containers)
- Using command palette with fuzzy matching ("sh mach") to open shell
- Running a command
- Using command palette to switch to server container shell
- Running another command

Run: pytest docs/demos/web/demo_shell.py -v --no-cov
"""
@@ -14,6 +16,7 @@ from typing import TYPE_CHECKING

import pytest
from conftest import (
    open_command_palette,
    pause,
    slow_type,
    wait_for_sidebar,
@@ -33,39 +36,71 @@ def test_demo_shell(recording_page: Page, server_url: str) -> None:
    wait_for_sidebar(page)
    pause(page, 800)

    # Navigate to a stack with a running container (grocy)
    page.locator("#sidebar-stacks a", has_text="grocy").click()
    page.wait_for_url("**/stack/grocy", timeout=5000)
    # Navigate to immich via command palette (has multiple containers)
    open_command_palette(page)
    pause(page, 400)
    slow_type(page, "#cmd-input", "immich", delay=100)
    pause(page, 600)
    page.keyboard.press("Enter")
    page.wait_for_url("**/stack/immich", timeout=5000)
    pause(page, 1500)

    # Wait for containers list to load (loaded via HTMX)
    # Wait for containers list to load (so shell commands are available)
    page.wait_for_selector("#containers-list button", timeout=10000)
    pause(page, 800)

    # Click Shell button on the first container
    shell_btn = page.locator("#containers-list button", has_text="Shell").first
    shell_btn.click()
    # Use command palette with fuzzy matching: "sh mach" -> "Shell: immich-machine-learning"
    open_command_palette(page)
    pause(page, 400)
    slow_type(page, "#cmd-input", "sh mach", delay=100)
    pause(page, 600)
    page.keyboard.press("Enter")
    pause(page, 1000)

    # Wait for exec terminal to appear
    page.wait_for_selector("#exec-terminal .xterm", timeout=10000)

    # Scroll down to make the terminal visible
    page.locator("#exec-terminal").scroll_into_view_if_needed()
    pause(page, 2000)
    # Smoothly scroll down to make the terminal visible
    page.evaluate("""
        const terminal = document.getElementById('exec-terminal');
        if (terminal) {
            terminal.scrollIntoView({ behavior: 'smooth', block: 'center' });
        }
    """)
    pause(page, 1200)

    # Run top command
    slow_type(page, "#exec-terminal .xterm-helper-textarea", "top", delay=100)
    # Run python version command
    slow_type(page, "#exec-terminal .xterm-helper-textarea", "python3 --version", delay=60)
    pause(page, 300)
    page.keyboard.press("Enter")
    pause(page, 4000)  # Let top run for a bit
    pause(page, 1500)

    # Press q to quit top
    page.keyboard.press("q")
    # Blur the terminal to release focus (won't scroll)
    page.evaluate("document.activeElement?.blur()")
    pause(page, 500)

    # Use command palette to switch to server container: "sh serv" -> "Shell: immich-server"
    open_command_palette(page)
    pause(page, 400)
    slow_type(page, "#cmd-input", "sh serv", delay=100)
    pause(page, 600)
    page.keyboard.press("Enter")
    pause(page, 1000)

    # Run another command to show it's interactive
    slow_type(page, "#exec-terminal .xterm-helper-textarea", "ps aux | head", delay=60)
    # Wait for new terminal
    page.wait_for_selector("#exec-terminal .xterm", timeout=10000)

    # Scroll to terminal
    page.evaluate("""
        const terminal = document.getElementById('exec-terminal');
        if (terminal) {
            terminal.scrollIntoView({ behavior: 'smooth', block: 'center' });
        }
    """)
    pause(page, 1200)

    # Run ls command
    slow_type(page, "#exec-terminal .xterm-helper-textarea", "ls /usr/src/app", delay=60)
    pause(page, 300)
    page.keyboard.press("Enter")
    pause(page, 2000)
@@ -55,9 +55,14 @@ def test_demo_stack(recording_page: Page, server_url: str) -> None:
    page.wait_for_selector("#compose-editor .monaco-editor", timeout=10000)
    pause(page, 2000)  # Let viewer see the compose file

    # Scroll down slightly to show more of the editor
    page.locator("#compose-editor").scroll_into_view_if_needed()
    pause(page, 1500)
    # Smoothly scroll down to show more of the editor
    page.evaluate("""
        const editor = document.getElementById('compose-editor');
        if (editor) {
            editor.scrollIntoView({ behavior: 'smooth', block: 'center' });
        }
    """)
    pause(page, 1200)  # Wait for smooth scroll animation

    # Close the compose file section
    compose_collapse.locator("input[type=checkbox]").click(force=True)
@@ -63,7 +63,7 @@ def test_demo_themes(recording_page: Page, server_url: str) -> None:
    pause(page, 400)

    # Type to filter to a light theme (theme button pre-populates "theme:")
    slow_type(page, "#cmd-input", " cup", delay=100)
    slow_type(page, "#cmd-input", "cup", delay=100)
    pause(page, 500)
    page.keyboard.press("Enter")
    pause(page, 1000)
@@ -75,7 +75,7 @@ def test_demo_themes(recording_page: Page, server_url: str) -> None:
    page.wait_for_selector("#cmd-palette[open]", timeout=2000)
    pause(page, 300)

    slow_type(page, "#cmd-input", " dark", delay=100)
    slow_type(page, "#cmd-input", "dark", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 800)
@@ -5,7 +5,7 @@ Records a comprehensive demo (~60 seconds) combining all major features:
2. Editor showing Compose Farm YAML config
3. Command palette navigation to grocy stack
4. Stack actions: up, logs
5. Switch to mealie stack via command palette, run update
5. Switch to dozzle stack via command palette, run update
6. Dashboard overview
7. Theme cycling via command palette
@@ -126,13 +126,13 @@ def _demo_stack_actions(page: Page) -> None:
    page.wait_for_selector("#terminal-output .xterm", timeout=5000)
    pause(page, 2500)

    # Switch to mealie via command palette
    # Switch to dozzle via command palette (on nas for lower latency)
    open_command_palette(page)
    pause(page, 300)
    slow_type(page, "#cmd-input", "mealie", delay=100)
    slow_type(page, "#cmd-input", "dozzle", delay=100)
    pause(page, 400)
    page.keyboard.press("Enter")
    page.wait_for_url("**/stack/mealie", timeout=5000)
    page.wait_for_url("**/stack/dozzle", timeout=5000)
    pause(page, 1000)

    # Run update action
@@ -162,32 +162,20 @@ def _demo_dashboard_and_themes(page: Page, server_url: str) -> None:
    page.evaluate("window.scrollTo(0, 0)")
    pause(page, 600)

    # Open theme picker and arrow down to Luxury (shows live preview)
    # Theme order: light, dark, cupcake, bumblebee, emerald, corporate, synthwave,
    # retro, cyberpunk, valentine, halloween, garden, forest, aqua, lofi, pastel,
    # fantasy, wireframe, black, luxury (index 19)
    # Open theme picker and arrow down to Dracula (shows live preview)
    page.locator("#theme-btn").click()
    page.wait_for_selector("#cmd-palette[open]", timeout=2000)
    pause(page, 400)

    # Arrow down through themes with live preview until we reach Luxury
    # Arrow down through themes with live preview until we reach Dracula
    for _ in range(19):
        page.keyboard.press("ArrowDown")
        pause(page, 180)

    # Select Luxury theme
    # Select Dracula theme and end on it
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 1000)

    # Return to dark theme
    page.locator("#theme-btn").click()
    page.wait_for_selector("#cmd-palette[open]", timeout=2000)
    pause(page, 300)
    slow_type(page, "#cmd-input", " dark", delay=80)
    pause(page, 400)
    page.keyboard.press("Enter")
    pause(page, 1000)
    pause(page, 1500)


@pytest.mark.browser  # type: ignore[misc]
@@ -37,6 +37,7 @@ DEMOS = [
    "workflow",
    "console",
    "shell",
    "live_stats",
]

# High-quality ffmpeg settings for VP8 encoding
@@ -88,9 +89,9 @@ def patch_playwright_video_quality() -> None:
    console.print("[green]Patched Playwright for high-quality video recording[/green]")


def record_demo(name: str) -> Path | None:
def record_demo(name: str, index: int, total: int) -> Path | None:
    """Run a single demo and return the video path."""
    console.print(f"[green]Recording:[/green] web-{name}")
    console.print(f"[cyan][{index}/{total}][/cyan] [green]Recording:[/green] web-{name}")

    demo_file = SCRIPT_DIR / f"demo_{name}.py"
    if not demo_file.exists():
@@ -227,9 +228,7 @@ def main() -> int:

    try:
        for i, demo in enumerate(demos_to_record, 1):
            console.print(f"[yellow]=== Demo {i}/{len(demos_to_record)}: {demo} ===[/yellow]")

            video_path = record_demo(demo)
            video_path = record_demo(demo, i, len(demos_to_record))
            if video_path:
                webm, gif = move_recording(video_path, demo)
                results[demo] = (webm, gif)
101 docs/docker-deployment.md Normal file
@@ -0,0 +1,101 @@
---
icon: lucide/container
---

# Docker Deployment

Run the Compose Farm web UI in Docker.

## Quick Start

**1. Get the compose file:**

```bash
curl -O https://raw.githubusercontent.com/basnijholt/compose-farm/main/docker-compose.yml
```

**2. Generate `.env` file:**

```bash
cf config init-env
```

This auto-detects settings from your `compose-farm.yaml`:
- `DOMAIN` from existing traefik labels
- `CF_COMPOSE_DIR` from config
- `CF_UID/GID/HOME/USER` from current user

Review the output and edit if needed.

**3. Set up SSH keys:**

```bash
docker compose run --rm cf ssh setup
```

**4. Start the web UI:**

```bash
docker compose up -d web
```

Open `http://localhost:9000` (or `https://compose-farm.example.com` if using Traefik).
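If the page doesn't come up, the container logs are the first place to look. A minimal sketch, assuming the service is named `web` as in the compose file fetched above:

```bash
# Tail the web UI logs (service name taken from the shipped compose file)
docker compose logs -f web

# Confirm the container is running and the port mapping is active
docker compose ps web
```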
---

## Configuration

The `cf config init-env` command auto-detects most settings. After running it, review the generated `.env` file and edit if needed:

```bash
$EDITOR .env
```

### What init-env detects

| Variable | How it's detected |
|----------|-------------------|
| `DOMAIN` | Extracted from traefik labels in your stacks |
| `CF_COMPOSE_DIR` | From `compose_dir` in your config |
| `CF_UID/GID/HOME/USER` | From current user (for NFS compatibility) |

If auto-detection fails for any value, edit the `.env` file manually.

### Glances Monitoring

To show host CPU/memory stats in the dashboard, deploy [Glances](https://nicolargo.github.io/glances/) on your hosts. When running the web UI container, Compose Farm infers the local host from `CF_WEB_STACK` and uses the Glances container name for that host.

See [Host Resource Monitoring](https://github.com/basnijholt/compose-farm#host-resource-monitoring-glances) in the README.
---

## Troubleshooting

### SSH "Permission denied" or "Host key verification failed"

Regenerate keys:

```bash
docker compose run --rm cf ssh setup
```

### Files created as root

Add the non-root variables above and restart.
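Files written before the change stay owned by root until you reclaim them. A sketch of the cleanup, assuming your compose directory is `/opt/compose` (adjust the path to your setup):

```bash
# Reclaim files created while the container ran as root (path is illustrative)
sudo chown -R "$(id -u):$(id -g)" /opt/compose

# Recreate the container so the new CF_UID/CF_GID take effect
docker compose up -d --force-recreate web
```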
---

## All Environment Variables

For advanced users, here's the complete reference:

| Variable | Description | Default |
|----------|-------------|---------|
| `DOMAIN` | Domain for Traefik labels | *(required)* |
| `CF_COMPOSE_DIR` | Compose files directory | `/opt/stacks` |
| `CF_UID` / `CF_GID` | User/group ID | `0` (root) |
| `CF_HOME` | Home directory | `/root` |
| `CF_USER` | Username for SSH | `root` |
| `CF_WEB_STACK` | Web UI stack name (enables self-update, local host inference) | *(none)* |
| `CF_SSH_DIR` | SSH keys directory | `~/.ssh/compose-farm` |
| `CF_XDG_CONFIG` | Config/backup directory | `~/.config/compose-farm` |
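Putting the table together, a complete `.env` for a non-root deployment might look like this. Every value is a placeholder; `cf config init-env` generates the real one for your system:

```bash
# Example .env — all values illustrative, adjust to your environment
DOMAIN=example.com
CF_COMPOSE_DIR=/opt/stacks
CF_UID=1000
CF_GID=1000
CF_HOME=/home/alice
CF_USER=alice
CF_WEB_STACK=compose-farm
```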
@@ -24,7 +24,7 @@ Before you begin, ensure you have:
### One-liner (recommended)

```bash
curl -fsSL https://raw.githubusercontent.com/basnijholt/compose-farm/main/bootstrap.sh | sh
curl -fsSL https://compose-farm.nijho.lt/install | sh
```

This installs [uv](https://docs.astral.sh/uv/) if needed, then installs compose-farm.
@@ -54,6 +54,25 @@ docker run --rm \
  ghcr.io/basnijholt/compose-farm up --all
```

**Running as non-root user** (recommended for NFS mounts):

By default, containers run as root. To preserve file ownership on mounted volumes, set these environment variables in your `.env` file:

```bash
# Add to .env file (one-time setup)
echo "CF_UID=$(id -u)" >> .env
echo "CF_GID=$(id -g)" >> .env
echo "CF_HOME=$HOME" >> .env
echo "CF_USER=$USER" >> .env
```

Or use [direnv](https://direnv.net/) to auto-set these variables when entering the directory:

```bash
cp .envrc.example .envrc && direnv allow
```

This ensures files like `compose-farm-state.yaml` and web UI edits are owned by your user instead of root. The `CF_USER` variable is required for SSH to work when running as a non-root user.

### Verify Installation

```bash
@@ -111,9 +130,9 @@ nas:/volume1/compose /opt/compose nfs defaults 0 0
/opt/compose/          # compose_dir in config
├── plex/
│   └── docker-compose.yml
├── sonarr/
├── grafana/
│   └── docker-compose.yml
├── radarr/
├── nextcloud/
│   └── docker-compose.yml
└── jellyfin/
    └── docker-compose.yml
@@ -123,7 +142,21 @@ nas:/volume1/compose /opt/compose nfs defaults 0 0

### Create Config File

Create `~/.config/compose-farm/compose-farm.yaml`:
Create `compose-farm.yaml` in the directory where you'll run commands. For example, if your stacks are in `/opt/stacks`, place the config there too:

```bash
cd /opt/stacks
cf config init
```

Alternatively, use `~/.config/compose-farm/compose-farm.yaml` for a global config. You can also symlink a working directory config to the global location:

```bash
# Create config in your stacks directory, symlink to ~/.config
cf config symlink /opt/stacks/compose-farm.yaml
```

This way, `cf` commands work from anywhere while the config lives with your stacks.

#### Single host example
@@ -136,8 +169,8 @@ hosts:

stacks:
  plex: local
  sonarr: local
  radarr: local
  grafana: local
  nextcloud: local
```

#### Multi-host example
@@ -157,8 +190,8 @@ hosts:
# Map stacks to hosts
stacks:
  plex: nuc
  sonarr: nuc
  radarr: hp
  grafana: nuc
  nextcloud: hp
```

Each entry in `stacks:` maps to a folder under `compose_dir` that contains a compose file.
@@ -197,7 +230,7 @@ Starts all stacks on their assigned hosts.
### Start Specific Stacks

```bash
cf up plex sonarr
cf up plex grafana
```

### Apply Configuration

@@ -236,19 +269,22 @@ Create the compose file:

```bash
# On any host (shared storage)
mkdir -p /opt/compose/prowlarr
cat > /opt/compose/prowlarr/docker-compose.yml << 'EOF'
mkdir -p /opt/compose/gitea
cat > /opt/compose/gitea/docker-compose.yml << 'EOF'
services:
  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
  gitea:
    image: docker.gitea.com/gitea:latest
    container_name: gitea
    environment:
      - PUID=1000
      - PGID=1000
      - USER_UID=1000
      - USER_GID=1000
    volumes:
      - /opt/config/prowlarr:/config
      - /opt/config/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "9696:9696"
      - "3000:3000"
      - "2222:22"
    restart: unless-stopped
EOF
```
@@ -258,13 +294,13 @@ Add to config:

```yaml
stacks:
  # ... existing stacks
  prowlarr: nuc
  gitea: nuc
```

Start the stack:

```bash
cf up prowlarr
cf up gitea
```

### 2. Move a Stack to Another Host

@@ -293,7 +329,7 @@ cf apply

```bash
cf update --all
# Runs: pull + build + down + up for each stack
# Only recreates containers if images changed
```

## Next Steps
@@ -76,7 +76,7 @@ hosts:
stacks:
  plex: server-1
  jellyfin: server-2
  sonarr: server-1
  grafana: server-1
```

```bash
@@ -96,7 +96,7 @@ pip install compose-farm

### Configuration

Create `~/.config/compose-farm/compose-farm.yaml`:
Create `compose-farm.yaml` in the directory where you'll run commands (e.g., `/opt/stacks`), or in `~/.config/compose-farm/`:

```yaml
compose_dir: /opt/compose
@@ -110,10 +110,12 @@ hosts:

stacks:
  plex: nuc
  sonarr: nuc
  radarr: hp
  grafana: nuc
  nextcloud: hp
```

See [Configuration](configuration.md) for all options and the full search order.

### Usage

```bash
@@ -121,7 +123,7 @@ stacks:
cf apply

# Start specific stacks
cf up plex sonarr
cf up plex grafana

# Check status
cf ps
2 bootstrap.sh → docs/install Executable file → Normal file
@@ -1,6 +1,6 @@
#!/bin/sh
# Compose Farm bootstrap script
# Usage: curl -fsSL https://raw.githubusercontent.com/basnijholt/compose-farm/main/bootstrap.sh | sh
# Usage: curl -fsSL https://compose-farm.nijho.lt/install | sh
#
# This script installs uv (if needed) and then installs compose-farm as a uv tool.
21 docs/javascripts/video-fix.js Normal file
@@ -0,0 +1,21 @@
// Fix Safari video autoplay issues
(function() {
  function initVideos() {
    document.querySelectorAll('video[autoplay]').forEach(function(video) {
      video.load();
      video.play().catch(function() {});
    });
  }

  // For initial page load (needed for Chrome)
  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', initVideos);
  } else {
    initVideos();
  }

  // For MkDocs instant navigation (needed for Safari)
  if (typeof document$ !== 'undefined') {
    document$.subscribe(initVideos);
  }
})();
@@ -0,0 +1,6 @@
<!-- Privacy-friendly analytics by Plausible -->
<script async src="https://plausible.nijho.lt/js/pa-NRX7MolONWKTUREJpAjkB.js"></script>
<script>
window.plausible=window.plausible||function(){(plausible.q=plausible.q||[]).push(arguments)},plausible.init=plausible.init||function(i){plausible.o=i||{}};
plausible.init()
</script>
@@ -27,8 +27,8 @@ hosts:
stacks:
  plex: nuc
  jellyfin: hp
  sonarr: nuc
  radarr: nuc
  grafana: nuc
  nextcloud: nuc
```

Then just:

@@ -133,13 +133,12 @@ hosts:
stacks:
  traefik: nuc  # Traefik runs here
  plex: hp      # Routed via file-provider
  sonarr: hp
  grafana: hp
```

With `traefik_file` set, these commands auto-regenerate the config:
- `cf up`
- `cf down`
- `cf restart`
- `cf update`
- `cf apply`

@@ -256,8 +255,8 @@ stacks:
  traefik: nuc
  plex: hp
  jellyfin: nas
  sonarr: nuc
  radarr: nuc
  grafana: nuc
  nextcloud: nuc
```

### /opt/compose/plex/docker-compose.yml
@@ -309,7 +308,7 @@ http:
          - url: http://192.168.1.100:8096
```

Note: `sonarr` and `radarr` are NOT in the file because they're on the same host as Traefik (`nuc`).
Note: `grafana` and `nextcloud` are NOT in the file because they're on the same host as Traefik (`nuc`).

## Combining with Existing Config
@@ -51,10 +51,32 @@ Press `Ctrl+K` (or `Cmd+K` on macOS) to open the command palette. Use fuzzy sear
### Dashboard (`/`)

- Stack overview with status indicators
- Host statistics
- Host statistics (CPU, memory, disk, load via Glances)
- Pending operations (migrations, orphaned stacks)
- Quick actions via command palette

### Live Stats (`/live-stats`)

Real-time container monitoring across all hosts, powered by [Glances](https://nicolargo.github.io/glances/).

- **Live metrics**: CPU, memory, network I/O for every container
- **Auto-refresh**: Updates every 3 seconds (pauses when dropdown menus are open)
- **Filtering**: Type to filter containers by name, stack, host, or image
- **Sorting**: Click column headers to sort by any metric
- **Update detection**: Shows when container images have updates available

<video autoplay loop muted playsinline>
  <source src="/assets/web-live_stats.webm" type="video/webm">
</video>

#### Requirements

Live Stats requires Glances to be deployed on all hosts:
1. Add `glances_stack: glances` to your `compose-farm.yaml`
2. Deploy a Glances stack that runs on all hosts (see [example](https://github.com/basnijholt/compose-farm/tree/main/examples/glances))
3. Glances must expose its REST API on port 61208 (a quick check is sketched below)
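One way to confirm the last requirement on each host is to query the API directly. A minimal sketch; the host IP is a placeholder, and the `/api/4/` path assumes a recent Glances 4 release (older versions use `/api/3/`):

```bash
# Expect a JSON list of containers; a connection error means Glances
# isn't reachable on that host
curl -s http://192.168.1.10:61208/api/4/containers | head -c 300
```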
### Stack Detail (`/stack/{name}`)

- Compose file editor (Monaco)
@@ -63,6 +85,8 @@ Press `Ctrl+K` (or `Cmd+K` on macOS) to open the command palette. Use fuzzy sear
- Container shell access (exec into running containers)
- Terminal output for running commands

Files are automatically backed up before saving to `~/.config/compose-farm/backups/`.
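The backups are plain files, so recovering an earlier version is just a copy. A sketch; the backup filename shown is hypothetical, so check the directory for the actual naming scheme:

```bash
# List saved backups, newest first
ls -lt ~/.config/compose-farm/backups/

# Restore one by copying it back over the live compose file (paths illustrative)
cp ~/.config/compose-farm/backups/plex-docker-compose.yml.bak /opt/compose/plex/docker-compose.yml
```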
### Console (`/console`)

- Full shell access to any host
@@ -116,7 +140,7 @@ The web UI requires additional dependencies:
pip install compose-farm[web]

# If installed via uv
uv tool install compose-farm --with web
uv tool install 'compose-farm[web]'
```

## Architecture
@@ -7,9 +7,10 @@ Real-world examples demonstrating compose-farm patterns for multi-host Docker de
| Stack | Type | Demonstrates |
|---------|------|--------------|
| [traefik](traefik/) | Infrastructure | Reverse proxy, Let's Encrypt, file-provider |
| [coredns](coredns/) | Infrastructure | Wildcard DNS for `*.local` domains |
| [mealie](mealie/) | Single container | Traefik labels, resource limits, environment vars |
| [uptime-kuma](uptime-kuma/) | Single container | Docker socket, user mapping, custom DNS |
| [paperless-ngx](paperless-ngx/) | Multi-container | Redis + App stack (SQLite) |
| [paperless-ngx](paperless-ngx/) | Multi-container | Redis + PostgreSQL + App stack |
| [autokuma](autokuma/) | Multi-host | Demonstrates `all` keyword (runs on every host) |

## Key Patterns
@@ -53,7 +54,8 @@ labels:
  - traefik.http.routers.myapp-local.entrypoints=web
```

> **Note:** `.local` domains require local DNS (e.g., Pi-hole, Technitium) to resolve to your Traefik host.
> **Note:** `.local` domains require local DNS to resolve to your Traefik host.
> The [coredns](coredns/) example provides this - edit `Corefile` to set your Traefik IP.

### Environment Variables

@@ -88,23 +90,6 @@ stacks:
  autokuma: all  # Runs on every configured host
```

### Multi-Container Stacks

Database-backed apps with multiple services:

```yaml
services:
  redis:
    image: redis:7
  app:
    depends_on:
      - redis
```

> **NFS + PostgreSQL Warning:** PostgreSQL should NOT run on NFS storage due to
> fsync and file locking issues. Use SQLite (safe for single-writer on NFS) or
> keep PostgreSQL data on local volumes (non-migratable).

### AutoKuma Labels (Optional)

The autokuma example demonstrates compose-farm's **multi-host feature** - running the same stack on all hosts using the `all` keyword. AutoKuma itself is not part of compose-farm; it's just a good example because it needs to run on every host to monitor local Docker containers.
@@ -125,8 +110,8 @@ cd examples
# 1. Create the shared network on all hosts
compose-farm init-network

# 2. Start Traefik first (the reverse proxy)
compose-farm up traefik
# 2. Start infrastructure (reverse proxy + DNS)
compose-farm up traefik coredns

# 3. Start other stacks
compose-farm up mealie uptime-kuma
@@ -168,4 +153,4 @@ traefik_file: /opt/stacks/traefik/dynamic.d/compose-farm.yml
traefik_stack: traefik
```

With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, `restart`, and `update` commands.
With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, and `update` commands.
@@ -3,6 +3,7 @@ deployed:
  - primary
  - secondary
  - local
coredns: primary
mealie: secondary
paperless-ngx: primary
traefik: primary

@@ -5,7 +5,7 @@

compose_dir: /opt/stacks/compose-farm/examples

# Auto-regenerate Traefik file-provider config after up/down/restart/update
# Auto-regenerate Traefik file-provider config after up/down/update
traefik_file: /opt/stacks/compose-farm/examples/traefik/dynamic.d/compose-farm.yml
traefik_stack: traefik  # Skip Traefik's host in file-provider (docker provider handles it)
@@ -27,6 +27,7 @@ hosts:
stacks:
  # Infrastructure (runs on primary where Traefik is)
  traefik: primary
  coredns: primary  # DNS for *.local resolution

  # Multi-host stacks (runs on ALL hosts)
  # AutoKuma monitors Docker containers on each host
2 examples/coredns/.env Normal file
@@ -0,0 +1,2 @@
# CoreDNS doesn't need environment variables
# The Traefik IP is configured in the Corefile

22 examples/coredns/Corefile Normal file
@@ -0,0 +1,22 @@
# CoreDNS configuration for .local domain resolution
#
# Resolves *.local to the Traefik host IP (where your reverse proxy runs).
# All other queries are forwarded to upstream DNS.

# Handle .local domains - resolve everything to Traefik's host
local {
    template IN A {
        answer "{{ .Name }} 60 IN A 192.168.1.10"
    }
    template IN AAAA {
        # Return empty for AAAA to avoid delays on IPv4-only networks
        rcode NOERROR
    }
}

# Forward everything else to upstream DNS
. {
    forward . 1.1.1.1 8.8.8.8
    cache 300
    errors
}
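To sanity-check the Corefile once CoreDNS is up, query it directly; a sketch, with the server IP and hostnames as placeholders:

```bash
# Any *.local name should answer with the Traefik IP from the Corefile
dig @192.168.1.10 whatever.local +short

# Non-.local names should still resolve via the upstream forwarders
dig @192.168.1.10 example.com +short
```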
27 examples/coredns/compose.yaml Normal file
@@ -0,0 +1,27 @@
# CoreDNS - DNS server for .local domain resolution
#
# Demonstrates:
# - Wildcard DNS for *.local domains
# - Config file mounting from stack directory
# - UDP/TCP port exposure
#
# This enables all the .local routes in the examples to work.
# Point your devices/router DNS to this server's IP.
name: coredns
services:
  coredns:
    image: coredns/coredns:latest
    container_name: coredns
    restart: unless-stopped
    networks:
      - mynetwork
    ports:
      - "53:53/udp"
      - "53:53/tcp"
    volumes:
      - ./Corefile:/root/Corefile:ro
    command: -conf /root/Corefile

networks:
  mynetwork:
    external: true
@@ -1,3 +1,4 @@
# Copy to .env and fill in your values
DOMAIN=example.com
PAPERLESS_SECRET_KEY=change-me-to-a-random-string
POSTGRES_PASSWORD=change-me-to-a-secure-password
PAPERLESS_SECRET_KEY=change-me-to-a-long-random-string
@@ -1,44 +1,57 @@
# Paperless-ngx - Document management system
#
# Demonstrates:
# - HTTPS route: paperless.${DOMAIN} (e.g., paperless.example.com) with Let's Encrypt
# - HTTP route: paperless.local for LAN access without TLS
# - Multi-container stack (Redis + App with SQLite)
#
# NOTE: This example uses SQLite (the default) instead of PostgreSQL.
# PostgreSQL should NOT be used with NFS storage due to fsync/locking issues.
# If you need PostgreSQL, use local volumes for the database.
# - HTTPS route: paperless.${DOMAIN} with Let's Encrypt
# - HTTP route: paperless.local for LAN access
# - Multi-container stack (Redis + PostgreSQL + App)
# - Separate env_file for app-specific settings
name: paperless-ngx
services:
  redis:
    image: redis:8
  broker:
    image: redis:7
    container_name: paperless-redis
    restart: unless-stopped
    networks:
      - mynetwork
    volumes:
      - /mnt/data/paperless/redis:/data
      - /mnt/data/paperless/redisdata:/data

  db:
    image: postgres:16
    container_name: paperless-db
    restart: unless-stopped
    networks:
      - mynetwork
    volumes:
      - /mnt/data/paperless/pgdata:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

  paperless:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    container_name: paperless
    restart: unless-stopped
    depends_on:
      - redis
      - db
      - broker
    networks:
      - mynetwork
    ports:
      - "8000:8000"
    volumes:
      # SQLite database stored here (safe on NFS for single-writer)
      - /mnt/data/paperless/data:/usr/src/paperless/data
      - /mnt/data/paperless/media:/usr/src/paperless/media
      - /mnt/data/paperless/export:/usr/src/paperless/export
      - /mnt/data/paperless/consume:/usr/src/paperless/consume
    environment:
      PAPERLESS_REDIS: redis://redis:6379
      PAPERLESS_REDIS: redis://broker:6379
      PAPERLESS_DBHOST: db
      PAPERLESS_URL: https://paperless.${DOMAIN}
      PAPERLESS_SECRET_KEY: ${PAPERLESS_SECRET_KEY}
      PAPERLESS_TIME_ZONE: America/Los_Angeles
      PAPERLESS_OCR_LANGUAGE: eng
      USERMAP_UID: 1000
      USERMAP_GID: 1000
    labels:
@@ -1,7 +1,7 @@
|
||||
"""Hatch build hook to vendor CDN assets for offline use.
|
||||
|
||||
During wheel builds, this hook:
|
||||
1. Parses base.html to find elements with data-vendor attributes
|
||||
1. Reads vendor-assets.json to find assets marked for vendoring
|
||||
2. Downloads each CDN asset to a temporary vendor directory
|
||||
3. Rewrites base.html to use local /static/vendor/ paths
|
||||
4. Fetches and bundles license information
|
||||
@@ -13,6 +13,7 @@ distributed wheel has vendored assets.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
@@ -23,22 +24,6 @@ from urllib.request import Request, urlopen
|
||||
|
||||
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
|
||||
|
||||
# Matches elements with data-vendor attribute: extracts URL and target filename
|
||||
# Example: <script src="https://..." data-vendor="htmx.js">
|
||||
# Captures: (1) src/href, (2) URL, (3) attributes between, (4) vendor filename
|
||||
VENDOR_PATTERN = re.compile(r'(src|href)="(https://[^"]+)"([^>]*?)data-vendor="([^"]+)"')
|
||||
|
||||
# License URLs for each package (GitHub raw URLs)
|
||||
LICENSE_URLS: dict[str, tuple[str, str]] = {
|
||||
"htmx": ("MIT", "https://raw.githubusercontent.com/bigskysoftware/htmx/master/LICENSE"),
|
||||
"xterm": ("MIT", "https://raw.githubusercontent.com/xtermjs/xterm.js/master/LICENSE"),
|
||||
"daisyui": ("MIT", "https://raw.githubusercontent.com/saadeghi/daisyui/master/LICENSE"),
|
||||
"tailwindcss": (
|
||||
"MIT",
|
||||
"https://raw.githubusercontent.com/tailwindlabs/tailwindcss/master/LICENSE",
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def _download(url: str) -> bytes:
|
||||
"""Download a URL, trying urllib first then curl as fallback."""
|
||||
@@ -61,7 +46,14 @@ def _download(url: str) -> bytes:
|
||||
return bytes(result.stdout)
|
||||
|
||||
|
||||
def _generate_licenses_file(temp_dir: Path) -> None:
def _load_vendor_assets(root: Path) -> dict[str, Any]:
    """Load vendor-assets.json from the web module."""
    json_path = root / "src" / "compose_farm" / "web" / "vendor-assets.json"
    with json_path.open() as f:
        return json.load(f)


def _generate_licenses_file(temp_dir: Path, licenses: dict[str, dict[str, str]]) -> None:
    """Download and combine license files into LICENSES.txt."""
    lines = [
        "# Vendored Dependencies - License Information",
@@ -73,7 +65,9 @@ def _generate_licenses_file(temp_dir: Path) -> None:
        "",
    ]

    for pkg_name, (license_type, license_url) in LICENSE_URLS.items():
    for pkg_name, license_info in licenses.items():
        license_type = license_info["type"]
        license_url = license_info["url"]
        lines.append(f"## {pkg_name} ({license_type})")
        lines.append(f"Source: {license_url}")
        lines.append("")
@@ -107,44 +101,57 @@ class VendorAssetsHook(BuildHookInterface): # type: ignore[misc]
        if not base_html_path.exists():
            return

        # Load vendor assets configuration
        vendor_config = _load_vendor_assets(Path(self.root))
        assets_to_vendor = vendor_config["assets"]

        if not assets_to_vendor:
            return

        # Create temp directory for vendored assets
        temp_dir = Path(tempfile.mkdtemp(prefix="compose_farm_vendor_"))
        vendor_dir = temp_dir / "vendor"
        vendor_dir.mkdir()

        # Read and parse base.html
        # Read base.html
        html_content = base_html_path.read_text()

        # Build URL to filename mapping and download assets
        url_to_filename: dict[str, str] = {}

        # Find all elements with data-vendor attribute and download them
        for match in VENDOR_PATTERN.finditer(html_content):
            url = match.group(2)
            filename = match.group(4)

            if url in url_to_filename:
                continue

        for asset in assets_to_vendor:
            url = asset["url"]
            filename = asset["filename"]
            url_to_filename[url] = filename
            filepath = vendor_dir / filename
            filepath.parent.mkdir(parents=True, exist_ok=True)
            content = _download(url)
            (vendor_dir / filename).write_bytes(content)
            filepath.write_bytes(content)

        if not url_to_filename:
            return
        # Generate LICENSES.txt from the JSON config
        _generate_licenses_file(vendor_dir, vendor_config["licenses"])

        # Generate LICENSES.txt
        _generate_licenses_file(vendor_dir)
        # Rewrite HTML: replace CDN URLs with local paths and remove data-vendor attributes
        # Pattern matches: src="URL" ... data-vendor="filename" or href="URL" ... data-vendor="filename"
        vendor_pattern = re.compile(r'(src|href)="(https://[^"]+)"([^>]*?)data-vendor="([^"]+)"')

        # Rewrite HTML to use local paths (remove data-vendor, update URL)
        def replace_vendor_tag(match: re.Match[str]) -> str:
            attr = match.group(1) # src or href
            url = match.group(2)
            between = match.group(3) # attributes between URL and data-vendor
            filename = match.group(4)
            if url in url_to_filename:
                filename = url_to_filename[url]
            return f'{attr}="/static/vendor/{filename}"{between}'
            return match.group(0)

        modified_html = VENDOR_PATTERN.sub(replace_vendor_tag, html_content)
        modified_html = vendor_pattern.sub(replace_vendor_tag, html_content)

        # Inject vendored mode flag for JavaScript to detect
        # Insert right after <head> tag so it's available early
        modified_html = modified_html.replace(
            "<head>",
            "<head>\n <script>window.CF_VENDORED=true;</script>",
            1, # Only replace first occurrence
        )

        # Write modified base.html to temp
        templates_dir = temp_dir / "templates"
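To make the rewrite above concrete, here is a standalone sketch of the pattern applied to a single tag (the CDN URL and filename are illustrative, not from the repo):

import re

vendor_pattern = re.compile(r'(src|href)="(https://[^"]+)"([^>]*?)data-vendor="([^"]+)"')
tag = '<script src="https://cdn.example.com/htmx.min.js" defer data-vendor="htmx.min.js"></script>'
rewritten = vendor_pattern.sub(
    lambda m: f'{m.group(1)}="/static/vendor/{m.group(4)}"{m.group(3)}', tag
)
# rewritten == '<script src="/static/vendor/htmx.min.js" defer ></script>'
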
60
justfile
Normal file
@@ -0,0 +1,60 @@
# Compose Farm Development Commands
# Run `just` to see available commands

# Default: list available commands
default:
    @just --list

# Install development dependencies
install:
    uv sync --all-extras --dev

# Run all tests (parallel)
test:
    uv run pytest -n auto

# Run CLI tests only (parallel, with coverage)
test-cli:
    uv run pytest -m "not browser" -n auto

# Run web UI tests only (parallel)
test-web:
    uv run pytest -m browser -n auto

# Lint, format, and type check
lint:
    uv run ruff check --fix .
    uv run ruff format .
    uv run mypy src
    uv run ty check src

# Start web UI in development mode with auto-reload
web:
    uv run cf web --reload --port 9001

# Kill the web server
kill-web:
    lsof -ti :9001 | xargs kill -9 2>/dev/null || true

# Build docs and serve locally
doc:
    uvx zensical build
    python -m http.server -d site 9002

# Kill the docs server
kill-doc:
    lsof -ti :9002 | xargs kill -9 2>/dev/null || true

# Record CLI demos (all or specific: just record-cli quickstart)
record-cli *demos:
    python docs/demos/cli/record.py {{demos}}

# Record web UI demos (all or specific: just record-web navigation)
record-web *demos:
    python docs/demos/web/record.py {{demos}}

# Clean up build artifacts and caches
clean:
    rm -rf .pytest_cache .mypy_cache .ruff_cache .coverage htmlcov dist build
    find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
    find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
@@ -30,7 +30,8 @@ classifiers = [
    "Intended Audience :: Developers",
    "Intended Audience :: System Administrators",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Operating System :: MacOS",
    "Operating System :: POSIX :: Linux",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
@@ -46,6 +47,7 @@ dependencies = [
    "asyncssh>=2.14.0",
    "pyyaml>=6.0",
    "rich>=13.0.0",
    "python-dotenv>=1.0.0",
]

[project.optional-dependencies]
@@ -53,6 +55,7 @@ web = [
    "fastapi[standard]>=0.109.0",
    "jinja2>=3.1.0",
    "websockets>=12.0",
    "humanize>=4.0.0",
]

[project.urls]
@@ -121,6 +124,10 @@ python_version = "3.11"
strict = true
plugins = ["pydantic.mypy"]

[[tool.mypy.overrides]]
module = "compose_farm._version"
ignore_missing_imports = true

[[tool.mypy.overrides]]
module = "asyncssh.*"
ignore_missing_imports = true
@@ -171,8 +178,12 @@ python-version = "3.11"
exclude = [
    "hatch_build.py", # Build-time only, hatchling not in dev deps
    "docs/demos/**", # Demo scripts with local conftest imports
    "src/compose_farm/_version.py", # Generated at build time
]

[tool.ty.rules]
unresolved-import = "ignore" # _version.py is generated at build time

[dependency-groups]
dev = [
    "mypy>=1.19.0",
@@ -23,6 +23,7 @@ app = typer.Typer(
    help="Compose Farm - run docker compose commands across multiple hosts",
    no_args_is_help=True,
    context_settings={"help_option_names": ["-h", "--help"]},
    rich_markup_mode="rich",
)
@@ -59,6 +59,10 @@ HostOption = Annotated[
    str | None,
    typer.Option("--host", "-H", help="Filter to stacks on this host"),
]
ServiceOption = Annotated[
    str | None,
    typer.Option("--service", "-s", help="Target a specific service within the stack"),
]

# --- Constants (internal) ---
_MISSING_PATH_PREVIEW_LIMIT = 2
@@ -138,6 +142,9 @@ def load_config_or_exit(config_path: Path | None) -> Config:
    except FileNotFoundError as e:
        print_error(str(e))
        raise typer.Exit(1) from e
    except Exception as e:
        print_error(f"Invalid config: {e}")
        raise typer.Exit(1) from e


def get_stacks(
@@ -3,13 +3,12 @@
from __future__ import annotations

import os
import platform
import shlex
import shutil
import subprocess
from importlib import resources
from pathlib import Path
from typing import Annotated
from typing import TYPE_CHECKING, Annotated

import typer

@@ -17,6 +16,9 @@ from compose_farm.cli.app import app
from compose_farm.console import MSG_CONFIG_NOT_FOUND, console, print_error, print_success
from compose_farm.paths import config_search_paths, default_config_path, find_config_path

if TYPE_CHECKING:
    from compose_farm.config import Config

config_app = typer.Typer(
    name="config",
    help="Manage compose-farm configuration files.",
@@ -43,8 +45,6 @@ def _get_editor() -> str:
    """Get the user's preferred editor ($EDITOR > $VISUAL > platform default)."""
    if editor := os.environ.get("EDITOR") or os.environ.get("VISUAL"):
        return editor
    if platform.system() == "Windows":
        return "notepad"
    return next((e for e in ("nano", "vim", "vi") if shutil.which(e)), "vi")


@@ -68,6 +68,22 @@ def _get_config_file(path: Path | None) -> Path | None:
    return config_path.resolve() if config_path else None


def _load_config_with_path(path: Path | None) -> tuple[Path, Config]:
    """Load config and return both the resolved path and Config object.

    Exits with error if config not found or invalid.
    """
    from compose_farm.cli.common import load_config_or_exit # noqa: PLC0415

    config_file = _get_config_file(path)
    if config_file is None:
        print_error(MSG_CONFIG_NOT_FOUND)
        raise typer.Exit(1)

    cfg = load_config_or_exit(config_file)
    return config_file, cfg


def _report_missing_config(explicit_path: Path | None = None) -> None:
    """Report that a config file was not found."""
    console.print("[yellow]Config file not found.[/yellow]")
@@ -135,7 +151,7 @@ def config_edit(
    console.print(f"[dim]Opening {config_file} with {editor}...[/dim]")

    try:
        editor_cmd = shlex.split(editor, posix=os.name != "nt")
        editor_cmd = shlex.split(editor)
    except ValueError as e:
        print_error("Invalid editor command. Check [bold]$EDITOR[/]/[bold]$VISUAL[/]")
        raise typer.Exit(1) from e
@@ -207,23 +223,7 @@ def config_validate(
    path: _PathOption = None,
) -> None:
    """Validate the config file syntax and schema."""
    config_file = _get_config_file(path)

    if config_file is None:
        print_error(MSG_CONFIG_NOT_FOUND)
        raise typer.Exit(1)

    # Lazy import: pydantic adds ~50ms to startup, only load when actually needed
    from compose_farm.config import load_config # noqa: PLC0415

    try:
        cfg = load_config(config_file)
    except FileNotFoundError as e:
        print_error(str(e))
        raise typer.Exit(1) from e
    except Exception as e:
        print_error(f"Invalid config: {e}")
        raise typer.Exit(1) from e
    config_file, cfg = _load_config_with_path(path)

    print_success(f"Valid config: {config_file}")
    console.print(f" Hosts: {len(cfg.hosts)}")
@@ -293,5 +293,114 @@ def config_symlink(
    console.print(f" -> {target_path}")


def _detect_domain(cfg: Config) -> str | None:
    """Try to detect DOMAIN from traefik Host() rules in existing stacks.

    Uses extract_website_urls from traefik module to get interpolated
    URLs, then extracts the domain from the first valid URL.
    Skips local domains (.local, localhost, etc.).
    """
    from urllib.parse import urlparse # noqa: PLC0415

    from compose_farm.traefik import extract_website_urls # noqa: PLC0415

    max_stacks_to_check = 10
    min_domain_parts = 2
    subdomain_parts = 4
    skip_tlds = {"local", "localhost", "internal", "lan", "home"}

    for stack_name in list(cfg.stacks.keys())[:max_stacks_to_check]:
        urls = extract_website_urls(cfg, stack_name)
        for url in urls:
            host = urlparse(url).netloc
            parts = host.split(".")
            # Skip local/internal domains
            if parts[-1].lower() in skip_tlds:
                continue
            if len(parts) >= subdomain_parts:
                # e.g., "app.lab.nijho.lt" -> "lab.nijho.lt"
                return ".".join(parts[-3:])
            if len(parts) >= min_domain_parts:
                # e.g., "app.example.com" -> "example.com"
                return ".".join(parts[-2:])
    return None
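To see the slicing heuristic in _detect_domain at work, two calls using the hostnames from its own comments:

"app.lab.nijho.lt".split(".")[-3:]   # 4+ labels: keep three -> ['lab', 'nijho', 'lt'] -> "lab.nijho.lt"
"app.example.com".split(".")[-2:]    # 2-3 labels: keep two -> ['example', 'com'] -> "example.com"
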
@config_app.command("init-env")
def config_init_env(
    path: _PathOption = None,
    output: Annotated[
        Path | None,
        typer.Option(
            "--output", "-o", help="Output .env file path. Defaults to .env in current directory."
        ),
    ] = None,
    force: _ForceOption = False,
) -> None:
    """Generate a .env file for Docker deployment.

    Reads the compose-farm.yaml config and auto-detects settings:

    - CF_COMPOSE_DIR from compose_dir
    - CF_UID/GID/HOME/USER from current user
    - DOMAIN from traefik labels in stacks (if found)

    Example::

        cf config init-env # Create .env in current directory
        cf config init-env -o /path/to/.env # Create .env at specific path

    """
    config_file, cfg = _load_config_with_path(path)

    # Determine output path (default: current directory)
    env_path = output.expanduser().resolve() if output else Path.cwd() / ".env"

    if env_path.exists() and not force:
        console.print(f"[yellow].env file already exists:[/] {env_path}")
        if not typer.confirm("Overwrite?"):
            console.print("[dim]Aborted.[/dim]")
            raise typer.Exit(0)

    # Auto-detect values
    uid = os.getuid()
    gid = os.getgid()
    home = os.environ.get("HOME", "/root")
    user = os.environ.get("USER", "root")
    compose_dir = str(cfg.compose_dir)
    domain = _detect_domain(cfg)

    # Generate .env content
    lines = [
        "# Generated by: cf config init-env",
        f"# From config: {config_file}",
        "",
        "# Domain for Traefik labels",
        f"DOMAIN={domain or 'example.com'}",
        "",
        "# Compose files location",
        f"CF_COMPOSE_DIR={compose_dir}",
        "",
        "# Run as current user (recommended for NFS)",
        f"CF_UID={uid}",
        f"CF_GID={gid}",
        f"CF_HOME={home}",
        f"CF_USER={user}",
        "",
    ]

    env_path.write_text("\n".join(lines), encoding="utf-8")

    print_success(f"Created .env file: {env_path}")
    console.print()
    console.print("[dim]Detected settings:[/dim]")
    console.print(f" DOMAIN: {domain or '[yellow]example.com[/] (edit this)'}")
    console.print(f" CF_COMPOSE_DIR: {compose_dir}")
    console.print(f" CF_UID/GID: {uid}:{gid}")
    console.print()
    console.print("[dim]Review and edit as needed:[/dim]")
    console.print(f" [cyan]$EDITOR {env_path}[/cyan]")


# Register config subcommand on the shared app
app.add_typer(config_app, name="config", rich_help_panel="Configuration")
@@ -2,15 +2,21 @@

from __future__ import annotations

from typing import Annotated
import shlex
from pathlib import Path
from typing import TYPE_CHECKING, Annotated

import typer

if TYPE_CHECKING:
    from compose_farm.config import Config

from compose_farm.cli.app import app
from compose_farm.cli.common import (
    AllOption,
    ConfigOption,
    HostOption,
    ServiceOption,
    StacksArg,
    format_host,
    get_stacks,
@@ -18,11 +24,20 @@ from compose_farm.cli.common import (
    maybe_regenerate_traefik,
    report_results,
    run_async,
    validate_host_for_stack,
    validate_stacks,
)
from compose_farm.cli.management import _discover_stacks_full
from compose_farm.console import MSG_DRY_RUN, console, print_error, print_success
from compose_farm.executor import run_on_stacks, run_sequential_on_stacks
from compose_farm.operations import stop_orphaned_stacks, up_stacks
from compose_farm.executor import run_compose_on_host, run_on_stacks
from compose_farm.operations import (
    build_up_cmd,
    stop_orphaned_stacks,
    stop_stray_stacks,
    up_stacks,
)
from compose_farm.state import (
    add_stack_host,
    get_orphaned_stacks,
    get_stack_host,
    get_stacks_needing_migration,
@@ -36,11 +51,48 @@ def up(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    host: HostOption = None,
    service: ServiceOption = None,
    pull: Annotated[
        bool,
        typer.Option("--pull", help="Pull images before starting (--pull always)"),
    ] = False,
    build: Annotated[
        bool,
        typer.Option("--build", help="Build images before starting"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Start stacks (docker compose up -d). Auto-migrates if host changed."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host)
    results = run_async(up_stacks(cfg, stack_list, raw=True))
    if service:
        if len(stack_list) != 1:
            print_error("--service requires exactly one stack")
            raise typer.Exit(1)
        # For service-level up, use run_on_stacks directly (no migration logic)
        results = run_async(
            run_on_stacks(
                cfg, stack_list, build_up_cmd(pull=pull, build=build, service=service), raw=True
            )
        )
    elif host:
        # For host-filtered up, use run_on_stacks to only affect that host
        # (skips migration logic, which is intended when explicitly specifying a host)
        results = run_async(
            run_on_stacks(
                cfg,
                stack_list,
                build_up_cmd(pull=pull, build=build),
                raw=True,
                filter_host=host,
            )
        )
        # Update state for successful host-filtered operations
        for result in results:
            if result.success:
                base_stack = result.stack.split("@")[0]
                add_stack_host(cfg, base_stack, host)
    else:
        results = run_async(up_stacks(cfg, stack_list, raw=True, pull=pull, build=build))
    maybe_regenerate_traefik(cfg, results)
    report_results(results)
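build_up_cmd itself lives in compose_farm.operations and is not part of this diff; judging from the --pull/--build help text above, a plausible sketch of what it returns (hypothetical reconstruction, not the actual implementation):

def build_up_cmd(*, pull: bool = False, build: bool = False, service: str | None = None) -> str:
    # assumed flag mapping: --pull -> "--pull always", --build -> "--build"
    cmd = "up -d"
    if pull:
        cmd += " --pull always"
    if build:
        cmd += " --build"
    if service:
        cmd += f" {service}"
    return cmd
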
@@ -82,32 +134,58 @@ def down(

    stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host)
    raw = len(stack_list) == 1
    results = run_async(run_on_stacks(cfg, stack_list, "down", raw=raw))
    results = run_async(run_on_stacks(cfg, stack_list, "down", raw=raw, filter_host=host))

    # Remove from state on success
    # Update state on success
    # For multi-host stacks, result.stack is "stack@host", extract base name
    removed_stacks: set[str] = set()
    updated_stacks: set[str] = set()
    for result in results:
        if result.success:
            base_stack = result.stack.split("@")[0]
            if base_stack not in removed_stacks:
                remove_stack(cfg, base_stack)
                removed_stacks.add(base_stack)
            if base_stack not in updated_stacks:
                # When host is specified for multi-host stack, removes just that host
                # Otherwise removes entire stack from state
                filter_host = host if host and cfg.is_multi_host(base_stack) else None
                remove_stack(cfg, base_stack, filter_host)
                updated_stacks.add(base_stack)

    maybe_regenerate_traefik(cfg, results)
    report_results(results)


@app.command(rich_help_panel="Lifecycle")
def stop(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    service: ServiceOption = None,
    config: ConfigOption = None,
) -> None:
    """Stop services without removing containers (docker compose stop)."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
    if service and len(stack_list) != 1:
        print_error("--service requires exactly one stack")
        raise typer.Exit(1)
    cmd = f"stop {service}" if service else "stop"
    raw = len(stack_list) == 1
    results = run_async(run_on_stacks(cfg, stack_list, cmd, raw=raw))
    report_results(results)


@app.command(rich_help_panel="Lifecycle")
def pull(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    service: ServiceOption = None,
    config: ConfigOption = None,
) -> None:
    """Pull latest images (docker compose pull)."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
    if service and len(stack_list) != 1:
        print_error("--service requires exactly one stack")
        raise typer.Exit(1)
    cmd = f"pull --ignore-buildable {service}" if service else "pull --ignore-buildable"
    raw = len(stack_list) == 1
    results = run_async(run_on_stacks(cfg, stack_list, "pull", raw=raw))
    results = run_async(run_on_stacks(cfg, stack_list, cmd, raw=raw))
    report_results(results)


@@ -115,13 +193,20 @@ def pull(
def restart(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    service: ServiceOption = None,
    config: ConfigOption = None,
) -> None:
    """Restart stacks (down + up)."""
    """Restart running containers (docker compose restart)."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
    if service:
        if len(stack_list) != 1:
            print_error("--service requires exactly one stack")
            raise typer.Exit(1)
        cmd = f"restart {service}"
    else:
        cmd = "restart"
    raw = len(stack_list) == 1
    results = run_async(run_sequential_on_stacks(cfg, stack_list, ["down", "up -d"], raw=raw))
    maybe_regenerate_traefik(cfg, results)
    results = run_async(run_on_stacks(cfg, stack_list, cmd, raw=raw))
    report_results(results)


@@ -129,22 +214,30 @@ def restart(
def update(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    service: ServiceOption = None,
    config: ConfigOption = None,
) -> None:
    """Update stacks (pull + build + down + up)."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
    raw = len(stack_list) == 1
    results = run_async(
        run_sequential_on_stacks(
            cfg, stack_list, ["pull --ignore-buildable", "build", "down", "up -d"], raw=raw
        )
    )
    maybe_regenerate_traefik(cfg, results)
    report_results(results)
    """Update stacks (pull + build + up). Shorthand for 'up --pull --build'."""
    up(stacks=stacks, all_stacks=all_stacks, service=service, pull=True, build=True, config=config)


def _discover_strays(cfg: Config) -> dict[str, list[str]]:
    """Discover stacks running on unauthorized hosts by scanning all hosts."""
    _, strays, duplicates = _discover_stacks_full(cfg)

    # Merge duplicates into strays (for single-host stacks on multiple hosts,
    # keep correct host and stop others)
    for stack, running_hosts in duplicates.items():
        configured = cfg.get_hosts(stack)[0]
        stray_hosts = [h for h in running_hosts if h != configured]
        if stray_hosts:
            strays[stack] = stray_hosts

    return strays


@app.command(rich_help_panel="Lifecycle")
def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
def apply( # noqa: C901, PLR0912, PLR0915 (multi-phase reconciliation needs these branches)
    dry_run: Annotated[
        bool,
        typer.Option("--dry-run", "-n", help="Show what would change without executing"),
@@ -153,23 +246,29 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
        bool,
        typer.Option("--no-orphans", help="Only migrate, don't stop orphaned stacks"),
    ] = False,
    no_strays: Annotated[
        bool,
        typer.Option("--no-strays", help="Don't stop stray stacks (running on wrong host)"),
    ] = False,
    full: Annotated[
        bool,
        typer.Option("--full", "-f", help="Also run up on all stacks to apply config changes"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """Make reality match config (start, migrate, stop as needed).
    """Make reality match config (start, migrate, stop strays/orphans as needed).

    This is the "reconcile" command that ensures running stacks match your
    config file. It will:

    1. Stop orphaned stacks (in state but removed from config)
    2. Migrate stacks on wrong host (host in state ≠ host in config)
    3. Start missing stacks (in config but not in state)
    2. Stop stray stacks (running on unauthorized hosts)
    3. Migrate stacks on wrong host (host in state ≠ host in config)
    4. Start missing stacks (in config but not in state)

    Use --dry-run to preview changes before applying.
    Use --no-orphans to only migrate/start without stopping orphaned stacks.
    Use --no-orphans to skip stopping orphaned stacks.
    Use --no-strays to skip stopping stray stacks.
    Use --full to also run 'up' on all stacks (picks up compose/env changes).
    """
    cfg = load_config_or_exit(config)
@@ -177,16 +276,28 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
    migrations = get_stacks_needing_migration(cfg)
    missing = get_stacks_not_in_state(cfg)

    strays: dict[str, list[str]] = {}
    if not no_strays:
        console.print("[dim]Scanning hosts for stray containers...[/]")
        strays = _discover_strays(cfg)

    # For --full: refresh all stacks not already being started/migrated
    handled = set(migrations) | set(missing)
    to_refresh = [stack for stack in cfg.stacks if stack not in handled] if full else []

    has_orphans = bool(orphaned) and not no_orphans
    has_strays = bool(strays)
    has_migrations = bool(migrations)
    has_missing = bool(missing)
    has_refresh = bool(to_refresh)

    if not has_orphans and not has_migrations and not has_missing and not has_refresh:
    if (
        not has_orphans
        and not has_strays
        and not has_migrations
        and not has_missing
        and not has_refresh
    ):
        print_success("Nothing to apply - reality matches config")
        return

@@ -195,6 +306,14 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
        console.print(f"[yellow]Orphaned stacks to stop ({len(orphaned)}):[/]")
        for svc, hosts in orphaned.items():
            console.print(f" [cyan]{svc}[/] on [magenta]{format_host(hosts)}[/]")
    if has_strays:
        console.print(f"[red]Stray stacks to stop ({len(strays)}):[/]")
        for stack, hosts in strays.items():
            configured = cfg.get_hosts(stack)
            console.print(
                f" [cyan]{stack}[/] on [magenta]{', '.join(hosts)}[/] "
                f"[dim](should be on {', '.join(configured)})[/]"
            )
    if has_migrations:
        console.print(f"[cyan]Stacks to migrate ({len(migrations)}):[/]")
        for stack in migrations:
@@ -223,21 +342,26 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
        console.print("[yellow]Stopping orphaned stacks...[/]")
        all_results.extend(run_async(stop_orphaned_stacks(cfg)))

    # 2. Migrate stacks on wrong host
    # 2. Stop stray stacks (running on unauthorized hosts)
    if has_strays:
        console.print("[red]Stopping stray stacks...[/]")
        all_results.extend(run_async(stop_stray_stacks(cfg, strays)))

    # 3. Migrate stacks on wrong host
    if has_migrations:
        console.print("[cyan]Migrating stacks...[/]")
        migrate_results = run_async(up_stacks(cfg, migrations, raw=True))
        all_results.extend(migrate_results)
        maybe_regenerate_traefik(cfg, migrate_results)

    # 3. Start missing stacks (reuse up_stacks which handles state updates)
    # 4. Start missing stacks (reuse up_stacks which handles state updates)
    if has_missing:
        console.print("[green]Starting missing stacks...[/]")
        start_results = run_async(up_stacks(cfg, missing, raw=True))
        all_results.extend(start_results)
        maybe_regenerate_traefik(cfg, start_results)

    # 4. Refresh remaining stacks (--full: run up to apply config changes)
    # 5. Refresh remaining stacks (--full: run up to apply config changes)
    if has_refresh:
        console.print("[blue]Refreshing stacks...[/]")
        refresh_results = run_async(up_stacks(cfg, to_refresh, raw=True))
@@ -247,5 +371,66 @@ def apply( # noqa: PLR0912 (multi-phase reconciliation needs these branches)
    report_results(all_results)


# Alias: cf a = cf apply
app.command("a", hidden=True)(apply)
@app.command(
    rich_help_panel="Lifecycle",
    context_settings={"allow_interspersed_args": False},
)
def compose(
    stack: Annotated[str, typer.Argument(help="Stack to operate on (use '.' for current dir)")],
    command: Annotated[str, typer.Argument(help="Docker compose command")],
    args: Annotated[list[str] | None, typer.Argument(help="Additional arguments")] = None,
    host: HostOption = None,
    config: ConfigOption = None,
) -> None:
    """Run any docker compose command on a stack.

    Passthrough to docker compose for commands not wrapped by cf.
    Options after COMMAND are passed to docker compose, not cf.

    Examples:
        cf compose mystack --help - show docker compose help
        cf compose mystack top - view running processes
        cf compose mystack images - list images
        cf compose mystack exec web bash - interactive shell
        cf compose mystack config - view parsed config

    """
    cfg = load_config_or_exit(config)

    # Resolve "." to current directory name
    resolved_stack = Path.cwd().name if stack == "." else stack
    validate_stacks(cfg, [resolved_stack])

    # Handle multi-host stacks
    hosts = cfg.get_hosts(resolved_stack)
    if len(hosts) > 1:
        if host is None:
            print_error(
                f"Stack [cyan]{resolved_stack}[/] runs on multiple hosts: {', '.join(hosts)}\n"
                f"Use [bold]--host[/] to specify which host"
            )
            raise typer.Exit(1)
        validate_host_for_stack(cfg, resolved_stack, host)
        target_host = host
    else:
        target_host = hosts[0]

    # Build the full compose command (quote args to preserve spaces)
    full_cmd = command
    if args:
        full_cmd += " " + " ".join(shlex.quote(arg) for arg in args)

    # Run with raw=True for proper TTY handling (progress bars, interactive)
    result = run_async(run_compose_on_host(cfg, resolved_stack, target_host, full_cmd, raw=True))
    print() # Ensure newline after raw output

    if not result.success:
        raise typer.Exit(result.exit_code)


# Aliases (hidden from help, shown in --help as "Aliases: ...")
app.command("a", hidden=True)(apply) # cf a = cf apply
app.command("r", hidden=True)(restart) # cf r = cf restart
app.command("u", hidden=True)(update) # cf u = cf update
app.command("p", hidden=True)(pull) # cf p = cf pull
app.command("c", hidden=True)(compose) # cf c = cf compose
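One note on the passthrough plumbing above: shlex.quote keeps multi-word arguments intact when compose() re-joins them. A quick illustration (made-up argv):

import shlex

args = ["sh", "-c", "echo hello world"]
" ".join(shlex.quote(a) for a in args)  # -> "sh -c 'echo hello world'"
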
@@ -37,61 +37,62 @@ from compose_farm.console import (
)
from compose_farm.executor import (
    CommandResult,
    get_running_stacks_on_host,
    is_local,
    run_command,
)
from compose_farm.logs import (
    DEFAULT_LOG_PATH,
    SnapshotEntry,
    collect_stack_entries,
    collect_stacks_entries_on_host,
    isoformat,
    load_existing_entries,
    merge_entries,
    write_toml,
)
from compose_farm.operations import (
    build_discovery_results,
    check_host_compatibility,
    check_stack_requirements,
    discover_stack_host,
)
from compose_farm.state import get_orphaned_stacks, load_state, save_state
from compose_farm.traefik import generate_traefik_config, render_traefik_config

# --- Sync helpers ---


def _discover_stacks(cfg: Config, stacks: list[str] | None = None) -> dict[str, str | list[str]]:
    """Discover running stacks with a progress bar."""
    stack_list = stacks if stacks is not None else list(cfg.stacks)
    results = run_parallel_with_progress(
        "Discovering",
        stack_list,
        lambda s: discover_stack_host(cfg, s),
    )
    return {svc: host for svc, host in results if host is not None}


def _snapshot_stacks(
    cfg: Config,
    stacks: list[str],
    discovered: dict[str, str | list[str]],
    log_path: Path | None,
) -> Path:
    """Capture image digests with a progress bar."""
    """Capture image digests using batched SSH calls (1 per host).

    Args:
        cfg: Configuration
        discovered: Dict mapping stack -> host(s) where it's running
        log_path: Optional path to write the log file

    Returns:
        Path to the written log file.

    """
    effective_log_path = log_path or DEFAULT_LOG_PATH
    now_dt = datetime.now(UTC)
    now_iso = isoformat(now_dt)

    async def collect_stack(stack: str) -> tuple[str, list[SnapshotEntry]]:
        try:
            return stack, await collect_stack_entries(cfg, stack, now=now_dt)
        except RuntimeError:
            return stack, []
    # Group stacks by host for batched SSH calls
    stacks_by_host: dict[str, set[str]] = {}
    for stack, hosts in discovered.items():
        # Use first host for multi-host stacks (they use the same images)
        host = hosts[0] if isinstance(hosts, list) else hosts
        stacks_by_host.setdefault(host, set()).add(stack)

    results = run_parallel_with_progress(
        "Capturing",
        stacks,
        collect_stack,
    )
    # Collect entries with 1 SSH call per host (with progress bar)
    async def collect_on_host(host: str) -> tuple[str, list[SnapshotEntry]]:
        entries = await collect_stacks_entries_on_host(cfg, host, stacks_by_host[host], now=now_dt)
        return host, entries

    results = run_parallel_with_progress("Capturing", list(stacks_by_host.keys()), collect_on_host)
    snapshot_entries = [entry for _, entries in results for entry in entries]

    if not snapshot_entries:
|
||||
console.print(f" [red]-[/] [cyan]{stack}[/] (was on [magenta]{host_str}[/])")
|
||||
|
||||
|
||||
def _discover_stacks_full(
|
||||
cfg: Config,
|
||||
stacks: list[str] | None = None,
|
||||
) -> tuple[dict[str, str | list[str]], dict[str, list[str]], dict[str, list[str]]]:
|
||||
"""Discover running stacks with full host scanning for stray detection.
|
||||
|
||||
Queries each host once for all running stacks (with progress bar),
|
||||
then delegates to build_discovery_results for categorization.
|
||||
"""
|
||||
all_hosts = list(cfg.hosts.keys())
|
||||
|
||||
# Query each host for running stacks (with progress bar)
|
||||
async def get_stacks_on_host(host: str) -> tuple[str, set[str]]:
|
||||
running = await get_running_stacks_on_host(cfg, host)
|
||||
return host, running
|
||||
|
||||
host_results = run_parallel_with_progress("Discovering", all_hosts, get_stacks_on_host)
|
||||
running_on_host: dict[str, set[str]] = dict(host_results)
|
||||
|
||||
return build_discovery_results(cfg, running_on_host, stacks)
|
||||
|
||||
|
||||
def _report_stray_stacks(
|
||||
strays: dict[str, list[str]],
|
||||
cfg: Config,
|
||||
) -> None:
|
||||
"""Report stacks running on unauthorized hosts."""
|
||||
if strays:
|
||||
console.print(f"\n[red]Stray stacks[/] (running on wrong host, {len(strays)}):")
|
||||
console.print("[dim]Run [bold]cf apply[/bold] to stop them.[/]")
|
||||
for stack in sorted(strays):
|
||||
stray_hosts = strays[stack]
|
||||
configured = cfg.get_hosts(stack)
|
||||
console.print(
|
||||
f" [red]![/] [cyan]{stack}[/] on [magenta]{', '.join(stray_hosts)}[/] "
|
||||
f"[dim](should be on {', '.join(configured)})[/]"
|
||||
)
|
||||
|
||||
|
||||
def _report_duplicate_stacks(duplicates: dict[str, list[str]], cfg: Config) -> None:
|
||||
"""Report single-host stacks running on multiple hosts."""
|
||||
if duplicates:
|
||||
console.print(
|
||||
f"\n[yellow]Duplicate stacks[/] (running on multiple hosts, {len(duplicates)}):"
|
||||
)
|
||||
console.print("[dim]Run [bold]cf apply[/bold] to stop extras.[/]")
|
||||
for stack in sorted(duplicates):
|
||||
hosts = duplicates[stack]
|
||||
configured = cfg.get_hosts(stack)[0]
|
||||
console.print(
|
||||
f" [yellow]![/] [cyan]{stack}[/] on [magenta]{', '.join(hosts)}[/] "
|
||||
f"[dim](should only be on {configured})[/]"
|
||||
)
|
||||
|
||||
|
||||
# --- Check helpers ---
|
||||
|
||||
|
||||
@@ -271,6 +327,8 @@ def _report_orphaned_stacks(cfg: Config) -> bool:
|
||||
|
||||
def _report_traefik_status(cfg: Config, stacks: list[str]) -> None:
|
||||
"""Check and report traefik label status."""
|
||||
from compose_farm.traefik import generate_traefik_config # noqa: PLC0415
|
||||
|
||||
try:
|
||||
_, warnings = generate_traefik_config(cfg, stacks, check_all=True)
|
||||
except (FileNotFoundError, ValueError):
|
||||
@@ -390,6 +448,11 @@ def traefik_file(
|
||||
config: ConfigOption = None,
|
||||
) -> None:
|
||||
"""Generate a Traefik file-provider fragment from compose Traefik labels."""
|
||||
from compose_farm.traefik import ( # noqa: PLC0415
|
||||
generate_traefik_config,
|
||||
render_traefik_config,
|
||||
)
|
||||
|
||||
stack_list, cfg = get_stacks(stacks or [], all_stacks, config)
|
||||
try:
|
||||
dynamic, warnings = generate_traefik_config(cfg, stack_list)
|
||||
@@ -440,7 +503,7 @@ def refresh(
|
||||
|
||||
current_state = load_state(cfg)
|
||||
|
||||
discovered = _discover_stacks(cfg, stack_list)
|
||||
discovered, strays, duplicates = _discover_stacks_full(cfg, stack_list)
|
||||
|
||||
# Calculate changes (only for the stacks we're refreshing)
|
||||
added = [s for s in discovered if s not in current_state]
|
||||
@@ -463,6 +526,9 @@ def refresh(
|
||||
else:
|
||||
print_success("State is already in sync.")
|
||||
|
||||
_report_stray_stacks(strays, cfg)
|
||||
_report_duplicate_stacks(duplicates, cfg)
|
||||
|
||||
if dry_run:
|
||||
console.print(f"\n{MSG_DRY_RUN}")
|
||||
return
|
||||
@@ -475,10 +541,10 @@ def refresh(
|
||||
save_state(cfg, new_state)
|
||||
print_success(f"State updated: {len(new_state)} stacks tracked.")
|
||||
|
||||
# Capture image digests for running stacks
|
||||
# Capture image digests for running stacks (1 SSH call per host)
|
||||
if discovered:
|
||||
try:
|
||||
path = _snapshot_stacks(cfg, list(discovered.keys()), log_path)
|
||||
path = _snapshot_stacks(cfg, discovered, log_path)
|
||||
print_success(f"Digests written to {path}")
|
||||
except RuntimeError as exc:
|
||||
print_warning(str(exc))
|
||||
@@ -593,3 +659,9 @@ def init_network(
|
||||
failed = [r for r in results if not r.success]
|
||||
if failed:
|
||||
raise typer.Exit(1)
|
||||
|
||||
|
||||
# Aliases (hidden from help)
|
||||
app.command("rf", hidden=True)(refresh) # cf rf = cf refresh
|
||||
app.command("ck", hidden=True)(check) # cf ck = cf check
|
||||
app.command("tf", hidden=True)(traefik_file) # cf tf = cf traefik-file
|
||||
|
||||
@@ -14,23 +14,29 @@ from compose_farm.cli.common import (
    AllOption,
    ConfigOption,
    HostOption,
    ServiceOption,
    StacksArg,
    get_stacks,
    load_config_or_exit,
    report_results,
    run_async,
    run_parallel_with_progress,
    validate_hosts,
)
from compose_farm.console import console
from compose_farm.console import console, print_error, print_warning
from compose_farm.executor import run_command, run_on_stacks
from compose_farm.state import get_stacks_needing_migration, group_stacks_by_host, load_state

if TYPE_CHECKING:
    from collections.abc import Callable

    from compose_farm.config import Config
    from compose_farm.glances import ContainerStats


def _get_container_counts(cfg: Config) -> dict[str, int]:
    """Get container counts from all hosts with a progress bar."""
def _get_container_counts(cfg: Config, hosts: list[str] | None = None) -> dict[str, int]:
    """Get container counts from hosts with a progress bar."""
    host_list = hosts if hosts is not None else list(cfg.hosts.keys())

    async def get_count(host_name: str) -> tuple[str, int]:
        host = cfg.hosts[host_name]
@@ -43,7 +49,7 @@ def _get_container_counts(cfg: Config) -> dict[str, int]:

    results = run_parallel_with_progress(
        "Querying hosts",
        list(cfg.hosts.keys()),
        host_list,
        get_count,
    )
    return dict(results)
@@ -66,7 +72,7 @@ def _build_host_table(
    if show_containers:
        table.add_column("Containers", justify="right")

    for host_name in sorted(cfg.hosts.keys()):
    for host_name in sorted(stacks_by_host.keys()):
        host = cfg.hosts[host_name]
        configured = len(stacks_by_host[host_name])
        running = len(running_by_host[host_name])
@@ -85,19 +91,46 @@ def _build_host_table(
    return table


def _state_includes_host(host_value: str | list[str], host_name: str) -> bool:
    """Check whether a state entry includes the given host."""
    if isinstance(host_value, list):
        return host_name in host_value
    return host_value == host_name


def _build_summary_table(
    cfg: Config, state: dict[str, str | list[str]], pending: list[str]
    cfg: Config,
    state: dict[str, str | list[str]],
    pending: list[str],
    *,
    host_filter: str | None = None,
) -> Table:
    """Build the summary table."""
    on_disk = cfg.discover_compose_dirs()
    if host_filter:
        stacks_configured = [stack for stack in cfg.stacks if host_filter in cfg.get_hosts(stack)]
        stacks_configured_set = set(stacks_configured)
        state = {
            stack: hosts
            for stack, hosts in state.items()
            if _state_includes_host(hosts, host_filter)
        }
        on_disk = {stack for stack in on_disk if stack in stacks_configured_set}
        total_hosts = 1
        stacks_configured_count = len(stacks_configured)
        stacks_tracked_count = len(state)
    else:
        total_hosts = len(cfg.hosts)
        stacks_configured_count = len(cfg.stacks)
        stacks_tracked_count = len(state)

    table = Table(title="Summary", show_header=False)
    table.add_column("Label", style="dim")
    table.add_column("Value", style="bold")

    table.add_row("Total hosts", str(len(cfg.hosts)))
    table.add_row("Stacks (configured)", str(len(cfg.stacks)))
    table.add_row("Stacks (tracked)", str(len(state)))
    table.add_row("Total hosts", str(total_hosts))
    table.add_row("Stacks (configured)", str(stacks_configured_count))
    table.add_row("Stacks (tracked)", str(stacks_tracked_count))
    table.add_row("Compose files on disk", str(len(on_disk)))

    if pending:
@@ -110,6 +143,81 @@ def _build_summary_table(
    return table
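_state_includes_host above has to cope with both shapes a state entry can take; illustrative calls:

_state_includes_host("nas", "nas")     # True: single-host entry stored as a string
_state_includes_host(["a", "b"], "b")  # True: multi-host entry stored as a list
_state_includes_host(["a", "b"], "c")  # False
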
def _format_network(rx: int, tx: int, fmt: Callable[[int], str]) -> str:
    """Format network I/O."""
    return f"[dim]↓[/]{fmt(rx)} [dim]↑[/]{fmt(tx)}"


def _cpu_style(percent: float) -> str:
    """Rich style for CPU percentage."""
    if percent > 80: # noqa: PLR2004
        return "red"
    if percent > 50: # noqa: PLR2004
        return "yellow"
    return "green"


def _mem_style(percent: float) -> str:
    """Rich style for memory percentage."""
    if percent > 90: # noqa: PLR2004
        return "red"
    if percent > 70: # noqa: PLR2004
        return "yellow"
    return "green"


def _status_style(status: str) -> str:
    """Rich style for container status."""
    s = status.lower()
    if s == "running":
        return "green"
    if s == "exited":
        return "red"
    if s == "paused":
        return "yellow"
    return "dim"


def _build_containers_table(
    containers: list[ContainerStats],
    host_filter: str | None = None,
) -> Table:
    """Build Rich table for container stats."""
    from compose_farm.glances import format_bytes # noqa: PLC0415

    table = Table(title="Containers", show_header=True, header_style="bold cyan")
    table.add_column("Stack", style="cyan")
    table.add_column("Service", style="dim")
    table.add_column("Host", style="magenta")
    table.add_column("Image")
    table.add_column("Status")
    table.add_column("Uptime", justify="right")
    table.add_column("CPU%", justify="right")
    table.add_column("Memory", justify="right")
    table.add_column("Net I/O", justify="right")

    if host_filter:
        containers = [c for c in containers if c.host == host_filter]

    # Sort by stack, then service
    containers = sorted(containers, key=lambda c: (c.stack.lower(), c.service.lower()))

    for c in containers:
        table.add_row(
            c.stack or c.name,
            c.service or c.name,
            c.host,
            c.image,
            f"[{_status_style(c.status)}]{c.status}[/]",
            c.uptime or "[dim]-[/]",
            f"[{_cpu_style(c.cpu_percent)}]{c.cpu_percent:.1f}%[/]",
            f"[{_mem_style(c.memory_percent)}]{format_bytes(c.memory_usage)}[/]",
            _format_network(c.network_rx, c.network_tx, format_bytes),
        )

    return table


# --- Command functions ---


@@ -118,6 +226,7 @@ def logs(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    host: HostOption = None,
    service: ServiceOption = None,
    follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False,
    tail: Annotated[
        int | None,
@@ -125,8 +234,11 @@ def logs(
    ] = None,
    config: ConfigOption = None,
) -> None:
    """Show stack logs."""
    """Show stack logs. With --service, shows logs for just that service."""
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host)
    if service and len(stack_list) != 1:
        print_error("--service requires exactly one stack")
        raise typer.Exit(1)

    # Default to fewer lines when showing multiple stacks
    many_stacks = all_stacks or host is not None or len(stack_list) > 1
@@ -134,7 +246,9 @@ def logs(
    cmd = f"logs --tail {effective_tail}"
    if follow:
        cmd += " -f"
    results = run_async(run_on_stacks(cfg, stack_list, cmd))
    if service:
        cmd += f" {service}"
    results = run_async(run_on_stacks(cfg, stack_list, cmd, filter_host=host))
    report_results(results)


@@ -143,6 +257,7 @@ def ps(
    stacks: StacksArg = None,
    all_stacks: AllOption = False,
    host: HostOption = None,
    service: ServiceOption = None,
    config: ConfigOption = None,
) -> None:
    """Show status of stacks.
@@ -150,9 +265,14 @@ def ps(
    Without arguments: shows all stacks (same as --all).
    With stack names: shows only those stacks.
    With --host: shows stacks on that host.
    With --service: filters to a specific service within the stack.
    """
    stack_list, cfg = get_stacks(stacks or [], all_stacks, config, host=host, default_all=True)
    results = run_async(run_on_stacks(cfg, stack_list, "ps"))
    if service and len(stack_list) != 1:
        print_error("--service requires exactly one stack")
        raise typer.Exit(1)
    cmd = f"ps {service}" if service else "ps"
    results = run_async(run_on_stacks(cfg, stack_list, cmd, filter_host=host))
    report_results(results)


@@ -162,24 +282,66 @@ def stats(
        bool,
        typer.Option("--live", "-l", help="Query Docker for live container stats"),
    ] = False,
    containers: Annotated[
        bool,
        typer.Option(
            "--containers", "-C", help="Show per-container resource stats (requires Glances)"
        ),
    ] = False,
    host: HostOption = None,
    config: ConfigOption = None,
) -> None:
    """Show overview statistics for hosts and stacks.

    Without --live: Shows config/state info (hosts, stacks, pending migrations).
    Without flags: Shows config/state info (hosts, stacks, pending migrations).
    With --live: Also queries Docker on each host for container counts.
    With --containers: Shows per-container resource stats (requires Glances).
    """
    cfg = load_config_or_exit(config)

    host_filter = None
    if host:
        validate_hosts(cfg, host)
        host_filter = host

    # Handle --containers mode
    if containers:
        if not cfg.glances_stack:
            print_error("Glances not configured")
            console.print("[dim]Add 'glances_stack: glances' to compose-farm.yaml[/]")
            raise typer.Exit(1)

        from compose_farm.glances import fetch_all_container_stats # noqa: PLC0415

        host_list = [host_filter] if host_filter else None
        container_list = run_async(fetch_all_container_stats(cfg, hosts=host_list))

        if not container_list:
            print_warning("No containers found")
            raise typer.Exit(0)

        console.print(_build_containers_table(container_list, host_filter=host_filter))
        return

    # Validate and filter by host if specified
    if host_filter:
        all_hosts = [host_filter]
        selected_hosts = {host_filter: cfg.hosts[host_filter]}
    else:
        all_hosts = list(cfg.hosts.keys())
        selected_hosts = cfg.hosts

    state = load_state(cfg)
    pending = get_stacks_needing_migration(cfg)

    all_hosts = list(cfg.hosts.keys())
    stacks_by_host = group_stacks_by_host(cfg.stacks, cfg.hosts, all_hosts)
    running_by_host = group_stacks_by_host(state, cfg.hosts, all_hosts)
    # Filter pending migrations to selected host(s)
    if host_filter:
        pending = [stack for stack in pending if host_filter in cfg.get_hosts(stack)]
    stacks_by_host = group_stacks_by_host(cfg.stacks, selected_hosts, all_hosts)
    running_by_host = group_stacks_by_host(state, selected_hosts, all_hosts)

    container_counts: dict[str, int] = {}
    if live:
        container_counts = _get_container_counts(cfg)
        container_counts = _get_container_counts(cfg, all_hosts)

    host_table = _build_host_table(
        cfg, stacks_by_host, running_by_host, container_counts, show_containers=live
@@ -187,4 +349,46 @@ def stats(
    console.print(host_table)

    console.print()
    console.print(_build_summary_table(cfg, state, pending))
    console.print(_build_summary_table(cfg, state, pending, host_filter=host_filter))


@app.command("list", rich_help_panel="Monitoring")
def list_(
    host: HostOption = None,
    simple: Annotated[
        bool,
        typer.Option("--simple", "-s", help="Plain output (one stack per line, for scripting)"),
    ] = False,
    config: ConfigOption = None,
) -> None:
    """List all stacks and their assigned hosts."""
    cfg = load_config_or_exit(config)

    stacks: list[tuple[str, str | list[str]]] = list(cfg.stacks.items())
    if host:
        stacks = [(s, h) for s, h in stacks if str(h) == host or host in str(h).split(",")]

    if simple:
        for stack, _ in sorted(stacks):
            console.print(stack)
    else:
        # Assign colors to hosts for visual grouping
        host_colors = ["magenta", "cyan", "green", "yellow", "blue", "red"]
        unique_hosts = sorted({str(h) for _, h in stacks})
        host_color_map = {h: host_colors[i % len(host_colors)] for i, h in enumerate(unique_hosts)}

        table = Table(title="Stacks", show_header=True, header_style="bold cyan")
        table.add_column("Stack")
        table.add_column("Host")

        for stack, host_val in sorted(stacks):
            color = host_color_map.get(str(host_val), "white")
            table.add_row(f"[{color}]{stack}[/]", f"[{color}]{host_val}[/]")

        console.print(table)


# Aliases (hidden from help)
app.command("l", hidden=True)(logs) # cf l = cf logs
app.command("ls", hidden=True)(list_) # cf ls = cf list
app.command("s", hidden=True)(stats) # cf s = cf stats
@@ -13,6 +13,7 @@ from pathlib import Path
from typing import TYPE_CHECKING, Any

import yaml
from dotenv import dotenv_values

if TYPE_CHECKING:
    from .config import Config
@@ -40,25 +41,37 @@ def _load_env(compose_path: Path) -> dict[str, str]:
    Reads from .env file in the same directory as compose file,
    then overlays current environment variables.
    """
    env: dict[str, str] = {}
    env_path = compose_path.parent / ".env"
    if env_path.exists():
        for line in env_path.read_text().splitlines():
            stripped = line.strip()
            if not stripped or stripped.startswith("#") or "=" not in stripped:
                continue
            key, value = stripped.split("=", 1)
            key = key.strip()
            value = value.strip()
            if (value.startswith('"') and value.endswith('"')) or (
                value.startswith("'") and value.endswith("'")
            ):
                value = value[1:-1]
            env[key] = value
    env: dict[str, str] = {k: v for k, v in dotenv_values(env_path).items() if v is not None}
    env.update({k: v for k, v in os.environ.items() if isinstance(v, str)})
    return env
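The hunk above swaps the hand-rolled .env parser for python-dotenv; a minimal sketch of the new precedence (file values first, process environment wins):

import os
from dotenv import dotenv_values

# dotenv_values returns {} for a missing file; keys without a value map to None
env = {k: v for k, v in dotenv_values(".env").items() if v is not None}
env.update(os.environ)  # current environment overrides the file
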
def parse_compose_data(content: str) -> dict[str, Any]:
|
||||
"""Parse compose YAML content into a dict."""
|
||||
compose_data = yaml.safe_load(content) or {}
|
||||
return compose_data if isinstance(compose_data, dict) else {}
|
||||
|
||||
|
||||
def load_compose_data(compose_path: Path) -> dict[str, Any]:
|
||||
"""Load compose YAML from a file path."""
|
||||
return parse_compose_data(compose_path.read_text())
|
||||
|
||||
|
||||
def load_compose_data_for_stack(config: Config, stack: str) -> tuple[Path, dict[str, Any]]:
|
||||
"""Load compose YAML for a stack, returning (path, data)."""
|
||||
compose_path = config.get_compose_path(stack)
|
||||
if not compose_path.exists():
|
||||
return compose_path, {}
|
||||
return compose_path, load_compose_data(compose_path)
|
||||
|
||||
|
||||
def extract_services(compose_data: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Extract services mapping from compose data."""
|
||||
raw_services = compose_data.get("services", {})
|
||||
return raw_services if isinstance(raw_services, dict) else {}
|
||||
|
||||
|
||||
def _interpolate(value: str, env: dict[str, str]) -> str:
|
||||
"""Perform ${VAR} and ${VAR:-default} interpolation."""
|
||||
|
||||
@@ -185,16 +198,15 @@ def parse_host_volumes(config: Config, stack: str) -> list[str]:
|
||||
Returns a list of absolute host paths used as volume mounts.
|
||||
     Skips named volumes and resolves relative paths.
     """
-    compose_path = config.get_compose_path(stack)
+    compose_path, compose_data = load_compose_data_for_stack(config, stack)
     if not compose_path.exists():
         return []

-    env = _load_env(compose_path)
-    compose_data = yaml.safe_load(compose_path.read_text()) or {}
-    raw_services = compose_data.get("services", {})
-    if not isinstance(raw_services, dict):
+    raw_services = extract_services(compose_data)
     if not raw_services:
         return []

+    env = _load_env(compose_path)
     paths: list[str] = []
     compose_dir = compose_path.parent

@@ -221,16 +233,15 @@ def parse_devices(config: Config, stack: str) -> list[str]:

     Returns a list of host device paths (e.g., /dev/dri, /dev/dri/renderD128).
     """
-    compose_path = config.get_compose_path(stack)
+    compose_path, compose_data = load_compose_data_for_stack(config, stack)
     if not compose_path.exists():
         return []

-    env = _load_env(compose_path)
-    compose_data = yaml.safe_load(compose_path.read_text()) or {}
-    raw_services = compose_data.get("services", {})
-    if not isinstance(raw_services, dict):
+    raw_services = extract_services(compose_data)
     if not raw_services:
         return []

+    env = _load_env(compose_path)
     devices: list[str] = []
     for definition in raw_services.values():
         if not isinstance(definition, dict):
@@ -260,18 +271,20 @@ def parse_external_networks(config: Config, stack: str) -> list[str]:

     Returns a list of network names marked as external: true.
     """
-    compose_path = config.get_compose_path(stack)
+    compose_path, compose_data = load_compose_data_for_stack(config, stack)
     if not compose_path.exists():
         return []

-    compose_data = yaml.safe_load(compose_path.read_text()) or {}
     networks = compose_data.get("networks", {})
     if not isinstance(networks, dict):
         return []

     external_networks: list[str] = []
-    for name, definition in networks.items():
+    for key, definition in networks.items():
         if isinstance(definition, dict) and definition.get("external") is True:
+            # Networks may have a "name" field, which may differ from the key.
+            # Use it if present, else fall back to the key.
+            name = str(definition.get("name", key))
             external_networks.append(name)

     return external_networks
@@ -285,15 +298,14 @@ def load_compose_services(

     Returns (services_dict, env_dict, host_address).
     """
-    compose_path = config.get_compose_path(stack)
+    compose_path, compose_data = load_compose_data_for_stack(config, stack)
     if not compose_path.exists():
         message = f"[{stack}] Compose file not found: {compose_path}"
         raise FileNotFoundError(message)

     env = _load_env(compose_path)
-    compose_data = yaml.safe_load(compose_path.read_text()) or {}
-    raw_services = compose_data.get("services", {})
-    if not isinstance(raw_services, dict):
+    raw_services = extract_services(compose_data)
     if not raw_services:
         return {}, env, config.get_host(stack).address
     return raw_services, env, config.get_host(stack).address

@@ -336,3 +348,18 @@ def get_ports_for_service(
     if isinstance(ref_def, dict):
         return _parse_ports(ref_def.get("ports"), env)
     return _parse_ports(definition.get("ports"), env)
+
+
+def get_container_name(
+    service_name: str,
+    service_def: dict[str, Any] | None,
+    project_name: str,
+) -> str:
+    """Get the container name for a service.
+
+    Uses container_name from compose if set, otherwise defaults to {project}-{service}-1.
+    This matches Docker Compose's default naming convention.
+    """
+    if isinstance(service_def, dict) and service_def.get("container_name"):
+        return str(service_def["container_name"])
+    return f"{project_name}-{service_name}-1"
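A quick sketch of the naming rule this docstring describes; the stack and service names are hypothetical:

get_container_name("db", {"container_name": "my-postgres"}, "blog")  # -> "my-postgres"
get_container_name("db", {}, "blog")  # -> "blog-db-1" (Docker Compose default)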
@@ -3,7 +3,9 @@

 from __future__ import annotations

 import getpass
+import os
 from pathlib import Path
+from typing import Any

 import yaml
 from pydantic import BaseModel, Field, model_validator
@@ -14,7 +16,7 @@ from .paths import config_search_paths, find_config_path
 COMPOSE_FILENAMES = ("compose.yaml", "compose.yml", "docker-compose.yml", "docker-compose.yaml")


-class Host(BaseModel):
+class Host(BaseModel, extra="forbid"):
     """SSH host configuration."""

     address: str
@@ -22,7 +24,7 @@ class Host(BaseModel):
     port: int = 22


-class Config(BaseModel):
+class Config(BaseModel, extra="forbid"):
     """Main configuration."""

     compose_dir: Path = Path("/opt/compose")
@@ -30,6 +32,9 @@ class Config(BaseModel):
     stacks: dict[str, str | list[str]]  # stack_name -> host_name or list of hosts
     traefik_file: Path | None = None  # Auto-regenerate traefik config after up/down
     traefik_stack: str | None = None  # Stack name for Traefik (skip its host in file-provider)
+    glances_stack: str | None = (
+        None  # Stack name for Glances (enables host resource stats in web UI)
+    )
     config_path: Path = Path()  # Set by load_config()

     def get_state_path(self) -> Path:
@@ -92,9 +97,17 @@ class Config(BaseModel):
         host_names = self.get_hosts(stack)
         return self.hosts[host_names[0]]

+    def get_stack_dir(self, stack: str) -> Path:
+        """Get stack directory path."""
+        return self.compose_dir / stack
+
     def get_compose_path(self, stack: str) -> Path:
-        """Get compose file path for a stack (tries compose.yaml first)."""
-        stack_dir = self.compose_dir / stack
+        """Get compose file path for a stack (tries compose.yaml first).
+
+        Note: This checks local filesystem. For remote execution, use
+        get_stack_dir() and let docker compose find the file.
+        """
+        stack_dir = self.get_stack_dir(stack)
         for filename in COMPOSE_FILENAMES:
             candidate = stack_dir / filename
             if candidate.exists():
@@ -112,8 +125,33 @@ class Config(BaseModel):
                 found.add(subdir.name)
         return found

+    def get_web_stack(self) -> str:
+        """Get web stack name from CF_WEB_STACK environment variable."""
+        return os.environ.get("CF_WEB_STACK", "")
+
+    def get_local_host_from_web_stack(self) -> str | None:
+        """Resolve the local host from the web stack configuration (container only).
+
+        When running in the web UI container (CF_WEB_STACK is set), this returns
+        the host that the web stack runs on. This is used for:
+        - Glances connectivity (use container name instead of IP)
+        - Container exec (local docker exec vs SSH)
+        - File read/write (local filesystem vs SSH)
+
+        Returns None if not in container mode or web stack is not configured.
+        """
+        if os.environ.get("CF_WEB_STACK") is None:
+            return None
+        web_stack = self.get_web_stack()
+        if not web_stack or web_stack not in self.stacks:
+            return None
+        host_names = self.get_hosts(web_stack)
+        if len(host_names) != 1:
+            return None
+        return host_names[0]
+

-def _parse_hosts(raw_hosts: dict[str, str | dict[str, str | int]]) -> dict[str, Host]:
+def _parse_hosts(raw_hosts: dict[str, Any]) -> dict[str, Host]:
     """Parse hosts from config, handling both simple and full forms."""
     hosts = {}
     for name, value in raw_hosts.items():
@@ -122,11 +160,7 @@ def _parse_hosts(raw_hosts: dict[str, str | dict[str, str | int]]) -> dict[str,
             hosts[name] = Host(address=value)
         else:
             # Full form: hostname: {address: ..., user: ..., port: ...}
-            hosts[name] = Host(
-                address=str(value.get("address", "")),
-                user=str(value["user"]) if "user" in value else getpass.getuser(),
-                port=int(value["port"]) if "port" in value else 22,
-            )
+            hosts[name] = Host(**value)
     return hosts
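With extra="forbid" plus the Host(**value) form, a typo in a host entry now fails validation instead of being silently dropped. A minimal sketch with hypothetical host data:

from pydantic import ValidationError

Host(address="10.0.0.5", user="deploy", port=2222)  # accepted
try:
    Host(address="10.0.0.5", usr="deploy")  # misspelled "user"
except ValidationError as exc:
    print(exc)  # extra="forbid" rejects the unknown "usr" key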
@@ -76,7 +76,7 @@ stacks:
 # traefik_file: (optional) Auto-generate Traefik file-provider config
 # ------------------------------------------------------------------------------
 # When set, compose-farm automatically regenerates this file after
-# up/down/restart/update commands. Traefik watches this file for changes.
+# up/down/update commands. Traefik watches this file for changes.
 #
 # traefik_file: /opt/compose/traefik/dynamic.d/compose-farm.yml

@@ -87,3 +87,13 @@ stacks:
 # skipped (they're handled by Traefik's Docker provider directly).
 #
 # traefik_stack: traefik
+
+# ------------------------------------------------------------------------------
+# glances_stack: (optional) Stack/container name for Glances
+# ------------------------------------------------------------------------------
+# When set, enables host resource monitoring via the Glances API. Used by:
+#   - CLI: `cf stats --containers` shows container stats from all hosts
+#   - Web UI: displays host resource graphs and container metrics
+# This should be the container name that runs Glances on the same Docker network.
+#
+# glances_stack: glances
@@ -5,6 +5,7 @@ from __future__ import annotations

 import asyncio
 import socket
 import subprocess
+import time
 from dataclasses import dataclass
 from functools import lru_cache
 from typing import TYPE_CHECKING, Any
@@ -23,24 +24,46 @@ LOCAL_ADDRESSES = frozenset({"local", "localhost", "127.0.0.1", "::1"})
 _DEFAULT_SSH_PORT = 22


+class TTLCache:
+    """Simple TTL cache for async function results."""
+
+    def __init__(self, ttl_seconds: float = 30.0) -> None:
+        """Initialize cache with default TTL in seconds."""
+        # Cache stores: key -> (timestamp, value, item_ttl)
+        self._cache: dict[str, tuple[float, Any, float]] = {}
+        self._default_ttl = ttl_seconds
+
+    def get(self, key: str) -> Any | None:
+        """Get value if exists and not expired."""
+        if key in self._cache:
+            timestamp, value, item_ttl = self._cache[key]
+            if time.monotonic() - timestamp < item_ttl:
+                return value
+            del self._cache[key]
+        return None
+
+    def set(self, key: str, value: Any, ttl_seconds: float | None = None) -> None:
+        """Set value with current timestamp and optional custom TTL."""
+        ttl = ttl_seconds if ttl_seconds is not None else self._default_ttl
+        self._cache[key] = (time.monotonic(), value, ttl)
+
+    def clear(self) -> None:
+        """Clear all cached values."""
+        self._cache.clear()
+
+
+# Cache compose labels per host for 30 seconds
+_compose_labels_cache = TTLCache(ttl_seconds=30.0)
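A minimal sketch of how the cache behaves; the keys and values are illustrative:

cache = TTLCache(ttl_seconds=30.0)
cache.set("hostA", {"web-1": ("web", "nginx")})  # default 30 s TTL
cache.set("hostB", {}, ttl_seconds=5.0)          # per-item TTL override
cache.get("hostA")  # returns the value while fresh, None once expired
cache.clear()       # drops everything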
 def _print_compose_command(
     host_name: str,
-    compose_dir: str,
-    compose_path: str,
+    stack: str,
     compose_cmd: str,
 ) -> None:
-    """Print the docker compose command being executed.
-
-    Shows the host and a simplified command with relative path from compose_dir.
-    """
-    # Show relative path from compose_dir for cleaner output
-    if compose_path.startswith(compose_dir):
-        rel_path = compose_path[len(compose_dir) :].lstrip("/")
-    else:
-        rel_path = compose_path
-
+    """Print the docker compose command being executed."""
     console.print(
-        f"[dim][magenta]{host_name}[/magenta]: docker compose -f {rel_path} {compose_cmd}[/dim]"
+        f"[dim][magenta]{host_name}[/magenta]: ({stack}) docker compose {compose_cmd}[/dim]"
     )


@@ -158,15 +181,20 @@ def ssh_connect_kwargs(host: Host) -> dict[str, Any]:
         "port": host.port,
         "username": host.user,
         "known_hosts": None,
         "gss_auth": False,  # Disable GSSAPI - causes multi-second delays
     }
-    # Add SSH agent path (auto-detect forwarded agent if needed)
-    agent_path = get_ssh_auth_sock()
-    if agent_path:
-        kwargs["agent_path"] = agent_path
-    # Add key file fallback for when SSH agent is unavailable
+    # Add key file fallback (prioritized over agent if present)
     key_path = get_key_path()
+    agent_path = get_ssh_auth_sock()

     if key_path:
+        # If dedicated key exists, force use of it and ignore agent
+        # This avoids issues with stale/broken forwarded agents in Docker
         kwargs["client_keys"] = [str(key_path)]
+    elif agent_path:
+        # Fallback to agent if no dedicated key
+        kwargs["agent_path"] = agent_path

     return kwargs
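These kwargs are shaped for an asyncssh-style connect call; a hedged sketch of how they would typically be consumed, assuming asyncssh is the SSH backend (which the agent_path/client_keys/known_hosts keys suggest):

import asyncssh

async def open_connection(host: Host) -> asyncssh.SSHClientConnection:
    # host.address plus the kwargs built above (port, username, auth source)
    return await asyncssh.connect(host.address, **ssh_connect_kwargs(host))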
@@ -324,11 +352,12 @@ async def run_compose(
     """Run a docker compose command for a stack."""
     host_name = config.get_hosts(stack)[0]
     host = config.hosts[host_name]
-    compose_path = config.get_compose_path(stack)
+    stack_dir = config.get_stack_dir(stack)

-    _print_compose_command(host_name, str(config.compose_dir), str(compose_path), compose_cmd)
+    _print_compose_command(host_name, stack, compose_cmd)

-    command = f"docker compose -f {compose_path} {compose_cmd}"
+    # Use cd to let docker compose find the compose file on the remote host
+    command = f'cd "{stack_dir}" && docker compose {compose_cmd}'
     return await run_command(host, command, stack, stream=stream, raw=raw, prefix=prefix)


@@ -347,11 +376,12 @@ async def run_compose_on_host(
     Used for migration - running 'down' on the old host before 'up' on new host.
     """
     host = config.hosts[host_name]
-    compose_path = config.get_compose_path(stack)
+    stack_dir = config.get_stack_dir(stack)

-    _print_compose_command(host_name, str(config.compose_dir), str(compose_path), compose_cmd)
+    _print_compose_command(host_name, stack, compose_cmd)

-    command = f"docker compose -f {compose_path} {compose_cmd}"
+    # Use cd to let docker compose find the compose file on the remote host
+    command = f'cd "{stack_dir}" && docker compose {compose_cmd}'
     return await run_command(host, command, stack, stream=stream, raw=raw, prefix=prefix)


@@ -362,13 +392,17 @@ async def run_on_stacks(
     *,
     stream: bool = True,
     raw: bool = False,
+    filter_host: str | None = None,
 ) -> list[CommandResult]:
     """Run a docker compose command on multiple stacks in parallel.

-    For multi-host stacks, runs on all configured hosts.
-    Note: raw=True only makes sense for single-stack operations.
+    For multi-host stacks, runs on all configured hosts unless filter_host is set,
+    in which case only the filtered host is affected. raw=True only makes sense
+    for single-stack operations.
     """
-    return await run_sequential_on_stacks(config, stacks, [compose_cmd], stream=stream, raw=raw)
+    return await run_sequential_on_stacks(
+        config, stacks, [compose_cmd], stream=stream, raw=raw, filter_host=filter_host
+    )
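For a hypothetical stack directory, the cd-based form produces a plain shell string, so the remote docker compose picks whichever compose file name exists there:

stack_dir = "/opt/compose/jellyfin"  # hypothetical
compose_cmd = "up -d"
command = f'cd "{stack_dir}" && docker compose {compose_cmd}'
# -> cd "/opt/compose/jellyfin" && docker compose up -d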
 async def _run_sequential_stack_commands(
@@ -388,6 +422,33 @@ async def _run_sequential_stack_commands(
     return CommandResult(stack=stack, exit_code=0, success=True)


+async def _run_sequential_stack_commands_on_host(
+    config: Config,
+    stack: str,
+    host_name: str,
+    commands: list[str],
+    *,
+    stream: bool = True,
+    raw: bool = False,
+    prefix: str | None = None,
+) -> CommandResult:
+    """Run multiple compose commands sequentially for a stack on a specific host.
+
+    Used when --host filter is applied to a multi-host stack.
+    """
+    stack_dir = config.get_stack_dir(stack)
+    host = config.hosts[host_name]
+    label = f"{stack}@{host_name}"
+
+    for cmd in commands:
+        _print_compose_command(host_name, stack, cmd)
+        command = f'cd "{stack_dir}" && docker compose {cmd}'
+        result = await run_command(host, command, label, stream=stream, raw=raw, prefix=prefix)
+        if not result.success:
+            return result
+    return CommandResult(stack=label, exit_code=0, success=True)
+
+
 async def _run_sequential_stack_commands_multi_host(
     config: Config,
     stack: str,
@@ -403,14 +464,15 @@ async def _run_sequential_stack_commands_multi_host(
     For multi-host stacks, prefix defaults to stack@host format.
     """
     host_names = config.get_hosts(stack)
-    compose_path = config.get_compose_path(stack)
+    stack_dir = config.get_stack_dir(stack)
     final_results: list[CommandResult] = []

     for cmd in commands:
-        command = f"docker compose -f {compose_path} {cmd}"
+        # Use cd to let docker compose find the compose file on the remote host
+        command = f'cd "{stack_dir}" && docker compose {cmd}'
         tasks = []
         for host_name in host_names:
-            _print_compose_command(host_name, str(config.compose_dir), str(compose_path), cmd)
+            _print_compose_command(host_name, stack, cmd)
             host = config.hosts[host_name]
             # For multi-host stacks, always use stack@host prefix to distinguish output
             label = f"{stack}@{host_name}" if len(host_names) > 1 else stack
@@ -438,11 +500,13 @@ async def run_sequential_on_stacks(
     *,
     stream: bool = True,
     raw: bool = False,
+    filter_host: str | None = None,
 ) -> list[CommandResult]:
     """Run sequential commands on multiple stacks in parallel.

-    For multi-host stacks, runs on all configured hosts.
-    Note: raw=True only makes sense for single-stack operations.
+    For multi-host stacks, runs on all configured hosts unless filter_host is set,
+    in which case only the filtered host is affected. raw=True only makes sense
+    for single-stack operations.
     """
     # Skip prefix for single-stack operations (command line already shows context)
     prefix: str | None = "" if len(stacks) == 1 else None
@@ -452,12 +516,20 @@ async def run_sequential_on_stacks(
     single_host_tasks = []

     for stack in stacks:
-        if config.is_multi_host(stack):
+        if config.is_multi_host(stack) and filter_host is None:
+            # Multi-host stack without filter: run on all hosts
             multi_host_tasks.append(
                 _run_sequential_stack_commands_multi_host(
                     config, stack, commands, stream=stream, raw=raw, prefix=prefix
                 )
             )
+        elif config.is_multi_host(stack) and filter_host is not None:
+            # Multi-host stack with filter: run only on filtered host
+            single_host_tasks.append(
+                _run_sequential_stack_commands_on_host(
+                    config, stack, filter_host, commands, stream=stream, raw=raw, prefix=prefix
+                )
+            )
         else:
             single_host_tasks.append(
                 _run_sequential_stack_commands(
@@ -487,16 +559,83 @@ async def check_stack_running(
 ) -> bool:
     """Check if a stack has running containers on a specific host."""
     host = config.hosts[host_name]
-    compose_path = config.get_compose_path(stack)
+    stack_dir = config.get_stack_dir(stack)

     # Use ps --status running to check for running containers
-    command = f"docker compose -f {compose_path} ps --status running -q"
+    # Use cd to let docker compose find the compose file on the remote host
+    command = f'cd "{stack_dir}" && docker compose ps --status running -q'
     result = await run_command(host, command, stack, stream=False)

     # If command succeeded and has output, containers are running
     return result.success and bool(result.stdout.strip())


+async def get_running_stacks_on_host(
+    config: Config,
+    host_name: str,
+) -> set[str]:
+    """Get all running compose stacks on a host in a single SSH call.
+
+    Uses docker ps with the compose.project label to identify running stacks.
+    Much more efficient than checking each stack individually.
+    """
+    host = config.hosts[host_name]
+
+    # Get unique project names from running containers
+    command = "docker ps --format '{{.Label \"com.docker.compose.project\"}}' | sort -u"
+    result = await run_command(host, command, stack=host_name, stream=False, prefix="")
+
+    if not result.success:
+        return set()
+
+    # Filter out empty lines and return as set
+    return {line.strip() for line in result.stdout.splitlines() if line.strip()}
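The single-call discovery reduces to splitting one docker ps listing; a sketch with made-up output:

stdout = "traefik\njellyfin\n\njellyfin\n"  # hypothetical docker ps output
running = {line.strip() for line in stdout.splitlines() if line.strip()}
# -> {"traefik", "jellyfin"}; blank lines from label-less containers are dropped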
+async def get_container_compose_labels(
+    config: Config,
+    host_name: str,
+) -> dict[str, tuple[str, str]]:
+    """Get compose labels for all containers on a host.
+
+    Returns dict of container_name -> (project, service).
+    Includes all containers (-a flag) since Glances shows stopped containers too.
+    Falls back to empty dict on timeout/error (5s timeout).
+    Results are cached for 30 seconds to reduce SSH overhead.
+    """
+    # Check cache first
+    cached: dict[str, tuple[str, str]] | None = _compose_labels_cache.get(host_name)
+    if cached is not None:
+        return cached
+
+    host = config.hosts[host_name]
+    cmd = (
+        "docker ps -a --format "
+        '\'{{.Names}}\t{{.Label "com.docker.compose.project"}}\t'
+        '{{.Label "com.docker.compose.service"}}\''
+    )
+
+    try:
+        async with asyncio.timeout(5.0):
+            result = await run_command(host, cmd, stack=host_name, stream=False, prefix="")
+    except TimeoutError:
+        return {}
+    except Exception:
+        return {}
+
+    labels: dict[str, tuple[str, str]] = {}
+    if result.success:
+        for line in result.stdout.splitlines():
+            parts = line.strip().split("\t")
+            if len(parts) >= 3:  # noqa: PLR2004
+                name, project, service = parts[0], parts[1], parts[2]
+                labels[name] = (project or "", service or "")
+
+    # Cache the result
+    _compose_labels_cache.set(host_name, labels)
+    return labels
 async def _batch_check_existence(
     config: Config,
     host_name: str,
@@ -533,18 +672,28 @@ async def check_paths_exist(
     host_name: str,
     paths: list[str],
 ) -> dict[str, bool]:
-    """Check if multiple paths exist on a specific host.
+    """Check if multiple paths exist and are accessible on a specific host.

     Returns a dict mapping path -> exists.
+    Handles permission denied as "exists" (path is there, just not accessible).
+    Uses timeout to detect stale NFS mounts that would hang.
     """
-    # Only report missing if stat says "No such file", otherwise assume exists
-    # (handles permission denied correctly - path exists, just not accessible)
+    # Use timeout to detect stale NFS mounts (which hang on access)
+    # - First try ls with timeout to check accessibility
+    # - If ls succeeds: path exists and is accessible
+    # - If ls fails/times out: use stat (also with timeout) to distinguish
+    #   "no such file" from "permission denied" or stale NFS
+    # - Timeout (exit code 124) is treated as inaccessible (stale NFS mount)
     return await _batch_check_existence(
         config,
         host_name,
         paths,
-        lambda esc: f"stat '{esc}' 2>&1 | grep -q 'No such file' && echo 'N:{esc}' || echo 'Y:{esc}'",
+        lambda esc: (
+            f"OUT=$(timeout 2 stat '{esc}' 2>&1); RC=$?; "
+            f"if [ $RC -eq 124 ]; then echo 'N:{esc}'; "
+            f"elif echo \"$OUT\" | grep -q 'No such file'; then echo 'N:{esc}'; "
+            f"else echo 'Y:{esc}'; fi"
+        ),
         "mount-check",
     )
src/compose_farm/glances.py
Normal file
299
src/compose_farm/glances.py
Normal file
@@ -0,0 +1,299 @@
|
||||
"""Glances API client for host resource monitoring."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from .executor import is_local
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .config import Config, Host
|
||||
|
||||
# Default Glances REST API port
|
||||
DEFAULT_GLANCES_PORT = 61208
|
||||
|
||||
|
||||
def format_bytes(bytes_val: int) -> str:
|
||||
"""Format bytes to human readable string (e.g., 1.5 GiB)."""
|
||||
import humanize # noqa: PLC0415
|
||||
|
||||
return humanize.naturalsize(bytes_val, binary=True, format="%.1f")
|
||||
|
||||
|
||||
def _get_glances_address(
|
||||
host_name: str,
|
||||
host: Host,
|
||||
glances_container: str | None,
|
||||
local_host: str | None = None,
|
||||
) -> str:
|
||||
"""Get the address to use for Glances API requests.
|
||||
|
||||
When running in a Docker container (CF_WEB_STACK set), the local host's Glances
|
||||
may not be reachable via its LAN IP due to Docker network isolation. In this
|
||||
case, we use the Glances container name for the local host.
|
||||
"""
|
||||
# CF_WEB_STACK indicates we're running in the web UI container.
|
||||
in_container = os.environ.get("CF_WEB_STACK") is not None
|
||||
if not in_container or not glances_container:
|
||||
return host.address
|
||||
|
||||
if local_host and host_name == local_host:
|
||||
return glances_container
|
||||
|
||||
# Fall back to is_local detection (may not work in container)
|
||||
if is_local(host):
|
||||
return glances_container
|
||||
|
||||
return host.address
|
||||
|
||||
|
||||
@dataclass
|
||||
class HostStats:
|
||||
"""Resource statistics for a host."""
|
||||
|
||||
host: str
|
||||
cpu_percent: float
|
||||
mem_percent: float
|
||||
swap_percent: float
|
||||
load: float
|
||||
disk_percent: float
|
||||
net_rx_rate: float = 0.0 # bytes/sec
|
||||
net_tx_rate: float = 0.0 # bytes/sec
|
||||
error: str | None = None
|
||||
|
||||
@classmethod
|
||||
def from_error(cls, host: str, error: str) -> HostStats:
|
||||
"""Create a HostStats with an error."""
|
||||
return cls(
|
||||
host=host,
|
||||
cpu_percent=0,
|
||||
mem_percent=0,
|
||||
swap_percent=0,
|
||||
load=0,
|
||||
disk_percent=0,
|
||||
net_rx_rate=0,
|
||||
net_tx_rate=0,
|
||||
error=error,
|
||||
)
|
||||
|
||||
|
||||
async def fetch_host_stats(
|
||||
host_name: str,
|
||||
host_address: str,
|
||||
port: int = DEFAULT_GLANCES_PORT,
|
||||
request_timeout: float = 10.0,
|
||||
) -> HostStats:
|
||||
"""Fetch stats from a single host's Glances API."""
|
||||
import httpx # noqa: PLC0415
|
||||
|
||||
base_url = f"http://{host_address}:{port}/api/4"
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=request_timeout) as client:
|
||||
# Fetch quicklook stats (CPU, mem, load)
|
||||
response = await client.get(f"{base_url}/quicklook")
|
||||
if not response.is_success:
|
||||
return HostStats.from_error(host_name, f"HTTP {response.status_code}")
|
||||
data = response.json()
|
||||
|
||||
# Fetch filesystem stats for disk usage (root fs or max across all)
|
||||
disk_percent = 0.0
|
||||
try:
|
||||
fs_response = await client.get(f"{base_url}/fs")
|
||||
if fs_response.is_success:
|
||||
fs_data = fs_response.json()
|
||||
root = next((fs for fs in fs_data if fs.get("mnt_point") == "/"), None)
|
||||
disk_percent = (
|
||||
root.get("percent", 0)
|
||||
if root
|
||||
else max((fs.get("percent", 0) for fs in fs_data), default=0)
|
||||
)
|
||||
except httpx.HTTPError:
|
||||
pass # Disk stats are optional
|
||||
|
||||
# Fetch network stats for rate (sum across non-loopback interfaces)
|
||||
net_rx_rate, net_tx_rate = 0.0, 0.0
|
||||
try:
|
||||
net_response = await client.get(f"{base_url}/network")
|
||||
if net_response.is_success:
|
||||
for iface in net_response.json():
|
||||
if not iface.get("interface_name", "").startswith("lo"):
|
||||
net_rx_rate += iface.get("bytes_recv_rate_per_sec") or 0
|
||||
net_tx_rate += iface.get("bytes_sent_rate_per_sec") or 0
|
||||
except httpx.HTTPError:
|
||||
pass # Network stats are optional
|
||||
|
||||
return HostStats(
|
||||
host=host_name,
|
||||
cpu_percent=data.get("cpu", 0),
|
||||
mem_percent=data.get("mem", 0),
|
||||
swap_percent=data.get("swap", 0),
|
||||
load=data.get("load", 0),
|
||||
disk_percent=disk_percent,
|
||||
net_rx_rate=net_rx_rate,
|
||||
net_tx_rate=net_tx_rate,
|
||||
)
|
||||
except httpx.TimeoutException:
|
||||
return HostStats.from_error(host_name, "timeout")
|
||||
except httpx.HTTPError as e:
|
||||
return HostStats.from_error(host_name, str(e))
|
||||
except Exception as e:
|
||||
return HostStats.from_error(host_name, str(e))
|
||||
|
||||
|
||||
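A usage sketch, assuming the module above is importable and a host is running Glances on its default port (the host name and address are hypothetical):

import asyncio

async def main() -> None:
    stats = await fetch_host_stats("nas", "192.168.1.10")
    if stats.error:
        print(f"nas unreachable: {stats.error}")
    else:
        print(f"cpu={stats.cpu_percent}% mem={stats.mem_percent}%")

asyncio.run(main())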
async def fetch_all_host_stats(
    config: Config,
    port: int = DEFAULT_GLANCES_PORT,
) -> dict[str, HostStats]:
    """Fetch stats from all hosts in parallel."""
    glances_container = config.glances_stack
    local_host = config.get_local_host_from_web_stack()
    tasks = [
        fetch_host_stats(
            name,
            _get_glances_address(name, host, glances_container, local_host),
            port,
        )
        for name, host in config.hosts.items()
    ]
    results = await asyncio.gather(*tasks)
    return {stats.host: stats for stats in results}


@dataclass
class ContainerStats:
    """Container statistics from Glances."""

    name: str
    host: str
    status: str
    image: str
    cpu_percent: float
    memory_usage: int  # bytes
    memory_limit: int  # bytes
    memory_percent: float
    network_rx: int  # cumulative bytes received
    network_tx: int  # cumulative bytes sent
    uptime: str
    ports: str
    engine: str  # docker, podman, etc.
    stack: str = ""  # compose project name (from docker labels)
    service: str = ""  # compose service name (from docker labels)


def _parse_container(data: dict[str, Any], host_name: str) -> ContainerStats:
    """Parse container data from Glances API response."""
    # Image can be a list or string
    image = data.get("image", ["unknown"])
    if isinstance(image, list):
        image = image[0] if image else "unknown"

    # Calculate memory percent
    mem_usage = data.get("memory_usage", 0) or 0
    mem_limit = data.get("memory_limit", 1) or 1  # Avoid division by zero
    mem_percent = (mem_usage / mem_limit) * 100 if mem_limit > 0 else 0

    # Network stats
    network = data.get("network", {}) or {}
    network_rx = network.get("cumulative_rx", 0) or 0
    network_tx = network.get("cumulative_tx", 0) or 0

    return ContainerStats(
        name=data.get("name", "unknown"),
        host=host_name,
        status=data.get("status", "unknown"),
        image=image,
        cpu_percent=data.get("cpu_percent", 0) or 0,
        memory_usage=mem_usage,
        memory_limit=mem_limit,
        memory_percent=mem_percent,
        network_rx=network_rx,
        network_tx=network_tx,
        uptime=data.get("uptime", ""),
        ports=data.get("ports", "") or "",
        engine=data.get("engine", "docker"),
    )


async def fetch_container_stats(
    host_name: str,
    host_address: str,
    port: int = DEFAULT_GLANCES_PORT,
    request_timeout: float = 10.0,
) -> tuple[list[ContainerStats] | None, str | None]:
    """Fetch container stats from a single host's Glances API.

    Returns:
        (containers, error_message)
        - Success: ([...], None)
        - Failure: (None, "error message")

    """
    import httpx  # noqa: PLC0415

    url = f"http://{host_address}:{port}/api/4/containers"

    try:
        async with httpx.AsyncClient(timeout=request_timeout) as client:
            response = await client.get(url)
            if not response.is_success:
                return None, f"HTTP {response.status_code}: {response.reason_phrase}"
            data = response.json()
            return [_parse_container(c, host_name) for c in data], None
    except httpx.ConnectError:
        return None, "Connection refused (Glances offline?)"
    except httpx.TimeoutException:
        return None, "Connection timed out"
    except Exception as e:
        return None, str(e)


async def fetch_all_container_stats(
    config: Config,
    port: int = DEFAULT_GLANCES_PORT,
    hosts: list[str] | None = None,
) -> list[ContainerStats]:
    """Fetch container stats from all hosts in parallel, enriched with compose labels."""
    from .executor import get_container_compose_labels  # noqa: PLC0415

    glances_container = config.glances_stack
    host_names = hosts if hosts is not None else list(config.hosts.keys())
    local_host = config.get_local_host_from_web_stack()

    async def fetch_host_data(
        host_name: str,
        host_address: str,
    ) -> list[ContainerStats]:
        # Fetch Glances stats and compose labels in parallel
        stats_task = fetch_container_stats(host_name, host_address, port)
        labels_task = get_container_compose_labels(config, host_name)
        (containers, _), labels = await asyncio.gather(stats_task, labels_task)

        if containers is None:
            # Skip failed hosts in aggregate view
            return []

        # Enrich containers with compose labels (mutate in place)
        for c in containers:
            c.stack, c.service = labels.get(c.name, ("", ""))
        return containers

    tasks = [
        fetch_host_data(
            name,
            _get_glances_address(
                name,
                config.hosts[name],
                glances_container,
                local_host,
            ),
        )
        for name in host_names
        if name in config.hosts
    ]
    results = await asyncio.gather(*tasks)
    # Flatten list of lists
    return [container for host_containers in results for container in host_containers]
@@ -6,21 +6,22 @@ import json
 import tomllib
 from dataclasses import dataclass
 from datetime import UTC, datetime
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING

-from .executor import run_compose
+from .executor import run_command
 from .paths import xdg_config_home

 if TYPE_CHECKING:
-    from collections.abc import Awaitable, Callable, Iterable
+    from collections.abc import Iterable
     from pathlib import Path

     from .config import Config
     from .executor import CommandResult

+# Separator used to split output sections
+_SECTION_SEPARATOR = "---CF-SEP---"
+

 DEFAULT_LOG_PATH = xdg_config_home() / "compose-farm" / "dockerfarm-log.toml"
 _DIGEST_HEX_LENGTH = 64


 @dataclass(frozen=True)
@@ -56,87 +57,97 @@ def _escape(value: str) -> str:
     return value.replace("\\", "\\\\").replace('"', '\\"')


-def _parse_images_output(raw: str) -> list[dict[str, Any]]:
-    """Parse `docker compose images --format json` output.
-
-    Handles both a JSON array and newline-separated JSON objects for robustness.
-    """
-    raw = raw.strip()
-    if not raw:
-        return []
-
+def _parse_image_digests(image_json: str) -> dict[str, str]:
+    """Parse docker image inspect JSON to build image tag -> digest map."""
+    if not image_json:
+        return {}
     try:
-        parsed = json.loads(raw)
+        image_data = json.loads(image_json)
     except json.JSONDecodeError:
-        objects = []
-        for line in raw.splitlines():
-            if not line.strip():
-                continue
-            objects.append(json.loads(line))
-        return objects
+        return {}

-    if isinstance(parsed, list):
-        return parsed
-    if isinstance(parsed, dict):
-        return [parsed]
-    return []
+    image_digests: dict[str, str] = {}
+    for img in image_data:
+        tags = img.get("RepoTags") or []
+        digests = img.get("RepoDigests") or []
+        digest = digests[0].split("@")[-1] if digests else img.get("Id", "")
+        for tag in tags:
+            image_digests[tag] = digest
+        if img.get("Id"):
+            image_digests[img["Id"]] = digest
+    return image_digests


-def _extract_image_fields(record: dict[str, Any]) -> tuple[str, str]:
-    """Extract image name and digest with fallbacks."""
-    image = record.get("Image") or record.get("Repository") or record.get("Name") or ""
-    tag = record.get("Tag") or record.get("Version")
-    if tag and ":" not in image.rsplit("/", 1)[-1]:
-        image = f"{image}:{tag}"
-
-    digest = (
-        record.get("Digest")
-        or record.get("Image ID")
-        or record.get("ImageID")
-        or record.get("ID")
-        or ""
-    )
-
-    if digest and not digest.startswith("sha256:") and len(digest) == _DIGEST_HEX_LENGTH:
-        digest = f"sha256:{digest}"
-
-    return image, digest
-
-
-async def collect_stack_entries(
+async def collect_stacks_entries_on_host(
     config: Config,
-    stack: str,
+    host_name: str,
+    stacks: set[str],
     *,
     now: datetime,
-    run_compose_fn: Callable[..., Awaitable[CommandResult]] = run_compose,
 ) -> list[SnapshotEntry]:
-    """Run `docker compose images` for a stack and normalize results."""
-    result = await run_compose_fn(config, stack, "images --format json", stream=False)
+    """Collect image entries for stacks on one host using 2 docker commands.
+
+    Uses `docker ps` to get running containers + their compose project labels,
+    then `docker image inspect` to get digests for all unique images.
+    Much faster than running N `docker compose images` commands.
+    """
+    if not stacks:
+        return []
+
+    host = config.hosts[host_name]
+
+    # Single SSH call with 2 docker commands:
+    # 1. Get project|image pairs from running containers
+    # 2. Get image info (including digests) for all unique images
+    command = (
+        f"docker ps --format '{{{{.Label \"com.docker.compose.project\"}}}}|{{{{.Image}}}}' && "
+        f"echo '{_SECTION_SEPARATOR}' && "
+        "docker image inspect $(docker ps --format '{{.Image}}' | sort -u) 2>/dev/null || true"
+    )
+    result = await run_command(host, command, host_name, stream=False, prefix="")

     if not result.success:
-        msg = result.stderr or f"compose images exited with {result.exit_code}"
-        error = f"[{stack}] Unable to read images: {msg}"
-        raise RuntimeError(error)
+        return []

-    records = _parse_images_output(result.stdout)
-    # Use first host for snapshots (multi-host stacks use same images on all hosts)
-    host_name = config.get_hosts(stack)[0]
-    compose_path = config.get_compose_path(stack)
+    # Split output into two sections
+    parts = result.stdout.split(_SECTION_SEPARATOR)
+    if len(parts) != 2:  # noqa: PLR2004
+        return []

-    entries: list[SnapshotEntry] = []
-    for record in records:
-        image, digest = _extract_image_fields(record)
-        if not digest:
-            continue
-        entries.append(
-            SnapshotEntry(
-                stack=stack,
-                host=host_name,
-                compose_file=compose_path,
-                image=image,
-                digest=digest,
-                captured_at=now,
-            )
-        )
+    container_lines, image_json = parts[0].strip(), parts[1].strip()
+
+    # Parse project|image pairs, filtering to only stacks we care about
+    stack_images: dict[str, set[str]] = {}
+    for line in container_lines.splitlines():
+        if "|" not in line:
+            continue
+        project, image = line.split("|", 1)
+        if project in stacks:
+            stack_images.setdefault(project, set()).add(image)
+
+    if not stack_images:
+        return []
+
+    # Parse image inspect JSON to build image -> digest map
+    image_digests = _parse_image_digests(image_json)
+
+    # Build entries
+    entries: list[SnapshotEntry] = []
+    for stack, images in stack_images.items():
+        for image in images:
+            digest = image_digests.get(image, "")
+            if digest:
+                entries.append(
+                    SnapshotEntry(
+                        stack=stack,
+                        host=host_name,
+                        compose_file=config.get_compose_path(stack),
+                        image=image,
+                        digest=digest,
+                        captured_at=now,
+                    )
+                )

     return entries
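A sketch of the tag-to-digest map this builds, from abbreviated (hypothetical) docker image inspect output:

image_json = '[{"Id": "sha256:aaa", "RepoTags": ["nginx:1.25"], "RepoDigests": ["nginx@sha256:bbb"]}]'
_parse_image_digests(image_json)
# -> {"nginx:1.25": "sha256:bbb", "sha256:aaa": "sha256:bbb"}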
@@ -76,29 +76,37 @@ def get_stack_paths(cfg: Config, stack: str) -> list[str]:
     return paths


-async def discover_stack_host(cfg: Config, stack: str) -> tuple[str, str | list[str] | None]:
-    """Discover where a stack is running.
-
-    For multi-host stacks, checks all assigned hosts in parallel.
-    For single-host, checks assigned host first, then others.
-
-    Returns (stack_name, host_or_hosts_or_none).
-    """
-    assigned_hosts = cfg.get_hosts(stack)
-
-    if cfg.is_multi_host(stack):
-        # Check all assigned hosts in parallel
-        checks = await asyncio.gather(*[check_stack_running(cfg, stack, h) for h in assigned_hosts])
-        running = [h for h, is_running in zip(assigned_hosts, checks, strict=True) if is_running]
-        return stack, running if running else None
-
-    # Single-host: check assigned host first, then others
-    if await check_stack_running(cfg, stack, assigned_hosts[0]):
-        return stack, assigned_hosts[0]
-    for host in cfg.hosts:
-        if host != assigned_hosts[0] and await check_stack_running(cfg, stack, host):
-            return stack, host
-    return stack, None
+class StackDiscoveryResult(NamedTuple):
+    """Result of discovering where a stack is running across all hosts."""
+
+    stack: str
+    configured_hosts: list[str]  # From config (where it SHOULD run)
+    running_hosts: list[str]  # From reality (where it IS running)
+
+    @property
+    def is_multi_host(self) -> bool:
+        """Check if this is a multi-host stack."""
+        return len(self.configured_hosts) > 1
+
+    @property
+    def stray_hosts(self) -> list[str]:
+        """Hosts where stack is running but shouldn't be."""
+        return [h for h in self.running_hosts if h not in self.configured_hosts]
+
+    @property
+    def missing_hosts(self) -> list[str]:
+        """Hosts where stack should be running but isn't."""
+        return [h for h in self.configured_hosts if h not in self.running_hosts]
+
+    @property
+    def is_stray(self) -> bool:
+        """Stack is running on unauthorized host(s)."""
+        return len(self.stray_hosts) > 0
+
+    @property
+    def is_duplicate(self) -> bool:
+        """Single-host stack running on multiple hosts."""
+        return not self.is_multi_host and len(self.running_hosts) > 1
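A sketch of the properties on a hypothetical single-host stack found running on two hosts:

r = StackDiscoveryResult(
    stack="jellyfin",  # hypothetical
    configured_hosts=["nas"],
    running_hosts=["nas", "backup"],
)
r.stray_hosts    # ["backup"], running where it should not be
r.missing_hosts  # [], the configured host is covered
r.is_duplicate   # True, a single-host stack seen on two hosts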
 async def check_stack_requirements(
@@ -177,18 +185,38 @@ def _report_preflight_failures(
             print_error(f"  missing device: {dev}")


+def build_up_cmd(
+    *,
+    pull: bool = False,
+    build: bool = False,
+    service: str | None = None,
+) -> str:
+    """Build compose 'up' subcommand with optional flags."""
+    parts = ["up", "-d"]
+    if pull:
+        parts.append("--pull always")
+    if build:
+        parts.append("--build")
+    if service:
+        parts.append(service)
+    return " ".join(parts)
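The flag combinations compose as plain strings; for example:

build_up_cmd()                           # -> "up -d"
build_up_cmd(pull=True)                  # -> "up -d --pull always"
build_up_cmd(build=True, service="web")  # -> "up -d --build web" ("web" is hypothetical)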
 async def _up_multi_host_stack(
     cfg: Config,
     stack: str,
     prefix: str,
     *,
     raw: bool = False,
+    pull: bool = False,
+    build: bool = False,
 ) -> list[CommandResult]:
     """Start a multi-host stack on all configured hosts."""
     host_names = cfg.get_hosts(stack)
     results: list[CommandResult] = []
-    compose_path = cfg.get_compose_path(stack)
-    command = f"docker compose -f {compose_path} up -d"
+    stack_dir = cfg.get_stack_dir(stack)
+    # Use cd to let docker compose find the compose file on the remote host
+    command = f'cd "{stack_dir}" && docker compose {build_up_cmd(pull=pull, build=build)}'

     # Pre-flight checks on all hosts
     for host_name in host_names:
@@ -261,6 +289,8 @@ async def _up_single_stack(
     prefix: str,
     *,
     raw: bool,
+    pull: bool = False,
+    build: bool = False,
 ) -> CommandResult:
     """Start a single-host stack with migration support."""
     target_host = cfg.get_hosts(stack)[0]
@@ -289,7 +319,7 @@ async def _up_single_stack(

     # Start on target host
     console.print(f"{prefix} Starting on [magenta]{target_host}[/]...")
-    up_result = await _run_compose_step(cfg, stack, "up -d", raw=raw)
+    up_result = await _run_compose_step(cfg, stack, build_up_cmd(pull=pull, build=build), raw=raw)

     # Update state on success, or rollback on failure
     if up_result.success:
@@ -308,24 +338,101 @@ async def _up_single_stack(
     return up_result


+async def _up_stack_simple(
+    cfg: Config,
+    stack: str,
+    *,
+    raw: bool = False,
+    pull: bool = False,
+    build: bool = False,
+) -> CommandResult:
+    """Start a single-host stack without migration (parallel-safe)."""
+    target_host = cfg.get_hosts(stack)[0]
+
+    # Pre-flight check
+    preflight = await check_stack_requirements(cfg, stack, target_host)
+    if not preflight.ok:
+        _report_preflight_failures(stack, target_host, preflight)
+        return CommandResult(stack=stack, exit_code=1, success=False)
+
+    # Run with streaming for parallel output
+    result = await run_compose(cfg, stack, build_up_cmd(pull=pull, build=build), raw=raw)
+    if raw:
+        print()
+    if result.interrupted:
+        raise OperationInterruptedError
+
+    # Update state on success
+    if result.success:
+        set_stack_host(cfg, stack, target_host)
+
+    return result
+
+
 async def up_stacks(
     cfg: Config,
     stacks: list[str],
     *,
     raw: bool = False,
+    pull: bool = False,
+    build: bool = False,
 ) -> list[CommandResult]:
-    """Start stacks with automatic migration if host changed."""
+    """Start stacks with automatic migration if host changed.
+
+    Stacks without migration run in parallel. Migration stacks run sequentially.
+    """
+    # Categorize stacks
+    multi_host: list[str] = []
+    needs_migration: list[str] = []
+    simple: list[str] = []
+
+    for stack in stacks:
+        if cfg.is_multi_host(stack):
+            multi_host.append(stack)
+        else:
+            target = cfg.get_hosts(stack)[0]
+            current = get_stack_host(cfg, stack)
+            if current and current != target:
+                needs_migration.append(stack)
+            else:
+                simple.append(stack)
+
     results: list[CommandResult] = []
-    total = len(stacks)

     try:
-        for idx, stack in enumerate(stacks, 1):
-            prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{stack}][/]"
-            if cfg.is_multi_host(stack):
-                results.extend(await _up_multi_host_stack(cfg, stack, prefix, raw=raw))
-            else:
-                results.append(await _up_single_stack(cfg, stack, prefix, raw=raw))
+        # Simple stacks: run in parallel (no migration needed)
+        if simple:
+            use_raw = raw and len(simple) == 1
+            simple_results = await asyncio.gather(
+                *[
+                    _up_stack_simple(cfg, stack, raw=use_raw, pull=pull, build=build)
+                    for stack in simple
+                ]
+            )
+            results.extend(simple_results)
+
+        # Multi-host stacks: run in parallel
+        if multi_host:
+            multi_results = await asyncio.gather(
+                *[
+                    _up_multi_host_stack(
+                        cfg, stack, f"[cyan]\\[{stack}][/]", raw=raw, pull=pull, build=build
+                    )
+                    for stack in multi_host
+                ]
+            )
+            for result_list in multi_results:
+                results.extend(result_list)
+
+        # Migration stacks: run sequentially for clear output and rollback
+        if needs_migration:
+            total = len(needs_migration)
+            for idx, stack in enumerate(needs_migration, 1):
+                prefix = f"[dim][{idx}/{total}][/] [cyan]\\[{stack}][/]"
+                results.append(
+                    await _up_single_stack(cfg, stack, prefix, raw=raw, pull=pull, build=build)
+                )
     except OperationInterruptedError:
         raise KeyboardInterrupt from None

@@ -359,26 +466,33 @@ async def check_host_compatibility(
     return results


-async def stop_orphaned_stacks(cfg: Config) -> list[CommandResult]:
-    """Stop orphaned stacks (in state but not in config).
+async def _stop_stacks_on_hosts(
+    cfg: Config,
+    stacks_to_hosts: dict[str, list[str]],
+    label: str = "",
+) -> list[CommandResult]:
+    """Stop stacks on specific hosts.

-    Runs docker compose down on each stack on its tracked host(s).
-    Only removes from state on successful stop.
+    Shared helper for stop_orphaned_stacks and stop_stray_stacks.

-    Returns list of CommandResults for each stack@host.
+    Args:
+        cfg: Config object.
+        stacks_to_hosts: Dict mapping stack name to list of hosts to stop on.
+        label: Optional label for success message (e.g., "stray", "orphaned").
+
+    Returns:
+        List of CommandResults for each stack@host.
+
     """
-    orphaned = get_orphaned_stacks(cfg)
-    if not orphaned:
+    if not stacks_to_hosts:
         return []

     results: list[CommandResult] = []
     tasks: list[tuple[str, str, asyncio.Task[CommandResult]]] = []
+    suffix = f" ({label})" if label else ""

-    # Build list of (stack, host, task) for all orphaned stacks
-    for stack, hosts in orphaned.items():
-        host_list = hosts if isinstance(hosts, list) else [hosts]
-        for host in host_list:
-            # Skip hosts no longer in config
+    for stack, hosts in stacks_to_hosts.items():
+        for host in hosts:
             if host not in cfg.hosts:
                 print_warning(f"{stack}@{host}: host no longer in config, skipping")
                 results.append(
@@ -393,30 +507,48 @@ async def stop_orphaned_stacks(cfg: Config) -> list[CommandResult]:
             coro = run_compose_on_host(cfg, stack, host, "down")
             tasks.append((stack, host, asyncio.create_task(coro)))

-    # Run all down commands in parallel
-    if tasks:
-        for stack, host, task in tasks:
-            try:
-                result = await task
-                results.append(result)
-                if result.success:
-                    print_success(f"{stack}@{host}: stopped")
-                else:
-                    print_error(f"{stack}@{host}: {result.stderr or 'failed'}")
-            except Exception as e:
-                print_error(f"{stack}@{host}: {e}")
-                results.append(
-                    CommandResult(
-                        stack=f"{stack}@{host}",
-                        exit_code=1,
-                        success=False,
-                        stderr=str(e),
-                    )
-                )
+    for stack, host, task in tasks:
+        try:
+            result = await task
+            results.append(result)
+            if result.success:
+                print_success(f"{stack}@{host}: stopped{suffix}")
+            else:
+                print_error(f"{stack}@{host}: {result.stderr or 'failed'}")
+        except Exception as e:
+            print_error(f"{stack}@{host}: {e}")
+            results.append(
+                CommandResult(
+                    stack=f"{stack}@{host}",
+                    exit_code=1,
+                    success=False,
+                    stderr=str(e),
+                )
+            )

     return results


+async def stop_orphaned_stacks(cfg: Config) -> list[CommandResult]:
+    """Stop orphaned stacks (in state but not in config).
+
+    Runs docker compose down on each stack on its tracked host(s).
+    Only removes from state on successful stop.
+
+    Returns list of CommandResults for each stack@host.
+    """
+    orphaned = get_orphaned_stacks(cfg)
+    if not orphaned:
+        return []
+
+    normalized: dict[str, list[str]] = {
+        stack: (hosts if isinstance(hosts, list) else [hosts]) for stack, hosts in orphaned.items()
+    }
+
+    results = await _stop_stacks_on_hosts(cfg, normalized)
+
     # Remove from state only for stacks where ALL hosts succeeded
-    for stack, hosts in orphaned.items():
-        host_list = hosts if isinstance(hosts, list) else [hosts]
+    for stack in normalized:
         all_succeeded = all(
             r.success for r in results if r.stack.startswith(f"{stack}@") or r.stack == stack
         )
@@ -424,3 +556,77 @@ async def stop_orphaned_stacks(cfg: Config) -> list[CommandResult]:
             remove_stack(cfg, stack)

     return results


+async def stop_stray_stacks(
+    cfg: Config,
+    strays: dict[str, list[str]],
+) -> list[CommandResult]:
+    """Stop stacks running on unauthorized hosts.
+
+    Args:
+        cfg: Config object.
+        strays: Dict mapping stack name to list of stray hosts.
+
+    Returns:
+        List of CommandResults for each stack@host stopped.
+
+    """
+    return await _stop_stacks_on_hosts(cfg, strays, label="stray")
+
+
+def build_discovery_results(
+    cfg: Config,
+    running_on_host: dict[str, set[str]],
+    stacks: list[str] | None = None,
+) -> tuple[dict[str, str | list[str]], dict[str, list[str]], dict[str, list[str]]]:
+    """Build discovery results from per-host running stacks.
+
+    Takes the raw data of which stacks are running on which hosts and
+    categorizes them into discovered (running correctly), strays (wrong host),
+    and duplicates (single-host stack on multiple hosts).
+
+    Args:
+        cfg: Config object.
+        running_on_host: Dict mapping host -> set of running stack names.
+        stacks: Optional list of stacks to check. Defaults to all configured stacks.
+
+    Returns:
+        Tuple of (discovered, strays, duplicates):
+        - discovered: stack -> host(s) where running correctly
+        - strays: stack -> list of unauthorized hosts
+        - duplicates: stack -> list of all hosts (for single-host stacks on multiple)
+
+    """
+    stack_list = stacks if stacks is not None else list(cfg.stacks)
+    all_hosts = list(running_on_host.keys())
+
+    # Build StackDiscoveryResult for each stack
+    results: list[StackDiscoveryResult] = [
+        StackDiscoveryResult(
+            stack=stack,
+            configured_hosts=cfg.get_hosts(stack),
+            running_hosts=[h for h in all_hosts if stack in running_on_host[h]],
+        )
+        for stack in stack_list
+    ]
+
+    discovered: dict[str, str | list[str]] = {}
+    strays: dict[str, list[str]] = {}
+    duplicates: dict[str, list[str]] = {}
+
+    for result in results:
+        correct_hosts = [h for h in result.running_hosts if h in result.configured_hosts]
+        if correct_hosts:
+            if result.is_multi_host:
+                discovered[result.stack] = correct_hosts
+            else:
+                discovered[result.stack] = correct_hosts[0]
+
+        if result.is_stray:
+            strays[result.stack] = result.stray_hosts
+
+        if result.is_duplicate:
+            duplicates[result.stack] = result.running_hosts
+
+    return discovered, strays, duplicates
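A sketch of the categorization, assuming a config where the hypothetical stack "jellyfin" is assigned to "nas" only:

running = {"nas": {"jellyfin"}, "backup": {"jellyfin"}}  # hypothetical survey
discovered, strays, duplicates = build_discovery_results(cfg, running)
# discovered == {"jellyfin": "nas"}              (running on its configured host)
# strays     == {"jellyfin": ["backup"]}         (also running where it should not)
# duplicates == {"jellyfin": ["nas", "backup"]}  (single-host stack on two hosts)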
@@ -11,9 +11,19 @@ def xdg_config_home() -> Path:
     return Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config"))


+def config_dir() -> Path:
+    """Get the compose-farm config directory."""
+    return xdg_config_home() / "compose-farm"
+
+
 def default_config_path() -> Path:
     """Get the default user config path."""
-    return xdg_config_home() / "compose-farm" / "compose-farm.yaml"
+    return config_dir() / "compose-farm.yaml"
+
+
+def backup_dir() -> Path:
+    """Get the backup directory for file edits."""
+    return config_dir() / "backups"


 def config_search_paths() -> list[Path]:
220  src/compose_farm/registry.py  (new file)
@@ -0,0 +1,220 @@
"""Container registry API client for tag discovery."""

from __future__ import annotations

import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import httpx

# Image reference pattern: [registry/][namespace/]name[:tag][@digest]
IMAGE_PATTERN = re.compile(
    r"^(?:(?P<registry>[^/]+\.[^/]+)/)?(?:(?P<namespace>[^/:@]+)/)?(?P<name>[^/:@]+)(?::(?P<tag>[^@]+))?(?:@(?P<digest>.+))?$"
)

# Docker Hub aliases
DOCKER_HUB_ALIASES = frozenset(
    {"docker.io", "index.docker.io", "registry.hub.docker.com", "registry-1.docker.io"}
)

# Token endpoints per registry: (url, extra_params)
TOKEN_ENDPOINTS: dict[str, tuple[str, dict[str, str]]] = {
    "docker.io": ("https://auth.docker.io/token", {"service": "registry.docker.io"}),
    "ghcr.io": ("https://ghcr.io/token", {}),
}

# Registry URL overrides (Docker Hub uses a different host for API)
REGISTRY_URLS: dict[str, str] = {
    "docker.io": "https://registry-1.docker.io",
}

HTTP_OK = 200

MANIFEST_ACCEPT = (
    "application/vnd.docker.distribution.manifest.v2+json, "
    "application/vnd.oci.image.manifest.v1+json, "
    "application/vnd.oci.image.index.v1+json"
)


@dataclass(frozen=True)
class ImageRef:
    """Parsed container image reference."""

    registry: str
    namespace: str
    name: str
    tag: str
    digest: str | None = None

    @property
    def full_name(self) -> str:
        """Full image name with namespace."""
        return f"{self.namespace}/{self.name}" if self.namespace else self.name

    @property
    def display_name(self) -> str:
        """Display name (omits docker.io/library for official images)."""
        if self.registry in DOCKER_HUB_ALIASES:
            if self.namespace == "library":
                return self.name
            return self.full_name
        return f"{self.registry}/{self.full_name}"

    @classmethod
    def parse(cls, image: str) -> ImageRef:
        """Parse image string into components."""
        match = IMAGE_PATTERN.match(image)
        if not match:
            return cls("docker.io", "library", image.split(":")[0].split("@")[0], "latest")

        groups = match.groupdict()
        registry = groups.get("registry") or "docker.io"
        namespace = groups.get("namespace") or ""
        name = groups.get("name") or image
        tag = groups.get("tag") or "latest"
        digest = groups.get("digest")

        # Docker Hub official images have implicit "library" namespace
        if registry in DOCKER_HUB_ALIASES and not namespace:
            namespace = "library"

        return cls(registry, namespace, name, tag, digest)
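A couple of parse sketches (the ghcr.io reference is hypothetical):

ImageRef.parse("nginx:1.25")
# -> ImageRef(registry="docker.io", namespace="library", name="nginx", tag="1.25")
ImageRef.parse("ghcr.io/user/repo")
# -> ImageRef(registry="ghcr.io", namespace="user", name="repo", tag="latest")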
@dataclass
|
||||
class TagCheckResult:
|
||||
"""Result of checking tags for an image."""
|
||||
|
||||
image: ImageRef
|
||||
current_digest: str
|
||||
available_updates: list[str] = field(default_factory=list)
|
||||
error: str | None = None
|
||||
|
||||
|
||||


class RegistryClient:
    """Unified OCI Distribution API client."""

    def __init__(self, registry: str) -> None:
        """Initialize for a specific registry."""
        self.registry = registry.lower()
        # Normalize Docker Hub aliases
        if self.registry in DOCKER_HUB_ALIASES:
            self.registry = "docker.io"

        self.registry_url = REGISTRY_URLS.get(self.registry, f"https://{self.registry}")
        self._token_cache: dict[str, str] = {}

    async def _get_token(self, image: ImageRef, client: httpx.AsyncClient) -> str | None:
        """Get auth token for the registry (cached per image)."""
        cache_key = image.full_name
        if cache_key in self._token_cache:
            return self._token_cache[cache_key]

        endpoint = TOKEN_ENDPOINTS.get(self.registry)
        if not endpoint:
            return None  # No auth needed or unknown registry

        url, extra_params = endpoint
        params = {"scope": f"repository:{image.full_name}:pull", **extra_params}
        resp = await client.get(url, params=params)

        if resp.status_code == HTTP_OK:
            token: str | None = resp.json().get("token")
            if token:
                self._token_cache[cache_key] = token
                return token
        return None

    async def get_tags(self, image: ImageRef, client: httpx.AsyncClient) -> list[str]:
        """Fetch available tags for an image."""
        headers = {}
        token = await self._get_token(image, client)
        if token:
            headers["Authorization"] = f"Bearer {token}"

        url = f"{self.registry_url}/v2/{image.full_name}/tags/list"
        resp = await client.get(url, headers=headers)

        if resp.status_code != HTTP_OK:
            return []
        tags: list[str] = resp.json().get("tags", [])
        return tags

    async def get_digest(self, image: ImageRef, tag: str, client: httpx.AsyncClient) -> str | None:
        """Get digest for a specific tag."""
        headers = {"Accept": MANIFEST_ACCEPT}
        token = await self._get_token(image, client)
        if token:
            headers["Authorization"] = f"Bearer {token}"

        url = f"{self.registry_url}/v2/{image.full_name}/manifests/{tag}"
        resp = await client.head(url, headers=headers)

        if resp.status_code == HTTP_OK:
            digest: str | None = resp.headers.get("docker-content-digest")
            return digest
        return None
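
# Illustrative only, not part of the diff: for library/nginx on Docker Hub,
# the client above ends up issuing roughly these requests, per the constants
# defined earlier.
#
#   GET  https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/nginx:pull
#   GET  https://registry-1.docker.io/v2/library/nginx/tags/list
#   HEAD https://registry-1.docker.io/v2/library/nginx/manifests/1.25
#        (digest read from the "docker-content-digest" response header)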

def _parse_version(tag: str) -> tuple[int, ...] | None:
    """Parse version string into comparable tuple."""
    tag = tag.lstrip("vV")
    parts = tag.split(".")
    try:
        return tuple(int(p) for p in parts)
    except ValueError:
        return None


def _find_updates(current_tag: str, tags: list[str]) -> list[str]:
    """Find tags newer than current based on version comparison."""
    current_version = _parse_version(current_tag)
    if current_version is None:
        return []

    updates = []
    for tag in tags:
        tag_version = _parse_version(tag)
        if tag_version and tag_version > current_version:
            updates.append(tag)

    updates.sort(key=lambda t: _parse_version(t) or (), reverse=True)
    return updates
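
# Illustrative only, not part of the diff: a quick sanity check of the
# comparison rules above.
#
#   _parse_version("v1.2.3") -> (1, 2, 3); _parse_version("alpine") -> None
#   _find_updates("1.25", ["1.24", "1.25", "1.26", "2.0", "alpine"])
#       -> ["2.0", "1.26"]  (non-numeric tags ignored, newest first)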

async def check_image_updates(
    image_str: str,
    client: httpx.AsyncClient,
) -> TagCheckResult:
    """Check if newer versions are available for an image.

    Args:
        image_str: Image string like "nginx:1.25" or "ghcr.io/user/repo:tag"
        client: httpx async client

    Returns:
        TagCheckResult with available updates

    """
    image = ImageRef.parse(image_str)
    registry_client = RegistryClient(image.registry)

    try:
        tags = await registry_client.get_tags(image, client)
        updates = _find_updates(image.tag, tags)
        current_digest = await registry_client.get_digest(image, image.tag, client) or ""

        return TagCheckResult(
            image=image,
            current_digest=current_digest,
            available_updates=updates,
        )
    except Exception as e:
        return TagCheckResult(
            image=image,
            current_digest="",
            error=str(e),
        )
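
A minimal driver for check_image_updates could look like the sketch below; the import path compose_farm.registry is a guess, since the diff does not show the file name:

    # Sketch only: the import path below is hypothetical.
    import asyncio

    import httpx

    from compose_farm.registry import check_image_updates


    async def main() -> None:
        async with httpx.AsyncClient(timeout=10) as client:
            result = await check_image_updates("nginx:1.25", client)
            print(result.image.display_name, result.available_updates or result.error)


    asyncio.run(main())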
@@ -64,8 +64,11 @@ def load_state(config: Config) -> dict[str, str | list[str]]:


 def _sorted_dict(d: dict[str, str | list[str]]) -> dict[str, str | list[str]]:
-    """Return a dictionary sorted by keys."""
-    return dict(sorted(d.items(), key=lambda item: item[0]))
+    """Return a dictionary sorted by keys, with list values also sorted."""
+    return {
+        k: sorted(v) if isinstance(v, list) else v
+        for k, v in sorted(d.items(), key=lambda item: item[0])
+    }


 def save_state(config: Config, deployed: dict[str, str | list[str]]) -> None:
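
Sorting list values keeps multi-host entries stable across writes, so the state file does not churn when host order changes; for instance (illustrative):

    _sorted_dict({"b": "h1", "a": ["h2", "h1"]})  ->  {"a": ["h1", "h2"], "b": "h1"}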
@@ -109,10 +112,46 @@ def set_multi_host_stack(config: Config, stack: str, hosts: list[str]) -> None:
         state[stack] = hosts


-def remove_stack(config: Config, stack: str) -> None:
-    """Remove a stack from the state (after down)."""
+def remove_stack(config: Config, stack: str, host: str | None = None) -> None:
+    """Remove a stack from the state (after down).
+
+    If host is provided, only removes that host from a multi-host stack.
+    If the list becomes empty, removes the stack entirely.
+    For single-host stacks with host specified, removes only if host matches.
+    """
     with _modify_state(config) as state:
-        state.pop(stack, None)
+        if stack not in state:
+            return
+        if host is None:
+            state.pop(stack, None)
+            return
+        current = state[stack]
+        if isinstance(current, list):
+            new_hosts = [h for h in current if h != host]
+            if new_hosts:
+                state[stack] = new_hosts
+            else:
+                del state[stack]
+        elif current == host:
+            del state[stack]
+
+
+def add_stack_host(config: Config, stack: str, host: str) -> None:
+    """Add a single host to a stack's state.
+
+    For multi-host stacks, adds the host to the list if not present.
+    For single-host stacks or new entries, sets the host directly.
+    """
+    with _modify_state(config) as state:
+        current = state.get(stack)
+        if current is None:
+            state[stack] = host
+        elif isinstance(current, list):
+            if host not in current:
+                state[stack] = [*current, host]
+        elif current != host:
+            # Convert single host to list
+            state[stack] = [current, host]


 def get_stacks_needing_migration(config: Config) -> list[str]:
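
Together the two helpers keep single-host and multi-host entries consistent; a possible sequence (illustrative, with cfg being any loaded Config):

    add_stack_host(cfg, "web", "h1")  # state: {"web": "h1"}
    add_stack_host(cfg, "web", "h2")  # state: {"web": ["h1", "h2"]}
    remove_stack(cfg, "web", "h1")    # state: {"web": ["h2"]}
    remove_stack(cfg, "web")          # entry removed entirely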
@@ -8,6 +8,7 @@ use host-published ports for cross-host reachability.

 from __future__ import annotations

+import re
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any

@@ -383,3 +384,53 @@ def render_traefik_config(dynamic: dict[str, Any]) -> str:
     """Render Traefik dynamic config as YAML with a header comment."""
     body = yaml.safe_dump(dynamic, sort_keys=False)
     return _TRAEFIK_CONFIG_HEADER + body
+
+
+_HOST_RULE_PATTERN = re.compile(r"Host\(`([^`]+)`\)")
+
+
+def extract_website_urls(config: Config, stack: str) -> list[str]:
+    """Extract website URLs from Traefik labels in a stack's compose file.
+
+    Reuses generate_traefik_config to parse labels, then extracts Host() rules
+    from router configurations.
+
+    Returns a list of unique URLs, preferring HTTPS over HTTP.
+    """
+    try:
+        dynamic, _ = generate_traefik_config(config, [stack], check_all=True)
+    except FileNotFoundError:
+        return []
+
+    routers = dynamic.get("http", {}).get("routers", {})
+    if not routers:
+        return []
+
+    # Track URLs with their scheme preference (https > http)
+    urls: dict[str, str] = {}  # host -> scheme
+
+    for router_info in routers.values():
+        if not isinstance(router_info, dict):
+            continue
+
+        rule = router_info.get("rule", "")
+        entrypoints = router_info.get("entrypoints", [])
+
+        # entrypoints can be a list or string
+        if isinstance(entrypoints, list):
+            entrypoints_str = ",".join(entrypoints)
+        else:
+            entrypoints_str = str(entrypoints)
+
+        # Determine scheme from entrypoint
+        scheme = "https" if "websecure" in entrypoints_str else "http"
+
+        # Extract host(s) from rule
+        for match in _HOST_RULE_PATTERN.finditer(str(rule)):
+            host = match.group(1)
+            # Prefer https over http
+            if host not in urls or scheme == "https":
+                urls[host] = scheme
+
+    # Build URL list, sorted for consistency
+    return sorted(f"{scheme}://{host}" for host, scheme in urls.items())
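
So for a stack whose labels route app.example.com on both the web and websecure entrypoints, the HTTPS preference collapses the pair to a single URL (illustrative):

    extract_website_urls(cfg, "app")  ->  ["https://app.example.com"]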
@@ -3,17 +3,30 @@
 from __future__ import annotations

 import asyncio
+import sys
+import logging
 from contextlib import asynccontextmanager, suppress
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, cast

 from fastapi import FastAPI
+from fastapi.middleware.gzip import GZipMiddleware
 from fastapi.staticfiles import StaticFiles
 from pydantic import ValidationError
+from rich.logging import RichHandler

 from compose_farm.web.deps import STATIC_DIR, get_config
-from compose_farm.web.routes import actions, api, pages
+from compose_farm.web.routes import actions, api, containers, pages
 from compose_farm.web.streaming import TASK_TTL_SECONDS, cleanup_stale_tasks
-from compose_farm.web.ws import router as ws_router

+# Configure logging with Rich handler for compose_farm.web modules
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True, show_path=False)],
+)
+# Set our web modules to INFO level (uvicorn handles its own logging)
+logging.getLogger("compose_farm.web").setLevel(logging.INFO)
+
 if TYPE_CHECKING:
     from collections.abc import AsyncGenerator
@@ -52,17 +65,17 @@ def create_app() -> FastAPI:
         lifespan=lifespan,
     )

+    # Enable Gzip compression for faster transfers over slow networks
+    app.add_middleware(cast("Any", GZipMiddleware), minimum_size=1000)
+
     # Mount static files
     app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")

     app.include_router(pages.router)
+    app.include_router(containers.router)
     app.include_router(api.router, prefix="/api")
     app.include_router(actions.router, prefix="/api")

-    app.include_router(ws_router)
+    # WebSocket routes use Unix-only modules (fcntl, pty)
+    if sys.platform != "win32":
+        from compose_farm.web.ws import router as ws_router  # noqa: PLC0415
+
+        app.include_router(ws_router)

     return app
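
A quick way to confirm the new middleware end to end, as a sketch (host, port, and path are assumptions about a local run):

    # Hypothetical local probe of the GZip middleware added above.
    import httpx

    resp = httpx.get("http://localhost:8000/", headers={"Accept-Encoding": "gzip"})
    # Bodies of at least 1000 bytes (minimum_size) should come back compressed.
    print(resp.headers.get("content-encoding"))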
Some files were not shown because too many files have changed in this diff.