Mirror of https://github.com/basnijholt/compose-farm.git
synced 2026-02-07 16:02:10 +00:00
Compare commits
16 Commits
| SHA1 |
|---|
| 27f17a2451 |
| 98c2492d21 |
| 04339cbb9a |
| cdb3b1d257 |
| 0913769729 |
| 3a1d5b77b5 |
| e12002ce86 |
| 676a6fe72d |
| f29f8938fe |
| 4c0e147786 |
| cba61118de |
| 32dc6b3665 |
| 7d98e664e9 |
| 6763403700 |
| feb0e13bfd |
| b86f6d190f |
README.md (37 changed lines)
@@ -117,33 +117,38 @@ Compose files are expected at `{compose_dir}/{service}/compose.yaml` (also suppo

## Usage

The CLI is available as both `compose-farm` and the shorter `cf` alias.

```bash
# Start services (auto-migrates if host changed in config)
compose-farm up plex jellyfin
compose-farm up --all
cf up plex jellyfin
cf up --all

# Stop services
compose-farm down plex
cf down plex

# Pull latest images
compose-farm pull --all
cf pull --all

# Restart (down + up)
compose-farm restart plex
cf restart plex

# Update (pull + down + up) - the end-to-end update command
compose-farm update --all
cf update --all

# Sync state with reality (discovers running services + captures image digests)
compose-farm sync # updates state.yaml and dockerfarm-log.toml
compose-farm sync --dry-run # preview without writing
cf sync # updates state.yaml and dockerfarm-log.toml
cf sync --dry-run # preview without writing

# Check config vs disk (find missing services, validate traefik labels)
cf check

# View logs
compose-farm logs plex
compose-farm logs -f plex # follow
cf logs plex
cf logs -f plex # follow

# Show status
compose-farm ps
cf ps
```

### Auto-Migration
@@ -158,7 +163,7 @@ When you change a service's host assignment in config and run `up`, Compose Farm
services:
  plex: nas01

# After: change to nas02, then run `compose-farm up plex`
# After: change to nas02, then run `cf up plex`
services:
  plex: nas02 # Compose Farm will migrate automatically
```
@@ -211,7 +216,7 @@ providers:
**Generate the fragment**

```bash
compose-farm traefik-file --all --output /mnt/data/traefik/dynamic.d/compose-farm.yml
cf traefik-file --all --output /mnt/data/traefik/dynamic.d/compose-farm.yml
```

Re‑run this after changing Traefik labels, moving a service to another host, or changing
@@ -238,7 +243,7 @@ services:
The `traefik_service` option specifies which service runs Traefik. Services on the same host
are skipped in the file-provider config since Traefik's docker provider handles them directly.
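The full config excerpt is outside this hunk; a minimal sketch of how these options might fit together, with key names taken from the code and all values hypothetical:

```yaml
# compose-farm config (sketch; paths and host names are placeholders)
traefik_service: traefik                                    # the service that runs Traefik
traefik_file: /mnt/data/traefik/dynamic.d/compose-farm.yml  # fragment regenerated on `up`
services:
  traefik: nas01   # same host as Traefik -> covered by the docker provider
  plex: nas02      # other hosts -> routed via the generated file-provider fragment
```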
Now `compose-farm up plex` will update the Traefik config automatically—no separate
Now `cf up plex` will update the Traefik config automatically—no separate
`traefik-file` command needed.

**Combining with existing config**
@@ -249,7 +254,7 @@ directory and Traefik will merge all files:
```bash
mkdir -p /opt/traefik/dynamic.d
mv /opt/traefik/dynamic.yml /opt/traefik/dynamic.d/manual.yml
compose-farm traefik-file --all -o /opt/traefik/dynamic.d/compose-farm.yml
cf traefik-file --all -o /opt/traefik/dynamic.d/compose-farm.yml
```

Update your Traefik config to use directory watching instead of a single file:
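The README's actual snippet falls outside this hunk; a minimal sketch of the directory-watching file provider it refers to (the directory is whatever path you generate fragments into):

```yaml
# traefik.yml (static config) - watch a directory instead of a single file
providers:
  file:
    directory: /opt/traefik/dynamic.d
    watch: true
```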
@@ -272,7 +277,7 @@ Update your Traefik config to use directory watching instead of a single file:

## How It Works

1. You run `compose-farm up plex`
1. You run `cf up plex`
2. Compose Farm looks up which host runs `plex` (e.g., `nas01`)
3. It SSHs to `nas01` (or runs locally if `localhost`)
4. It executes `docker compose -f /opt/compose/plex/docker-compose.yml up -d`
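Conceptually, steps 3 and 4 amount to the following manual invocation (a sketch; the actual SSH options and compose-file resolution live in the executor):

```bash
ssh nas01 "docker compose -f /opt/compose/plex/docker-compose.yml up -d"
```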
@@ -12,10 +12,12 @@ dependencies = [
    "pydantic>=2.0.0",
    "asyncssh>=2.14.0",
    "pyyaml>=6.0",
    "rich>=13.0.0",
]

[project.scripts]
compose-farm = "compose_farm.cli:app"
cf = "compose_farm.cli:app"

[build-system]
requires = ["hatchling", "hatch-vcs"]
@@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Annotated, TypeVar
import typer
import yaml
from rich.console import Console

from . import __version__
from .config import Config, load_config
@@ -28,6 +29,18 @@ if TYPE_CHECKING:
T = TypeVar("T")

console = Console(highlight=False)
err_console = Console(stderr=True, highlight=False)


def _load_config_or_exit(config_path: Path | None) -> Config:
    """Load config or exit with a friendly error message."""
    try:
        return load_config(config_path)
    except FileNotFoundError as e:
        err_console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1) from e


def _maybe_regenerate_traefik(cfg: Config) -> None:
    """Regenerate traefik config if traefik_file is configured."""
@@ -38,11 +51,12 @@ def _maybe_regenerate_traefik(cfg: Config) -> None:
        dynamic, warnings = generate_traefik_config(cfg, list(cfg.services.keys()))
        cfg.traefik_file.parent.mkdir(parents=True, exist_ok=True)
        cfg.traefik_file.write_text(yaml.safe_dump(dynamic, sort_keys=False))
        typer.echo(f"Traefik config updated: {cfg.traefik_file}")
        console.print()  # Ensure we're on a new line after streaming output
        console.print(f"[green]✓[/] Traefik config updated: {cfg.traefik_file}")
        for warning in warnings:
            typer.echo(warning, err=True)
            err_console.print(f"[yellow]![/] {warning}")
    except (FileNotFoundError, ValueError) as exc:
        typer.echo(f"Warning: Failed to update traefik config: {exc}", err=True)
        err_console.print(f"[yellow]![/] Failed to update traefik config: {exc}")


def _version_callback(value: bool) -> None:
@@ -56,6 +70,7 @@ app = typer.Typer(
    name="compose-farm",
    help="Compose Farm - run docker compose commands across multiple hosts",
    no_args_is_help=True,
    context_settings={"help_option_names": ["-h", "--help"]},
)
@@ -81,12 +96,12 @@ def _get_services(
    config_path: Path | None,
) -> tuple[list[str], Config]:
    """Resolve service list and load config."""
    config = load_config(config_path)
    config = _load_config_or_exit(config_path)

    if all_services:
        return list(config.services.keys()), config
    if not services:
        typer.echo("Error: Specify services or use --all", err=True)
        err_console.print("[red]✗[/] Specify services or use --all")
        raise typer.Exit(1)
    return list(services), config
@@ -101,7 +116,9 @@ def _report_results(results: list[CommandResult]) -> None:
    failed = [r for r in results if not r.success]
    if failed:
        for r in failed:
            typer.echo(f"[{r.service}] Failed with exit code {r.exit_code}", err=True)
            err_console.print(
                f"[cyan]\\[{r.service}][/] [red]Failed with exit code {r.exit_code}[/]"
            )
        raise typer.Exit(1)
@@ -137,15 +154,18 @@ async def _up_with_migration(
        # If service is deployed elsewhere, migrate it
        if current_host and current_host != target_host:
            if current_host in cfg.hosts:
                typer.echo(f"[{service}] Migrating from {current_host} to {target_host}...")
                console.print(
                    f"[cyan]\\[{service}][/] Migrating from "
                    f"[magenta]{current_host}[/] → [magenta]{target_host}[/]..."
                )
                down_result = await run_compose_on_host(cfg, service, current_host, "down")
                if not down_result.success:
                    results.append(down_result)
                    continue
            else:
                typer.echo(
                    f"[{service}] Warning: was on {current_host} (not in config), skipping down",
                    err=True,
                err_console.print(
                    f"[cyan]\\[{service}][/] [yellow]![/] was on "
                    f"[magenta]{current_host}[/] (not in config), skipping down"
                )

        # Start on target host
@@ -159,7 +179,7 @@ async def _up_with_migration(
    return results


@app.command()
@app.command(rich_help_panel="Lifecycle")
def up(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -172,7 +192,7 @@ def up(
    _report_results(results)


@app.command()
@app.command(rich_help_panel="Lifecycle")
def down(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -191,7 +211,7 @@ def down(
    _report_results(results)


@app.command()
@app.command(rich_help_panel="Lifecycle")
def pull(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -203,7 +223,7 @@ def pull(
    _report_results(results)


@app.command()
@app.command(rich_help_panel="Lifecycle")
def restart(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -216,7 +236,7 @@ def restart(
    _report_results(results)


@app.command()
@app.command(rich_help_panel="Lifecycle")
def update(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -229,7 +249,7 @@ def update(
    _report_results(results)


@app.command()
@app.command(rich_help_panel="Monitoring")
def logs(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -246,17 +266,17 @@ def logs(
    _report_results(results)


@app.command()
@app.command(rich_help_panel="Monitoring")
def ps(
    config: ConfigOption = None,
) -> None:
    """Show status of all services."""
    cfg = load_config(config)
    cfg = _load_config_or_exit(config)
    results = _run_async(run_on_services(cfg, list(cfg.services.keys()), "ps"))
    _report_results(results)


@app.command("traefik-file")
@app.command("traefik-file", rich_help_panel="Configuration")
def traefik_file(
    services: ServicesArg = None,
    all_services: AllOption = False,
@@ -275,7 +295,7 @@ def traefik_file(
    try:
        dynamic, warnings = generate_traefik_config(cfg, svc_list)
    except (FileNotFoundError, ValueError) as exc:
        typer.echo(str(exc), err=True)
        err_console.print(f"[red]✗[/] {exc}")
        raise typer.Exit(1) from exc

    rendered = yaml.safe_dump(dynamic, sort_keys=False)
@@ -283,12 +303,12 @@ def traefik_file(
    if output:
        output.parent.mkdir(parents=True, exist_ok=True)
        output.write_text(rendered)
        typer.echo(f"Traefik config written to {output}")
        console.print(f"[green]✓[/] Traefik config written to {output}")
    else:
        typer.echo(rendered)
        console.print(rendered)

    for warning in warnings:
        typer.echo(warning, err=True)
        err_console.print(f"[yellow]![/] {warning}")


async def _discover_running_services(cfg: Config) -> dict[str, str]:
@@ -324,22 +344,27 @@ def _report_sync_changes(
) -> None:
    """Report sync changes to the user."""
    if added:
        typer.echo(f"\nNew services found ({len(added)}):")
        console.print(f"\nNew services found ({len(added)}):")
        for service in sorted(added):
            typer.echo(f" + {service} on {discovered[service]}")
            console.print(f" [green]+[/] [cyan]{service}[/] on [magenta]{discovered[service]}[/]")

    if changed:
        typer.echo(f"\nServices on different hosts ({len(changed)}):")
        console.print(f"\nServices on different hosts ({len(changed)}):")
        for service, old_host, new_host in sorted(changed):
            typer.echo(f" ~ {service}: {old_host} -> {new_host}")
            console.print(
                f" [yellow]~[/] [cyan]{service}[/]: "
                f"[magenta]{old_host}[/] → [magenta]{new_host}[/]"
            )

    if removed:
        typer.echo(f"\nServices no longer running ({len(removed)}):")
        console.print(f"\nServices no longer running ({len(removed)}):")
        for service in sorted(removed):
            typer.echo(f" - {service} (was on {current_state[service]})")
            console.print(
                f" [red]-[/] [cyan]{service}[/] (was on [magenta]{current_state[service]}[/])"
            )


@app.command()
@app.command(rich_help_panel="Configuration")
def sync(
    config: ConfigOption = None,
    log_path: LogPathOption = None,
@@ -354,10 +379,10 @@ def sync(
    file, and captures image digests. Combines service discovery with
    image snapshot into a single command.
    """
    cfg = load_config(config)
    cfg = _load_config_or_exit(config)
    current_state = load_state(cfg)

    typer.echo("Discovering running services...")
    console.print("Discovering running services...")
    discovered = _run_async(_discover_running_services(cfg))

    # Calculate changes
@@ -374,25 +399,69 @@ def sync(
    if state_changed:
        _report_sync_changes(added, removed, changed, discovered, current_state)
    else:
        typer.echo("State is already in sync.")
        console.print("[green]✓[/] State is already in sync.")

    if dry_run:
        typer.echo("\n(dry-run: no changes made)")
        console.print("\n[dim](dry-run: no changes made)[/]")
        return

    # Update state file
    if state_changed:
        save_state(cfg, discovered)
        typer.echo(f"\nState updated: {len(discovered)} services tracked.")
        console.print(f"\n[green]✓[/] State updated: {len(discovered)} services tracked.")

    # Capture image digests for running services
    if discovered:
        typer.echo("\nCapturing image digests...")
        console.print("\nCapturing image digests...")
        try:
            path = _run_async(snapshot_services(cfg, list(discovered.keys()), log_path=log_path))
            typer.echo(f"Digests written to {path}")
            console.print(f"[green]✓[/] Digests written to {path}")
        except RuntimeError as exc:
            typer.echo(f"Warning: {exc}", err=True)
            err_console.print(f"[yellow]![/] {exc}")


@app.command(rich_help_panel="Configuration")
def check(
    config: ConfigOption = None,
) -> None:
    """Check for compose directories not in config (and vice versa)."""
    cfg = _load_config_or_exit(config)
    configured = set(cfg.services.keys())
    on_disk = cfg.discover_compose_dirs()

    missing_from_config = sorted(on_disk - configured)
    missing_from_disk = sorted(configured - on_disk)

    if missing_from_config:
        console.print(f"\n[yellow]Not in config[/] ({len(missing_from_config)}):")
        for name in missing_from_config:
            console.print(f" [yellow]+[/] [cyan]{name}[/]")

    if missing_from_disk:
        console.print(f"\n[red]No compose file found[/] ({len(missing_from_disk)}):")
        for name in missing_from_disk:
            console.print(f" [red]-[/] [cyan]{name}[/]")

    if not missing_from_config and not missing_from_disk:
        console.print("[green]✓[/] All compose directories are in config.")
    elif missing_from_config:
        console.print(f"\n[dim]To add missing services, append to {cfg.config_path}:[/]")
        for name in missing_from_config:
            console.print(f"[dim] {name}: docker-debian[/]")

    # Check traefik labels have matching ports
    try:
        _, traefik_warnings = generate_traefik_config(
            cfg, list(cfg.services.keys()), check_all=True
        )
        if traefik_warnings:
            console.print(f"\n[yellow]Traefik issues[/] ({len(traefik_warnings)}):")
            for warning in traefik_warnings:
                console.print(f" [yellow]![/] {warning}")
        elif not missing_from_config and not missing_from_disk:
            console.print("[green]✓[/] All traefik services have published ports.")
    except (FileNotFoundError, ValueError):
        pass  # Skip traefik check if config can't be loaded


if __name__ == "__main__":
@@ -65,6 +65,25 @@ class Config(BaseModel):
        # Default to compose.yaml if none exist (will error later)
        return service_dir / "compose.yaml"

    def discover_compose_dirs(self) -> set[str]:
        """Find all directories in compose_dir that contain a compose file."""
        compose_filenames = {
            "compose.yaml",
            "compose.yml",
            "docker-compose.yml",
            "docker-compose.yaml",
        }
        found: set[str] = set()
        if not self.compose_dir.exists():
            return found
        for subdir in self.compose_dir.iterdir():
            if subdir.is_dir():
                for filename in compose_filenames:
                    if (subdir / filename).exists():
                        found.add(subdir.name)
                        break
        return found


def _parse_hosts(raw_hosts: dict[str, str | dict[str, str | int]]) -> dict[str, Host]:
    """Parse hosts from config, handling both simple and full forms."""
@@ -3,18 +3,42 @@
from __future__ import annotations

import asyncio
import sys
import socket
from dataclasses import dataclass
from functools import lru_cache
from typing import TYPE_CHECKING, Any

import asyncssh
from rich.console import Console
from rich.markup import escape

if TYPE_CHECKING:
    from .config import Config, Host

_console = Console(highlight=False)
_err_console = Console(stderr=True, highlight=False)

LOCAL_ADDRESSES = frozenset({"local", "localhost", "127.0.0.1", "::1"})


@lru_cache(maxsize=1)
def _get_local_ips() -> frozenset[str]:
    """Get all IP addresses of the current machine."""
    ips: set[str] = set()
    try:
        hostname = socket.gethostname()
        # Get all addresses for hostname
        for info in socket.getaddrinfo(hostname, None):
            ips.add(info[4][0])
        # Also try getting the default outbound IP
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            ips.add(s.getsockname()[0])
    except OSError:
        pass
    return frozenset(ips)


@dataclass
class CommandResult:
    """Result of a command execution."""
@@ -28,7 +52,11 @@ class CommandResult:
def _is_local(host: Host) -> bool:
    """Check if host should run locally (no SSH)."""
    return host.address.lower() in LOCAL_ADDRESSES
    addr = host.address.lower()
    if addr in LOCAL_ADDRESSES:
        return True
    # Check if address matches any of this machine's IPs
    return addr in _get_local_ips()


async def _run_local_command(
@@ -53,12 +81,14 @@ async def _run_local_command(
        *,
        is_stderr: bool = False,
    ) -> None:
        output = sys.stderr if is_stderr else sys.stdout
        console = _err_console if is_stderr else _console
        while True:
            line = await reader.readline()
            if not line:
                break
            print(f"[{prefix}] {line.decode()}", end="", file=output, flush=True)
            text = line.decode()
            if text.strip():  # Skip empty lines
                console.print(f"[cyan]\\[{prefix}][/] {escape(text)}", end="")

    await asyncio.gather(
        read_stream(proc.stdout, service),
@@ -80,7 +110,7 @@ async def _run_local_command(
            stderr=stderr_data.decode() if stderr_data else "",
        )
    except OSError as e:
        print(f"[{service}] Local error: {e}", file=sys.stderr)
        _err_console.print(f"[cyan]\\[{service}][/] [red]Local error:[/] {e}")
        return CommandResult(service=service, exit_code=1, success=False)
@@ -111,9 +141,10 @@ async def _run_ssh_command(
        *,
        is_stderr: bool = False,
    ) -> None:
        output = sys.stderr if is_stderr else sys.stdout
        console = _err_console if is_stderr else _console
        async for line in reader:
            print(f"[{prefix}] {line}", end="", file=output, flush=True)
            if line.strip():  # Skip empty lines
                console.print(f"[cyan]\\[{prefix}][/] {escape(line)}", end="")

    await asyncio.gather(
        read_stream(proc.stdout, service),
@@ -135,7 +166,7 @@ async def _run_ssh_command(
            stderr=stderr_data,
        )
    except (OSError, asyncssh.Error) as e:
        print(f"[{service}] SSH error: {e}", file=sys.stderr)
        _err_console.print(f"[cyan]\\[{service}][/] [red]SSH error:[/] {e}")
        return CommandResult(service=service, exit_code=1, success=False)
@@ -29,7 +29,6 @@ class PortMapping:
    target: int
    published: int | None
    protocol: str | None = None


@dataclass
@@ -121,7 +120,7 @@ def _parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]: # noqa: P
    for item in items:
        if isinstance(item, str):
            interpolated = _interpolate(item, env)
            port_spec, _, protocol = interpolated.partition("/")
            port_spec, _, _ = interpolated.partition("/")
            parts = port_spec.split(":")
            published: int | None = None
            target: int | None = None
@@ -136,9 +135,7 @@ def _parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]: # noqa: P
                target = int(parts[-1])

            if target is not None:
                mappings.append(
                    PortMapping(target=target, published=published, protocol=protocol or None)
                )
                mappings.append(PortMapping(target=target, published=published))
        elif isinstance(item, dict):
            target_raw = item.get("target")
            if isinstance(target_raw, str):
@@ -158,14 +155,7 @@ def _parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]: # noqa: P
                published_val = int(str(published_raw)) if published_raw is not None else None
            except (TypeError, ValueError):
                published_val = None
            protocol_val = item.get("protocol")
            mappings.append(
                PortMapping(
                    target=target_val,
                    published=published_val,
                    protocol=str(protocol_val) if protocol_val else None,
                )
            )
            mappings.append(PortMapping(target=target_val, published=published_val))

    return mappings
@@ -400,10 +390,28 @@ def _process_service_label(
        source.scheme = str(_parse_value(key_without_prefix, label_value))


def _get_ports_for_service(
    definition: dict[str, Any],
    all_services: dict[str, Any],
    env: dict[str, str],
) -> list[PortMapping]:
    """Get ports for a service, following network_mode: service:X if present."""
    network_mode = definition.get("network_mode", "")
    if isinstance(network_mode, str) and network_mode.startswith("service:"):
        # Service uses another service's network - get ports from that service
        ref_service = network_mode[len("service:") :]
        if ref_service in all_services:
            ref_def = all_services[ref_service]
            if isinstance(ref_def, dict):
                return _parse_ports(ref_def.get("ports"), env)
    return _parse_ports(definition.get("ports"), env)


def _process_service_labels(
    stack: str,
    compose_service: str,
    definition: dict[str, Any],
    all_services: dict[str, Any],
    host_address: str,
    env: dict[str, str],
    dynamic: dict[str, Any],
@@ -417,7 +425,7 @@ def _process_service_labels(
    if enable_raw is not None and _parse_value("enable", enable_raw) is False:
        return

    ports = _parse_ports(definition.get("ports"), env)
    ports = _get_ports_for_service(definition, all_services, env)
    routers: dict[str, bool] = {}
    service_names: set[str] = set()
@@ -452,10 +460,19 @@ def _process_service_labels(
def generate_traefik_config(
    config: Config,
    services: list[str],
    *,
    check_all: bool = False,
) -> tuple[dict[str, Any], list[str]]:
    """Generate Traefik dynamic config from compose labels.

    Args:
        config: The compose-farm config.
        services: List of service names to process.
        check_all: If True, check all services for warnings (ignore host filtering).
            Used by the check command to validate all traefik labels.

    Returns (config_dict, warnings).

    """
    dynamic: dict[str, Any] = {}
    warnings: list[str] = []
@@ -463,7 +480,7 @@ def generate_traefik_config(
    # Determine Traefik's host from service assignment
    traefik_host = None
    if config.traefik_service:
    if config.traefik_service and not check_all:
        traefik_host = config.services.get(config.traefik_service)

    for stack in services:
@@ -471,10 +488,12 @@ def generate_traefik_config(
        stack_host = config.services.get(stack)

        # Skip services on Traefik's host - docker provider handles them directly
        if host_address.lower() in LOCAL_ADDRESSES:
            continue
        if traefik_host and stack_host == traefik_host:
            continue
        # (unless check_all is True, for validation purposes)
        if not check_all:
            if host_address.lower() in LOCAL_ADDRESSES:
                continue
            if traefik_host and stack_host == traefik_host:
                continue

        for compose_service, definition in raw_services.items():
            if not isinstance(definition, dict):
@@ -483,6 +502,7 @@ def generate_traefik_config(
                stack,
                compose_service,
                definition,
                raw_services,
                host_address,
                env,
                dynamic,
@@ -171,4 +171,4 @@ class TestReportSyncChanges:
        )
        captured = capsys.readouterr()
        assert "Services on different hosts (1)" in captured.out
        assert "~ plex: nas01 -> nas02" in captured.out
        assert "~ plex: nas01 → nas02" in captured.out
@@ -193,3 +193,51 @@ def test_generate_skips_services_with_enable_false(tmp_path: Path) -> None:
    assert dynamic == {}
    assert warnings == []


def test_generate_follows_network_mode_service_for_ports(tmp_path: Path) -> None:
    """Services using network_mode: service:X should use ports from service X."""
    cfg = Config(
        compose_dir=tmp_path,
        hosts={"nas01": Host(address="192.168.1.10")},
        services={"vpn-stack": "nas01"},
    )
    compose_path = tmp_path / "vpn-stack" / "docker-compose.yml"
    _write_compose(
        compose_path,
        {
            "services": {
                "vpn": {
                    "image": "gluetun",
                    "ports": ["5080:5080", "9696:9696"],
                },
                "qbittorrent": {
                    "image": "qbittorrent",
                    "network_mode": "service:vpn",
                    "labels": [
                        "traefik.enable=true",
                        "traefik.http.routers.torrent.rule=Host(`torrent.example.com`)",
                        "traefik.http.services.torrent.loadbalancer.server.port=5080",
                    ],
                },
                "prowlarr": {
                    "image": "prowlarr",
                    "network_mode": "service:vpn",
                    "labels": [
                        "traefik.enable=true",
                        "traefik.http.routers.prowlarr.rule=Host(`prowlarr.example.com`)",
                        "traefik.http.services.prowlarr.loadbalancer.server.port=9696",
                    ],
                },
            }
        },
    )

    dynamic, warnings = generate_traefik_config(cfg, ["vpn-stack"])

    assert warnings == []
    # Both services should get their ports from the vpn service
    torrent_servers = dynamic["http"]["services"]["torrent"]["loadbalancer"]["servers"]
    assert torrent_servers == [{"url": "http://192.168.1.10:5080"}]
    prowlarr_servers = dynamic["http"]["services"]["prowlarr"]["loadbalancer"]["servers"]
    assert prowlarr_servers == [{"url": "http://192.168.1.10:9696"}]
uv.lock (2 changed lines, generated)
@@ -131,6 +131,7 @@ dependencies = [
    { name = "asyncssh" },
    { name = "pydantic" },
    { name = "pyyaml" },
    { name = "rich" },
    { name = "typer" },
]
@@ -151,6 +152,7 @@ requires-dist = [
    { name = "asyncssh", specifier = ">=2.14.0" },
    { name = "pydantic", specifier = ">=2.0.0" },
    { name = "pyyaml", specifier = ">=6.0" },
    { name = "rich", specifier = ">=13.0.0" },
    { name = "typer", specifier = ">=0.9.0" },
]