Compare commits

...

11 Commits

Author SHA1 Message Date
Bas Nijholt
d8e54aa347 check: Fix double newline spacing in output 2025-12-14 20:35:25 -08:00
Bas Nijholt
b2b6b421ba check: Add spacing before mounts/networks progress bar 2025-12-14 20:33:36 -08:00
Bas Nijholt
c6b35f02f0 check: Add spacing before SSH progress bar 2025-12-14 20:32:16 -08:00
Bas Nijholt
7e43b0a6b8 check: Fix spacing after transient progress bar 2025-12-14 20:31:20 -08:00
Bas Nijholt
2915b287ba check: Add SSH connectivity check as first remote step
- Check SSH connectivity to all remote hosts before mount/network checks
- Skip local hosts (no SSH needed)
- Show progress bar during SSH checks
- Report unreachable hosts with clear error messages
- Add newline spacing for better output formatting
2025-12-14 20:30:36 -08:00
Bas Nijholt
ae561db0c9 check: Add progress bar and parallelize mount/network checks
- Parallelize mount and network checking for all services
- Add Rich progress bar showing count, elapsed time, and service name
- Move all inline imports to top-level (contextlib, datetime, logs)
- Also sort state file entries alphabetically for consistency
2025-12-14 20:24:54 -08:00
Bas Nijholt
2d132747c4 sync: Enhance progress bars with count and elapsed time
Show "Discovering ━━━━ 32/65 • 0:00:05 • service-name" format with:
- M of N complete count
- Elapsed time
- Current service name
2025-12-14 20:15:39 -08:00
Bas Nijholt
2848163a04 sync: Add progress bars and parallelize operations
- Parallelize service discovery across all services
- Parallelize image digest capture
- Show transient Rich progress bars during both operations
- Significantly faster sync due to concurrent SSH connections
2025-12-14 20:13:42 -08:00
Bas Nijholt
76aa6e11d2 logs: Make --all and --host mutually exclusive
These options conflict conceptually - --all means all services across
all hosts, while --host means all services on a specific host.
2025-12-14 20:10:28 -08:00
Bas Nijholt
d377df15b4 logs: Add --host filter and contextual --tail default
- Add --host/-H option to filter logs to services on a specific host
- Default --tail to 20 lines when showing multiple services (--all, --host, or >1 service)
- Default to 100 lines for single service
- Add tests for contextual default and host filtering
2025-12-14 20:04:40 -08:00
Bas Nijholt
334c17cc28 logs: Use contextual default for --tail option
Default to 20 lines when --all is specified (quick overview),
100 lines otherwise (detailed view for specific services).
2025-12-14 19:59:12 -08:00
3 changed files with 503 additions and 19 deletions

View File

@@ -3,6 +3,8 @@
from __future__ import annotations
import asyncio
import contextlib
from datetime import UTC, datetime
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, TypeVar
@@ -11,23 +13,41 @@ import yaml
from rich.console import Console
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
SpinnerColumn,
TaskID,
TaskProgressColumn,
TextColumn,
TimeElapsedColumn,
)
from rich.table import Table
from . import __version__
from .compose import parse_external_networks
from .config import Config, load_config
from .executor import CommandResult, run_command, run_on_services, run_sequential_on_services
from .logs import snapshot_services
from .executor import (
CommandResult,
_is_local,
check_networks_exist,
check_paths_exist,
check_service_running,
run_command,
run_on_services,
run_sequential_on_services,
)
from .logs import (
DEFAULT_LOG_PATH,
SnapshotEntry,
_collect_service_entries,
_isoformat,
_load_existing_entries,
_merge_entries,
_write_toml,
)
from .operations import (
check_host_compatibility,
check_mounts_on_configured_hosts,
check_networks_on_configured_hosts,
discover_running_services,
get_service_paths,
up_services,
)
from .state import get_services_needing_migration, load_state, remove_service, save_state
@@ -156,6 +176,10 @@ LogPathOption = Annotated[
Path | None,
typer.Option("--log-path", "-l", help="Path to Dockerfarm TOML log"),
]
HostOption = Annotated[
str | None,
typer.Option("--host", "-H", help="Filter to services on this host"),
]
MISSING_PATH_PREVIEW_LIMIT = 2
@@ -252,13 +276,37 @@ def update(
def logs(
services: ServicesArg = None,
all_services: AllOption = False,
host: HostOption = None,
follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False,
tail: Annotated[int, typer.Option("--tail", "-n", help="Number of lines")] = 100,
tail: Annotated[
int | None,
typer.Option("--tail", "-n", help="Number of lines (default: 20 for --all, 100 otherwise)"),
] = None,
config: ConfigOption = None,
) -> None:
"""Show service logs."""
svc_list, cfg = _get_services(services or [], all_services, config)
cmd = f"logs --tail {tail}"
if all_services and host is not None:
err_console.print("[red]✗[/] Cannot use --all and --host together")
raise typer.Exit(1)
cfg = _load_config_or_exit(config)
# Determine service list based on options
if host is not None:
if host not in cfg.hosts:
err_console.print(f"[red]✗[/] Host '{host}' not found in config")
raise typer.Exit(1)
svc_list = [s for s, h in cfg.services.items() if h == host]
if not svc_list:
err_console.print(f"[yellow]![/] No services configured for host '{host}'")
return
else:
svc_list, cfg = _get_services(services or [], all_services, config)
# Default to fewer lines when showing multiple services
many_services = all_services or host is not None or len(svc_list) > 1
effective_tail = tail if tail is not None else (20 if many_services else 100)
cmd = f"logs --tail {effective_tail}"
if follow:
cmd += " -f"
results = _run_async(run_on_services(cfg, svc_list, cmd))
@@ -292,7 +340,6 @@ def _group_services_by_host(
def _get_container_counts_with_progress(cfg: Config) -> dict[str, int]:
"""Get container counts from all hosts with a progress bar."""
import contextlib
async def get_count(host_name: str) -> tuple[str, int]:
host = cfg.hosts[host_name]
@@ -452,6 +499,217 @@ def traefik_file(
err_console.print(f"[yellow]![/] {warning}")
def _discover_services_with_progress(cfg: Config) -> dict[str, str]:
    """Discover running services with a progress bar.

    Returns a mapping of service name -> host name for every service found
    running somewhere; services that are not running are omitted.
    """

    async def locate(service: str) -> tuple[str, str | None]:
        """Find the host *service* is running on, or None if not running."""
        preferred = cfg.services[service]
        # The assigned host is the most common case, so probe it first.
        if await check_service_running(cfg, service, preferred):
            return service, preferred
        # Otherwise scan every other configured host.
        for candidate in cfg.hosts:
            if candidate != preferred and await check_service_running(cfg, service, candidate):
                return service, candidate
        return service, None

    async def run_discovery(progress: Progress, task_id: TaskID) -> dict[str, str]:
        pending = [asyncio.create_task(locate(name)) for name in cfg.services]
        found: dict[str, str] = {}
        for finished in asyncio.as_completed(pending):
            name, where = await finished
            if where is not None:
                found[name] = where
            progress.update(task_id, advance=1, description=f"[cyan]{name}[/]")
        return found

    with Progress(
        SpinnerColumn(),
        TextColumn("[bold blue]Discovering[/]"),
        BarColumn(),
        MofNCompleteColumn(),
        TextColumn(""),
        TimeElapsedColumn(),
        TextColumn(""),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task("", total=len(cfg.services))
        return asyncio.run(run_discovery(progress, task_id))
def _snapshot_services_with_progress(
    cfg: Config,
    services: list[str],
    log_path: Path | None,
) -> Path:
    """Capture image digests for *services* with a progress bar.

    Args:
        cfg: Loaded configuration.
        services: Service names to snapshot.
        log_path: Target TOML log path; defaults to DEFAULT_LOG_PATH when None.

    Returns:
        The path the TOML log was written to.

    Raises:
        RuntimeError: If no image digests could be captured at all.
    """

    async def collect_service(service: str, now: datetime) -> tuple[str, list[SnapshotEntry]]:
        """Collect entries for one service; a per-service failure yields []."""
        try:
            return service, await _collect_service_entries(cfg, service, now=now)
        except RuntimeError:
            return service, []

    async def gather_with_progress(
        progress: Progress, task_id: TaskID, now: datetime, svc_list: list[str]
    ) -> list[SnapshotEntry]:
        # Each task carries its service name in the result, so completion is
        # attributed reliably.  (The previous approach scanned done tasks and
        # tagged them with a private `_reported` attribute, which could credit
        # the progress line to the wrong service when several finished close
        # together, and mutated asyncio.Task internals.)
        tasks = [asyncio.create_task(collect_service(s, now)) for s in svc_list]
        all_entries: list[SnapshotEntry] = []
        for coro in asyncio.as_completed(tasks):
            svc, entries = await coro
            all_entries.extend(entries)
            progress.update(task_id, advance=1, description=f"[cyan]{svc}[/]")
        return all_entries

    effective_log_path = log_path or DEFAULT_LOG_PATH
    now_dt = datetime.now(UTC)
    now_iso = _isoformat(now_dt)
    with Progress(
        SpinnerColumn(),
        TextColumn("[bold blue]Capturing[/]"),
        BarColumn(),
        MofNCompleteColumn(),
        TextColumn(""),
        TimeElapsedColumn(),
        TextColumn(""),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task("", total=len(services))
        snapshot_entries = asyncio.run(gather_with_progress(progress, task_id, now_dt, services))
    if not snapshot_entries:
        msg = "No image digests were captured"
        raise RuntimeError(msg)
    # Merge with any previously-logged entries so the TOML log accumulates.
    existing_entries = _load_existing_entries(effective_log_path)
    merged_entries = _merge_entries(existing_entries, snapshot_entries, now_iso=now_iso)
    meta = {"generated_at": now_iso, "compose_dir": str(cfg.compose_dir)}
    _write_toml(effective_log_path, meta=meta, entries=merged_entries)
    return effective_log_path
def _check_ssh_connectivity(cfg: Config) -> list[str]:
    """Check SSH connectivity to all hosts. Returns list of unreachable hosts."""
    # Local hosts need no SSH round-trip, so only probe the remote ones.
    remote_hosts = [name for name, host in cfg.hosts.items() if not _is_local(host)]
    if not remote_hosts:
        return []
    console.print()  # Spacing before progress bar

    async def probe(name: str) -> tuple[str, bool]:
        """Run a trivial remote command and report whether it succeeded."""
        outcome = await run_command(cfg.hosts[name], "echo ok", name, stream=False)
        return name, outcome.success

    async def run_probes(progress: Progress, task_id: TaskID) -> list[str]:
        in_flight = [asyncio.create_task(probe(h)) for h in remote_hosts]
        down: list[str] = []
        for finished in asyncio.as_completed(in_flight):
            name, ok = await finished
            if not ok:
                down.append(name)
            progress.update(task_id, advance=1, description=f"[cyan]{name}[/]")
        return down

    with Progress(
        SpinnerColumn(),
        TextColumn("[bold blue]Checking SSH connectivity[/]"),
        BarColumn(),
        MofNCompleteColumn(),
        TextColumn(""),
        TimeElapsedColumn(),
        TextColumn(""),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task("", total=len(remote_hosts))
        return asyncio.run(run_probes(progress, task_id))
def _check_mounts_and_networks_with_progress(
    cfg: Config,
    services: list[str],
) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]]]:
    """Check mounts and networks for all services with a progress bar.

    Returns (mount_errors, network_errors) where each is a list of
    (service, host, missing_item) tuples.
    """

    async def inspect(
        service: str,
    ) -> tuple[str, list[tuple[str, str, str]], list[tuple[str, str, str]]]:
        """Collect missing mounts and networks for a single service."""
        host_name = cfg.services[service]
        missing_mounts: list[tuple[str, str, str]] = []
        missing_nets: list[tuple[str, str, str]] = []
        # Mounts: every configured path must exist on the service's host.
        path_exists = await check_paths_exist(cfg, host_name, get_service_paths(cfg, service))
        missing_mounts.extend(
            (service, host_name, path) for path, found in path_exists.items() if not found
        )
        # Networks: only externally-declared networks are checked.
        networks = parse_external_networks(cfg, service)
        if networks:
            net_exists = await check_networks_exist(cfg, host_name, networks)
            missing_nets.extend(
                (service, host_name, net) for net, found in net_exists.items() if not found
            )
        return service, missing_mounts, missing_nets

    async def run_checks(
        progress: Progress, task_id: TaskID
    ) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]]]:
        in_flight = [asyncio.create_task(inspect(s)) for s in services]
        mounts: list[tuple[str, str, str]] = []
        nets: list[tuple[str, str, str]] = []
        for finished in asyncio.as_completed(in_flight):
            name, svc_mounts, svc_nets = await finished
            mounts.extend(svc_mounts)
            nets.extend(svc_nets)
            progress.update(task_id, advance=1, description=f"[cyan]{name}[/]")
        return mounts, nets

    with Progress(
        SpinnerColumn(),
        TextColumn("[bold blue]Checking mounts/networks[/]"),
        BarColumn(),
        MofNCompleteColumn(),
        TextColumn(""),
        TimeElapsedColumn(),
        TextColumn(""),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task("", total=len(services))
        return asyncio.run(run_checks(progress, task_id))
def _report_sync_changes(
added: list[str],
removed: list[str],
@@ -499,8 +757,7 @@ def sync(
cfg = _load_config_or_exit(config)
current_state = load_state(cfg)
console.print("Discovering running services...")
discovered = _run_async(discover_running_services(cfg))
discovered = _discover_services_with_progress(cfg)
# Calculate changes
added = [s for s in discovered if s not in current_state]
@@ -529,9 +786,8 @@ def sync(
# Capture image digests for running services
if discovered:
console.print("\nCapturing image digests...")
try:
path = _run_async(snapshot_services(cfg, list(discovered.keys()), log_path=log_path))
path = _snapshot_services_with_progress(cfg, list(discovered.keys()), log_path)
console.print(f"[green]✓[/] Digests written to {path}")
except RuntimeError as exc:
err_console.print(f"[yellow]![/] {exc}")
@@ -581,7 +837,7 @@ def _report_mount_errors(mount_errors: list[tuple[str, str, str]]) -> None:
for svc, host, path in mount_errors:
by_service.setdefault(svc, []).append((host, path))
console.print(f"\n[red]Missing mounts[/] ({len(mount_errors)}):")
console.print(f"[red]Missing mounts[/] ({len(mount_errors)}):")
for svc, items in sorted(by_service.items()):
host = items[0][0]
paths = [p for _, p in items]
@@ -596,7 +852,7 @@ def _report_network_errors(network_errors: list[tuple[str, str, str]]) -> None:
for svc, host, net in network_errors:
by_service.setdefault(svc, []).append((host, net))
console.print(f"\n[red]Missing networks[/] ({len(network_errors)}):")
console.print(f"[red]Missing networks[/] ({len(network_errors)}):")
for svc, items in sorted(by_service.items()):
host = items[0][0]
networks = [n for _, n in items]
@@ -605,6 +861,17 @@ def _report_network_errors(network_errors: list[tuple[str, str, str]]) -> None:
console.print(f" [red]✗[/] {net}")
def _report_ssh_status(unreachable_hosts: list[str]) -> bool:
    """Report SSH connectivity status. Returns True if there are errors."""
    # Happy path first: nothing to report beyond the all-clear line.
    if not unreachable_hosts:
        console.print("[green]✓[/] All hosts reachable")
        return False
    console.print(f"[red]Unreachable hosts[/] ({len(unreachable_hosts)}):")
    for host in sorted(unreachable_hosts):
        console.print(f" [red]✗[/] [magenta]{host}[/]")
    return True
def _report_host_compatibility(
compat: dict[str, tuple[int, int, list[str]]],
current_host: str,
@@ -662,9 +929,14 @@ def check(
_report_traefik_status(cfg, svc_list)
if not local:
console.print("\nChecking mounts and networks...")
mount_errors = _run_async(check_mounts_on_configured_hosts(cfg, svc_list))
network_errors = _run_async(check_networks_on_configured_hosts(cfg, svc_list))
# Check SSH connectivity first
if _report_ssh_status(_check_ssh_connectivity(cfg)):
has_errors = True
console.print() # Spacing before mounts/networks check
# Check mounts and networks
mount_errors, network_errors = _check_mounts_and_networks_with_progress(cfg, svc_list)
if mount_errors:
_report_mount_errors(mount_errors)

View File

@@ -26,11 +26,16 @@ def load_state(config: Config) -> dict[str, str]:
return deployed
def _sorted_dict(d: dict[str, str]) -> dict[str, str]:
"""Return a dictionary sorted by keys."""
return dict(sorted(d.items(), key=lambda item: item[0]))
def save_state(config: Config, deployed: dict[str, str]) -> None:
    """Save the deployment state.

    Entries are written sorted by service name for stable, diff-friendly
    output; sort_keys=False preserves that explicit ordering in the YAML.
    """
    state_path = config.get_state_path()
    with state_path.open("w") as f:
        yaml.safe_dump({"deployed": _sorted_dict(deployed)}, f, sort_keys=False)
def get_service_host(config: Config, service: str) -> str | None:

207
tests/test_cli_logs.py Normal file
View File

@@ -0,0 +1,207 @@
"""Tests for CLI logs command."""
from collections.abc import Coroutine
from pathlib import Path
from typing import Any
from unittest.mock import patch
from compose_farm.cli import logs
from compose_farm.config import Config, Host
from compose_farm.executor import CommandResult
def _make_config(tmp_path: Path) -> Config:
    """Build a minimal two-host, three-service config rooted at *tmp_path*."""
    compose_dir = tmp_path / "compose"
    compose_dir.mkdir()
    # Each service needs a directory with a (trivial) compose file.
    for name in ("svc1", "svc2", "svc3"):
        service_dir = compose_dir / name
        service_dir.mkdir()
        (service_dir / "docker-compose.yml").write_text("services: {}\n")
    return Config(
        compose_dir=compose_dir,
        hosts={"local": Host(address="localhost"), "remote": Host(address="192.168.1.10")},
        services={"svc1": "local", "svc2": "local", "svc3": "remote"},
    )
def _make_result(service: str) -> CommandResult:
    """Return a zero-exit, empty-output CommandResult for *service*."""
    return CommandResult(
        service=service,
        exit_code=0,
        success=True,
        stdout="",
        stderr="",
    )
def _mock_run_async_factory(
    services: list[str],
) -> tuple[Any, list[CommandResult]]:
    """Build a fake _run_async that returns canned results for *services*.

    Returns the fake callable and the result list it will hand back, so
    tests can inspect the exact objects.
    """
    canned = [_make_result(name) for name in services]

    def fake_run_async(_coro: Coroutine[Any, Any, Any]) -> list[CommandResult]:
        # The coroutine is ignored entirely; tests only need the results.
        return canned

    return fake_run_async, canned
class TestLogsContextualDefault:
    """Tests for logs --tail contextual default behavior."""

    def test_logs_all_services_defaults_to_20(self, tmp_path: Path) -> None:
        """When --all is specified, default tail should be 20."""
        cfg = _make_config(tmp_path)
        fake_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            patch("compose_farm.cli._run_async", side_effect=fake_run_async),
            patch("compose_farm.cli.run_on_services") as run_mock,
        ):
            run_mock.return_value = None
            logs(services=None, all_services=True, host=None, follow=False, tail=None, config=None)
            run_mock.assert_called_once()
            assert run_mock.call_args[0][2] == "logs --tail 20"

    def test_logs_single_service_defaults_to_100(self, tmp_path: Path) -> None:
        """When specific services are specified, default tail should be 100."""
        cfg = _make_config(tmp_path)
        fake_run_async, _ = _mock_run_async_factory(["svc1"])
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            patch("compose_farm.cli._run_async", side_effect=fake_run_async),
            patch("compose_farm.cli.run_on_services") as run_mock,
        ):
            logs(
                services=["svc1"],
                all_services=False,
                host=None,
                follow=False,
                tail=None,
                config=None,
            )
            run_mock.assert_called_once()
            assert run_mock.call_args[0][2] == "logs --tail 100"

    def test_logs_explicit_tail_overrides_default(self, tmp_path: Path) -> None:
        """When --tail is explicitly provided, it should override the default."""
        cfg = _make_config(tmp_path)
        fake_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"])
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            patch("compose_farm.cli._run_async", side_effect=fake_run_async),
            patch("compose_farm.cli.run_on_services") as run_mock,
        ):
            logs(
                services=None,
                all_services=True,
                host=None,
                follow=False,
                tail=50,
                config=None,
            )
            run_mock.assert_called_once()
            assert run_mock.call_args[0][2] == "logs --tail 50"

    def test_logs_follow_appends_flag(self, tmp_path: Path) -> None:
        """When --follow is specified, -f should be appended to command."""
        cfg = _make_config(tmp_path)
        fake_run_async, _ = _mock_run_async_factory(["svc1"])
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            patch("compose_farm.cli._run_async", side_effect=fake_run_async),
            patch("compose_farm.cli.run_on_services") as run_mock,
        ):
            logs(
                services=["svc1"],
                all_services=False,
                host=None,
                follow=True,
                tail=None,
                config=None,
            )
            run_mock.assert_called_once()
            assert run_mock.call_args[0][2] == "logs --tail 100 -f"
class TestLogsHostFilter:
    """Tests for logs --host filter behavior."""

    def test_logs_host_filter_selects_services_on_host(self, tmp_path: Path) -> None:
        """When --host is specified, only services on that host are included."""
        cfg = _make_config(tmp_path)
        fake_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            patch("compose_farm.cli._run_async", side_effect=fake_run_async),
            patch("compose_farm.cli.run_on_services") as run_mock,
        ):
            logs(
                services=None,
                all_services=False,
                host="local",
                follow=False,
                tail=None,
                config=None,
            )
            run_mock.assert_called_once()
            # svc1 and svc2 live on "local"; svc3 lives on "remote".
            assert set(run_mock.call_args[0][1]) == {"svc1", "svc2"}

    def test_logs_host_filter_defaults_to_20_lines(self, tmp_path: Path) -> None:
        """When --host is specified, default tail should be 20 (multiple services)."""
        cfg = _make_config(tmp_path)
        fake_run_async, _ = _mock_run_async_factory(["svc1", "svc2"])
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            patch("compose_farm.cli._run_async", side_effect=fake_run_async),
            patch("compose_farm.cli.run_on_services") as run_mock,
        ):
            logs(
                services=None,
                all_services=False,
                host="local",
                follow=False,
                tail=None,
                config=None,
            )
            run_mock.assert_called_once()
            assert run_mock.call_args[0][2] == "logs --tail 20"

    def test_logs_all_and_host_mutually_exclusive(self, tmp_path: Path) -> None:
        """Using --all and --host together should error."""
        import pytest
        import typer

        cfg = _make_config(tmp_path)
        with (
            patch("compose_farm.cli._load_config_or_exit", return_value=cfg),
            pytest.raises(typer.Exit) as exc_info,
        ):
            logs(
                services=None,
                all_services=True,
                host="local",
                follow=False,
                tail=None,
                config=None,
            )
        assert exc_info.value.exit_code == 1