Se adoptó la versión actualmente recomendada por el equipo de Docker, el formato "compose.yaml". También se crearon nuevos scripts y se actualizaron algunos para adaptarse a los nuevos archivos.
634 lines
23 KiB
Python
634 lines
23 KiB
Python
#!/usr/bin/env python3
|
|
# -*- coding: utf-8 -*-
|
|
|
|
import argparse
|
|
import datetime
|
|
import json
|
|
import os
|
|
import pathlib
|
|
import shlex
|
|
import subprocess
|
|
import sys
|
|
from typing import List, Dict, Tuple, Optional, Set
|
|
|
|
# All paths are resolved relative to the directory the script is run from,
# so the script must be launched from the project root.
PROJECT_ROOT = pathlib.Path.cwd()
COMPOSE_BASE = PROJECT_ROOT / "compose.yaml"
COMPOSE_DEV = PROJECT_ROOT / "compose.dev.yaml"
COMPOSE_PROD = PROJECT_ROOT / "compose.prod.yaml"
COMPOSE_NPM = PROJECT_ROOT / "compose.npm.yaml"
COMPOSE_DBVR = PROJECT_ROOT / "compose.dbeaver.yaml"

GLOBAL_DEFAULT_PROJECT = "suitecoffee"  # global project (NPM/DBeaver)
|
|
|
|
# ---------- Shell utils ----------
|
|
|
|
def run(cmd: List[str], check=True, capture_output=True, text=True) -> subprocess.CompletedProcess:
    """Execute *cmd* and return the CompletedProcess (text output captured by default)."""
    return subprocess.run(
        cmd,
        check=check,
        capture_output=capture_output,
        text=text,
    )
|
|
|
|
def which(program: str) -> bool:
    """Return True when *program* can be resolved on PATH."""
    import shutil
    return shutil.which(program) is not None
|
|
|
|
# ---------- Docker volume discovery ----------
|
|
|
|
def docker_volume_ls_json(filters: List[str]) -> List[Dict[str, str]]:
    """Return entries from 'docker volume ls' (one JSON object per output line).

    Supports filters such as '--filter label=...'. Returns an empty list
    when the docker command fails; malformed JSON lines are skipped.
    """
    cmd = ["docker", "volume", "ls", "--format", "{{json .}}"]
    for flt in filters:
        cmd.extend(["--filter", flt])
    try:
        proc = run(cmd)
    except subprocess.CalledProcessError:
        return []

    parsed: List[Dict[str, str]] = []
    for raw in proc.stdout.splitlines():
        raw = raw.strip()
        if not raw:
            continue
        try:
            parsed.append(json.loads(raw))
        except json.JSONDecodeError:
            # Ignore any line docker emits that is not valid JSON.
            continue
    return parsed
|
|
|
|
def docker_volume_ls_names(filters: List[str]) -> List[str]:
    """Return only the volume names ('Name' field) matching *filters*."""
    return [name for row in docker_volume_ls_json(filters) if (name := row.get("Name"))]
|
|
|
|
def list_by_label_project(project: str) -> List[Dict[str, str]]:
    """List volumes labeled with the given compose project name."""
    label_filter = f"label=com.docker.compose.project={project}"
    return docker_volume_ls_json([label_filter])
|
|
|
|
def list_by_name_prefix(prefix: str) -> List[Dict[str, str]]:
    """Keep volumes named exactly *prefix* or starting with '<prefix>_'/'<prefix>-'."""
    matches: List[Dict[str, str]] = []
    for row in docker_volume_ls_json([]):
        name = row.get("Name")
        if not name:
            continue
        if name == prefix or name.startswith((prefix + "_", prefix + "-")):
            matches.append(row)
    return matches
|
|
|
|
def normalize_project_name(p: str) -> str:
    """Replace spaces with underscores; tolerates None/empty input."""
    if not p:
        return ""
    return p.replace(" ", "_")
|
|
|
|
# ---------- Compose config parsing ----------
|
|
|
|
def compose_config_json(files: List[pathlib.Path]) -> Optional[dict]:
    """Render the merged 'docker compose config' of *files* as JSON.

    Returns None when no files are given, any file is missing, or the
    docker invocation / JSON parse fails (best effort by design).
    """
    if not files:
        return None
    if any(not path.exists() for path in files):
        return None

    cmd = ["docker", "compose"]
    for path in files:
        cmd.extend(["-f", str(path)])
    cmd.extend(["config", "--format", "json"])
    try:
        return json.loads(run(cmd).stdout or "{}")
    except Exception:
        # Any failure (docker error, bad JSON) is treated as "no config".
        return None
|
|
|
|
def extract_short_volume_names_from_config(cfg: dict) -> Set[str]:
    """Collect short volume names from a canonical compose config.

    Sources: mounts of 'type: volume' under services[].volumes, plus the
    keys of the top-level 'volumes' mapping. Returns an empty set for a
    falsy *cfg*.
    """
    if not cfg:
        return set()

    names: Set[str] = set()

    # services[].volumes — in canonical JSON each mount is a dict with
    # 'type', 'source', 'target', ...
    for service in (cfg.get("services") or {}).values():
        for mount in service.get("volumes") or []:
            if not (isinstance(mount, dict) and mount.get("type") == "volume"):
                continue
            source = mount.get("source")
            if isinstance(source, str) and source:
                names.add(source)

    # Top-level 'volumes' mapping: the keys are the short names.
    top_level = cfg.get("volumes") or {}
    if isinstance(top_level, dict):
        names.update(key for key in top_level if isinstance(key, str) and key)

    return names
|
|
|
|
def docker_compose_name_from(files: List[pathlib.Path]) -> Optional[str]:
    """Return the compose project 'name' key from the merged config, if any."""
    cfg = compose_config_json(files)
    if cfg and isinstance(cfg, dict):
        return cfg.get("name") or None
    return None
|
|
|
|
def read_compose_project_from_env(env_path: pathlib.Path) -> Optional[str]:
    """Return COMPOSE_PROJECT_NAME from a dotenv-style file, or None.

    Blank lines and '#' comments are skipped; any read error is swallowed
    so a broken .env file behaves like "not set".
    """
    try:
        if not env_path.exists():
            return None
        for raw in env_path.read_text(encoding="utf-8").splitlines():
            entry = raw.strip()
            if not entry or entry.startswith("#"):
                continue
            if entry.startswith("COMPOSE_PROJECT_NAME="):
                return entry.split("=", 1)[1].strip()
    except Exception:
        pass
    return None
|
|
|
|
def base_folder_slug() -> str:
    """Lower-cased project directory name with spaces turned into underscores."""
    return PROJECT_ROOT.name.replace(" ", "_").lower()
|
|
|
|
def candidates_for_env(env: str) -> List[str]:
    """Ordered, de-duplicated project-name candidates for one environment.

    *env* is 'development' or 'production'; any other value yields [].
    Priority: compose 'name' key, then COMPOSE_PROJECT_NAME from the env
    file, then folder-slug variants.
    """
    slug = base_folder_slug()
    raw: List[str] = []
    if env == "development":
        raw = [
            docker_compose_name_from([COMPOSE_BASE, COMPOSE_DEV]),
            read_compose_project_from_env(PROJECT_ROOT / ".env.development"),
            f"{slug}_dev",
            f"{slug}-dev",
            slug,
        ]
    elif env == "production":
        raw = [
            docker_compose_name_from([COMPOSE_BASE, COMPOSE_PROD]),
            read_compose_project_from_env(PROJECT_ROOT / ".env.production"),
            f"{slug}_prod",
            f"{slug}-prod",
            slug,
        ]

    # De-duplicate preserving order, dropping falsy entries.
    seen = set()
    ordered: List[str] = []
    for candidate in raw:
        if candidate and candidate not in seen:
            seen.add(candidate)
            ordered.append(candidate)
    return ordered
|
|
|
|
def candidates_for_global() -> List[str]:
    """Project-name candidates for the global (NPM/DBeaver) compose stacks."""
    cand: List[str] = []

    def add(name: Optional[str]) -> None:
        # Append while de-duplicating and skipping falsy names.
        if name and name not in cand:
            cand.append(name)

    # Names declared by the global compose files themselves.
    if COMPOSE_NPM.exists():
        add(docker_compose_name_from([COMPOSE_NPM]))
    if COMPOSE_DBVR.exists():
        add(docker_compose_name_from([COMPOSE_DBVR]))

    # Expected fallbacks.
    add(GLOBAL_DEFAULT_PROJECT)
    add(base_folder_slug())
    return cand
|
|
|
|
# ---------- Nueva detección por grupo: COMPOSE + labels ----------
|
|
|
|
def detect_group_volumes_with_compose(filesets: List[List[pathlib.Path]],
                                      project_candidates: List[str]) -> Tuple[Optional[str], str, List[str]]:
    """
    Detect the docker volumes belonging to a group of compose files.

    filesets: list of lists of compose files to consider (dev=[base,dev],
              prod=[base,prod], global=[[npm], [dbeaver]] -> two sets whose
              short names are merged).

    Returns (selected_project, method, [volume_names]). 'method' describes
    how the volumes were found: 'compose+labels:<proj>', 'fallback:<how>',
    or 'none'.
    """
    # 1) Merge the short volume names of every fileset.
    shortnames: Set[str] = set()
    for files in filesets:
        cfg = compose_config_json(files)
        shortnames |= extract_short_volume_names_from_config(cfg)

    # 2) If there are short names, try each candidate project, matching
    #    volumes by the pair of compose labels (project + volume).
    if shortnames:
        for proj in project_candidates:
            # Look up each short name with both labels applied.
            found: List[str] = []
            for sn in sorted(shortnames):
                names = docker_volume_ls_names([
                    f"label=com.docker.compose.project={proj}",
                    f"label=com.docker.compose.volume={sn}"
                ])
                if names:
                    found.extend(names)
            # De-duplicate preserving order; first candidate with hits wins.
            if found:
                seen = set(); ordered = []
                for n in found:
                    if n not in seen:
                        seen.add(n); ordered.append(n)
                return proj, f"compose+labels:{proj}", ordered

    # 3) Fallback: any volume of the project (by label) or by name prefix.
    for proj in project_candidates:
        method, rows = discover_volumes_for_project(proj)
        if rows:
            return proj, f"fallback:{method}", [r.get("Name") for r in rows if r.get("Name")]

    # 4) Nothing found: report the first candidate (or None) with no volumes.
    first = project_candidates[0] if project_candidates else None
    return first, "none", []
|
|
|
|
def discover_volumes_for_project(project_raw: str) -> Tuple[str, List[Dict[str, str]]]:
    """Legacy fallback discovery, used by the CLI and as a last resort.

    Tries the compose project label first, then a name prefix, each with
    the normalized name and then its lower-cased variant. Returns
    (method_description, rows); ('none', []) when nothing matches.
    """
    norm = normalize_project_name(project_raw)
    lower = norm.lower()

    for candidate in (norm, lower):
        rows = list_by_label_project(candidate)
        if rows:
            return f"label:{candidate}", rows

    for candidate in (norm, lower):
        rows = list_by_name_prefix(candidate)
        if rows:
            return f"name-prefix:{candidate}", rows

    return "none", []
|
|
|
|
# ---------- Backup helpers ----------
|
|
|
|
def ensure_alpine_image():
    """Make sure alpine:latest exists locally, pulling it when absent."""
    try:
        run(["docker", "image", "inspect", "alpine:latest"])
        return
    except subprocess.CalledProcessError:
        pass
    # Image not present: fetch it, letting docker stream its own progress.
    print("Pulling alpine:latest ...")
    run(["docker", "pull", "alpine:latest"], check=True, capture_output=False)
|
|
|
|
def build_archive_name(project: str, volume_name: str, ts: str) -> str:
    """Build the .tar.gz file name, avoiding a duplicated project prefix.

    - When volume_name already starts with '<project>_' or '<project>-'
      (case-insensitive), it is used as-is.
    - Otherwise '<project>_' is prepended.
    Result: <project>_<shortname>-<ts>.tar.gz
    """
    token = project.lower().replace(" ", "_")
    already_prefixed = volume_name.lower().startswith((token + "_", token + "-"))
    stem = volume_name if already_prefixed else f"{token}_{volume_name}"
    return f"{stem}-{ts}.tar.gz"
|
|
|
|
def backup_volume(volume_name: str, out_dir: pathlib.Path, archive_name: str, dry_run: bool = False) -> int:
    """Tar-gzip one docker volume into out_dir/archive_name via an alpine container.

    The volume is mounted read-only; out_dir is created if needed.
    Returns the docker process exit code (always 0 on dry-run).
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    resolved = out_dir.resolve()
    target = resolved / archive_name

    docker_cmd = [
        "docker", "run", "--rm",
        "-v", f"{volume_name}:/volume:ro",
        "-v", f"{str(resolved)}:/backup",
        # "--user", f"{os.getuid()}:{os.getgid()}",
        "alpine:latest",
        "sh", "-lc",
        f"tar czf /backup/{shlex.quote(target.name)} -C /volume .",
    ]

    if dry_run:
        print("[DRY RUN] Would run:", " ".join(shlex.quote(part) for part in docker_cmd))
        return 0
    return subprocess.run(docker_cmd).returncode
|
|
|
|
def backup_explicit(volume_names: List[str], ts: str, output_dir: Optional[str], dry_run: bool, prefix_project: Optional[str]) -> int:
    """Back up exactly the volumes given in *volume_names*.

    - Default directory: ./docker-volumes-<ts>
    - Archive name: build_archive_name(prefix_project, volume_name, ts)
    Falsy names are skipped. Returns 0 on success, 1 if any volume failed.
    """
    target_dir = pathlib.Path(output_dir) if output_dir else (PROJECT_ROOT / f"docker-volumes-{ts}")
    if not dry_run:
        ensure_alpine_image()

    failed: List[str] = []
    for volume in volume_names:
        if not volume:
            continue
        archive = build_archive_name(prefix_project or "", volume, ts)
        print(f"Backing up volume: {volume} -> {archive}")
        code = backup_volume(volume, target_dir, archive, dry_run=dry_run)
        if code != 0:
            print(f" ERROR: backup failed for volume '{volume}' (exit code {code})", file=sys.stderr)
            failed.append(volume)

    if failed:
        print("\nCompleted with errors. Failed volumes:", ", ".join(failed))
        return 1
    print("\nAll done. Archives written to:", str(target_dir.resolve()))
    return 0
|
|
|
|
def backup_group(project_name: str, ts: str, output_dir: Optional[str] = None,
                 dry_run: bool = False, excludes: Optional[List[str]] = None) -> int:
    """Legacy fallback backup (label/prefix discovery) for a whole project.

    Discovers the project's volumes, prints a summary, applies *excludes*,
    then delegates the archiving loop to backup_explicit() so archive
    naming, default output directory, and error reporting stay consistent
    (the previous implementation duplicated that loop verbatim).
    Returns 0 on success or no-op, 1 when any volume backup failed.
    """
    method, rows = discover_volumes_for_project(project_name)

    print_header(f"Proyecto '{project_name}': {len(rows)} volumen(es) detectado(s) (método: {method})")
    for v in rows:
        print(" -", v.get("Name"))

    if not rows:
        warn("No hay volúmenes para respaldar.")
        return 0

    vols = [v.get("Name") for v in rows if v.get("Name")]
    if excludes:
        excl = set(excludes)
        vols = [n for n in vols if n not in excl]
        if not vols:
            warn("Tras aplicar exclusiones, no quedó nada por respaldar.")
            return 0

    # Same per-volume behavior as before: backup_explicit builds the same
    # archive names and uses the same default directory.
    return backup_explicit(vols, ts, output_dir, dry_run, project_name)
|
|
|
|
# ---------- UI helpers ----------
|
|
|
|
def yes_no(prompt: str, default="n") -> bool:
    """Ask a yes/no question on stdin; an empty answer returns the default.

    Accepts English (y/yes, n/no) and Spanish (s/si/sí) affirmatives;
    re-prompts on anything else.
    """
    default_is_yes = default.lower() == "y"
    hint = "[Y/n]" if default_is_yes else "[y/N]"
    while True:
        answer = input(f"{prompt} {hint} ").strip().lower()
        if not answer:
            return default_is_yes
        if answer in ("y", "yes", "s", "si", "sí"):
            return True
        if answer in ("n", "no"):
            return False
        print("Respuesta no reconocida. Por favor, responde con 'y' o 'n'.")
|
|
|
|
def print_header(title: str):
    """Print *title* framed by 60-character '=' rules."""
    rule = "=" * 60
    print("\n" + rule)
    print(title)
    print(rule + "\n")
|
|
|
|
def info(msg):
    """Print an informational bullet line."""
    print(f"• {msg}")
|
|
def ok(msg):
    """Print a success (check-mark) line."""
    print(f"✓ {msg}")
|
|
def warn(msg):
    """Print a warning line."""
    print(f"! {msg}")
|
|
def fail(msg):
    """Print an error marker and abort the program with exit status 1."""
    print(f"✗ {msg}")
    sys.exit(1)
|
|
|
|
# ---------- Menú interactivo ----------
|
|
|
|
def interactive_menu():
    """Interactive entry point: detect volume groups (dev/prod/global),
    print a summary, then offer a numbered backup menu on stdin.

    Exits the process via sys.exit() after the chosen action (or on quit);
    it never returns to the caller.
    """
    # Preconditions: docker binary present and daemon reachable.
    if not which("docker"):
        fail("ERROR: 'docker' no está en el PATH.")
    try:
        run(["docker", "version"], check=True, capture_output=True)
    except subprocess.CalledProcessError:
        fail("ERROR: No se puede hablar con el daemon de Docker. ¿Está corriendo? ¿Tu usuario está en el grupo 'docker'?")

    # DEV: base + dev compose files must both exist.
    dev_candidates = candidates_for_env("development") if COMPOSE_BASE.exists() and COMPOSE_DEV.exists() else []
    dev_proj, dev_method, dev_names = detect_group_volumes_with_compose(
        filesets=[[COMPOSE_BASE, COMPOSE_DEV]] if dev_candidates else [],
        project_candidates=dev_candidates
    )

    # PROD: base + prod compose files must both exist.
    prod_candidates = candidates_for_env("production") if COMPOSE_BASE.exists() and COMPOSE_PROD.exists() else []
    prod_proj, prod_method, prod_names = detect_group_volumes_with_compose(
        filesets=[[COMPOSE_BASE, COMPOSE_PROD]] if prod_candidates else [],
        project_candidates=prod_candidates
    )

    # GLOBAL = NPM + DBEAVER (merge the short names of both stacks).
    global_candidates = candidates_for_global()
    global_filesets = []
    if COMPOSE_NPM.exists():
        global_filesets.append([COMPOSE_NPM])
    if COMPOSE_DBVR.exists():
        global_filesets.append([COMPOSE_DBVR])
    glob_proj, glob_method, glob_names = detect_group_volumes_with_compose(
        filesets=global_filesets,
        project_candidates=global_candidates
    )

    # Summary of what was detected per group.
    print_header("Resumen de volúmenes detectados")
    if dev_proj:
        info(f"DESARROLLO ({dev_proj}): {len(dev_names)} volumen(es) (método: {dev_method})")
    else:
        info("DESARROLLO: archivos compose no encontrados.")
    if prod_proj:
        info(f"PRODUCCIÓN ({prod_proj}): {len(prod_names)} volumen(es) (método: {prod_method})")
    else:
        info("PRODUCCIÓN: archivos compose no encontrados.")
    if glob_proj:
        info(f"GLOBALES ({glob_proj}): {len(glob_names)} volumen(es) (método: {glob_method})")
    else:
        info("GLOBALES: no se detectaron archivos compose globales.")
    print()

    # Menu: options are numbered dynamically, only for detected groups.
    options = {}
    key = 1
    if dev_proj:
        print(f" {key}) Respaldar volúmenes de DESARROLLO ({dev_proj})")
        options[str(key)] = ("backup_explicit", dev_proj, dev_names); key += 1
    if prod_proj:
        print(f" {key}) Respaldar volúmenes de PRODUCCIÓN ({prod_proj})")
        options[str(key)] = ("backup_explicit", prod_proj, prod_names); key += 1
    if glob_proj:
        print(f" {key}) Respaldar volúmenes GLOBALES ({glob_proj})")
        options[str(key)] = ("backup_explicit", glob_proj, glob_names); key += 1

    # "ALL": deduplicated union by name (each volume backed up once).
    groups = []
    if dev_proj: groups.append( (dev_proj, dev_names) )
    if prod_proj: groups.append( (prod_proj, prod_names) )
    if glob_proj: groups.append( (glob_proj, glob_names) )

    if len(groups) >= 2:
        print(f" {key}) Respaldar TODOS los grupos detectados")
        options[str(key)] = ("backup_all_explicit", groups); key += 1

    print(f" {key}) Salir")
    exit_key = str(key)

    # One timestamp shared by all archives of this session.
    ts = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    while True:
        choice = input("> ").strip()
        if choice == exit_key:
            ok("Saliendo.")
            sys.exit(0)

        if choice not in options:
            print("Opción inválida.")
            continue

        action = options[choice][0]
        dry = yes_no("¿Dry-run (no escribir archivos)?", default="n")
        outd = input(f"Directorio de salida (vacío = ./docker-volumes-{ts}): ").strip() or None
        excl_input = input("Excluir volúmenes (nombres separados por coma, vacío = ninguno): ").strip()
        excludes = set(e.strip() for e in excl_input.split(",") if e.strip()) if excl_input else set()

        if action == "backup_explicit":
            # Single group: filter exclusions, then delegate.
            _, proj, names = options[choice]
            names = [n for n in names if n not in excludes]
            if not names:
                warn("No hay volúmenes para respaldar.")
                sys.exit(0)
            rc = backup_explicit(names, ts, output_dir=outd, dry_run=dry, prefix_project=proj)
            sys.exit(rc)

        elif action == "backup_all_explicit":
            # All groups: first group claiming a volume name wins, so each
            # volume is archived exactly once.
            _, groups_payload = options[choice]
            vol_to_proj: Dict[str, str] = {}
            for proj, names in groups_payload:
                for n in names:
                    if n not in excludes and n not in vol_to_proj:
                        vol_to_proj[n] = proj
            if not vol_to_proj:
                warn("No hay volúmenes para respaldar.")
                sys.exit(0)
            if not dry:
                ensure_alpine_image()
            out_dir = pathlib.Path(outd) if outd else (PROJECT_ROOT / f"docker-volumes-{ts}")
            failures = []
            for vname, proj in vol_to_proj.items():
                archive = build_archive_name(proj, vname, ts)
                print(f"Backing up volume: {vname} -> {archive}")
                rc = backup_volume(vname, out_dir, archive, dry_run=dry)
                if rc != 0:
                    print(f" ERROR: backup failed for volume '{vname}' (exit code {rc})", file=sys.stderr)
                    failures.append(vname)
            if failures:
                print("\nCompleted with errors. Failed volumes:", ", ".join(failures))
                sys.exit(1)
            else:
                print("\nAll done. Archives written to:", str(out_dir.resolve()))
                sys.exit(0)
|
|
|
|
# ---------- CLI legacy (se mantiene) ----------
|
|
|
|
def detect_project_name(args_project: Optional[str]) -> str:
    """Resolve the project name: CLI argument > COMPOSE_PROJECT_NAME > folder name."""
    if args_project:
        return args_project
    from_env = os.environ.get("COMPOSE_PROJECT_NAME")
    if from_env:
        return from_env
    return PROJECT_ROOT.name.replace(" ", "_")
|
|
|
|
def cli_main():
    """Legacy CLI entry point: discover a project's volumes by label or
    name prefix and archive each one.

    Falls back to the interactive menu when --menu is given or -p/--project
    is missing. Exit codes: 0 success/no volumes, 1 backup failures,
    2 environment problems (docker missing or daemon unreachable).
    """
    parser = argparse.ArgumentParser(description="Export (compress) every Docker volume of a Docker Compose project.")
    parser.add_argument("-p", "--project", help="Compose project or prefix (see --discovery).")
    parser.add_argument("-o", "--output", help="Output directory (default: ./docker-volumes-<timestamp>).")
    parser.add_argument("--exclude", nargs="*", default=[], help="Volume names to exclude (space-separated).")
    parser.add_argument("--dry-run", action="store_true", help="Show what would be done without doing it.")
    parser.add_argument("--timestamp", default=datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
                        help="Timestamp to embed into filenames (default: current time).")
    parser.add_argument("--discovery", choices=["auto","label","name"], default="auto",
                        help="How to discover volumes: 'label' (strict), 'name' (prefix), or 'auto' (default).")
    parser.add_argument("--list-only", action="store_true", help="Only list volumes that would be backed up and exit.")
    parser.add_argument("--menu", action="store_true", help="Launch interactive menu instead of CLI behavior.")
    args = parser.parse_args()
    # NOTE(review): --discovery is accepted but never consulted below; the
    # flow always behaves like 'auto' (label first, then name prefix).

    if args.menu or not args.project:
        interactive_menu()
        return

    if not which("docker"):
        print("ERROR: 'docker' not on PATH.", file=sys.stderr)
        sys.exit(2)

    project_raw = detect_project_name(args.project)
    project_norm = normalize_project_name(project_raw)
    project_lower = project_norm.lower()
    ts = args.timestamp
    out_dir = pathlib.Path(args.output) if args.output else (PROJECT_ROOT / f"docker-volumes-{ts}")

    try:
        run(["docker", "version"], check=True, capture_output=True)
    except subprocess.CalledProcessError:
        print("ERROR: Docker daemon not reachable.", file=sys.stderr)
        sys.exit(2)

    # Legacy discovery by label/prefix (kept for backward compatibility):
    # label with normalized name, then lower-cased; then name prefix with
    # normalized name, then lower-cased.
    selected = []
    method_used = None
    vols = list_by_label_project(project_norm)
    if vols:
        selected = vols; method_used = f"label:{project_norm}"
    else:
        vols2 = list_by_label_project(project_lower)
        if vols2:
            selected = vols2; method_used = f"label:{project_lower}"
    if not selected:
        by_name = list_by_name_prefix(project_norm)
        if by_name:
            selected = by_name; method_used = f"name-prefix:{project_norm}"
        else:
            by_name2 = list_by_name_prefix(project_lower)
            if by_name2:
                selected = by_name2; method_used = f"name-prefix:{project_lower}"

    if not selected:
        print(f"No volumes found for project/prefix '{project_raw}'.")
        sys.exit(0)

    exclude_set = set(args.exclude or [])
    # Fix: also drop rows without a 'Name' so None never reaches
    # backup_volume (previously only exclusions were filtered).
    names = [v.get("Name") for v in selected if v.get("Name") and v.get("Name") not in exclude_set]

    print(f"Discovery method: {method_used}")
    print(f"Volumes discovered: {len(names)}")
    for n in names:
        print(" -", n)

    if args.list_only:
        return

    if not args.dry_run:
        ensure_alpine_image()

    failures = []
    for vname in names:
        archive = build_archive_name(project_lower, vname, ts)
        print(f"Backing up volume: {vname} -> {archive}")
        rc = backup_volume(vname, out_dir, archive, dry_run=args.dry_run)
        if rc != 0:
            print(f" ERROR: backup failed for volume '{vname}' (exit code {rc})", file=sys.stderr)
            failures.append(vname)

    if failures:
        print("\nCompleted with errors. Failed volumes:", ", ".join(failures))
        sys.exit(1)
    else:
        print("\nAll done. Archives written to:", str(out_dir.resolve()))
|
|
|
|
# ---------- Entry point ----------
|
|
|
|
if __name__ == "__main__":
    # Bare invocation -> interactive menu; any argument -> argparse CLI
    # (cli_main itself falls back to the menu on --menu or a missing -p).
    if len(sys.argv) == 1:
        interactive_menu()
    else:
        cli_main()
|