Compare commits

3 commits: 492d844523 ... c42814f963

| Author | SHA1 | Date |
|---|---|---|
| | c42814f963 | |
| | 0d1de7f7e2 | |
| | b34433a71e | |
.gitignore (vendored, 3 changes)

@@ -1,6 +1,9 @@
# Ignore dependency directories
node_modules/

# Ignore the backed-up volume directories
docker-volumes*

# Ignore the database folders
.db/
backup_compose_volumes.py (new file, 633 lines)

@@ -0,0 +1,633 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import datetime
import json
import os
import pathlib
import shlex
import subprocess
import sys
from typing import List, Dict, Tuple, Optional, Set

PROJECT_ROOT = pathlib.Path.cwd()
COMPOSE_BASE = PROJECT_ROOT / "compose.yaml"
COMPOSE_DEV = PROJECT_ROOT / "compose.dev.yaml"
COMPOSE_PROD = PROJECT_ROOT / "compose.prod.yaml"
COMPOSE_NPM = PROJECT_ROOT / "compose.npm.yaml"
COMPOSE_DBVR = PROJECT_ROOT / "compose.dbeaver.yaml"

GLOBAL_DEFAULT_PROJECT = "suitecoffee"  # global project (NPM/DBeaver)

# ---------- Shell utils ----------

def run(cmd: List[str], check=True, capture_output=True, text=True) -> subprocess.CompletedProcess:
    return subprocess.run(cmd, check=check, capture_output=capture_output, text=text)

def which(program: str) -> bool:
    from shutil import which as _which
    return _which(program) is not None

# ---------- Docker volume discovery ----------

def docker_volume_ls_json(filters: List[str]) -> List[Dict[str, str]]:
    """
    Returns the entries of 'docker volume ls' (one JSON object per line).
    Supports filters such as '--filter label=...'.
    """
    cmd = ["docker", "volume", "ls", "--format", "{{json .}}"]
    for f in filters:
        cmd += ["--filter", f]
    try:
        cp = run(cmd)
    except subprocess.CalledProcessError:
        return []
    out = []
    for line in cp.stdout.splitlines():
        line = line.strip()
        if not line:
            continue
        try:
            out.append(json.loads(line))
        except json.JSONDecodeError:
            pass
    return out

def docker_volume_ls_names(filters: List[str]) -> List[str]:
    """Returns only the names (Name) matching the given filters."""
    rows = docker_volume_ls_json(filters)
    names = []
    for v in rows:
        name = v.get("Name")
        if name:
            names.append(name)
    return names

def list_by_label_project(project: str) -> List[Dict[str, str]]:
    return docker_volume_ls_json([f"label=com.docker.compose.project={project}"])

def list_by_name_prefix(prefix: str) -> List[Dict[str, str]]:
    vols = docker_volume_ls_json([])
    keep = []
    for v in vols:
        name = v.get("Name")
        if not name:
            continue
        if name.startswith(prefix + "_") or name.startswith(prefix + "-") or name == prefix:
            keep.append(v)
    return keep

def normalize_project_name(p: str) -> str:
    return (p or "").replace(" ", "_")

# ---------- Compose config parsing ----------

def compose_config_json(files: List[pathlib.Path]) -> Optional[dict]:
    if not files or not all(p.exists() for p in files):
        return None
    cmd = ["docker", "compose"]
    for f in files:
        cmd += ["-f", str(f)]
    cmd += ["config", "--format", "json"]
    try:
        cp = run(cmd)
        return json.loads(cp.stdout or "{}")
    except Exception:
        return None

def extract_short_volume_names_from_config(cfg: dict) -> Set[str]:
    """
    Extracts the short names of volumes used in services[].volumes (type: volume)
    and the keys of the top-level 'volumes' section.
    """
    names: Set[str] = set()
    if not cfg:
        return names

    # services[].volumes
    services = cfg.get("services") or {}
    for svc in services.values():
        vols = svc.get("volumes") or []
        for m in vols:
            # in the canonical JSON, each mount is a dict with 'type', 'source', 'target', ...
            if isinstance(m, dict) and m.get("type") == "volume":
                src = m.get("source")
                if isinstance(src, str) and src:
                    names.add(src)

    # top-level volumes (keys)
    top_vols = cfg.get("volumes") or {}
    if isinstance(top_vols, dict):
        for k in top_vols.keys():
            if isinstance(k, str) and k:
                names.add(k)

    return names

def docker_compose_name_from(files: List[pathlib.Path]) -> Optional[str]:
    cfg = compose_config_json(files)
    if cfg and isinstance(cfg, dict):
        name = cfg.get("name")
        if name:
            return name
    return None

def read_compose_project_from_env(env_path: pathlib.Path) -> Optional[str]:
    try:
        if env_path.exists():
            for line in env_path.read_text(encoding="utf-8").splitlines():
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                if line.startswith("COMPOSE_PROJECT_NAME="):
                    return line.split("=", 1)[1].strip()
    except Exception:
        pass
    return None

def base_folder_slug() -> str:
    return PROJECT_ROOT.name.lower().replace(" ", "_")

def candidates_for_env(env: str) -> List[str]:
    cand: List[str] = []
    if env == "development":
        n1 = docker_compose_name_from([COMPOSE_BASE, COMPOSE_DEV])
        n2 = read_compose_project_from_env(PROJECT_ROOT / ".env.development")
        n3 = f"{base_folder_slug()}_dev"
        n4 = f"{base_folder_slug()}-dev"
        cand.extend([n1, n2, n3, n4, base_folder_slug()])
    elif env == "production":
        n1 = docker_compose_name_from([COMPOSE_BASE, COMPOSE_PROD])
        n2 = read_compose_project_from_env(PROJECT_ROOT / ".env.production")
        n3 = f"{base_folder_slug()}_prod"
        n4 = f"{base_folder_slug()}-prod"
        cand.extend([n1, n2, n3, n4, base_folder_slug()])
    # dedup, preserving order
    seen = set(); ordered = []
    for x in cand:
        if x and x not in seen:
            seen.add(x); ordered.append(x)
    return ordered

def candidates_for_global() -> List[str]:
    cand: List[str] = []
    # names taken from the global compose files
    if COMPOSE_NPM.exists():
        n = docker_compose_name_from([COMPOSE_NPM])
        if n: cand.append(n)
    if COMPOSE_DBVR.exists():
        n = docker_compose_name_from([COMPOSE_DBVR])
        if n and n not in cand: cand.append(n)
    # expected fallbacks
    if GLOBAL_DEFAULT_PROJECT not in cand: cand.append(GLOBAL_DEFAULT_PROJECT)
    bf = base_folder_slug()
    if bf not in cand: cand.append(bf)
    return cand

# ---------- New per-group detection: COMPOSE + labels ----------

def detect_group_volumes_with_compose(filesets: List[List[pathlib.Path]],
                                      project_candidates: List[str]) -> Tuple[Optional[str], str, List[str]]:
    """
    filesets: list of lists of compose files to consider (dev=[base,dev], prod=[base,prod],
              global=[[npm], [dbeaver]] -> two sets whose short names are merged).
    Returns (selected_project, method, [volume_names]).
    """
    # 1) Merge the short names from every fileset
    shortnames: Set[str] = set()
    for files in filesets:
        cfg = compose_config_json(files)
        shortnames |= extract_short_volume_names_from_config(cfg)

    # 2) If there are short names, try to match by (project, label.volume)
    if shortnames:
        for proj in project_candidates:
            # Look up each short name with both labels
            found: List[str] = []
            for sn in sorted(shortnames):
                names = docker_volume_ls_names([
                    f"label=com.docker.compose.project={proj}",
                    f"label=com.docker.compose.volume={sn}"
                ])
                if names:
                    found.extend(names)
            # dedup, preserving order
            if found:
                seen = set(); ordered = []
                for n in found:
                    if n not in seen:
                        seen.add(n); ordered.append(n)
                return proj, f"compose+labels:{proj}", ordered

    # 3) Fallback: try any volume of the project (by label) or by name prefix
    for proj in project_candidates:
        method, rows = discover_volumes_for_project(proj)
        if rows:
            return proj, f"fallback:{method}", [r.get("Name") for r in rows if r.get("Name")]

    # 4) Nothing found
    first = project_candidates[0] if project_candidates else None
    return first, "none", []

def discover_volumes_for_project(project_raw: str) -> Tuple[str, List[Dict[str, str]]]:
    """
    Previous fallback method: by project label and by name prefix (for the CLI and fallback).
    """
    project_norm = normalize_project_name(project_raw)
    project_lower = project_norm.lower()

    vols = list_by_label_project(project_norm)
    if vols:
        return f"label:{project_norm}", vols

    vols2 = list_by_label_project(project_lower)
    if vols2:
        return f"label:{project_lower}", vols2

    by_name = list_by_name_prefix(project_norm)
    if by_name:
        return f"name-prefix:{project_norm}", by_name

    by_name2 = list_by_name_prefix(project_lower)
    if by_name2:
        return f"name-prefix:{project_lower}", by_name2

    return "none", []

# ---------- Backup helpers ----------

def ensure_alpine_image():
    try:
        run(["docker", "image", "inspect", "alpine:latest"])
    except subprocess.CalledProcessError:
        print("Pulling alpine:latest ...")
        run(["docker", "pull", "alpine:latest"], check=True, capture_output=False)

def build_archive_name(project: str, volume_name: str, ts: str) -> str:
    """
    Builds the .tar.gz name, avoiding a duplicated project prefix.
    - If volume_name already starts with '<project>_' or '<project>-', it is used as-is.
    - Otherwise, '<project>_' is prepended.
    Result: <project>_<shortname>-<ts>.tar.gz
    """
    proj_token = project.lower().replace(" ", "_")
    v_lower = volume_name.lower()
    if v_lower.startswith(proj_token + "_") or v_lower.startswith(proj_token + "-"):
        base = volume_name
    else:
        base = f"{proj_token}_{volume_name}"
    return f"{base}-{ts}.tar.gz"
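
# A couple of concrete outputs (derived from the rules above), assuming
# ts = "20250101-120000":
#   build_archive_name("suitecoffee", "suitecoffee_npm_data", ts)
#       -> "suitecoffee_npm_data-20250101-120000.tar.gz"    (prefix kept as-is)
#   build_archive_name("suitecoffee", "dbeaver_logs", ts)
#       -> "suitecoffee_dbeaver_logs-20250101-120000.tar.gz" (prefix prepended)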

def backup_volume(volume_name: str, out_dir: pathlib.Path, archive_name: str, dry_run: bool = False) -> int:
    out_dir.mkdir(parents=True, exist_ok=True)
    out_dir_abs = out_dir.resolve()
    out_path = out_dir_abs / archive_name
    docker_cmd = [
        "docker", "run", "--rm",
        "-v", f"{volume_name}:/volume:ro",
        "-v", f"{str(out_dir_abs)}:/backup",
        # "--user", f"{os.getuid()}:{os.getgid()}",
        "alpine:latest",
        "sh", "-lc",
        f"tar czf /backup/{shlex.quote(out_path.name)} -C /volume ."
    ]
    if dry_run:
        print("[DRY RUN] Would run:", " ".join(shlex.quote(c) for c in docker_cmd))
        return 0
    cp = subprocess.run(docker_cmd)
    return cp.returncode
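
# For reference, the command assembled above is equivalent to:
#   docker run --rm -v <volume>:/volume:ro -v <out_dir>:/backup alpine:latest \
#       sh -lc 'tar czf /backup/<archive_name> -C /volume .'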

def backup_explicit(volume_names: List[str], ts: str, output_dir: Optional[str], dry_run: bool, prefix_project: Optional[str]) -> int:
    """
    Backs up exactly the given volumes.
    - Default directory: ./docker-volumes-<ts>
    - Archive name: build_archive_name(prefix_project, volume_name, ts)
    """
    out_dir = pathlib.Path(output_dir) if output_dir else (PROJECT_ROOT / f"docker-volumes-{ts}")
    if not dry_run:
        ensure_alpine_image()

    failures = []
    for vname in volume_names:
        if not vname:
            continue
        archive = build_archive_name(prefix_project or "", vname, ts)
        print(f"Backing up volume: {vname} -> {archive}")
        rc = backup_volume(vname, out_dir, archive, dry_run=dry_run)
        if rc != 0:
            print(f" ERROR: backup failed for volume '{vname}' (exit code {rc})", file=sys.stderr)
            failures.append(vname)
    if failures:
        print("\nCompleted with errors. Failed volumes:", ", ".join(failures))
        return 1
    else:
        print("\nAll done. Archives written to:", str(out_dir.resolve()))
        return 0

def backup_group(project_name: str, ts: str, output_dir: Optional[str] = None,
                 dry_run: bool = False, excludes: Optional[List[str]] = None) -> int:
    """
    Legacy fallback (label/prefix). Keeps the same naming and default directory.
    """
    method, rows = discover_volumes_for_project(project_name)

    print_header(f"Project '{project_name}': {len(rows)} volume(s) detected (method: {method})")
    for v in rows:
        print(" -", v.get("Name"))

    if not rows:
        warn("No volumes to back up.")
        return 0

    vols = [v.get("Name") for v in rows if v.get("Name")]
    if excludes:
        excl = set(excludes)
        vols = [n for n in vols if n not in excl]
        if not vols:
            warn("After applying exclusions, nothing is left to back up.")
            return 0

    out_dir = pathlib.Path(output_dir) if output_dir else (PROJECT_ROOT / f"docker-volumes-{ts}")
    if not dry_run:
        ensure_alpine_image()

    failures = []
    for vname in vols:
        archive = build_archive_name(project_name, vname, ts)
        print(f"Backing up volume: {vname} -> {archive}")
        rc = backup_volume(vname, out_dir, archive, dry_run=dry_run)
        if rc != 0:
            print(f" ERROR: backup failed for volume '{vname}' (exit code {rc})", file=sys.stderr)
            failures.append(vname)

    if failures:
        print("\nCompleted with errors. Failed volumes:", ", ".join(failures))
        return 1
    else:
        print("\nAll done. Archives written to:", str(out_dir.resolve()))
        return 0

# ---------- UI helpers ----------

def yes_no(prompt: str, default="n") -> bool:
    default = default.lower()
    hint = "[Y/n]" if default == "y" else "[y/N]"
    while True:
        resp = input(f"{prompt} {hint} ").strip().lower()
        if not resp:
            return default == "y"
        if resp in ("y","yes","s","si","sí"):  # also accepts Spanish 's'/'si'/'sí'
            return True
        if resp in ("n","no"):
            return False
        print("Unrecognized answer. Please answer 'y' or 'n'.")

def print_header(title: str):
    print("\n" + "=" * 60)
    print(title)
    print("=" * 60 + "\n")

def info(msg): print(f"• {msg}")
def ok(msg): print(f"✓ {msg}")
def warn(msg): print(f"! {msg}")
def fail(msg):
    print(f"✗ {msg}")
    sys.exit(1)

# ---------- Interactive menu ----------

def interactive_menu():
    if not which("docker"):
        fail("ERROR: 'docker' is not on the PATH.")
    try:
        run(["docker", "version"], check=True, capture_output=True)
    except subprocess.CalledProcessError:
        fail("ERROR: Cannot talk to the Docker daemon. Is it running? Is your user in the 'docker' group?")

    # DEV
    dev_candidates = candidates_for_env("development") if COMPOSE_BASE.exists() and COMPOSE_DEV.exists() else []
    dev_proj, dev_method, dev_names = detect_group_volumes_with_compose(
        filesets=[[COMPOSE_BASE, COMPOSE_DEV]] if dev_candidates else [],
        project_candidates=dev_candidates
    )

    # PROD
    prod_candidates = candidates_for_env("production") if COMPOSE_BASE.exists() and COMPOSE_PROD.exists() else []
    prod_proj, prod_method, prod_names = detect_group_volumes_with_compose(
        filesets=[[COMPOSE_BASE, COMPOSE_PROD]] if prod_candidates else [],
        project_candidates=prod_candidates
    )

    # GLOBAL = NPM + DBEAVER (merge the short names of both)
    global_candidates = candidates_for_global()
    global_filesets = []
    if COMPOSE_NPM.exists():
        global_filesets.append([COMPOSE_NPM])
    if COMPOSE_DBVR.exists():
        global_filesets.append([COMPOSE_DBVR])
    glob_proj, glob_method, glob_names = detect_group_volumes_with_compose(
        filesets=global_filesets,
        project_candidates=global_candidates
    )

    # Summary
    print_header("Summary of detected volumes")
    if dev_proj:
        info(f"DEVELOPMENT ({dev_proj}): {len(dev_names)} volume(s) (method: {dev_method})")
    else:
        info("DEVELOPMENT: compose files not found.")
    if prod_proj:
        info(f"PRODUCTION ({prod_proj}): {len(prod_names)} volume(s) (method: {prod_method})")
    else:
        info("PRODUCTION: compose files not found.")
    if glob_proj:
        info(f"GLOBAL ({glob_proj}): {len(glob_names)} volume(s) (method: {glob_method})")
    else:
        info("GLOBAL: no global compose files detected.")
    print()

    # Menu
    options = {}
    key = 1
    if dev_proj:
        print(f" {key}) Back up DEVELOPMENT volumes ({dev_proj})")
        options[str(key)] = ("backup_explicit", dev_proj, dev_names); key += 1
    if prod_proj:
        print(f" {key}) Back up PRODUCTION volumes ({prod_proj})")
        options[str(key)] = ("backup_explicit", prod_proj, prod_names); key += 1
    if glob_proj:
        print(f" {key}) Back up GLOBAL volumes ({glob_proj})")
        options[str(key)] = ("backup_explicit", glob_proj, glob_names); key += 1

    # ALL: union deduplicated by name (each volume is backed up once)
    groups = []
    if dev_proj: groups.append( (dev_proj, dev_names) )
    if prod_proj: groups.append( (prod_proj, prod_names) )
    if glob_proj: groups.append( (glob_proj, glob_names) )

    if len(groups) >= 2:
        print(f" {key}) Back up ALL detected groups")
        options[str(key)] = ("backup_all_explicit", groups); key += 1

    print(f" {key}) Exit")
    exit_key = str(key)

    ts = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    while True:
        choice = input("> ").strip()
        if choice == exit_key:
            ok("Exiting.")
            sys.exit(0)

        if choice not in options:
            print("Invalid option.")
            continue

        action = options[choice][0]
        dry = yes_no("Dry run (write no files)?", default="n")
        outd = input(f"Output directory (empty = ./docker-volumes-{ts}): ").strip() or None
        excl_input = input("Volumes to exclude (comma-separated names, empty = none): ").strip()
        excludes = set(e.strip() for e in excl_input.split(",") if e.strip()) if excl_input else set()

        if action == "backup_explicit":
            _, proj, names = options[choice]
            names = [n for n in names if n not in excludes]
            if not names:
                warn("No volumes to back up.")
                sys.exit(0)
            rc = backup_explicit(names, ts, output_dir=outd, dry_run=dry, prefix_project=proj)
            sys.exit(rc)

        elif action == "backup_all_explicit":
            _, groups_payload = options[choice]
            vol_to_proj: Dict[str, str] = {}
            for proj, names in groups_payload:
                for n in names:
                    if n not in excludes and n not in vol_to_proj:
                        vol_to_proj[n] = proj
            if not vol_to_proj:
                warn("No volumes to back up.")
                sys.exit(0)
            if not dry:
                ensure_alpine_image()
            out_dir = pathlib.Path(outd) if outd else (PROJECT_ROOT / f"docker-volumes-{ts}")
            failures = []
            for vname, proj in vol_to_proj.items():
                archive = build_archive_name(proj, vname, ts)
                print(f"Backing up volume: {vname} -> {archive}")
                rc = backup_volume(vname, out_dir, archive, dry_run=dry)
                if rc != 0:
                    print(f" ERROR: backup failed for volume '{vname}' (exit code {rc})", file=sys.stderr)
                    failures.append(vname)
            if failures:
                print("\nCompleted with errors. Failed volumes:", ", ".join(failures))
                sys.exit(1)
            else:
                print("\nAll done. Archives written to:", str(out_dir.resolve()))
                sys.exit(0)

# ---------- Legacy CLI (kept) ----------

def detect_project_name(args_project: Optional[str]) -> str:
    if args_project:
        return args_project
    env_name = os.environ.get("COMPOSE_PROJECT_NAME")
    if env_name:
        return env_name
    return PROJECT_ROOT.name.replace(" ", "_")

def cli_main():
    parser = argparse.ArgumentParser(description="Export (compress) every Docker volume of a Docker Compose project.")
    parser.add_argument("-p", "--project", help="Compose project or prefix (see --discovery).")
    parser.add_argument("-o", "--output", help="Output directory (default: ./docker-volumes-<timestamp>).")
    parser.add_argument("--exclude", nargs="*", default=[], help="Volume names to exclude (space-separated).")
    parser.add_argument("--dry-run", action="store_true", help="Show what would be done without doing it.")
    parser.add_argument("--timestamp", default=datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
                        help="Timestamp to embed into filenames (default: current time).")
    parser.add_argument("--discovery", choices=["auto","label","name"], default="auto",
                        help="How to discover volumes: 'label' (strict), 'name' (prefix), or 'auto' (default).")
    parser.add_argument("--list-only", action="store_true", help="Only list volumes that would be backed up and exit.")
    parser.add_argument("--menu", action="store_true", help="Launch interactive menu instead of CLI behavior.")
    args = parser.parse_args()

    if args.menu or not args.project:
        interactive_menu()
        return

    if not which("docker"):
        print("ERROR: 'docker' not on PATH.", file=sys.stderr)
        sys.exit(2)

    project_raw = detect_project_name(args.project)
    project_norm = normalize_project_name(project_raw)
    project_lower = project_norm.lower()
    ts = args.timestamp
    out_dir = pathlib.Path(args.output) if args.output else (PROJECT_ROOT / f"docker-volumes-{ts}")

    try:
        run(["docker", "version"], check=True, capture_output=True)
    except subprocess.CalledProcessError:
        print("ERROR: Docker daemon not reachable.", file=sys.stderr)
        sys.exit(2)

    # Legacy discovery by label/prefix (kept for compatibility)
    selected = []
    method_used = None
    vols = list_by_label_project(project_norm)
    if vols:
        selected = vols; method_used = f"label:{project_norm}"
    else:
        vols2 = list_by_label_project(project_lower)
        if vols2:
            selected = vols2; method_used = f"label:{project_lower}"
    if not selected:
        by_name = list_by_name_prefix(project_norm)
        if by_name:
            selected = by_name; method_used = f"name-prefix:{project_norm}"
        else:
            by_name2 = list_by_name_prefix(project_lower)
            if by_name2:
                selected = by_name2; method_used = f"name-prefix:{project_lower}"

    if not selected:
        print(f"No volumes found for project/prefix '{project_raw}'.")
        sys.exit(0)

    exclude_set = set(args.exclude or [])
    names = [v.get("Name") for v in selected if v.get("Name") and v.get("Name") not in exclude_set]

    print(f"Discovery method: {method_used}")
    print(f"Volumes discovered: {len(names)}")
    for n in names:
        print(" -", n)

    if args.list_only:
        return

    if not args.dry_run:
        ensure_alpine_image()

    failures = []
    for vname in names:
        archive = build_archive_name(project_lower, vname, ts)
        print(f"Backing up volume: {vname} -> {archive}")
        rc = backup_volume(vname, out_dir, archive, dry_run=args.dry_run)
        if rc != 0:
            print(f" ERROR: backup failed for volume '{vname}' (exit code {rc})", file=sys.stderr)
            failures.append(vname)

    if failures:
        print("\nCompleted with errors. Failed volumes:", ", ".join(failures))
        sys.exit(1)
    else:
        print("\nAll done. Archives written to:", str(out_dir.resolve()))

# ---------- Entry point ----------

if __name__ == "__main__":
    if len(sys.argv) == 1:
        interactive_menu()
    else:
        cli_main()
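Before running a real backup, the script's `--list-only` flag makes it easy to verify what discovery will pick up. A minimal sketch, assuming the script lives in the working directory and `suitecoffee` is the project label or prefix:

```python
# List the volumes the backup script would archive, without writing anything.
import subprocess

result = subprocess.run(
    ["python3", "backup_compose_volumes.py",
     "--project", "suitecoffee",  # assumed project label/prefix
     "--list-only"],
    capture_output=True, text=True,
)
print(result.stdout)
```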
compose.dbeaver.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
# compose.dbeaver.yaml

name: suitecoffee

services:
  dbeaver:
    image: dbeaver/cloudbeaver:latest
    ports:
      - 8978:8978
    environment:
      TZ: America/Montevideo
    volumes:
      - dbeaver_logs:/opt/cloudbeaver/logs
      - dbeaver_workspace:/opt/cloudbeaver/workspace
    networks:
      suitecoffee_prod_net:
        aliases:
          - prod-auth
          - prod-app
          - prod-db
          - prod-tenants
      suitecoffee_dev_net:
        aliases:
          - dev-auth
          - dev-app
          - dev-db
          - dev-tenants
    healthcheck:
      # probe CloudBeaver's own port (8978); port 81 belongs to the NPM container
      test: ["CMD-SHELL", "curl -fsS http://localhost:8978 || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s
    # networks:
    #   - suitecoffee_dev_net
    #   - suitecoffee_prod_net
    restart: unless-stopped

networks:
  suitecoffee_dev_net:
    external: true
  suitecoffee_prod_net:
    external: true

volumes:
  dbeaver_logs:
  dbeaver_workspace:
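This file and `compose.npm.yaml` (below) both declare `suitecoffee_dev_net` and `suitecoffee_prod_net` as `external: true`, so Compose expects them to exist before `docker compose up`. A minimal sketch that creates them when missing; the `ensure_network` helper is illustrative, not part of the repository:

```python
# Ensure the external networks referenced by the global compose files exist.
import subprocess

def ensure_network(name: str) -> None:
    # `docker network inspect` exits non-zero when the network does not exist.
    probe = subprocess.run(["docker", "network", "inspect", name],
                           capture_output=True, text=True)
    if probe.returncode != 0:
        subprocess.run(["docker", "network", "create", name], check=True)

for net in ("suitecoffee_dev_net", "suitecoffee_prod_net"):
    ensure_network(net)
```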
compose.dev.yaml (new file, 110 lines)

@@ -0,0 +1,110 @@
# compose.dev.yaml
# Docker Compose overrides for the development environment.


services:

  app:
    # depends_on:
    #   db:
    #     condition: service_healthy
    #   tenants:
    #     condition: service_healthy
    image: node:20-bookworm
    expose:
      - ${APP_LOCAL_PORT}
    working_dir: /app
    user: "${UID:-1000}:${GID:-1000}"
    volumes:
      - ./services/app:/app:rw
      - ./services/app/node_modules:/app/node_modules
    env_file:
      - ./services/app/.env.development
    environment:
      - NODE_ENV=${NODE_ENV}
    # healthcheck:
    #   # IMPORTANT: make sure curl is installed in the app image (see note below)
    #   test: ["CMD-SHELL", "curl -fsS http://localhost:${APP_DOCKER_PORT}/health || exit 1"]
    #   interval: 10s
    #   timeout: 3s
    #   retries: 10
    #   start_period: 20s
    # restart: unless-stopped
    networks:
      net:
        aliases: [dev-app]
    command: npm run dev

  auth:
    image: node:20-bookworm
    # depends_on:
    #   db:
    #     condition: service_healthy
    expose:
      - ${AUTH_LOCAL_PORT}
    working_dir: /app
    user: "${UID:-1000}:${GID:-1000}"
    volumes:
      - ./services/auth:/app:rw
      - ./services/auth/node_modules:/app/node_modules
    env_file:
      - ./services/auth/.env.development
    environment:
      - NODE_ENV=${NODE_ENV}
    command: npm run dev
    # restart: unless-stopped
    # healthcheck:
    #   test: ["CMD-SHELL", "curl -fsS http://localhost:${AUTH_DOCKER_PORT}/health || exit 1"]
    #   interval: 10s
    #   timeout: 3s
    #   retries: 10
    #   start_period: 15s
    networks:
      net:
        aliases: [dev-auth]

  db:
    image: postgres:16
    environment:
      POSTGRES_DB: ${DB_NAME}
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASS}
    volumes:
      - suitecoffee-db:/var/lib/postgresql/data
    # healthcheck:
    #   test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
    #   interval: 5s
    #   timeout: 3s
    #   retries: 20
    #   start_period: 10s
    networks:
      net:
        aliases: [dev-db]
    # restart: unless-stopped

  tenants:
    image: postgres:16
    environment:
      POSTGRES_DB: ${TENANTS_DB_NAME}
      POSTGRES_USER: ${TENANTS_DB_USER}
      POSTGRES_PASSWORD: ${TENANTS_DB_PASS}
    volumes:
      - tenants-db:/var/lib/postgresql/data
    # healthcheck:
    #   test: ["CMD-SHELL", "pg_isready -U ${TENANTS_DB_USER} -d ${TENANTS_DB_NAME}"]
    #   interval: 5s
    #   timeout: 3s
    #   retries: 20
    #   start_period: 10s
    networks:
      net:
        aliases: [dev-tenants]
    # restart: unless-stopped

volumes:
  tenants-db:
  suitecoffee-db:

networks:
  net:
    driver: bridge
compose.npm.yaml (new file, 46 lines)

@@ -0,0 +1,46 @@
# compose.npm.yaml

name: suitecoffee

services:

  npm:
    image: jc21/nginx-proxy-manager:latest
    restart: unless-stopped
    ports:
      - "80:80"  # public HTTP
      - "81:81"  # admin UI
    environment:
      TZ: America/Montevideo
    volumes:
      - npm_data:/data
      - npm_letsencrypt:/etc/letsencrypt
    networks:
      suitecoffee_prod_net:
        aliases:
          - prod-auth
          - prod-app
          - prod-db
          - prod-tenants
      suitecoffee_dev_net:
        aliases:
          - dev-auth
          - dev-app
          - dev-db
          - dev-tenants
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:81 || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s

networks:
  suitecoffee_dev_net:
    external: true
  suitecoffee_prod_net:
    external: true

volumes:
  npm_data:
  npm_letsencrypt:
compose.prod.yaml (new file, 108 lines)

@@ -0,0 +1,108 @@
# compose.prod.yaml
# Docker Compose overrides for the production environment.


services:

  app:
    # depends_on:
    #   db:
    #     condition: service_healthy
    #   tenants:
    #     condition: service_healthy
    build:
      context: ./services/app
      dockerfile: Dockerfile.production
    expose:
      - ${APP_LOCAL_PORT}
    volumes:
      - ./services/app:/app
    env_file:
      - ./services/app/.env.production
    environment:
      - NODE_ENV=${NODE_ENV}
    # healthcheck:
    #   # IMPORTANT: make sure curl is installed in the app image (see note below)
    #   test: ["CMD-SHELL", "curl -fsS http://localhost:${APP_DOCKER_PORT}/health || exit 1"]
    #   interval: 10s
    #   timeout: 3s
    #   retries: 10
    #   start_period: 20s
    # restart: unless-stopped
    networks:
      net:
        aliases: [prod-app]
    command: npm run start

  auth:
    # depends_on:
    #   db:
    #     condition: service_healthy
    build:
      context: ./services/auth
      dockerfile: Dockerfile.production
    expose:
      - ${AUTH_LOCAL_PORT}
    volumes:
      - ./services/auth:/app
    env_file:
      - ./services/auth/.env.production
    environment:
      - NODE_ENV=${NODE_ENV}
    command: npm run start
    # restart: unless-stopped
    # healthcheck:
    #   test: ["CMD-SHELL", "curl -fsS http://localhost:${AUTH_DOCKER_PORT}/health || exit 1"]
    #   interval: 10s
    #   timeout: 3s
    #   retries: 10
    #   start_period: 15s
    networks:
      net:
        aliases: [prod-auth]

  db:
    image: postgres:16
    environment:
      POSTGRES_DB: ${DB_NAME}
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASS}
    volumes:
      - suitecoffee-db:/var/lib/postgresql/data
    # healthcheck:
    #   test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
    #   interval: 5s
    #   timeout: 3s
    #   retries: 20
    #   start_period: 10s
    networks:
      net:
        aliases: [prod-db]
    # restart: unless-stopped

  tenants:
    image: postgres:16
    environment:
      POSTGRES_DB: ${TENANTS_DB_NAME}
      POSTGRES_USER: ${TENANTS_DB_USER}
      POSTGRES_PASSWORD: ${TENANTS_DB_PASS}
    volumes:
      - tenants-db:/var/lib/postgresql/data
    # healthcheck:
    #   test: ["CMD-SHELL", "pg_isready -U ${TENANTS_DB_USER} -d ${TENANTS_DB_NAME}"]
    #   interval: 5s
    #   timeout: 3s
    #   retries: 20
    #   start_period: 10s
    networks:
      net:
        aliases: [prod-tenants]
    # restart: unless-stopped

volumes:
  tenants-db:
  suitecoffee-db:

networks:
  net:
    driver: bridge
compose.yaml (new file, 108 lines)

@@ -0,0 +1,108 @@
# compose.yaml
# Base Compose file.
name: ${COMPOSE_PROJECT_NAME:-suitecoffee}

services:

  app:
    depends_on:
      db:
        condition: service_healthy
      tenants:
        condition: service_healthy
    # build:
    #   context: ./services/app
    #   dockerfile: Dockerfile.production
    # expose:
    #   - ${APP_LOCAL_PORT}
    # volumes:
    #   - ./services/app:/app
    # env_file:
    #   - ./services/app/.env.production
    # environment:
    #   - NODE_ENV=${NODE_ENV}
    # command: npm run start
    healthcheck:
      # IMPORTANT: make sure curl is installed in the app image (see note below)
      test: ["CMD-SHELL", "curl -fsS http://localhost:${APP_DOCKER_PORT}/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s
    # networks:
    #   net:
    #     aliases: [prod-app]
    restart: unless-stopped

  auth:
    depends_on:
      db:
        condition: service_healthy
    # build:
    #   context: ./services/auth
    #   dockerfile: Dockerfile.production
    # expose:
    #   - ${AUTH_LOCAL_PORT}
    # volumes:
    #   - ./services/auth:/app
    # env_file:
    #   - ./services/auth/.env.production
    # environment:
    #   - NODE_ENV=${NODE_ENV}
    # command: npm run start
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:${AUTH_DOCKER_PORT}/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 15s
    # networks:
    #   net:
    #     aliases: [prod-auth]
    restart: unless-stopped

  db:
    image: postgres:16
    # environment:
    #   POSTGRES_DB: ${DB_NAME}
    #   POSTGRES_USER: ${DB_USER}
    #   POSTGRES_PASSWORD: ${DB_PASS}
    # volumes:
    #   - suitecoffee-db:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
      interval: 5s
      timeout: 3s
      retries: 20
      start_period: 10s
    # networks:
    #   net:
    #     aliases: [prod-db]
    restart: unless-stopped

  tenants:
    image: postgres:16
    # environment:
    #   POSTGRES_DB: ${TENANTS_DB_NAME}
    #   POSTGRES_USER: ${TENANTS_DB_USER}
    #   POSTGRES_PASSWORD: ${TENANTS_DB_PASS}
    # volumes:
    #   - tenants-db:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${TENANTS_DB_USER} -d ${TENANTS_DB_NAME}"]
      interval: 5s
      timeout: 3s
      retries: 20
      start_period: 10s
    # networks:
    #   net:
    #     aliases: [prod-tenants]
    restart: unless-stopped

# volumes:
#   tenants-db:
#   suitecoffee-db:

# networks:
#   net:
#     driver: bridge
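This base file is always combined with one of the overrides above; the backup script's detection path runs `docker compose config --format json` over such a file pair. A minimal sketch of the same call, assuming both files sit in the current directory:

```python
# Render the merged dev configuration, as backup_compose_volumes.py does.
import json
import subprocess

cp = subprocess.run(
    ["docker", "compose", "-f", "compose.yaml", "-f", "compose.dev.yaml",
     "config", "--format", "json"],
    capture_output=True, text=True, check=True,
)
cfg = json.loads(cp.stdout)
# Top-level volume short names, e.g. ['suitecoffee-db', 'tenants-db'].
print(sorted((cfg.get("volumes") or {}).keys()))
```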
docker-compose.override.yml (deleted file, 148 lines)

@@ -1,148 +0,0 @@
# docker-compose.override.yml
# Docker Compose for the development environment.


services:
  npm:
    image: jc21/nginx-proxy-manager:latest
    restart: unless-stopped
    depends_on:
      app:
        condition: service_healthy
      auth:
        condition: service_healthy
    ports:
      - "80:80"    # public HTTP
      - "81:81"    # NPM admin UI
      - "443:443"  # public HTTPS
    volumes:
      - npm_data:/data  # config + DB (SQLite)
      - npm_letsencrypt:/etc/letsencrypt
    networks:
      - suitecoffee-net

  app:
    depends_on:
      db:
        condition: service_healthy
      tenants:
        condition: service_healthy
    image: node:20-bookworm
    ports:
      - 3000:3000
    working_dir: /app
    user: "${UID:-1000}:${GID:-1000}"
    volumes:
      - ./services/app:/app:rw
      - ./services/app/node_modules:/app/node_modules
    env_file:
      - ./services/app/.env.development
    environment:
      - NODE_ENV=${NODE_ENV}
    command: npm run dev
    healthcheck:
      # IMPORTANT: make sure curl is installed in the app image (see note below)
      test: ["CMD-SHELL", "curl -fsS http://localhost:${APP_DOCKER_PORT}/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s
    restart: unless-stopped
    networks:
      - suitecoffee-net

  auth:
    image: node:20-bookworm
    depends_on:
      db:
        condition: service_healthy
    ports:
      - 4000:4000
    working_dir: /app
    user: "${UID:-1000}:${GID:-1000}"
    volumes:
      - ./services/auth:/app:rw
      - ./services/auth/node_modules:/app/node_modules
    env_file:
      - ./services/auth/.env.development
    environment:
      - NODE_ENV=${NODE_ENV}
    command: npm run dev
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:${AUTH_DOCKER_PORT}/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 15s
    networks:
      - suitecoffee-net

  db:
    image: postgres:16
    environment:
      POSTGRES_DB: ${DB_NAME}
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASS}
    ports:
      - ${DB_LOCAL_PORT}:${DB_DOCKER_PORT}
    volumes:
      - suitecoffee-db:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
      interval: 5s
      timeout: 3s
      retries: 20
      start_period: 10s
    networks:
      - suitecoffee-net

  tenants:
    image: postgres:16
    environment:
      POSTGRES_DB: ${TENANTS_DB_NAME}
      POSTGRES_USER: ${TENANTS_DB_USER}
      POSTGRES_PASSWORD: ${TENANTS_DB_PASS}
    volumes:
      - tenants-db:/var/lib/postgresql/data
    ports:
      - ${TENANTS_DB_LOCAL_PORT}:${TENANTS_DB_DOCKER_PORT}
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${TENANTS_DB_USER} -d ${TENANTS_DB_NAME}"]
      interval: 5s
      timeout: 3s
      retries: 20
      start_period: 10s
    networks:
      - suitecoffee-net

  dbeaver:
    image: dbeaver/cloudbeaver:latest
    # depends_on:
    #   tenants:
    #     condition: service_healthy
    #   db:
    #     condition: service_healthy
    restart: unless-stopped
    ports:
      - 8978:8978
    volumes:
      - dbeaver_logs:/opt/cloudbeaver/logs
      - dbeaver_workspace:/opt/cloudbeaver/workspace
    networks:
      - suitecoffee-net

volumes:
  tenants-db:
  suitecoffee-db:

  npm_data:
  npm_letsencrypt:
  dbeaver_logs:
  dbeaver_workspace:

networks:
  suitecoffee-net:
    driver: bridge
docker-compose.yml (deleted file, 138 lines)

@@ -1,138 +0,0 @@
# docker-compose.yml
# Docker Compose for the production environment.
name: ${COMPOSE_PROJECT_NAME:-suitecoffee}

services:
  npm:
    image: jc21/nginx-proxy-manager:latest
    restart: unless-stopped
    depends_on:
      app:
        condition: service_healthy
      auth:
        condition: service_healthy
    ports:
      - "80:80"    # public HTTP
      - "81:81"    # NPM admin UI
      - "443:443"  # public HTTPS
    volumes:
      - npm_data:/data  # config + DB (SQLite)
      - npm_letsencrypt:/etc/letsencrypt
    networks:
      - suitecoffee-net

  app:
    depends_on:
      db:
        condition: service_healthy
      tenants:
        condition: service_healthy
    build:
      context: ./services/app
      dockerfile: Dockerfile.production
    volumes:
      - ./services/app:/app
    env_file:
      - ./services/app/.env.production
    environment:
      - NODE_ENV=${NODE_ENV}
    command: npm run start
    healthcheck:
      # IMPORTANT: make sure curl is installed in the app image (see note below)
      test: ["CMD-SHELL", "curl -fsS http://localhost:${APP_DOCKER_PORT}/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s
    restart: unless-stopped
    networks:
      - suitecoffee-net

  auth:
    depends_on:
      db:
        condition: service_healthy
    build:
      context: ./services/auth
      dockerfile: Dockerfile.production
    volumes:
      - ./services/auth:/app
    env_file:
      - ./services/auth/.env.production
    environment:
      - NODE_ENV=${NODE_ENV}
    command: npm run start
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:${AUTH_DOCKER_PORT}/health || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 15s
    networks:
      - suitecoffee-net

  db:
    image: postgres:16
    environment:
      POSTGRES_DB: ${DB_NAME}
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASS}
    volumes:
      - suitecoffee-db:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
      interval: 5s
      timeout: 3s
      retries: 20
      start_period: 10s
    networks:
      - suitecoffee-net

  tenants:
    image: postgres:16
    environment:
      POSTGRES_DB: ${TENANTS_DB_NAME}
      POSTGRES_USER: ${TENANTS_DB_USER}
      POSTGRES_PASSWORD: ${TENANTS_DB_PASS}
    volumes:
      - tenants-db:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${TENANTS_DB_USER} -d ${TENANTS_DB_NAME}"]
      interval: 5s
      timeout: 3s
      retries: 20
      start_period: 10s
    networks:
      - suitecoffee-net

  dbeaver:
    image: dbeaver/cloudbeaver:latest
    depends_on:
      tenants:
        condition: service_healthy
      db:
        condition: service_healthy
    restart: unless-stopped
    ports:
      - "8978:8978"
    volumes:
      - dbeaver_logs:/opt/cloudbeaver/logs
      - dbeaver_workspace:/opt/cloudbeaver/workspace
    networks:
      - suitecoffee-net

volumes:
  tenants-db:
  suitecoffee-db:

  npm_data:
  npm_letsencrypt:
  dbeaver_logs:
  dbeaver_workspace:

networks:
  suitecoffee-net:
    driver: bridge
restore_compose_volumes.py (new file, 466 lines)

@@ -0,0 +1,466 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
restore_compose_volumes.py
--------------------------
Restores volumes from backups produced by backup_compose_volumes.py.

- Looks for ./docker-volumes-<timestamp> folders
- Reads .tar.gz archives (named: <volume_name>-<YYYYMMDD-HHMMSS>.tar.gz)
- Two modes:
    1) Traditional (no labels)
    2) Compose-aware (applies com.docker.compose.* labels to avoid the warning)

Additionally:
- If a volume exists and is in use, offers to stop and remove the containers using it
  so that it can be recreated with the right labels (mode 2 only).
"""

import os
import re
import sys
import json
import shlex
import pathlib
import subprocess
from typing import List, Tuple, Dict, Optional

PROJECT_ROOT = pathlib.Path.cwd()
BACKUP_DIR_PATTERN = re.compile(r"^docker-volumes-\d{8}-\d{6}$")
ARCHIVE_PATTERN = re.compile(r"^(?P<basename>.+)-(?P<ts>\d{8}-\d{6})\.tar\.gz$")

# ---------- utils ----------

def run(cmd: List[str], check: bool = False, capture_output: bool = True, text: bool = True) -> subprocess.CompletedProcess:
    return subprocess.run(cmd, check=check, capture_output=capture_output, text=text)

def which(prog: str) -> bool:
    from shutil import which as _w
    return _w(prog) is not None

def fail(msg: str):
    print(f"✗ {msg}")
    sys.exit(1)

def ok(msg: str):
    print(f"✓ {msg}")

def info(msg: str):
    print(f"• {msg}")

def warn(msg: str):
    print(f"! {msg}")

def yes_no(prompt: str, default: str = "n") -> bool:
    default = default.lower()
    hint = "[Y/n]" if default == "y" else "[y/N]"
    while True:
        resp = input(f"{prompt} {hint} ").strip().lower()
        if not resp:
            return default == "y"
        if resp in ("y","yes","s","si","sí"):  # also accepts Spanish 's'/'si'/'sí'
            return True
        if resp in ("n","no"):
            return False
        print("Unrecognized answer. Answer 'y' or 'n'.")

# ---------- docker helpers ----------

def ensure_alpine_image():
    try:
        run(["docker", "image", "inspect", "alpine:latest"], check=True)
    except subprocess.CalledProcessError:
        info("Pulling alpine:latest ...")
        run(["docker", "pull", "alpine:latest"], check=True, capture_output=False, text=True)

def volume_exists(name: str) -> bool:
    try:
        run(["docker", "volume", "inspect", name], check=True)
        return True
    except subprocess.CalledProcessError:
        return False

def inspect_volume_labels(name: str) -> Dict[str, str]:
    try:
        cp = run(["docker", "volume", "inspect", name, "--format", "{{json .Labels}}"], check=True)
        return json.loads(cp.stdout or "null") or {}
    except subprocess.CalledProcessError:
        return {}

def containers_using_volume(name: str) -> List[str]:
    # docker ps supports --filter volume=<name>
    try:
        cp = run(["docker", "ps", "-a", "--filter", f"volume={name}", "-q"], check=True)
        return [l.strip() for l in cp.stdout.splitlines() if l.strip()]
    except subprocess.CalledProcessError:
        return []

def stop_containers(ids: List[str]) -> None:
    if not ids:
        return
    info("Stopping containers using the volume...")
    run(["docker", "stop"] + ids, check=False, capture_output=False)

def remove_containers(ids: List[str]) -> None:
    if not ids:
        return
    info("Removing stopped containers using the volume...")
    run(["docker", "rm"] + ids, check=False, capture_output=False)

def remove_volume(name: str) -> bool:
    try:
        run(["docker", "volume", "rm", "-f", name], check=True, capture_output=False)
        return True
    except subprocess.CalledProcessError as e:
        warn(f"Could not remove volume '{name}': {e}")
        return False

def create_volume(name: str, labels: Optional[Dict[str,str]] = None) -> bool:
    cmd = ["docker", "volume", "create"]
    if labels:
        for k, v in labels.items():
            cmd += ["--label", f"{k}={v}"]
    cmd.append(name)
    try:
        run(cmd, check=True, capture_output=False)
        return True
    except subprocess.CalledProcessError as e:
        warn(f"Failed to create volume '{name}': {e}")
        return False

def restore_into_volume(volume_name: str, backup_dir: pathlib.Path, archive_file: pathlib.Path) -> int:
    bdir_abs = backup_dir.resolve()
    docker_cmd = [
        "docker", "run", "--rm",
        "-v", f"{volume_name}:/volume",
        "-v", f"{str(bdir_abs)}:/backup",
        "alpine:latest",
        "sh", "-lc",
        f"tar xzf /backup/{shlex.quote(archive_file.name)} -C /volume"
    ]
    proc = subprocess.run(docker_cmd)
    return proc.returncode
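
# For reference, the command assembled above is equivalent to:
#   docker run --rm -v <volume>:/volume -v <backup_dir>:/backup alpine:latest \
#       sh -lc 'tar xzf /backup/<archive_name> -C /volume'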

# ---------- parsing helpers ----------

def find_backup_dirs(root: pathlib.Path) -> List[pathlib.Path]:
    dirs = [p for p in root.iterdir() if p.is_dir() and BACKUP_DIR_PATTERN.match(p.name)]
    dirs.sort(key=lambda p: p.stat().st_mtime, reverse=True)
    return dirs

def find_archives(dirpath: pathlib.Path) -> List[pathlib.Path]:
    files = [p for p in dirpath.iterdir() if p.is_file() and p.name.endswith(".tar.gz")]
    files.sort(key=lambda p: p.name)
    return files

def parse_archive_basename(archive_name: str) -> Optional[str]:
    m = ARCHIVE_PATTERN.match(archive_name)
    if not m:
        return None
    return m.group("basename")

# ---------- compose label helpers ----------

def derive_labels_auto(volume_name: str) -> Tuple[Optional[str], Optional[str]]:
    """
    project = the part before the first '_' or '-'
    short   = the remainder after the separator
    """
    for sep in ("_", "-"):
        if sep in volume_name:
            idx = volume_name.find(sep)
            return volume_name[:idx], volume_name[idx+1:]
    return None, None
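
# Examples (derived from the rule above):
#   derive_labels_auto("suitecoffee_npm_data") -> ("suitecoffee", "npm_data")
#   derive_labels_auto("plainvolume")          -> (None, None)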
|
||||
def derive_labels_with_fixed_project(volume_name: str, project: str) -> Tuple[str, str]:
|
||||
"""
|
||||
Usa project fijo. Si volume_name empieza con '<project>_' o '<project>-', recorta.
|
||||
"""
|
||||
p = project
|
||||
if volume_name.startswith(p + "_"):
|
||||
return p, volume_name[len(p) + 1:]
|
||||
if volume_name.startswith(p + "-"):
|
||||
return p, volume_name[len(p) + 1:]
|
||||
return p, volume_name
|
||||
|
||||
def labels_match_compose(name: str, project: str, short: str) -> bool:
|
||||
labels = inspect_volume_labels(name)
|
||||
return (
|
||||
labels.get("com.docker.compose.project") == project and
|
||||
labels.get("com.docker.compose.volume") == short
|
||||
)
|
||||
|
||||
# ---------- UI flows ----------
|
||||
|
||||
def pick_backup_dir(dirs: List[pathlib.Path]) -> Optional[pathlib.Path]:
|
||||
if not dirs:
|
||||
warn("No se encontraron carpetas de backup 'docker-volumes-<timestamp>'.")
|
||||
return None
|
||||
print("\nCarpetas de backup encontradas:")
|
||||
for i, d in enumerate(dirs, 1):
|
||||
print(f" {i}) {d.name}")
|
||||
while True:
|
||||
sel = input("> Elige una carpeta (número) o Enter para cancelar: ").strip()
|
||||
if not sel:
|
||||
return None
|
||||
if sel.isdigit() and 1 <= int(sel) <= len(dirs):
|
||||
return dirs[int(sel) - 1]
|
||||
print("Opción inválida.")
|
||||
|
||||
def pick_archives(files: List[pathlib.Path]) -> List[pathlib.Path]:
|
||||
if not files:
|
||||
warn("No hay archivos .tar.gz en esa carpeta.")
|
||||
return []
|
||||
print("\nBackups disponibles:")
|
||||
for i, f in enumerate(files, 1):
|
||||
base = parse_archive_basename(f.name) or f.name
|
||||
print(f" {i}) {f.name} -> volumen: {base}")
|
||||
print("\nOpciones:")
|
||||
print(" a) Restaurar TODOS")
|
||||
print(" s) Seleccionar algunos (ej: 1,3,5)")
|
||||
while True:
|
||||
sel = input("> Elige 'a' o 's': ").strip().lower()
|
||||
if sel == "a":
|
||||
return files
|
||||
if sel == "s":
|
||||
picks = input("> Números separados por coma: ").strip()
|
||||
idxs = []
|
||||
try:
|
||||
for tok in picks.split(","):
|
||||
tok = tok.strip()
|
||||
if tok:
|
||||
idx = int(tok)
|
||||
idxs.append(idx - 1)
|
||||
chosen = [files[i] for i in sorted(set(i for i in idxs if 0 <= i < len(files)))]
|
||||
if chosen:
|
||||
return chosen
|
||||
except Exception:
|
||||
pass
|
||||
print("Selección inválida.")
|
||||
else:
|
||||
print("Opción inválida.")
|
||||
|
||||
def pick_restore_mode() -> str:
|
||||
print("\nModo de restauración:")
|
||||
print(" 1) Tradicional (sin labels)")
|
||||
print(" 2) Reconocido por Compose (aplica labels para evitar el warning)")
|
||||
while True:
|
||||
sel = input("> Elige 1 o 2: ").strip()
|
||||
if sel in ("1", "2"):
|
||||
return sel
|
||||
print("Opción inválida.")
|
||||
|
||||
def confirm_overwrite(volume_name: str) -> bool:
|
||||
return yes_no(f"El volumen '{volume_name}' ya existe. ¿Sobrescribir (recrear)?", default="n")
|
||||
|
||||
# ---------- restore flows ----------
|
||||
|
||||
def restore_traditional(backup_dir: pathlib.Path, archives: List[pathlib.Path]):
|
||||
ensure_alpine_image()
|
||||
print("\n=== Restauración TRADICIONAL ===\n")
|
||||
for arch in archives:
|
||||
vname = parse_archive_basename(arch.name)
|
||||
if not vname:
|
||||
warn(f"Nombre de backup no reconocible: {arch.name}, se omite.")
|
||||
continue
|
||||
info(f"Volumen: {vname}")
|
||||
|
||||
# Tradicional: no cambiamos labels; si existe, restauramos sobre volumen nuevo (recreándolo)
|
||||
if volume_exists(vname):
|
||||
# Intentar eliminar: si está en uso, ofrecer detener/remover contenedores
|
||||
if not confirm_overwrite(vname):
|
||||
info(" → Omitido (ya existe).")
|
||||
continue
|
||||
ids = containers_using_volume(vname)
|
||||
if ids:
|
||||
info(f"Contenedores que usan '{vname}': {', '.join(ids)}")
|
||||
if yes_no("¿Detener y eliminar esos contenedores para continuar?", default="y"):
|
||||
stop_containers(ids)
|
||||
remove_containers(ids)
|
||||
else:
|
||||
warn(" → No se puede recrear el volumen en uso. Omitido.")
|
||||
continue
|
||||
if not remove_volume(vname):
|
||||
warn(" → No se pudo eliminar el volumen. Omitido.")
|
||||
continue
|
||||
|
||||
if not create_volume(vname):
|
||||
warn(" → No se pudo crear el volumen, se omite.")
|
||||
continue
|
||||
rc = restore_into_volume(vname, backup_dir, arch)
|
||||
if rc == 0:
|
||||
ok(" Restaurado.")
|
||||
else:
|
||||
warn(f" Falló la restauración (rc={rc}).")
|
||||
|
||||
def restore_with_compose_labels(backup_dir: pathlib.Path, archives: List[pathlib.Path]):
|
||||
"""
|
||||
Restaura creando volúmenes con labels de Compose para que NO aparezca el warning:
|
||||
"volume ... already exists but was not created by Docker Compose..."
|
||||
"""
|
||||
ensure_alpine_image()
|
||||
print("\n=== Restauración RECONOCIDA POR COMPOSE (con labels) ===\n")
|
||||
print("Estrategia de etiquetado:")
|
||||
print(" 1) Auto (project = prefijo de <vol> antes de '_' o '-', short = resto)")
|
||||
print(" 2) Fijar un 'project' para todos (p. ej. suitecoffee, suitecoffee_dev, suitecoffee_prod)")
|
||||
mode = ""
|
||||
while mode not in ("1", "2"):
|
||||
mode = input("> Elige 1 o 2: ").strip()
|
||||
|
||||
fixed_project = None
|
||||
if mode == "2":
|
||||
fixed_project = input("> Indica el 'project' de Compose (exacto): ").strip()
|
||||
if not fixed_project:
|
||||
warn("Project vacío, cancelado.")
|
||||
return
|
||||
|
||||
# Previsualización de etiquetas
|
||||
preview = []
|
||||
for arch in archives:
|
||||
vname = parse_archive_basename(arch.name)
|
||||
if not vname:
|
||||
continue
|
||||
if mode == "1":
|
||||
proj, short = derive_labels_auto(vname)
|
||||
else:
|
||||
proj, short = derive_labels_with_fixed_project(vname, fixed_project)
|
||||
preview.append((arch, vname, proj, short))
|
||||
|
||||
print("\nVista previa de etiquetas (project / volume):")
|
||||
for _, vname, proj, short in preview:
|
||||
if proj and short:
|
||||
print(f" {vname} → project='{proj}', volume='{short}'")
|
||||
else:
|
||||
print(f" {vname} → (no derivado; se pedirá manualmente)")
|
||||
|
||||
if not yes_no("\n¿Confirmar restauración con estas etiquetas?", default="y"):
|
||||
warn("Cancelado por el usuario.")
|
||||
return
|
||||
|
||||
    # Restore with labels
    for arch, vname, proj, short in preview:
        # fill in manually if missing
        if not proj or not short:
            print(f"\nDefine labels for: {vname}")
            proj = input("  project = ").strip()
            short = input("  volume  = ").strip()
            if not proj or not short:
                warn("  → Incomplete labels; skipping.")
                continue

        info(f"\nVolume: {vname} (labels: project='{proj}', volume='{short}')")

        if volume_exists(vname):
            # Does it already have the right labels? Then just restore the data without recreating
            if labels_match_compose(vname, proj, short):
                info("  Volume already has the correct Compose labels. Overwriting data...")
                rc = restore_into_volume(vname, backup_dir, arch)
                if rc == 0:
                    ok("  Restored (labels already correct).")
                else:
                    warn(f"  Restore failed (rc={rc}).")
                continue

            # Ask for permission to stop/remove containers and recreate the volume with the right labels
            if not yes_no("  The volume exists without the right labels. Stop/remove its containers and recreate it with labels to avoid the warning?", default="y"):
                warn("  → Skipped (keeps the Compose warning).")
                continue

            ids = containers_using_volume(vname)
            if ids:
                info(f"  Containers using '{vname}': {', '.join(ids)}")
                stop_containers(ids)
                remove_containers(ids)

            if not remove_volume(vname):
                warn("  → Could not remove the volume. Skipped.")
                continue

        labels = {
            "com.docker.compose.project": proj,
            "com.docker.compose.volume": short,
        }
        if not create_volume(vname, labels=labels):
            warn("  → Could not create the volume with labels. Skipped.")
            continue

        rc = restore_into_volume(vname, backup_dir, arch)
        if rc == 0:
            ok("  Restored with Compose labels (warning resolved).")
        else:
            warn(f"  Restore failed (rc={rc}).")

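# Reference only: the CLI equivalent of the create_volume(vname, labels=...)
# call above, assuming the helper (defined earlier in this file) wraps
# `docker volume create`:
#
#   docker volume create \
#     --label com.docker.compose.project=<proj> \
#     --label com.docker.compose.volume=<short> \
#     <vname>
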
# ---------- main ----------

def main():
    if not which("docker"):
        fail("'docker' was not found in PATH.")
    try:
        run(["docker", "version"], check=True)
    except subprocess.CalledProcessError:
        fail("Cannot talk to the Docker daemon. Is it running?")

    # Pick a docker-volumes-<ts> folder
    dirs = [p for p in PROJECT_ROOT.iterdir() if p.is_dir() and BACKUP_DIR_PATTERN.match(p.name)]
    dirs.sort(key=lambda p: p.stat().st_mtime, reverse=True)
    if not dirs:
        warn("No 'docker-volumes-<timestamp>' backup folders found.")
        return

    print("\nBackup folders found:")
    for i, d in enumerate(dirs, 1):
        print(f"  {i}) {d.name}")
    chosen = None
    while not chosen:
        sel = input("> Choose a folder (number) or Enter to exit: ").strip()
        if not sel:
            warn("Cancelled."); return
        if sel.isdigit() and 1 <= int(sel) <= len(dirs):
            chosen = dirs[int(sel) - 1]
        else:
            print("Invalid option.")

    # Files in the folder
    archives = [p for p in chosen.iterdir() if p.is_file() and p.name.endswith(".tar.gz")]
    archives.sort(key=lambda p: p.name)
    if not archives:
        warn("No .tar.gz files in that folder."); return

    print("\nAvailable backups:")
    for i, f in enumerate(archives, 1):
        base = parse_archive_basename(f.name) or f.name
        print(f"  {i}) {f.name} -> volume: {base}")

    print("\nSelection options:")
    print("  a) Restore ALL")
    print("  s) Pick some (e.g. 1,3,5)")
    selected: List[pathlib.Path] = []
    while not selected:
        mode = input("> Choose 'a' or 's': ").strip().lower()
        if mode == "a":
            selected = archives
        elif mode == "s":
            picks = input("> Comma-separated numbers: ").strip()
            try:
                idxs = [int(x.strip()) - 1 for x in picks.split(",") if x.strip()]
                selected = [archives[i] for i in sorted(set(i for i in idxs if 0 <= i < len(archives)))]
            except Exception:
                selected = []
        else:
            print("Invalid option.")

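    # Note on the parsing above: picks = "1,3,5" maps to idxs = [0, 2, 4];
    # out-of-range numbers are dropped and duplicates collapse via the sorted set.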
    # Restore mode
    choice = pick_restore_mode()
    if choice == "1":
        restore_traditional(chosen, selected)
    else:
        restore_with_compose_labels(chosen, selected)

    ok("\nProcess finished.")

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n✓ Interrupted by the user (Ctrl+C).")

@ -2,8 +2,8 @@
FROM node:22.18

# Define environment variables with default values
ARG NODE_ENV=production
ARG PORT=3000
# ARG NODE_ENV=production
# ARG PORT=3000

RUN apt-get update
RUN apt-get install -y curl && rm -rf /var/lib/apt/lists/*
@ -17,7 +17,4 @@ RUN npm i
# Copy the rest of the app
COPY . .

# Expose the port
EXPOSE 3000

CMD ["npm", "run", "start"]
@ -2,8 +2,8 @@
FROM node:22.18

# Define environment variables with default values
ARG NODE_ENV=production
ARG PORT=4000
# ARG NODE_ENV=production
# ARG PORT=4000

RUN apt-get update
RUN apt-get install -y curl && rm -rf /var/lib/apt/lists/*
@ -17,7 +17,4 @@ RUN npm i
# Copy the rest of the app
COPY . .

# Expose the port
EXPOSE 4000

CMD ["npm", "run", "start"]

432
suitecoffee.py
@ -8,98 +8,81 @@ from shutil import which

PROJECT_ROOT = os.path.abspath(os.getcwd())

# Common files
BASE_COMPOSE = os.path.join(PROJECT_ROOT, "docker-compose.yml")
OVERRIDE_COMPOSE = os.path.join(PROJECT_ROOT, "docker-compose.override.yml")
# === Compose files (environments) ===
BASE_COMPOSE = os.path.join(PROJECT_ROOT, "compose.yaml")
DEV_COMPOSE = os.path.join(PROJECT_ROOT, "compose.dev.yaml")
PROD_COMPOSE = os.path.join(PROJECT_ROOT, "compose.prod.yaml")

# Mapping of environments -> .env file
# === Compose files (globals) ===
NPM_COMPOSE = os.path.join(PROJECT_ROOT, "compose.npm.yaml")
DBEAVER_COMPOSE = os.path.join(PROJECT_ROOT, "compose.dbeaver.yaml")

# .env files
ENV_FILES = {
    "development": ".env.development",
    "production": ".env.production",
}

# ---------- New utilities ----------
# Project names, to allow DEV and PROD to run simultaneously
def _base_project():
    return os.path.basename(PROJECT_ROOT).lower() or "composeproj"

def resolve_project_name(env_file=None, include_override=True):
    """
    Gets the 'project name' docker compose will use for this combination of files/env,
    by querying 'docker compose config --format json'. If that fails, it uses the folder name.
    """
    cmd = ["docker", "compose"] + compose_files_args(include_override=include_override)
    if env_file:
        cmd += ["--env-file", env_file]
    cmd += ["config", "--format", "json"]
    proc = run(cmd, capture_output=True)
    if proc.returncode == 0:
        try:
            data = json.loads(proc.stdout)
            # Compose v2 returns 'name' in the JSON; if not, fall back
            return data.get("name") or os.path.basename(PROJECT_ROOT)
        except Exception:
            return os.path.basename(PROJECT_ROOT)
    return os.path.basename(PROJECT_ROOT)
PROJECT_NAMES = {
    "development": f"{_base_project()}_dev",
    "production": f"{_base_project()}_prod",
}

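# For illustration, assuming the project folder is named "suitecoffee",
# PROJECT_NAMES yields suitecoffee_dev and suitecoffee_prod, so both
# environments can coexist:
#
#   docker compose -f compose.yaml -f compose.dev.yaml -p suitecoffee_dev up -d
#   docker compose -f compose.yaml -f compose.prod.yaml -p suitecoffee_prod up -d
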
def list_project_containers(project_name, all_states=True):
    """
    Lists the project's containers by the label com.docker.compose.project=<name>.
    If all_states=False, only the running ones.
    Returns a list of dicts: {id, name, status, image}.
    """
    base = ["docker", "ps"]
    if all_states:
        base.append("-a")
    base += ["--filter", f"label=com.docker.compose.project={project_name}",
             "--format", "{{.ID}}\t{{.Names}}\t{{.Status}}\t{{.Image}}"]
    proc = run(base, capture_output=True)
    if proc.returncode != 0:
        return []
    rows = []
    for line in proc.stdout.splitlines():
        parts = line.strip().split("\t")
        if len(parts) >= 4:
            rows.append({"id": parts[0], "name": parts[1], "status": parts[2], "image": parts[3]})
    return rows

def print_containers_table(title, rows):
    print_header(title)
    if not rows:
        print("(none)\n")
        return
    # simple fixed widths, no dependencies
    print(f"{'ID':<12} {'NAME':<40} {'STATUS':<20} IMAGE")
    for r in rows:
        print(f"{r['id']:<12} {r['name']:<40} {r['status']:<20} {r['image']}")
    print()
# Global project name (both global yaml files use name: suitecoffee)
GLOBAL_PROJECT_NAME = "suitecoffee"

# ---------- Utilities ----------

def check_prereqs():
    if which("docker") is None:
        fail("'docker' was not found in PATH.")
    # Check that docker compose is available (the built-in subcommand)
    try:
        run(["docker", "compose", "version"], check=True, capture_output=True)
    except Exception:
        fail("Could not run 'docker compose'. Make sure you have Docker Compose v2.")

def run(cmd, check=False, capture_output=False):
    return subprocess.run(
        cmd,
        check=check,
        capture_output=capture_output,
        text=True
    )
    return subprocess.run(cmd, check=check, capture_output=capture_output, text=True)

def compose_files_args(include_override=True):
    args = []
    if os.path.exists(BASE_COMPOSE):
        args += ["-f", BASE_COMPOSE]
def compose_files_args(env_key):
    """
    Returns the right -f flags for the environment (dev/prod) + base.
    """
    if not os.path.exists(BASE_COMPOSE):
        fail("compose.yaml was not found in the project root.")

    args = ["-f", BASE_COMPOSE]

    if env_key == "development":
        if not os.path.exists(DEV_COMPOSE):
            fail("compose.dev.yaml was not found.")
        args += ["-f", DEV_COMPOSE]
    elif env_key == "production":
        if not os.path.exists(PROD_COMPOSE):
            fail("compose.prod.yaml was not found.")
        args += ["-f", PROD_COMPOSE]
    else:
        fail("docker-compose.yml was not found in the project root.")
    if include_override and os.path.exists(OVERRIDE_COMPOSE):
        args += ["-f", OVERRIDE_COMPOSE]
        fail(f"Unknown environment: {env_key}")
    return args

def compose_files_args_global(kind):
    """
    Returns the right -f flags for the global services (npm/dbeaver).
    """
    if kind == "npm":
        if not os.path.exists(NPM_COMPOSE):
            fail("compose.npm.yaml was not found.")
        return ["-f", NPM_COMPOSE]
    elif kind == "dbeaver":
        if not os.path.exists(DBEAVER_COMPOSE):
            fail("compose.dbeaver.yaml was not found.")
        return ["-f", DBEAVER_COMPOSE]
    else:
        fail(f"Unknown global service: {kind}")

def env_file_path(env_key):
    fname = ENV_FILES.get(env_key)
    if not fname:
@ -107,33 +90,33 @@ def env_file_path(env_key):
    path = os.path.join(PROJECT_ROOT, fname)
    return path if os.path.exists(path) else None

def compose_cmd(base_args, env_file=None, include_override=True):
def compose_cmd(base_args, env_key, env_file=None, project_name=None):
    """
    Builds the docker compose command with the right -f flags
    and optionally --env-file if it exists (before the subcommand).
    Builds: docker compose -f base -f env --env-file ... -p <name> <COMMAND> [OPTIONS]
    (important: --env-file and -p are global options and go before the subcommand)
    """
    cmd = ["docker", "compose"]
    cmd += compose_files_args(include_override=include_override)
    cmd += compose_files_args(env_key)
    if env_file:
        cmd += ["--env-file", env_file]  # global option, before the subcommand
        cmd += ["--env-file", env_file]
    if project_name:
        cmd += ["-p", project_name]
    cmd += base_args  # ["up","-d","--force-recreate"] or ["ps","--status","running","-q"]
    return cmd

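# For illustration, assuming the folder is named "suitecoffee" and
# .env.production exists, compose_cmd(["up", "-d"], env_key="production", ...)
# assembles roughly:
#
#   docker compose -f compose.yaml -f compose.prod.yaml \
#     --env-file .env.production -p suitecoffee_prod up -d
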
def compose_cmd_global(base_args, kind, project_name=GLOBAL_PROJECT_NAME):
    """
    Commands for the global services (npm/dbeaver):
    docker compose -f compose.<kind>.yaml -p suitecoffee <COMMAND> ...
    """
    cmd = ["docker", "compose"]
    cmd += compose_files_args_global(kind)
    if project_name:
        cmd += ["-p", project_name]
    cmd += base_args
    return cmd

def get_running_containers():
    """
    Returns the list of container IDs in 'running' state for this project.
    """
    cmd = compose_cmd(["ps", "--status", "running", "-q"])
    proc = run(cmd, capture_output=True)
    if proc.returncode != 0:
        return []
    lines = [l.strip() for l in proc.stdout.splitlines() if l.strip()]
    return lines

def yes_no(prompt, default="n"):
    """
    Asks yes/no. default: 'y' or 'n'
    """
    default = default.lower()
    hint = "[Y/n]" if default == "y" else "[y/N]"
    while True:
@ -158,13 +141,46 @@ def fail(msg):
    print(f"✗ {msg}")
    sys.exit(1)

# ---------- Actions ----------
# ---------- Global helpers (services per file and ps by labels) ----------
def list_services_from_compose_file(compose_path):
    """Gets the list of services defined in a specific compose file."""
    cmd = ["docker", "compose", "-f", compose_path, "config", "--services"]
    proc = run(cmd, capture_output=True)
    if proc.returncode != 0:
        return []
    return [s.strip() for s in proc.stdout.splitlines() if s.strip()]

def bring_up(env_key, include_override=True):
def list_services_global(kind):
    compose_path = NPM_COMPOSE if kind == "npm" else DBEAVER_COMPOSE
    return list_services_from_compose_file(compose_path)

def docker_ps_by_labels(project, service=None, running_only=True):
    """Lists containers by compose labels (project/service)."""
    cmd = ["docker", "ps"]
    if running_only:
        # docker ps already lists only running containers by default; kept explicit for clarity
        pass
    cmd += ["--filter", f"label=com.docker.compose.project={project}"]
    if service:
        cmd += ["--filter", f"label=com.docker.compose.service={service}"]
    cmd += ["-q"]
    proc = run(cmd, capture_output=True)
    if proc.returncode != 0:
        return []
    return [l.strip() for l in proc.stdout.splitlines() if l.strip()]

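# For illustration, the CLI equivalent of docker_ps_by_labels("suitecoffee",
# service="npm") is roughly (the service name is an example; real names come
# from the yaml):
#
#   docker ps --filter label=com.docker.compose.project=suitecoffee \
#             --filter label=com.docker.compose.service=npm -q
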
# ---------- Actions (environments) ----------
def bring_up(env_key, force_recreate=False):
    env_path = env_file_path(env_key)
    if not env_path:
        warn(f"No environment file found for '{env_key}'. Continuing without --env-file.")
    cmd = compose_cmd(["up", "-d"], env_file=env_path, include_override=include_override)
    pname = PROJECT_NAMES.get(env_key)

    base_args = ["up", "-d"]
    if force_recreate:
        base_args.append("--force-recreate")

    cmd = compose_cmd(base_args, env_key=env_key, env_file=env_path, project_name=pname)
    info("Running: " + " ".join(cmd))
    proc = run(cmd)
    if proc.returncode == 0:
@ -172,98 +188,192 @@ def bring_up(env_key, include_override=True):
    else:
        fail(f"Failed to bring up environment '{env_key}'. Code: {proc.returncode}")

def bring_down(env_key=None):
    """
    Tries to shut down using the given env if the .env exists.
    If no env_key is passed or the .env does not exist, it does a generic down.
    """
    env_path = env_file_path(env_key) if env_key else None
    cmd = compose_cmd(["down"], env_file=env_path)
def bring_down(env_key):
    env_path = env_file_path(env_key)
    pname = PROJECT_NAMES.get(env_key)
    cmd = compose_cmd(["down"], env_key=env_key, env_file=env_path, project_name=pname)
    info("Running: " + " ".join(cmd))
    proc = run(cmd)
    if proc.returncode == 0:
        ok("Containers stopped and the project's network/volumes torn down (as applicable).")
        ok(f"Environment '{env_key}' stopped successfully.")
    else:
        fail(f"Failed to stop the environment. Code: {proc.returncode}")
        fail(f"Failed to stop environment '{env_key}'. Code: {proc.returncode}")

def running_ids(env_key):
    env_path = env_file_path(env_key)
    pname = PROJECT_NAMES.get(env_key)
    cmd = compose_cmd(["ps", "--status", "running", "-q"], env_key=env_key, env_file=env_path, project_name=pname)
    proc = run(cmd, capture_output=True)
    if proc.returncode != 0:
        return []
    return [l.strip() for l in proc.stdout.splitlines() if l.strip()]

# ---------- Actions (globals) ----------
def bring_up_global(kind, force_recreate=False):
    base_args = ["up", "-d"]
    if force_recreate:
        base_args.append("--force-recreate")
    cmd = compose_cmd_global(base_args, kind=kind)
    info("Running: " + " ".join(cmd))
    proc = run(cmd)
    if proc.returncode == 0:
        ok(f"Global service '{kind}' brought up successfully.")
    else:
        fail(f"Failed to bring up global service '{kind}'. Code: {proc.returncode}")

def bring_down_global(kind, remove=False):
    """
    Shuts down ONLY the services defined in the given global compose file.
    - First 'stop <services>'.
    - Optionally, 'rm -f <services>' if remove=True.
    """
    services = list_services_global(kind)
    if not services:
        warn(f"No services found in compose.{kind}.yaml.")
        return
    # targeted stop
    cmd = compose_cmd_global(["stop"] + services, kind=kind)
    info("Running: " + " ".join(cmd))
    proc = run(cmd)
    if proc.returncode != 0:
        fail(f"Failed to stop '{kind}' services. Code: {proc.returncode}")

    if remove:
        cmd_rm = compose_cmd_global(["rm", "-f"] + services, kind=kind)
        info("Running: " + " ".join(cmd_rm))
        proc_rm = run(cmd_rm)
        if proc_rm.returncode != 0:
            fail(f"Failed to remove '{kind}' services. Code: {proc_rm.returncode}")

    ok(f"Global service(s) '{kind}' stopped.")

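# For illustration, assuming compose.npm.yaml defines a single service "npm",
# bring_down_global("npm", remove=True) runs, in order:
#
#   docker compose -f compose.npm.yaml -p suitecoffee stop npm
#   docker compose -f compose.npm.yaml -p suitecoffee rm -f npm
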
def running_ids_global(kind):
    """
    Detects whether the global compose is running by checking containers per service,
    filtering by labels (project+service).
    """
    services = list_services_global(kind)
    ids = []
    for svc in services:
        ids += docker_ps_by_labels(GLOBAL_PROJECT_NAME, service=svc, running_only=True)
    # remove duplicates
    return list(dict.fromkeys(ids))

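# Note: dict.fromkeys() deduplicates while preserving first-seen order, e.g.
# list(dict.fromkeys(["a", "b", "a"])) == ["a", "b"].
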
# ---------- State and flow ----------
def detect_status_summary():
    dev_running = running_ids("development")
    prod_running = running_ids("production")
    npm_running = running_ids_global("npm")
    dbeaver_running = running_ids_global("dbeaver")

    print_header("Current state")
    info(f"DEVELOPMENT: {len(dev_running)} container(s) running.")
    info(f"PRODUCTION: {len(prod_running)} container(s) running.")
    info(f"NPM (global): {len(npm_running)} container(s) running.")
    info(f"DBEAVER (global): {len(dbeaver_running)} container(s) running.\n")

    return bool(dev_running), bool(prod_running), bool(npm_running), bool(dbeaver_running)

def detect_and_optionally_shutdown():
    """Shows the state and optionally offers to shut down dev/prod.
    The global services are managed from the main menu (bring up/shut down).
    """
    dev_on, prod_on, _npm_on, _dbeaver_on = detect_status_summary()

    options = []
    if dev_on:
        options.append(("1", "Shut down the DEVELOPMENT environment", "development"))
    if prod_on:
        options.append(("2", "Shut down the PRODUCTION environment", "production"))
    options.append(("3", "Continue without stopping anything", None))

    if len(options) == 1:
        return

    print("Select an option:")
    for opt in options:
        key, label = opt[0], opt[1]
        print(f"  {key}) {label}")

    while True:
        choice = input("> ").strip()
        selected = next((opt for opt in options if opt[0] == choice), None)
        if not selected:
            print("Invalid option.")
            continue
        if choice == "3" or selected[2] is None:
            ok("Continuing without stopping anything.")
            return
        env_key = selected[2]
        bring_down(env_key)
        return

def main_menu():
    # Check the global services' state to decide whether to show "Bring up" or "Shut down" options
    _dev_on, _prod_on, npm_on, dbeaver_on = detect_status_summary()

    print_header("Docker Compose environment manager")
    print("Select an option:")
    print("  1) Bring up the DEVELOPMENT environment")
    print("  2) Bring up the PRODUCTION environment")
    print("  3) Exit")

    dynamic_keys = {}
    next_key = 3

    # NPM: option depends on state
    if not npm_on:
        print(f"  {next_key}) Bring up NPM (compose.npm.yaml)")
        dynamic_keys[str(next_key)] = ("global_up", "npm")
    else:
        print(f"  {next_key}) Shut down NPM (compose.npm.yaml)")
        dynamic_keys[str(next_key)] = ("global_down", "npm")
    next_key += 1

    # DBEAVER: option depends on state
    if not dbeaver_on:
        print(f"  {next_key}) Bring up DBEAVER (compose.dbeaver.yaml)")
        dynamic_keys[str(next_key)] = ("global_up", "dbeaver")
    else:
        print(f"  {next_key}) Shut down DBEAVER (compose.dbeaver.yaml)")
        dynamic_keys[str(next_key)] = ("global_down", "dbeaver")
    next_key += 1

    # Exit
    print(f"  {next_key}) Exit")
    exit_key = str(next_key)

    while True:
        choice = input("> ").strip()
        if choice == "1":
            bring_up("development")  # includes the override
            force = yes_no("Use --force-recreate for DEVELOPMENT?", default="n")
            bring_up("development", force_recreate=force)
            return
        elif choice == "2":
            bring_up("production", include_override=False)  # without the override
            force = yes_no("Use --force-recreate for PRODUCTION?", default="n")
            bring_up("production", force_recreate=force)
            return
        elif choice == "3":
        elif choice in dynamic_keys:
            action, kind = dynamic_keys[choice]
            if action == "global_up":
                force = yes_no(f"Use --force-recreate for {kind.upper()}?", default="n")
                bring_up_global(kind, force_recreate=force)
                return
            elif action == "global_down":
                remove = yes_no(f"Also remove {kind.upper()}'s containers? (rm -f)", default="n")
                bring_down_global(kind, remove=remove)
                return
        elif choice == exit_key:
            ok("Exiting.")
            sys.exit(0)
        else:
            print("Invalid option. Choose 1, 2 or 3.")

def detect_and_offer_shutdown():
    # Env paths (if they exist)
    dev_env = env_file_path("development")
    prod_env = env_file_path("production")

    # Helper: gets the running IDs for a given files/env combination
    def running_ids(env_path, include_override):
        cmd = compose_cmd(["ps", "--status", "running", "-q"],
                          env_file=env_path,
                          include_override=include_override)
        proc = run(cmd, capture_output=True)
        if proc.returncode != 0:
            return []
        return [l.strip() for l in proc.stdout.splitlines() if l.strip()]

    # Dev uses the override; Prod does not
    dev_running = running_ids(dev_env, include_override=True)
    prod_running = running_ids(prod_env, include_override=False)

    any_running = bool(dev_running or prod_running)
    if any_running:
        print_header("Active containers detected")
        info(f"DEVELOPMENT: {len(dev_running)} container(s) running.")
        info(f"PRODUCTION: {len(prod_running)} container(s) running.\n")

        options = []
        if dev_running:
            options.append(("1", "Shut down the DEVELOPMENT environment", ("development", True)))
        if prod_running:
            options.append(("2", "Shut down the PRODUCTION environment", ("production", False)))
        options.append(("3", "Keep everything as is and exit", None))

        print("Select an option:")
        for key, label, _ in options:
            print(f"  {key}) {label}")

        while True:
            choice = input("> ").strip()
            selected = next((opt for opt in options if opt[0] == choice), None)
            if not selected:
                print("Invalid option.")
                continue
            if choice == "3":
                ok("Keeping the current state.")
                sys.exit(0)

            env_key, _include_override = selected[2]
            info(f"Trying to shut down the {env_key.upper()} environment…")
            bring_down(env_key)  # already honors --env-file, and prod does not use the override
            ok("Done.")
            break
    else:
        info("No active containers for this project.")
        print("Invalid option. Choose one of the listed options.")

|
||||
try:
|
||||
check_prereqs()
|
||||
detect_and_offer_shutdown()
|
||||
# Mostrar estado y permitir opcionalmente apagar dev/prod
|
||||
detect_and_optionally_shutdown()
|
||||
# Menú de gestión (incluye globales: levantar o apagar según estado)
|
||||
main_menu()
|
||||
except KeyboardInterrupt:
|
||||
print("\n")
|
||||
|
||||