mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git
synced 2026-01-13 09:07:20 +00:00

Compare commits: b62e33bb03 ... 681da2767b
3 commits: 681da2767b, d38c7557e0, df7689f26a
deploy.sh (65 changed lines)
@@ -34,11 +34,12 @@ REMOTE_SKIP_STORAGE=0
 REMOTE_COPY_SOURCE=0
 REMOTE_ARGS_PROVIDED=0
 REMOTE_AUTO_DEPLOY=0
-REMOTE_CLEAN_RUNTIME=0
+REMOTE_CLEAN_CONTAINERS=0
 REMOTE_STORAGE_OVERRIDE=""
 REMOTE_CONTAINER_USER_OVERRIDE=""
 REMOTE_ENV_FILE=""
 REMOTE_SKIP_ENV=0
+REMOTE_PRESERVE_CONTAINERS=0

 MODULE_HELPER="$ROOT_DIR/scripts/python/modules.py"
 MODULE_STATE_INITIALIZED=0
@@ -174,8 +175,18 @@ collect_remote_details(){
     read -rp "Stop/remove remote containers & project images during migration? [y/N]: " cleanup_answer
     cleanup_answer="${cleanup_answer:-n}"
     case "${cleanup_answer,,}" in
-      y|yes) REMOTE_CLEAN_RUNTIME=1 ;;
-      *) REMOTE_CLEAN_RUNTIME=0 ;;
+      y|yes) REMOTE_CLEAN_CONTAINERS=1 ;;
+      *)
+        REMOTE_CLEAN_CONTAINERS=0
+        # Offer explicit preservation when declining cleanup
+        local preserve_answer
+        read -rp "Preserve remote containers/images (skip cleanup)? [Y/n]: " preserve_answer
+        preserve_answer="${preserve_answer:-Y}"
+        case "${preserve_answer,,}" in
+          n|no) REMOTE_PRESERVE_CONTAINERS=0 ;;
+          *) REMOTE_PRESERVE_CONTAINERS=1 ;;
+        esac
+        ;;
     esac
   fi
@@ -251,9 +262,11 @@ Options:
   --remote-skip-storage                 Skip syncing the storage directory during migration
   --remote-copy-source                  Copy the local project directory to remote instead of relying on git
   --remote-auto-deploy                  Run './deploy.sh --yes --no-watch' on the remote host after migration
-  --remote-clean-runtime                Stop/remove remote containers & project images during migration
+  --remote-clean-containers             Stop/remove remote containers & project images during migration
   --remote-storage-path PATH            Override STORAGE_PATH/STORAGE_PATH_LOCAL in the remote .env
   --remote-container-user USER[:GROUP]  Override CONTAINER_USER in the remote .env
+  --remote-skip-env                     Do not upload .env to the remote host
+  --remote-preserve-containers          Skip stopping/removing remote containers during migration
   --skip-config                         Skip applying server configuration preset
   -h, --help                            Show this help
@@ -282,15 +295,22 @@ while [[ $# -gt 0 ]]; do
     --remote-skip-storage) REMOTE_SKIP_STORAGE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-copy-source) REMOTE_COPY_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-auto-deploy) REMOTE_AUTO_DEPLOY=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
-    --remote-clean-runtime) REMOTE_CLEAN_RUNTIME=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
+    --remote-clean-containers) REMOTE_CLEAN_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-storage-path) REMOTE_STORAGE_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
     --remote-container-user) REMOTE_CONTAINER_USER_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
+    --remote-skip-env) REMOTE_SKIP_ENV=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
+    --remote-preserve-containers) REMOTE_PRESERVE_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --skip-config) SKIP_CONFIG=1; shift;;
     -h|--help) usage; exit 0;;
     *) err "Unknown option: $1"; usage; exit 1;;
   esac
 done

+if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ] && [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
+  err "Cannot combine --remote-clean-containers with --remote-preserve-containers."
+  exit 1
+fi
+
 require_cmd(){
   command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; }
 }
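The new flags are deliberately mutually exclusive. A hypothetical pair of invocations (host and user are placeholders):

```bash
# Replace the remote runtime wholesale during migration:
./deploy.sh --remote --remote-host example.com --remote-user acore --remote-clean-containers

# Push an update while leaving remote containers and images untouched:
./deploy.sh --remote --remote-host example.com --remote-user acore --remote-preserve-containers

# Passing both flags trips the guard above and exits with an error.
```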
@@ -552,6 +572,27 @@ prompt_build_if_needed(){
   local build_reasons_output
   build_reasons_output=$(detect_build_needed)

+  if [ -z "$build_reasons_output" ]; then
+    # Belt-and-suspenders: if C++ modules are enabled but module images missing, warn
+    ensure_module_state
+    if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
+      local authserver_modules_image
+      local worldserver_modules_image
+      authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(resolve_project_image "authserver-modules-latest")")"
+      worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")"
+      local missing_images=()
+      if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then
+        missing_images+=("$authserver_modules_image")
+      fi
+      if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then
+        missing_images+=("$worldserver_modules_image")
+      fi
+      if [ ${#missing_images[@]} -gt 0 ]; then
+        build_reasons_output=$(printf "C++ modules enabled but module images missing: %s\n" "${missing_images[*]}")
+      fi
+    fi
+  fi
+
   if [ -z "$build_reasons_output" ]; then
     return 0 # No build needed
   fi
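The belt-and-suspenders check leans on `docker image inspect`, which exits non-zero when the named image is absent from the local cache. A minimal sketch of that probe (image tag hypothetical):

```bash
image="acore/worldserver-modules-latest"   # hypothetical tag
if ! docker image inspect "$image" >/dev/null 2>&1; then
  echo "missing: $image"
fi
```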
@@ -693,14 +734,22 @@ run_remote_migration(){
     args+=(--copy-source)
   fi

-  if [ "$REMOTE_CLEAN_RUNTIME" -eq 1 ]; then
-    args+=(--cleanup-runtime)
+  if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ]; then
+    args+=(--clean-containers)
   fi

   if [ "$ASSUME_YES" -eq 1 ]; then
     args+=(--yes)
   fi

+  if [ "$REMOTE_SKIP_ENV" -eq 1 ]; then
+    args+=(--skip-env)
+  fi
+
+  if [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
+    args+=(--preserve-containers)
+  fi
+
   if [ -n "$REMOTE_ENV_FILE" ]; then
     args+=(--env-file "$REMOTE_ENV_FILE")
   fi
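Flag forwarding uses the usual bash array idiom: options accumulate in `args` and are expanded with `"${args[@]}"` so each element survives quoting intact. A minimal sketch of the pattern (command name hypothetical):

```bash
args=(--host "$HOST")                        # required options first
[ "$SKIP_ENV" -eq 1 ] && args+=(--skip-env)  # append conditionally
migrate_cmd "${args[@]}"                     # quoted expansion keeps elements whole
```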
@@ -170,8 +170,12 @@ Optional flags:
 - `--remote-port 2222` - Custom SSH port
 - `--remote-identity ~/.ssh/custom_key` - Specific SSH key
 - `--remote-skip-storage` - Don't sync the storage directory (fresh install on remote)
+- `--remote-clean-containers` - Stop/remove existing `ac-*` containers and project images during migration
+- `--remote-skip-env` - Leave the remote `.env` untouched (won't upload the local one)
+- `--remote-preserve-containers` - Do not stop/remove existing `ac-*` containers/images during migration
 - `--remote-storage-path /mnt/acore-storage` - Override STORAGE_PATH on the remote host (local-storage stays per .env)
 - `--remote-container-user 1001:1001` - Override CONTAINER_USER on the remote host (uid:gid)
+- Note: `--remote-clean-containers` and `--remote-preserve-containers` are mutually exclusive; do not combine them.

 ### Step 3: Deploy on Remote Host
 ```bash
@@ -148,8 +148,10 @@ Options:
   --tarball PATH         Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
   --storage PATH         Remote storage directory (default: <project-dir>/storage)
   --skip-storage         Do not sync the storage directory
+  --skip-env             Do not upload .env to the remote host
+  --preserve-containers  Skip stopping/removing existing remote containers and images
+  --clean-containers     Stop/remove existing ac-* containers and project images on remote
   --copy-source          Copy the full local project directory instead of syncing via git
-  --cleanup-runtime      Stop/remove existing ac-* containers and project images on remote
   --yes, -y              Auto-confirm prompts (for existing deployments)
   --help                 Show this help
 EOF_HELP
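A hypothetical direct run of the migration script with the new flags; the script's path is not shown in this diff, so `scripts/bash/migrate-remote.sh` is only a stand-in, and host/user are placeholders:

```bash
scripts/bash/migrate-remote.sh --host example.com --user acore \
  --skip-env --preserve-containers   # update in place; touch neither .env nor runtime
```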
@@ -165,7 +167,9 @@ REMOTE_STORAGE=""
 SKIP_STORAGE=0
 ASSUME_YES=0
 COPY_SOURCE=0
-CLEANUP_RUNTIME=0
+SKIP_ENV=0
+PRESERVE_CONTAINERS=0
+CLEAN_CONTAINERS=0

 while [[ $# -gt 0 ]]; do
   case "$1" in
@@ -178,8 +182,10 @@ while [[ $# -gt 0 ]]; do
     --tarball) TARBALL="$2"; shift 2;;
     --storage) REMOTE_STORAGE="$2"; shift 2;;
     --skip-storage) SKIP_STORAGE=1; shift;;
+    --skip-env) SKIP_ENV=1; shift;;
+    --preserve-containers) PRESERVE_CONTAINERS=1; shift;;
+    --clean-containers) CLEAN_CONTAINERS=1; shift;;
     --copy-source) COPY_SOURCE=1; shift;;
-    --cleanup-runtime) CLEANUP_RUNTIME=1; shift;;
     --yes|-y) ASSUME_YES=1; shift;;
     --help|-h) usage; exit 0;;
     *) echo "Unknown option: $1" >&2; usage; exit 1;;
@@ -192,6 +198,11 @@ if [[ -z "$HOST" || -z "$USER" ]]; then
   exit 1
 fi

+if [[ "$CLEAN_CONTAINERS" -eq 1 && "$PRESERVE_CONTAINERS" -eq 1 ]]; then
+  echo "Cannot combine --clean-containers with --preserve-containers." >&2
+  exit 1
+fi
+
 # Normalize env file path if provided and recompute defaults
 if [ -n "$ENV_FILE" ] && [ -f "$ENV_FILE" ]; then
   ENV_FILE="$(cd "$(dirname "$ENV_FILE")" && pwd)/$(basename "$ENV_FILE")"
@@ -302,14 +313,35 @@ validate_remote_environment(){
   local running_containers
   running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l")
   if [ "$running_containers" -gt 0 ]; then
-    echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
-    echo "   Migration will overwrite existing deployment"
-    if [ "$ASSUME_YES" != "1" ]; then
-      read -r -p "   Continue with migration? [y/N]: " reply
-      case "$reply" in
-        [Yy]*) echo "   Proceeding with migration..." ;;
-        *) echo "   Migration cancelled."; exit 1 ;;
-      esac
-    fi
+    if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
+      echo "⚠️ Found $running_containers running AzerothCore containers; --preserve-containers set, leaving them running."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue without stopping containers? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with migration (containers preserved)..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    elif [ "$CLEAN_CONTAINERS" -eq 1 ]; then
+      echo "⚠️ Found $running_containers running AzerothCore containers"
+      echo "   --clean-containers set: they will be stopped/removed during migration."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue with cleanup? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with cleanup..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    else
+      echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
+      echo "   Migration will NOT stop them automatically. Use --clean-containers to stop/remove."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue with migration? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with migration..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    fi
   fi
@@ -325,6 +357,25 @@ validate_remote_environment(){
   echo "✅ Remote environment validation complete"
 }

+confirm_remote_storage_overwrite(){
+  if [[ $SKIP_STORAGE -ne 0 ]]; then
+    return
+  fi
+  if [[ "$ASSUME_YES" = "1" ]]; then
+    return
+  fi
+  local has_content
+  has_content=$(run_ssh "if [ -d '$REMOTE_STORAGE' ]; then find '$REMOTE_STORAGE' -mindepth 1 -maxdepth 1 -print -quit; fi")
+  if [ -n "$has_content" ]; then
+    echo "⚠️ Remote storage at $REMOTE_STORAGE contains existing data."
+    read -r -p "   Continue and sync local storage over it? [y/N]: " reply
+    case "${reply,,}" in
+      y|yes) echo "   Proceeding with storage sync..." ;;
+      *) echo "   Skipping storage sync for this run."; SKIP_STORAGE=1 ;;
+    esac
+  fi
+}
+
 copy_source_tree(){
   echo " • Copying full local project directory..."
   ensure_remote_temp_dir
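The remote probe relies on `find ... -print -quit`, which stops after the first entry, so the captured output is non-empty exactly when the directory has content. The same test run locally (path hypothetical):

```bash
dir="./storage"   # hypothetical path
if [ -n "$(find "$dir" -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null)" ]; then
  echo "$dir contains data"
fi
```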
@@ -388,11 +439,14 @@ setup_remote_repository(){
 }

 cleanup_stale_docker_resources(){
-  if [ "$CLEANUP_RUNTIME" -ne 1 ]; then
+  if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
+    echo "⋅ Skipping remote container/image cleanup (--preserve-containers)"
+    return
+  fi
+  if [ "$CLEAN_CONTAINERS" -ne 1 ]; then
     echo "⋅ Skipping remote runtime cleanup (containers and images preserved)."
     return
   fi

   echo "⋅ Cleaning up stale Docker resources on remote..."

   # Stop and remove old containers
@@ -446,6 +500,8 @@ if [ ${#MISSING_IMAGES[@]} -gt 0 ]; then
   printf '   • %s\n' "${MISSING_IMAGES[@]}"
 fi

+confirm_remote_storage_overwrite
+
 if [[ $SKIP_STORAGE -eq 0 ]]; then
   if [[ -d storage ]]; then
     echo "⋅ Syncing storage to remote"
@@ -513,8 +569,34 @@ run_scp "$TARBALL" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-modules-images.tar"
 run_ssh "docker load < '$REMOTE_TEMP_DIR/acore-modules-images.tar' && rm '$REMOTE_TEMP_DIR/acore-modules-images.tar'"

 if [[ -f "$ENV_FILE" ]]; then
-  echo "⋅ Uploading .env"
-  run_scp "$ENV_FILE" "$USER@$HOST:$PROJECT_DIR/.env"
+  if [[ $SKIP_ENV -eq 1 ]]; then
+    echo "⋅ Skipping .env upload (--skip-env)"
+  else
+    remote_env_path="$PROJECT_DIR/.env"
+    upload_env=1
+
+    if run_ssh "test -f '$remote_env_path'"; then
+      if [ "$ASSUME_YES" = "1" ]; then
+        echo "⋅ Overwriting existing remote .env (auto-confirm)"
+      elif [ -t 0 ]; then
+        read -r -p "⚠️ Remote .env exists at $remote_env_path. Overwrite? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) ;;
+          *) upload_env=0 ;;
+        esac
+      else
+        echo "⚠️ Remote .env exists at $remote_env_path; skipping upload (no confirmation available)"
+        upload_env=0
+      fi
+    fi
+
+    if [[ $upload_env -eq 1 ]]; then
+      echo "⋅ Uploading .env"
+      run_scp "$ENV_FILE" "$USER@$HOST:$remote_env_path"
+    else
+      echo "⋅ Keeping existing remote .env"
+    fi
+  fi
 fi

 echo "⋅ Remote preparation completed"
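The overwrite prompt is gated on `[ -t 0 ]`, which is true only when stdin is an interactive terminal; piped or scheduled runs therefore fall through to the safe default instead of blocking on `read`. The pattern in isolation:

```bash
if [ -t 0 ]; then
  read -r -p "Overwrite? [y/N]: " reply
else
  reply="n"   # non-interactive: keep the existing file
fi
```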
@@ -9,6 +9,10 @@ from pathlib import Path

 PROJECT_DIR = Path(__file__).resolve().parents[2]
 ENV_FILE = PROJECT_DIR / ".env"
+DEFAULT_ACORE_STANDARD_REPO = "https://github.com/azerothcore/azerothcore-wotlk.git"
+DEFAULT_ACORE_PLAYERBOTS_REPO = "https://github.com/mod-playerbots/azerothcore-wotlk.git"
+DEFAULT_ACORE_STANDARD_BRANCH = "master"
+DEFAULT_ACORE_PLAYERBOTS_BRANCH = "Playerbot"

 def load_env():
     env = {}
@@ -150,6 +154,195 @@ def volume_info(name, fallback=None):
         pass
     return {"name": name, "exists": False, "mountpoint": "-"}

+def detect_source_variant(env):
+    variant = read_env(env, "STACK_SOURCE_VARIANT", "").strip().lower()
+    if variant in ("playerbots", "playerbot"):
+        return "playerbots"
+    if variant == "core":
+        return "core"
+    if read_env(env, "STACK_IMAGE_MODE", "").strip().lower() == "playerbots":
+        return "playerbots"
+    if read_env(env, "MODULE_PLAYERBOTS", "0") == "1" or read_env(env, "PLAYERBOT_ENABLED", "0") == "1":
+        return "playerbots"
+    return "core"
+
+def repo_config_for_variant(env, variant):
+    if variant == "playerbots":
+        repo = read_env(env, "ACORE_REPO_PLAYERBOTS", DEFAULT_ACORE_PLAYERBOTS_REPO)
+        branch = read_env(env, "ACORE_BRANCH_PLAYERBOTS", DEFAULT_ACORE_PLAYERBOTS_BRANCH)
+    else:
+        repo = read_env(env, "ACORE_REPO_STANDARD", DEFAULT_ACORE_STANDARD_REPO)
+        branch = read_env(env, "ACORE_BRANCH_STANDARD", DEFAULT_ACORE_STANDARD_BRANCH)
+    return repo, branch
+
+def image_labels(image):
+    try:
+        result = subprocess.run(
+            ["docker", "image", "inspect", "--format", "{{json .Config.Labels}}", image],
+            capture_output=True,
+            text=True,
+            check=True,
+            timeout=3,
+        )
+        labels = json.loads(result.stdout or "{}")
+        if isinstance(labels, dict):
+            return {k: (v or "").strip() for k, v in labels.items()}
+    except Exception:
+        pass
+    return {}
+
+def first_label(labels, keys):
+    for key in keys:
+        value = labels.get(key, "")
+        if value:
+            return value
+    return ""
+
+def short_commit(commit):
+    commit = commit.strip()
+    if re.fullmatch(r"[0-9a-fA-F]{12,}", commit):
+        return commit[:12]
+    return commit
+
+def git_info_from_path(path):
+    repo_path = Path(path)
+    if not (repo_path / ".git").exists():
+        return None
+
+    def run_git(args):
+        try:
+            result = subprocess.run(
+                ["git"] + args,
+                cwd=repo_path,
+                capture_output=True,
+                text=True,
+                check=True,
+            )
+            return result.stdout.strip()
+        except Exception:
+            return ""
+
+    commit = run_git(["rev-parse", "HEAD"])
+    if not commit:
+        return None
+
+    return {
+        "commit": commit,
+        "commit_short": run_git(["rev-parse", "--short", "HEAD"]) or short_commit(commit),
+        "date": run_git(["log", "-1", "--format=%cd", "--date=iso-strict"]),
+        "repo": run_git(["remote", "get-url", "origin"]),
+        "branch": run_git(["rev-parse", "--abbrev-ref", "HEAD"]),
+        "path": str(repo_path),
+    }
+
+def candidate_source_paths(env, variant):
+    paths = []
+    for key in ("MODULES_REBUILD_SOURCE_PATH", "SOURCE_DIR"):
+        value = read_env(env, key, "")
+        if value:
+            paths.append(value)
+
+    local_root = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
+    primary_dir = "azerothcore-playerbots" if variant == "playerbots" else "azerothcore"
+    fallback_dir = "azerothcore" if variant == "playerbots" else "azerothcore-playerbots"
+    paths.append(os.path.join(local_root, "source", primary_dir))
+    paths.append(os.path.join(local_root, "source", fallback_dir))
+
+    normalized = []
+    for p in paths:
+        expanded = expand_path(p, env)
+        try:
+            normalized.append(str(Path(expanded).expanduser().resolve()))
+        except Exception:
+            normalized.append(str(Path(expanded).expanduser()))
+    # Deduplicate while preserving order
+    seen = set()
+    unique_paths = []
+    for p in normalized:
+        if p not in seen:
+            seen.add(p)
+            unique_paths.append(p)
+    return unique_paths
+
+def build_info(service_data, env):
+    variant = detect_source_variant(env)
+    repo, branch = repo_config_for_variant(env, variant)
+    info = {
+        "variant": variant,
+        "repo": repo,
+        "branch": branch,
+        "image": "",
+        "commit": "",
+        "commit_date": "",
+        "commit_source": "",
+        "source_path": "",
+    }
+
+    image_candidates = []
+    for svc in service_data:
+        if svc.get("name") in ("ac-worldserver", "ac-authserver", "ac-db-import"):
+            image = svc.get("image") or ""
+            if image:
+                image_candidates.append(image)
+
+    for env_key in (
+        "AC_WORLDSERVER_IMAGE_PLAYERBOTS",
+        "AC_WORLDSERVER_IMAGE_MODULES",
+        "AC_WORLDSERVER_IMAGE",
+        "AC_AUTHSERVER_IMAGE_PLAYERBOTS",
+        "AC_AUTHSERVER_IMAGE_MODULES",
+        "AC_AUTHSERVER_IMAGE",
+    ):
+        value = read_env(env, env_key, "")
+        if value:
+            image_candidates.append(value)
+
+    seen = set()
+    deduped_images = []
+    for img in image_candidates:
+        if img not in seen:
+            seen.add(img)
+            deduped_images.append(img)
+
+    commit_label_keys = [
+        "build.source_commit",
+        "org.opencontainers.image.revision",
+        "org.opencontainers.image.version",
+    ]
+    date_label_keys = [
+        "build.source_date",
+        "org.opencontainers.image.created",
+        "build.timestamp",
+    ]
+
+    for image in deduped_images:
+        labels = image_labels(image)
+        if not info["image"]:
+            info["image"] = image
+        if not labels:
+            continue
+        commit = short_commit(first_label(labels, commit_label_keys))
+        date = first_label(labels, date_label_keys)
+        if commit or date:
+            info["commit"] = commit
+            info["commit_date"] = date
+            info["commit_source"] = "image-label"
+            info["image"] = image
+            return info
+
+    for path in candidate_source_paths(env, variant):
+        git_meta = git_info_from_path(path)
+        if git_meta:
+            info["commit"] = git_meta.get("commit_short") or short_commit(git_meta.get("commit", ""))
+            info["commit_date"] = git_meta.get("date", "")
+            info["commit_source"] = "source-tree"
+            info["source_path"] = git_meta.get("path", "")
+            info["repo"] = git_meta.get("repo") or info["repo"]
+            info["branch"] = git_meta.get("branch") or info["branch"]
+            return info
+
+    return info
+
 def expand_path(value, env):
     storage = read_env(env, "STORAGE_PATH", "./storage")
     local_storage = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
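build_info() prefers commit metadata baked into image labels and only falls back to a local git checkout. The same label lookup can be reproduced from a shell (image name hypothetical; jq assumed available):

```bash
docker image inspect --format '{{json .Config.Labels}}' acore/worldserver:modules-latest | jq .
```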
@@ -175,13 +368,61 @@ def mysql_query(env, database, query):
     except Exception:
         return 0

+def escape_like_prefix(prefix):
+    # Basic escape for single quotes in SQL literals
+    return prefix.replace("'", "''")
+
+def bot_prefixes(env):
+    prefixes = []
+    for key in ("PLAYERBOT_ACCOUNT_PREFIXES", "PLAYERBOT_ACCOUNT_PREFIX"):
+        raw = read_env(env, key, "")
+        for part in raw.replace(",", " ").split():
+            part = part.strip()
+            if part:
+                prefixes.append(part)
+    # Default fallback if nothing configured
+    if not prefixes:
+        prefixes.extend(["playerbot", "rndbot", "bot"])
+    return prefixes
+
 def user_stats(env):
     db_auth = read_env(env, "DB_AUTH_NAME", "acore_auth")
     db_characters = read_env(env, "DB_CHARACTERS_NAME", "acore_characters")
-    accounts = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account;")
-    online = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE online = 1;")
+    prefixes = bot_prefixes(env)
+    account_conditions = []
+    for prefix in prefixes:
+        prefix = escape_like_prefix(prefix)
+        upper_prefix = prefix.upper()
+        account_conditions.append(f"UPPER(username) NOT LIKE '{upper_prefix}%%'")
+    account_query = "SELECT COUNT(*) FROM account"
+    if account_conditions:
+        account_query += " WHERE " + " AND ".join(account_conditions)
+    accounts = mysql_query(env, db_auth, account_query + ";")
+
+    online_conditions = ["c.online = 1"]
+    for prefix in prefixes:
+        prefix = escape_like_prefix(prefix)
+        upper_prefix = prefix.upper()
+        online_conditions.append(f"UPPER(a.username) NOT LIKE '{upper_prefix}%%'")
+    online_query = (
+        f"SELECT COUNT(DISTINCT a.id) FROM `{db_characters}`.characters c "
+        f"JOIN `{db_auth}`.account a ON a.id = c.account "
+        f"WHERE {' AND '.join(online_conditions)};"
+    )
+    online = mysql_query(env, db_characters, online_query)
     active = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")
-    characters = mysql_query(env, db_characters, "SELECT COUNT(*) FROM characters;")
+    character_conditions = []
+    for prefix in prefixes:
+        prefix = escape_like_prefix(prefix)
+        upper_prefix = prefix.upper()
+        character_conditions.append(f"UPPER(a.username) NOT LIKE '{upper_prefix}%%'")
+    characters_query = (
+        f"SELECT COUNT(*) FROM `{db_characters}`.characters c "
+        f"JOIN `{db_auth}`.account a ON a.id = c.account"
+    )
+    if character_conditions:
+        characters_query += " WHERE " + " AND ".join(character_conditions)
+    characters = mysql_query(env, db_characters, characters_query + ";")
     return {
         "accounts": accounts,
         "online": online,
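With the default prefixes, the generated account query looks like the following (database name is the default read above; the `%%` emitted by the f-strings reaches MySQL as two adjacent wildcards, which LIKE treats the same as a single `%`):

```bash
mysql acore_auth -e "
  SELECT COUNT(*) FROM account
  WHERE UPPER(username) NOT LIKE 'PLAYERBOT%'
    AND UPPER(username) NOT LIKE 'RNDBOT%'
    AND UPPER(username) NOT LIKE 'BOT%';"
```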
@@ -274,6 +515,8 @@ def main():
         "mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"),
     }

+    build = build_info(service_data, env)
+
     data = {
         "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
         "project": project,
@@ -285,6 +528,7 @@ def main():
         "volumes": volumes,
         "users": user_stats(env),
         "stats": docker_stats(),
+        "build": build,
     }

     print(json.dumps(data))
scripts/bash/update-remote.sh (new executable file, 121 lines)
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Helper to push a fresh build to a remote host with minimal downtime and, by default, no data changes.
+
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+DEFAULT_PROJECT_DIR="~$(printf '/%s' "$(basename "$ROOT_DIR")")"
+
+HOST=""
+USER=""
+PORT=22
+IDENTITY=""
+PROJECT_DIR="$DEFAULT_PROJECT_DIR"
+PUSH_ENV=0
+PUSH_STORAGE=0
+CLEAN_CONTAINERS=0
+AUTO_DEPLOY=1
+ASSUME_YES=0
+
+usage(){
+cat <<'EOF'
+Usage: scripts/bash/update-remote.sh --host HOST --user USER [options]
+
+Options:
+  --host HOST           Remote hostname or IP (required)
+  --user USER           SSH username on remote host (required)
+  --port PORT           SSH port (default: 22)
+  --identity PATH       SSH private key
+  --project-dir DIR     Remote project directory (default: ~/<repo-name>)
+  --remote-path DIR     Alias for --project-dir (backward compat)
+  --push-env            Upload local .env to remote (default: skip)
+  --push-storage        Sync ./storage to remote (default: skip)
+  --clean-containers    Stop/remove remote ac-* containers & project images during migration (default: preserve)
+  --no-auto-deploy      Do not trigger remote deploy after migration
+  --yes                 Auto-confirm prompts
+  --help                Show this help
+EOF
+}
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --host) HOST="$2"; shift 2;;
+    --user) USER="$2"; shift 2;;
+    --port) PORT="$2"; shift 2;;
+    --identity) IDENTITY="$2"; shift 2;;
+    --project-dir) PROJECT_DIR="$2"; shift 2;;
+    --remote-path) PROJECT_DIR="$2"; shift 2;;
+    --push-env) PUSH_ENV=1; shift;;
+    --push-storage) PUSH_STORAGE=1; shift;;
+    --clean-containers) CLEAN_CONTAINERS=1; shift;;
+    --no-auto-deploy) AUTO_DEPLOY=0; shift;;
+    --yes) ASSUME_YES=1; shift;;
+    --help|-h) usage; exit 0;;
+    *) echo "Unknown option: $1" >&2; usage; exit 1;;
+  esac
+done
+
+if [[ -z "$HOST" || -z "$USER" ]]; then
+  echo "--host and --user are required" >&2
+  usage
+  exit 1
+fi
+
+deploy_args=(--remote --remote-host "$HOST" --remote-user "$USER")
+
+if [ -n "$PROJECT_DIR" ]; then
+  deploy_args+=(--remote-project-dir "$PROJECT_DIR")
+fi
+if [ -n "$IDENTITY" ]; then
+  deploy_args+=(--remote-identity "$IDENTITY")
+fi
+if [ "$PORT" != "22" ]; then
+  deploy_args+=(--remote-port "$PORT")
+fi
+
+if [ "$PUSH_STORAGE" -ne 1 ]; then
+  deploy_args+=(--remote-skip-storage)
+fi
+if [ "$PUSH_ENV" -ne 1 ]; then
+  deploy_args+=(--remote-skip-env)
+fi
+
+if [ "$CLEAN_CONTAINERS" -eq 1 ]; then
+  deploy_args+=(--remote-clean-containers)
+else
+  deploy_args+=(--remote-preserve-containers)
+fi
+
+if [ "$AUTO_DEPLOY" -eq 1 ]; then
+  deploy_args+=(--remote-auto-deploy)
+fi
+
+deploy_args+=(--no-watch)
+
+if [ "$ASSUME_YES" -eq 1 ]; then
+  deploy_args+=(--yes)
+fi
+
+echo "Remote update plan:"
+echo "  Host/User    : ${USER}@${HOST}:${PORT}"
+echo "  Project Dir  : ${PROJECT_DIR}"
+echo "  Push .env    : $([ "$PUSH_ENV" -eq 1 ] && echo yes || echo no)"
+echo "  Push storage : $([ "$PUSH_STORAGE" -eq 1 ] && echo yes || echo no)"
+echo "  Cleanup mode : $([ "$CLEAN_CONTAINERS" -eq 1 ] && echo 'clean containers' || echo 'preserve containers')"
+echo "  Auto deploy  : $([ "$AUTO_DEPLOY" -eq 1 ] && echo yes || echo no)"
+if [ "$AUTO_DEPLOY" -eq 1 ] && [ "$PUSH_ENV" -ne 1 ]; then
+  echo "  ⚠️ Auto-deploy is enabled but push-env is off; remote deploy will fail without a valid .env."
+fi
+
+if [ "$ASSUME_YES" -ne 1 ]; then
+  read -r -p "Proceed with remote update? [y/N]: " reply
+  reply="${reply:-n}"
+  case "${reply,,}" in
+    y|yes) ;;
+    *) echo "Aborted."; exit 1 ;;
+  esac
+  deploy_args+=(--yes)
+fi
+
+cd "$ROOT_DIR"
+./deploy.sh "${deploy_args[@]}"
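A typical invocation of the new helper, with host and user as placeholders:

```bash
# Push freshly built images to the remote box, leave its containers, .env and
# storage alone, and let the script trigger the remote deploy:
scripts/bash/update-remote.sh --host example.com --user acore --yes
```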
@@ -4,6 +4,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"net"
 	"os"
 	"os/exec"
+	"strings"
 	"time"
@@ -61,6 +63,17 @@ type Module struct {
 	Type string `json:"type"`
 }

+type BuildInfo struct {
+	Variant      string `json:"variant"`
+	Repo         string `json:"repo"`
+	Branch       string `json:"branch"`
+	Image        string `json:"image"`
+	Commit       string `json:"commit"`
+	CommitDate   string `json:"commit_date"`
+	CommitSource string `json:"commit_source"`
+	SourcePath   string `json:"source_path"`
+}
+
 type Snapshot struct {
 	Timestamp string `json:"timestamp"`
 	Project   string `json:"project"`
@@ -72,6 +85,7 @@ type Snapshot struct {
 	Volumes map[string]VolumeInfo     `json:"volumes"`
 	Users   UserStats                 `json:"users"`
 	Stats   map[string]ContainerStats `json:"stats"`
+	Build   BuildInfo                 `json:"build"`
 }

 var persistentServiceOrder = []string{
@@ -84,6 +98,81 @@ var persistentServiceOrder = []string{
 	"ac-backup",
 }

+func humanDuration(d time.Duration) string {
+	if d < time.Minute {
+		return "<1m"
+	}
+	days := d / (24 * time.Hour)
+	d -= days * 24 * time.Hour
+	hours := d / time.Hour
+	d -= hours * time.Hour
+	mins := d / time.Minute
+
+	switch {
+	case days > 0:
+		return fmt.Sprintf("%dd %dh", days, hours)
+	case hours > 0:
+		return fmt.Sprintf("%dh %dm", hours, mins)
+	default:
+		return fmt.Sprintf("%dm", mins)
+	}
+}
+
+func formatUptime(startedAt string) string {
+	if startedAt == "" {
+		return "-"
+	}
+	parsed, err := time.Parse(time.RFC3339Nano, startedAt)
+	if err != nil {
+		parsed, err = time.Parse(time.RFC3339, startedAt)
+		if err != nil {
+			return "-"
+		}
+	}
+	if parsed.IsZero() {
+		return "-"
+	}
+	uptime := time.Since(parsed)
+	if uptime < 0 {
+		uptime = 0
+	}
+	return humanDuration(uptime)
+}
+
+func primaryIPv4() string {
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		return ""
+	}
+	for _, iface := range ifaces {
+		if iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 {
+			continue
+		}
+		addrs, err := iface.Addrs()
+		if err != nil {
+			continue
+		}
+		for _, addr := range addrs {
+			var ip net.IP
+			switch v := addr.(type) {
+			case *net.IPNet:
+				ip = v.IP
+			case *net.IPAddr:
+				ip = v.IP
+			}
+			if ip == nil || ip.IsLoopback() {
+				continue
+			}
+			ip = ip.To4()
+			if ip == nil {
+				continue
+			}
+			return ip.String()
+		}
+	}
+	return ""
+}
+
 func runSnapshot() (*Snapshot, error) {
 	cmd := exec.Command("./scripts/bash/statusjson.sh")
 	output, err := cmd.Output()
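formatUptime() parses the RFC 3339 timestamps Docker reports for container start times; the snapshot script presumably collects them with something like the following (container name hypothetical):

```bash
docker inspect --format '{{.State.StartedAt}}' ac-worldserver
# → 2026-01-12T18:04:31.123456789Z
```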
@@ -126,8 +215,8 @@ func buildServicesTable(s *Snapshot) *TableNoCol {
 	runningServices, setupServices := partitionServices(s.Services)

 	table := NewTableNoCol()
-	rows := [][]string{{"Group", "Service", "Status", "Health", "CPU%", "Memory"}}
-	appendRows := func(groupLabel string, services []Service) {
+	rows := [][]string{{"Service", "Status", "Health", "Uptime", "CPU%", "Memory"}}
+	appendRows := func(services []Service) {
 		for _, svc := range services {
 			cpu := "-"
 			mem := "-"
@@ -139,12 +228,12 @@ func buildServicesTable(s *Snapshot) *TableNoCol {
 			if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
 				health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
 			}
-			rows = append(rows, []string{groupLabel, svc.Label, svc.Status, health, cpu, mem})
+			rows = append(rows, []string{svc.Label, svc.Status, health, formatUptime(svc.StartedAt), cpu, mem})
 		}
 	}

-	appendRows("Persistent", runningServices)
-	appendRows("Setup", setupServices)
+	appendRows(runningServices)
+	appendRows(setupServices)

 	table.Rows = rows
 	table.RowSeparator = false
@@ -223,9 +312,11 @@ func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
 	}
 	par := widgets.NewParagraph()
 	par.Title = "Storage"
-	par.Text = b.String()
+	par.Text = strings.TrimRight(b.String(), "\n")
 	par.Border = true
 	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
+	par.PaddingLeft = 0
+	par.PaddingRight = 0
 	return par
 }
@@ -247,13 +338,75 @@ func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
 	}
 	par := widgets.NewParagraph()
 	par.Title = "Volumes"
-	par.Text = b.String()
+	par.Text = strings.TrimRight(b.String(), "\n")
 	par.Border = true
 	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
+	par.PaddingLeft = 0
+	par.PaddingRight = 0
 	return par
 }

+func simplifyRepo(repo string) string {
+	repo = strings.TrimSpace(repo)
+	repo = strings.TrimSuffix(repo, ".git")
+	repo = strings.TrimPrefix(repo, "https://")
+	repo = strings.TrimPrefix(repo, "http://")
+	repo = strings.TrimPrefix(repo, "git@")
+	repo = strings.TrimPrefix(repo, "github.com:")
+	repo = strings.TrimPrefix(repo, "gitlab.com:")
+	repo = strings.TrimPrefix(repo, "github.com/")
+	repo = strings.TrimPrefix(repo, "gitlab.com/")
+	return repo
+}
+
+func buildInfoParagraph(s *Snapshot) *widgets.Paragraph {
+	build := s.Build
+	var lines []string
+
+	if build.Branch != "" {
+		lines = append(lines, fmt.Sprintf("Branch: %s", build.Branch))
+	}
+
+	if repo := simplifyRepo(build.Repo); repo != "" {
+		lines = append(lines, fmt.Sprintf("Repo: %s", repo))
+	}
+
+	commitLine := "Git: unknown"
+	if build.Commit != "" {
+		commitLine = fmt.Sprintf("Git: %s", build.Commit)
+		switch build.CommitSource {
+		case "image-label":
+			commitLine += " [image]"
+		case "source-tree":
+			commitLine += " [source]"
+		}
+	}
+	lines = append(lines, commitLine)
+
+	if build.Image != "" {
+		// Skip image line to keep header compact
+	}
+
+	lines = append(lines, fmt.Sprintf("Updated: %s", s.Timestamp))
+
+	par := widgets.NewParagraph()
+	par.Title = "Build"
+	par.Text = strings.Join(lines, "\n")
+	par.Border = true
+	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
+	return par
+}
+
 func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
+	hostname, err := os.Hostname()
+	if err != nil || hostname == "" {
+		hostname = "unknown"
+	}
+	ip := primaryIPv4()
+	if ip == "" {
+		ip = "unknown"
+	}
+
 	servicesTable := buildServicesTable(s)
 	portsTable := buildPortsTable(s)
 	for i := 1; i < len(portsTable.Rows); i++ {
@@ -287,43 +440,43 @@ func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
 	moduleInfoPar.Border = true
 	moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
 	storagePar := buildStorageParagraph(s)
 	storagePar.Border = true
 	storagePar.BorderStyle = ui.NewStyle(ui.ColorYellow)
+	storagePar.PaddingLeft = 1
+	storagePar.PaddingRight = 1
 	volumesPar := buildVolumesParagraph(s)

 	header := widgets.NewParagraph()
-	header.Text = fmt.Sprintf("Project: %s\nNetwork: %s\nUpdated: %s", s.Project, s.Network, s.Timestamp)
+	header.Text = fmt.Sprintf("Host: %s\nIP: %s\nProject: %s\nNetwork: %s", hostname, ip, s.Project, s.Network)
 	header.Border = true

+	buildPar := buildInfoParagraph(s)
+
 	usersPar := widgets.NewParagraph()
-	usersPar.Text = fmt.Sprintf("USERS:\n Accounts: %d\n Online: %d\n Characters: %d\n Active 7d: %d", s.Users.Accounts, s.Users.Online, s.Users.Characters, s.Users.Active7d)
+	usersPar.Title = "Users"
+	usersPar.Text = fmt.Sprintf(" Online: %d\n Accounts: %d\n Characters: %d\n Active 7d: %d", s.Users.Online, s.Users.Accounts, s.Users.Characters, s.Users.Active7d)
 	usersPar.Border = true

 	grid := ui.NewGrid()
 	termWidth, termHeight := ui.TerminalDimensions()
 	grid.SetRect(0, 0, termWidth, termHeight)
 	grid.Set(
-		ui.NewRow(0.15,
-			ui.NewCol(0.6, header),
-			ui.NewCol(0.4, usersPar),
+		ui.NewRow(0.18,
+			ui.NewCol(0.34, header),
+			ui.NewCol(0.33, buildPar),
+			ui.NewCol(0.33, usersPar),
 		),
-		ui.NewRow(0.46,
+		ui.NewRow(0.43,
 			ui.NewCol(0.6, servicesTable),
 			ui.NewCol(0.4, portsTable),
 		),
 		ui.NewRow(0.39,
 			ui.NewCol(0.25, modulesList),
 			ui.NewCol(0.15,
-				ui.NewRow(0.30, helpPar),
-				ui.NewRow(0.70, moduleInfoPar),
+				ui.NewRow(0.32, helpPar),
+				ui.NewRow(0.68, moduleInfoPar),
 			),
 			ui.NewCol(0.6,
-				ui.NewRow(0.55,
+				ui.NewRow(0.513,
 					ui.NewCol(1.0, storagePar),
 				),
-				ui.NewRow(0.45,
+				ui.NewRow(0.487,
 					ui.NewCol(1.0, volumesPar),
 				),
 			),
 		),
setup.sh (80 changed lines)
@@ -578,8 +578,6 @@ main(){
   local CLI_PLAYERBOT_ENABLED=""
   local CLI_PLAYERBOT_MIN=""
   local CLI_PLAYERBOT_MAX=""
-  local CLI_AUTO_REBUILD=0
-  local CLI_MODULES_SOURCE=""
   local FORCE_OVERWRITE=0
   local CLI_ENABLE_MODULES_RAW=()
@@ -622,9 +620,6 @@ Options:
   --playerbot-enabled 0|1        Override PLAYERBOT_ENABLED flag
   --playerbot-min-bots N         Override PLAYERBOT_MIN_BOTS value
   --playerbot-max-bots N         Override PLAYERBOT_MAX_BOTS value
-  --auto-rebuild-on-deploy       Enable automatic rebuild during deploys
-  --modules-rebuild-source PATH  Source checkout used for module rebuilds
   --deploy-after                 Run ./deploy.sh automatically after setup completes
   --force                        Overwrite existing .env without prompting
 EOF
 exit 0
@@ -779,25 +774,10 @@ EOF
     --playerbot-max-bots=*)
       CLI_PLAYERBOT_MAX="${1#*=}"; shift
       ;;
-    --auto-rebuild-on-deploy)
-      CLI_AUTO_REBUILD=1
-      shift
-      ;;
-    --modules-rebuild-source)
-      [[ $# -ge 2 ]] || { say ERROR "--modules-rebuild-source requires a value"; exit 1; }
-      CLI_MODULES_SOURCE="$2"; shift 2
-      ;;
-    --modules-rebuild-source=*)
-      CLI_MODULES_SOURCE="${1#*=}"; shift
-      ;;
     --force)
       FORCE_OVERWRITE=1
       shift
       ;;
     --deploy-after)
       CLI_DEPLOY_AFTER=1
       shift
       ;;
     *)
       echo "Unknown argument: $1" >&2
       echo "Use --help for usage" >&2
@@ -1210,8 +1190,6 @@ fi
   local PLAYERBOT_MIN_BOTS="${DEFAULT_PLAYERBOT_MIN:-40}"
   local PLAYERBOT_MAX_BOTS="${DEFAULT_PLAYERBOT_MAX:-40}"

-  local AUTO_REBUILD_ON_DEPLOY=$CLI_AUTO_REBUILD
-  local MODULES_REBUILD_SOURCE_PATH_VALUE="${CLI_MODULES_SOURCE}"
   local NEEDS_CXX_REBUILD=0

   local module_mode_label=""
@@ -1473,7 +1451,6 @@ fi
   printf "  %-18s %s\n" "Storage Path:" "$STORAGE_PATH"
   printf "  %-18s %s\n" "Container User:" "$CONTAINER_USER"
   printf "  %-18s Daily %s:00 UTC, keep %sd/%sh\n" "Backups:" "$BACKUP_DAILY_TIME" "$BACKUP_RETENTION_DAYS" "$BACKUP_RETENTION_HOURS"
-  printf "  %-18s %s\n" "Source checkout:" "$default_source_rel"
   printf "  %-18s %s\n" "Modules images:" "$AC_AUTHSERVER_IMAGE_MODULES_VALUE | $AC_WORLDSERVER_IMAGE_MODULES_VALUE"

   printf "  %-18s %s\n" "Modules preset:" "$SUMMARY_MODE_TEXT"
@@ -1520,11 +1497,6 @@ fi
       echo ""
       say WARNING "These modules require compiling AzerothCore from source."
       say INFO "Run './build.sh' to compile your custom modules before deployment."
-      if [ "$CLI_AUTO_REBUILD" = "1" ]; then
-        AUTO_REBUILD_ON_DEPLOY=1
-      else
-        AUTO_REBUILD_ON_DEPLOY=$(ask_yn "Enable automatic rebuild during future deploys?" "$( [ "$AUTO_REBUILD_ON_DEPLOY" = "1" ] && echo y || echo n )")
-      fi

       # Set build sentinel to indicate rebuild is needed
       local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild"
@@ -1554,23 +1526,8 @@ fi
     default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots"
   fi

-  if [ -n "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
-    local storage_abs="$STORAGE_PATH"
-    if [[ "$storage_abs" != /* ]]; then
-      storage_abs="$(pwd)/${storage_abs#./}"
-    fi
-    local candidate_path="$MODULES_REBUILD_SOURCE_PATH_VALUE"
-    if [[ "$candidate_path" != /* ]]; then
-      candidate_path="$(pwd)/${candidate_path#./}"
-    fi
-    if [[ "$candidate_path" == "$storage_abs"* ]]; then
-      say WARNING "MODULES_REBUILD_SOURCE_PATH is inside shared storage (${candidate_path}). Using local workspace ${default_source_rel} instead."
-      MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
-    fi
-  fi
-
   # Module staging will be handled directly in the rebuild section below

+  # Persist rebuild source path for downstream build scripts
+  MODULES_REBUILD_SOURCE_PATH="$default_source_rel"
+
   # Confirm write
@@ -1586,10 +1543,6 @@ fi
     [ "$cont" = "1" ] || { say ERROR "Aborted"; exit 1; }
   fi

-  if [ -z "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
-    MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
-  fi
-
   DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME}
   HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH}
   MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY}
@@ -1756,11 +1709,12 @@ BACKUP_HEALTHCHECK_GRACE_SECONDS=$BACKUP_HEALTHCHECK_GRACE_SECONDS

 EOF
 echo
 echo "# Modules"
 for module_key in "${MODULE_KEYS[@]}"; do
   printf "%s=%s\n" "$module_key" "${!module_key:-0}"
 done
 cat <<EOF
+MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH

 # Client data
 CLIENT_DATA_VERSION=${CLIENT_DATA_VERSION:-$DEFAULT_CLIENT_DATA_VERSION}
@@ -1779,12 +1733,8 @@ MODULES_CPP_LIST=$MODULES_CPP_LIST
 MODULES_REQUIRES_CUSTOM_BUILD=$MODULES_REQUIRES_CUSTOM_BUILD
 MODULES_REQUIRES_PLAYERBOT_SOURCE=$MODULES_REQUIRES_PLAYERBOT_SOURCE

-# Rebuild automation
-AUTO_REBUILD_ON_DEPLOY=$AUTO_REBUILD_ON_DEPLOY
-MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH_VALUE
-
 # Eluna
 AC_ELUNA_ENABLED=$DEFAULT_ELUNA_ENABLED
 AC_ELUNA_TRACE_BACK=$DEFAULT_ELUNA_TRACE_BACK
 AC_ELUNA_AUTO_RELOAD=$DEFAULT_ELUNA_AUTO_RELOAD
 AC_ELUNA_BYTECODE_CACHE=$DEFAULT_ELUNA_BYTECODE_CACHE
@@ -1853,16 +1803,6 @@ EOF
     printf '   🚀 Quick deploy: ./deploy.sh\n'
   fi

-  if [ "${CLI_DEPLOY_AFTER:-0}" = "1" ]; then
-    local deploy_args=(bash "./deploy.sh" --yes)
-    if [ "$MODULE_PLAYERBOTS" != "1" ]; then
-      deploy_args+=(--profile standard)
-    fi
-    say INFO "Launching deploy after setup (--deploy-after enabled)"
-    if ! "${deploy_args[@]}"; then
-      say WARNING "Automatic deploy failed; please run ./deploy.sh manually."
-    fi
-  fi
 }

 main "$@"