mirror of
https://github.com/uprightbass360/AzerothCore-RealmMaster.git
synced 2026-01-12 16:48:36 +00:00
cleanup
deploy.sh (65 lines changed)
@@ -34,11 +34,12 @@ REMOTE_SKIP_STORAGE=0
 REMOTE_COPY_SOURCE=0
 REMOTE_ARGS_PROVIDED=0
 REMOTE_AUTO_DEPLOY=0
-REMOTE_CLEAN_RUNTIME=0
+REMOTE_CLEAN_CONTAINERS=0
 REMOTE_STORAGE_OVERRIDE=""
 REMOTE_CONTAINER_USER_OVERRIDE=""
 REMOTE_ENV_FILE=""
 REMOTE_SKIP_ENV=0
+REMOTE_PRESERVE_CONTAINERS=0

 MODULE_HELPER="$ROOT_DIR/scripts/python/modules.py"
 MODULE_STATE_INITIALIZED=0
@@ -174,8 +175,18 @@ collect_remote_details(){
     read -rp "Stop/remove remote containers & project images during migration? [y/N]: " cleanup_answer
     cleanup_answer="${cleanup_answer:-n}"
     case "${cleanup_answer,,}" in
-      y|yes) REMOTE_CLEAN_RUNTIME=1 ;;
-      *) REMOTE_CLEAN_RUNTIME=0 ;;
+      y|yes) REMOTE_CLEAN_CONTAINERS=1 ;;
+      *)
+        REMOTE_CLEAN_CONTAINERS=0
+        # Offer explicit preservation when declining cleanup
+        local preserve_answer
+        read -rp "Preserve remote containers/images (skip cleanup)? [Y/n]: " preserve_answer
+        preserve_answer="${preserve_answer:-Y}"
+        case "${preserve_answer,,}" in
+          n|no) REMOTE_PRESERVE_CONTAINERS=0 ;;
+          *) REMOTE_PRESERVE_CONTAINERS=1 ;;
+        esac
+        ;;
     esac
   fi

@@ -251,9 +262,11 @@ Options:
   --remote-skip-storage                 Skip syncing the storage directory during migration
   --remote-copy-source                  Copy the local project directory to remote instead of relying on git
   --remote-auto-deploy                  Run './deploy.sh --yes --no-watch' on the remote host after migration
-  --remote-clean-runtime                Stop/remove remote containers & project images during migration
+  --remote-clean-containers             Stop/remove remote containers & project images during migration
   --remote-storage-path PATH            Override STORAGE_PATH/STORAGE_PATH_LOCAL in the remote .env
   --remote-container-user USER[:GROUP]  Override CONTAINER_USER in the remote .env
   --remote-skip-env                     Do not upload .env to the remote host
+  --remote-preserve-containers          Skip stopping/removing remote containers during migration
   --skip-config                         Skip applying server configuration preset
   -h, --help                            Show this help

@@ -282,15 +295,22 @@ while [[ $# -gt 0 ]]; do
     --remote-skip-storage) REMOTE_SKIP_STORAGE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-copy-source) REMOTE_COPY_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-auto-deploy) REMOTE_AUTO_DEPLOY=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
-    --remote-clean-runtime) REMOTE_CLEAN_RUNTIME=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
+    --remote-clean-containers) REMOTE_CLEAN_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-storage-path) REMOTE_STORAGE_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
     --remote-container-user) REMOTE_CONTAINER_USER_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
     --remote-skip-env) REMOTE_SKIP_ENV=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
+    --remote-preserve-containers) REMOTE_PRESERVE_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --skip-config) SKIP_CONFIG=1; shift;;
     -h|--help) usage; exit 0;;
     *) err "Unknown option: $1"; usage; exit 1;;
   esac
 done

+if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ] && [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
+  err "Cannot combine --remote-clean-containers with --remote-preserve-containers."
+  exit 1
+fi
+
 require_cmd(){
   command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; }
 }
@@ -552,6 +572,27 @@ prompt_build_if_needed(){
   local build_reasons_output
   build_reasons_output=$(detect_build_needed)

+  if [ -z "$build_reasons_output" ]; then
+    # Belt-and-suspenders: if C++ modules are enabled but module images missing, warn
+    ensure_module_state
+    if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
+      local authserver_modules_image
+      local worldserver_modules_image
+      authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(resolve_project_image "authserver-modules-latest")")"
+      worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")"
+      local missing_images=()
+      if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then
+        missing_images+=("$authserver_modules_image")
+      fi
+      if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then
+        missing_images+=("$worldserver_modules_image")
+      fi
+      if [ ${#missing_images[@]} -gt 0 ]; then
+        build_reasons_output=$(printf "C++ modules enabled but module images missing: %s\n" "${missing_images[*]}")
+      fi
+    fi
+  fi
+
   if [ -z "$build_reasons_output" ]; then
     return 0 # No build needed
   fi
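The image-presence probe above can be reproduced by hand; the image names below are illustrative only (the script derives the real ones from `AC_AUTHSERVER_IMAGE_MODULES` / `AC_WORLDSERVER_IMAGE_MODULES` in `.env`):

```bash
# Hypothetical image names for illustration; substitute your project's tags.
for img in myproject/authserver-modules-latest myproject/worldserver-modules-latest; do
  if ! docker image inspect "$img" >/dev/null 2>&1; then
    echo "missing: $img"   # this is what triggers the build prompt in deploy.sh
  fi
done
```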
@@ -693,14 +734,22 @@ run_remote_migration(){
     args+=(--copy-source)
   fi

-  if [ "$REMOTE_CLEAN_RUNTIME" -eq 1 ]; then
-    args+=(--cleanup-runtime)
+  if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ]; then
+    args+=(--clean-containers)
   fi

   if [ "$ASSUME_YES" -eq 1 ]; then
     args+=(--yes)
   fi

   if [ "$REMOTE_SKIP_ENV" -eq 1 ]; then
     args+=(--skip-env)
   fi

+  if [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
+    args+=(--preserve-containers)
+  fi
+
+  if [ -n "$REMOTE_ENV_FILE" ]; then
+    args+=(--env-file "$REMOTE_ENV_FILE")
+  fi
+
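Taken together, the deploy.sh changes rename `--remote-clean-runtime` to `--remote-clean-containers` and add an explicit opposite. A sketch of the two invocations (host and user values are placeholders):

```bash
# Migrate and scrub the remote runtime (the old --remote-clean-runtime behaviour):
./deploy.sh --remote --remote-host example.host --remote-user acore --remote-clean-containers

# Migrate while leaving existing ac-* containers/images untouched:
./deploy.sh --remote --remote-host example.host --remote-user acore --remote-preserve-containers

# Passing both now fails fast with "Cannot combine ..." and exit status 1.
```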
@@ -170,8 +170,12 @@ Optional flags:
 - `--remote-port 2222` - Custom SSH port
 - `--remote-identity ~/.ssh/custom_key` - Specific SSH key
 - `--remote-skip-storage` - Don't sync storage directory (fresh install on remote)
+- `--remote-clean-containers` - Stop/remove existing `ac-*` containers and project images during migration
 - `--remote-skip-env` - Leave the remote `.env` untouched (won't upload local one)
+- `--remote-preserve-containers` - Do not stop/remove existing `ac-*` containers/images during migration
 - `--remote-storage-path /mnt/acore-storage` - Override STORAGE_PATH on the remote host (local-storage stays per .env)
 - `--remote-container-user 1001:1001` - Override CONTAINER_USER on the remote host (uid:gid)
+- Note: do not combine `--remote-clean-containers` with `--remote-preserve-containers`; the flags are mutually exclusive.

 ### Step 3: Deploy on Remote Host
 ```bash
@@ -148,8 +148,10 @@ Options:
   --tarball PATH          Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
   --storage PATH          Remote storage directory (default: <project-dir>/storage)
   --skip-storage          Do not sync the storage directory
+  --skip-env              Do not upload .env to the remote host
+  --preserve-containers   Skip stopping/removing existing remote containers and images
+  --clean-containers      Stop/remove existing ac-* containers and project images on remote
   --copy-source           Copy the full local project directory instead of syncing via git
-  --cleanup-runtime       Stop/remove existing ac-* containers and project images on remote
   --yes, -y               Auto-confirm prompts (for existing deployments)
   --help                  Show this help
EOF_HELP
@@ -165,7 +167,9 @@ REMOTE_STORAGE=""
 SKIP_STORAGE=0
 ASSUME_YES=0
 COPY_SOURCE=0
-CLEANUP_RUNTIME=0
+SKIP_ENV=0
+PRESERVE_CONTAINERS=0
+CLEAN_CONTAINERS=0

 while [[ $# -gt 0 ]]; do
   case "$1" in
@@ -178,8 +182,10 @@ while [[ $# -gt 0 ]]; do
     --tarball) TARBALL="$2"; shift 2;;
     --storage) REMOTE_STORAGE="$2"; shift 2;;
     --skip-storage) SKIP_STORAGE=1; shift;;
+    --skip-env) SKIP_ENV=1; shift;;
+    --preserve-containers) PRESERVE_CONTAINERS=1; shift;;
+    --clean-containers) CLEAN_CONTAINERS=1; shift;;
     --copy-source) COPY_SOURCE=1; shift;;
-    --cleanup-runtime) CLEANUP_RUNTIME=1; shift;;
     --yes|-y) ASSUME_YES=1; shift;;
     --help|-h) usage; exit 0;;
     *) echo "Unknown option: $1" >&2; usage; exit 1;;
@@ -192,6 +198,11 @@ if [[ -z "$HOST" || -z "$USER" ]]; then
   exit 1
 fi

+if [[ "$CLEAN_CONTAINERS" -eq 1 && "$PRESERVE_CONTAINERS" -eq 1 ]]; then
+  echo "Cannot combine --clean-containers with --preserve-containers." >&2
+  exit 1
+fi
+
 # Normalize env file path if provided and recompute defaults
 if [ -n "$ENV_FILE" ] && [ -f "$ENV_FILE" ]; then
   ENV_FILE="$(cd "$(dirname "$ENV_FILE")" && pwd)/$(basename "$ENV_FILE")"
@@ -302,14 +313,35 @@ validate_remote_environment(){
   local running_containers
   running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l")
   if [ "$running_containers" -gt 0 ]; then
-    echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
-    echo "   Migration will overwrite existing deployment"
-    if [ "$ASSUME_YES" != "1" ]; then
-      read -r -p "   Continue with migration? [y/N]: " reply
-      case "$reply" in
-        [Yy]*) echo "   Proceeding with migration..." ;;
-        *) echo "   Migration cancelled."; exit 1 ;;
-      esac
-    fi
+    if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
+      echo "⚠️ Found $running_containers running AzerothCore containers; --preserve-containers set, leaving them running."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue without stopping containers? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with migration (containers preserved)..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    elif [ "$CLEAN_CONTAINERS" -eq 1 ]; then
+      echo "⚠️ Found $running_containers running AzerothCore containers"
+      echo "   --clean-containers set: they will be stopped/removed during migration."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue with cleanup? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with cleanup..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    else
+      echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
+      echo "   Migration will NOT stop them automatically. Use --clean-containers to stop/remove."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue with migration? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with migration..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    fi
   fi

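The rewrite above amounts to a three-way decision on running `ac-*` containers; a minimal sketch of the precedence (preserve wins, then clean, then warn-only), using the flag variables parsed earlier:

```bash
decide_container_action(){
  # Mirrors the branch order in the hunk above.
  if [ "${PRESERVE_CONTAINERS:-0}" -eq 1 ]; then
    echo "preserve"    # leave containers running, just confirm
  elif [ "${CLEAN_CONTAINERS:-0}" -eq 1 ]; then
    echo "clean"       # stop/remove during migration, after confirmation
  else
    echo "warn-only"   # default: warn and continue without stopping anything
  fi
}
```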
@@ -325,6 +357,25 @@ validate_remote_environment(){
   echo "✅ Remote environment validation complete"
 }

+confirm_remote_storage_overwrite(){
+  if [[ $SKIP_STORAGE -ne 0 ]]; then
+    return
+  fi
+  if [[ "$ASSUME_YES" = "1" ]]; then
+    return
+  fi
+  local has_content
+  has_content=$(run_ssh "if [ -d '$REMOTE_STORAGE' ]; then find '$REMOTE_STORAGE' -mindepth 1 -maxdepth 1 -print -quit; fi")
+  if [ -n "$has_content" ]; then
+    echo "⚠️ Remote storage at $REMOTE_STORAGE contains existing data."
+    read -r -p "   Continue and sync local storage over it? [y/N]: " reply
+    case "${reply,,}" in
+      y|yes) echo "   Proceeding with storage sync..." ;;
+      *) echo "   Skipping storage sync for this run."; SKIP_STORAGE=1 ;;
+    esac
+  fi
+}
+
 copy_source_tree(){
   echo " • Copying full local project directory..."
   ensure_remote_temp_dir
@@ -388,11 +439,14 @@ setup_remote_repository(){
 }

 cleanup_stale_docker_resources(){
-  if [ "$CLEANUP_RUNTIME" -ne 1 ]; then
+  if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
+    echo "⋅ Skipping remote container/image cleanup (--preserve-containers)"
+    return
+  fi
+  if [ "$CLEAN_CONTAINERS" -ne 1 ]; then
     echo "⋅ Skipping remote runtime cleanup (containers and images preserved)."
     return
   fi

   echo "⋅ Cleaning up stale Docker resources on remote..."

   # Stop and remove old containers
@@ -446,6 +500,8 @@ if [ ${#MISSING_IMAGES[@]} -gt 0 ]; then
   printf '   • %s\n' "${MISSING_IMAGES[@]}"
 fi

+confirm_remote_storage_overwrite
+
 if [[ $SKIP_STORAGE -eq 0 ]]; then
   if [[ -d storage ]]; then
     echo "⋅ Syncing storage to remote"
@@ -513,8 +569,34 @@ run_scp "$TARBALL" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-modules-images.tar"
 run_ssh "docker load < '$REMOTE_TEMP_DIR/acore-modules-images.tar' && rm '$REMOTE_TEMP_DIR/acore-modules-images.tar'"

 if [[ -f "$ENV_FILE" ]]; then
-  echo "⋅ Uploading .env"
-  run_scp "$ENV_FILE" "$USER@$HOST:$PROJECT_DIR/.env"
+  if [[ $SKIP_ENV -eq 1 ]]; then
+    echo "⋅ Skipping .env upload (--skip-env)"
+  else
+    remote_env_path="$PROJECT_DIR/.env"
+    upload_env=1
+
+    if run_ssh "test -f '$remote_env_path'"; then
+      if [ "$ASSUME_YES" = "1" ]; then
+        echo "⋅ Overwriting existing remote .env (auto-confirm)"
+      elif [ -t 0 ]; then
+        read -r -p "⚠️ Remote .env exists at $remote_env_path. Overwrite? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) ;;
+          *) upload_env=0 ;;
+        esac
+      else
+        echo "⚠️ Remote .env exists at $remote_env_path; skipping upload (no confirmation available)"
+        upload_env=0
+      fi
+    fi
+
+    if [[ $upload_env -eq 1 ]]; then
+      echo "⋅ Uploading .env"
+      run_scp "$ENV_FILE" "$USER@$HOST:$remote_env_path"
+    else
+      echo "⋅ Keeping existing remote .env"
+    fi
+  fi
 fi

 echo "⋅ Remote prepares completed"
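Assuming the migration helper is invoked directly (its filename is not shown in this diff; `migrate-to-remote.sh` below is a placeholder), the new flags combine roughly like this:

```bash
# Placeholder script name; substitute the actual migration script path.
scripts/bash/migrate-to-remote.sh --host example.host --user acore \
  --skip-env --preserve-containers   # keep the remote .env and running containers

scripts/bash/migrate-to-remote.sh --host example.host --user acore \
  --clean-containers --yes           # scrub ac-* containers/images, no prompts
```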
scripts/bash/update-remote.sh (new executable file, 121 lines)
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Helper to push a fresh build to a remote host with minimal downtime and no data touch by default.
+
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+DEFAULT_PROJECT_DIR="~$(printf '/%s' "$(basename "$ROOT_DIR")")"
+
+HOST=""
+USER=""
+PORT=22
+IDENTITY=""
+PROJECT_DIR="$DEFAULT_PROJECT_DIR"
+PUSH_ENV=0
+PUSH_STORAGE=0
+CLEAN_CONTAINERS=0
+AUTO_DEPLOY=1
+ASSUME_YES=0
+
+usage(){
+  cat <<'EOF'
+Usage: scripts/bash/update-remote.sh --host HOST --user USER [options]
+
+Options:
+  --host HOST           Remote hostname or IP (required)
+  --user USER           SSH username on remote host (required)
+  --port PORT           SSH port (default: 22)
+  --identity PATH       SSH private key
+  --project-dir DIR     Remote project directory (default: ~/<repo-name>)
+  --remote-path DIR     Alias for --project-dir (backward compat)
+  --push-env            Upload local .env to remote (default: skip)
+  --push-storage        Sync ./storage to remote (default: skip)
+  --clean-containers    Stop/remove remote ac-* containers & project images during migration (default: preserve)
+  --no-auto-deploy      Do not trigger remote deploy after migration
+  --yes                 Auto-confirm prompts
+  --help                Show this help
+EOF
+}
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --host) HOST="$2"; shift 2;;
+    --user) USER="$2"; shift 2;;
+    --port) PORT="$2"; shift 2;;
+    --identity) IDENTITY="$2"; shift 2;;
+    --project-dir) PROJECT_DIR="$2"; shift 2;;
+    --remote-path) PROJECT_DIR="$2"; shift 2;;
+    --push-env) PUSH_ENV=1; shift;;
+    --push-storage) PUSH_STORAGE=1; shift;;
+    --clean-containers) CLEAN_CONTAINERS=1; shift;;
+    --no-auto-deploy) AUTO_DEPLOY=0; shift;;
+    --yes) ASSUME_YES=1; shift;;
+    --help|-h) usage; exit 0;;
+    *) echo "Unknown option: $1" >&2; usage; exit 1;;
+  esac
+done
+
+if [[ -z "$HOST" || -z "$USER" ]]; then
+  echo "--host and --user are required" >&2
+  usage
+  exit 1
+fi
+
+deploy_args=(--remote --remote-host "$HOST" --remote-user "$USER")
+
+if [ -n "$PROJECT_DIR" ]; then
+  deploy_args+=(--remote-project-dir "$PROJECT_DIR")
+fi
+if [ -n "$IDENTITY" ]; then
+  deploy_args+=(--remote-identity "$IDENTITY")
+fi
+if [ "$PORT" != "22" ]; then
+  deploy_args+=(--remote-port "$PORT")
+fi
+
+if [ "$PUSH_STORAGE" -ne 1 ]; then
+  deploy_args+=(--remote-skip-storage)
+fi
+if [ "$PUSH_ENV" -ne 1 ]; then
+  deploy_args+=(--remote-skip-env)
+fi
+
+if [ "$CLEAN_CONTAINERS" -eq 1 ]; then
+  deploy_args+=(--remote-clean-containers)
+else
+  deploy_args+=(--remote-preserve-containers)
+fi
+
+if [ "$AUTO_DEPLOY" -eq 1 ]; then
+  deploy_args+=(--remote-auto-deploy)
+fi
+
+deploy_args+=(--no-watch)
+
+if [ "$ASSUME_YES" -eq 1 ]; then
+  deploy_args+=(--yes)
+fi
+
+echo "Remote update plan:"
+echo "  Host/User    : ${USER}@${HOST}:${PORT}"
+echo "  Project Dir  : ${PROJECT_DIR}"
+echo "  Push .env    : $([ "$PUSH_ENV" -eq 1 ] && echo yes || echo no)"
+echo "  Push storage : $([ "$PUSH_STORAGE" -eq 1 ] && echo yes || echo no)"
+echo "  Cleanup mode : $([ "$CLEAN_CONTAINERS" -eq 1 ] && echo 'clean containers' || echo 'preserve containers')"
+echo "  Auto deploy  : $([ "$AUTO_DEPLOY" -eq 1 ] && echo yes || echo no)"
+if [ "$AUTO_DEPLOY" -eq 1 ] && [ "$PUSH_ENV" -ne 1 ]; then
+  echo "  ⚠️ Auto-deploy is enabled but push-env is off; remote deploy will fail without a valid .env."
+fi
+
+if [ "$ASSUME_YES" -ne 1 ]; then
+  read -r -p "Proceed with remote update? [y/N]: " reply
+  reply="${reply:-n}"
+  case "${reply,,}" in
+    y|yes) ;;
+    *) echo "Aborted."; exit 1 ;;
+  esac
+  deploy_args+=(--yes)
+fi
+
+cd "$ROOT_DIR"
+./deploy.sh "${deploy_args[@]}"
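A typical invocation of the new helper, which translates its options into the deploy.sh remote flags above (hostname and user are placeholders):

```bash
# Fast update: no .env push, no storage sync, containers preserved, auto-deploy on.
scripts/bash/update-remote.sh --host example.host --user acore --yes

# Full refresh: push .env, scrub old containers/images, skip the final deploy.
scripts/bash/update-remote.sh --host example.host --user acore \
  --push-env --clean-containers --no-auto-deploy
```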
setup.sh (80 lines changed)
@@ -578,8 +578,6 @@ main(){
   local CLI_PLAYERBOT_ENABLED=""
   local CLI_PLAYERBOT_MIN=""
   local CLI_PLAYERBOT_MAX=""
-  local CLI_AUTO_REBUILD=0
-  local CLI_MODULES_SOURCE=""
   local FORCE_OVERWRITE=0
   local CLI_ENABLE_MODULES_RAW=()

@@ -622,9 +620,6 @@ Options:
   --playerbot-enabled 0|1        Override PLAYERBOT_ENABLED flag
   --playerbot-min-bots N         Override PLAYERBOT_MIN_BOTS value
   --playerbot-max-bots N         Override PLAYERBOT_MAX_BOTS value
-  --auto-rebuild-on-deploy       Enable automatic rebuild during deploys
-  --modules-rebuild-source PATH  Source checkout used for module rebuilds
   --deploy-after                 Run ./deploy.sh automatically after setup completes
   --force                        Overwrite existing .env without prompting
EOF
   exit 0
@@ -779,25 +774,10 @@ EOF
       --playerbot-max-bots=*)
         CLI_PLAYERBOT_MAX="${1#*=}"; shift
         ;;
-      --auto-rebuild-on-deploy)
-        CLI_AUTO_REBUILD=1
-        shift
-        ;;
-      --modules-rebuild-source)
-        [[ $# -ge 2 ]] || { say ERROR "--modules-rebuild-source requires a value"; exit 1; }
-        CLI_MODULES_SOURCE="$2"; shift 2
-        ;;
-      --modules-rebuild-source=*)
-        CLI_MODULES_SOURCE="${1#*=}"; shift
-        ;;
       --force)
         FORCE_OVERWRITE=1
         shift
         ;;
       --deploy-after)
         CLI_DEPLOY_AFTER=1
         shift
         ;;
       *)
         echo "Unknown argument: $1" >&2
         echo "Use --help for usage" >&2
@@ -1210,8 +1190,6 @@ fi
   local PLAYERBOT_MIN_BOTS="${DEFAULT_PLAYERBOT_MIN:-40}"
   local PLAYERBOT_MAX_BOTS="${DEFAULT_PLAYERBOT_MAX:-40}"

-  local AUTO_REBUILD_ON_DEPLOY=$CLI_AUTO_REBUILD
-  local MODULES_REBUILD_SOURCE_PATH_VALUE="${CLI_MODULES_SOURCE}"
   local NEEDS_CXX_REBUILD=0

   local module_mode_label=""
@@ -1473,7 +1451,6 @@ fi
   printf " %-18s %s\n" "Storage Path:" "$STORAGE_PATH"
   printf " %-18s %s\n" "Container User:" "$CONTAINER_USER"
   printf " %-18s Daily %s:00 UTC, keep %sd/%sh\n" "Backups:" "$BACKUP_DAILY_TIME" "$BACKUP_RETENTION_DAYS" "$BACKUP_RETENTION_HOURS"
-  printf " %-18s %s\n" "Source checkout:" "$default_source_rel"
   printf " %-18s %s\n" "Modules images:" "$AC_AUTHSERVER_IMAGE_MODULES_VALUE | $AC_WORLDSERVER_IMAGE_MODULES_VALUE"

   printf " %-18s %s\n" "Modules preset:" "$SUMMARY_MODE_TEXT"
@@ -1520,11 +1497,6 @@ fi
     echo ""
     say WARNING "These modules require compiling AzerothCore from source."
     say INFO "Run './build.sh' to compile your custom modules before deployment."
-    if [ "$CLI_AUTO_REBUILD" = "1" ]; then
-      AUTO_REBUILD_ON_DEPLOY=1
-    else
-      AUTO_REBUILD_ON_DEPLOY=$(ask_yn "Enable automatic rebuild during future deploys?" "$( [ "$AUTO_REBUILD_ON_DEPLOY" = "1" ] && echo y || echo n )")
-    fi

     # Set build sentinel to indicate rebuild is needed
     local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild"
@@ -1554,23 +1526,8 @@ fi
     default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots"
   fi

-  if [ -n "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
-    local storage_abs="$STORAGE_PATH"
-    if [[ "$storage_abs" != /* ]]; then
-      storage_abs="$(pwd)/${storage_abs#./}"
-    fi
-    local candidate_path="$MODULES_REBUILD_SOURCE_PATH_VALUE"
-    if [[ "$candidate_path" != /* ]]; then
-      candidate_path="$(pwd)/${candidate_path#./}"
-    fi
-    if [[ "$candidate_path" == "$storage_abs"* ]]; then
-      say WARNING "MODULES_REBUILD_SOURCE_PATH is inside shared storage (${candidate_path}). Using local workspace ${default_source_rel} instead."
-      MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
-    fi
-  fi
-
   # Module staging will be handled directly in the rebuild section below

   # Persist rebuild source path for downstream build scripts
   MODULES_REBUILD_SOURCE_PATH="$default_source_rel"

   # Confirm write
@@ -1586,10 +1543,6 @@ fi
     [ "$cont" = "1" ] || { say ERROR "Aborted"; exit 1; }
   fi

-  if [ -z "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
-    MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
-  fi
-
   DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME}
   HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH}
   MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY}
@@ -1756,11 +1709,12 @@ BACKUP_HEALTHCHECK_GRACE_SECONDS=$BACKUP_HEALTHCHECK_GRACE_SECONDS

EOF
   echo
   echo "# Modules"
   for module_key in "${MODULE_KEYS[@]}"; do
     printf "%s=%s\n" "$module_key" "${!module_key:-0}"
   done
   cat <<EOF
+MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH

# Client data
CLIENT_DATA_VERSION=${CLIENT_DATA_VERSION:-$DEFAULT_CLIENT_DATA_VERSION}
@@ -1779,12 +1733,8 @@ MODULES_CPP_LIST=$MODULES_CPP_LIST
MODULES_REQUIRES_CUSTOM_BUILD=$MODULES_REQUIRES_CUSTOM_BUILD
MODULES_REQUIRES_PLAYERBOT_SOURCE=$MODULES_REQUIRES_PLAYERBOT_SOURCE

-# Rebuild automation
-AUTO_REBUILD_ON_DEPLOY=$AUTO_REBUILD_ON_DEPLOY
-MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH_VALUE
-
# Eluna
AC_ELUNA_ENABLED=$DEFAULT_ELUNA_ENABLED
AC_ELUNA_TRACE_BACK=$DEFAULT_ELUNA_TRACE_BACK
AC_ELUNA_AUTO_RELOAD=$DEFAULT_ELUNA_AUTO_RELOAD
AC_ELUNA_BYTECODE_CACHE=$DEFAULT_ELUNA_BYTECODE_CACHE
@@ -1853,16 +1803,6 @@ EOF
     printf '   🚀 Quick deploy: ./deploy.sh\n'
   fi

-  if [ "${CLI_DEPLOY_AFTER:-0}" = "1" ]; then
-    local deploy_args=(bash "./deploy.sh" --yes)
-    if [ "$MODULE_PLAYERBOTS" != "1" ]; then
-      deploy_args+=(--profile standard)
-    fi
-    say INFO "Launching deploy after setup (--deploy-after enabled)"
-    if ! "${deploy_args[@]}"; then
-      say WARNING "Automatic deploy failed; please run ./deploy.sh manually."
-    fi
-  fi
 }

 main "$@"
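After these setup.sh changes, the generated .env carries the module toggles plus a single MODULES_REBUILD_SOURCE_PATH line, and the "# Rebuild automation" block is gone. A minimal sketch of the emission, with illustrative MODULE_KEYS contents (setup.sh builds the real list):

```bash
# MODULE_KEYS and values here are illustrative placeholders.
MODULE_KEYS=(MODULE_PLAYERBOTS MODULE_ELUNA)
MODULE_PLAYERBOTS=1
MODULES_REBUILD_SOURCE_PATH="./local-storage/source/azerothcore-playerbots"

{
  echo
  echo "# Modules"
  for module_key in "${MODULE_KEYS[@]}"; do
    printf "%s=%s\n" "$module_key" "${!module_key:-0}"   # unset keys default to 0
  done
  printf "MODULES_REBUILD_SOURCE_PATH=%s\n" "$MODULES_REBUILD_SOURCE_PATH"
} >> .env
```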