#!/bin/bash
#
# High-level orchestrator for module-aware deployments.
# 1. Ensures AzerothCore source repo is present
# 2. Runs ac-modules to sync/clean module checkout and configs
# 3. Rebuilds source images when C++ modules demand it
# 4. Stages target compose profile and optionally tails worldserver logs

set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
ENV_PATH="$ROOT_DIR/.env"

source "$ROOT_DIR/scripts/lib/compose_overrides.sh"

TARGET_PROFILE=""
WATCH_LOGS=1
KEEP_RUNNING=0
WORLD_LOG_SINCE=""
ASSUME_YES=0
REMOTE_MODE=0
REMOTE_HOST=""
REMOTE_USER=""
REMOTE_PORT="22"
REMOTE_IDENTITY=""
REMOTE_PROJECT_DIR=""
REMOTE_SKIP_STORAGE=0
REMOTE_ARGS_PROVIDED=0
MODULE_HELPER="$ROOT_DIR/scripts/modules.py"
MODULE_STATE_INITIALIZED=0
declare -a MODULES_COMPILE_LIST=()
declare -a COMPOSE_FILE_ARGS=()

BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'

info(){ printf '%b\n' "${BLUE}ℹ️ $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}✅ $*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}❌ $*${NC}"; }

show_deployment_header(){
  printf '\n%b\n' "${BLUE}⚔️ AZEROTHCORE REALM DEPLOYMENT ⚔️${NC}"
  printf '%b\n' "${BLUE}═══════════════════════════════════════${NC}"
  printf '%b\n\n' "${BLUE}🏰 Bringing Your Realm Online 🏰${NC}"
}

show_step(){
  local step="$1" total="$2" message="$3"
  printf '%b\n' "${YELLOW}🔧 Step ${step}/${total}: ${message}...${NC}"
}

show_realm_ready(){
  printf '\n%b\n' "${GREEN}⚔️ The realm has been forged! ⚔️${NC}"
  printf '%b\n' "${GREEN}🏰 Adventurers may now enter your world${NC}"
  printf '%b\n\n' "${GREEN}🗡️ May your server bring epic adventures!${NC}"
}

show_remote_plan(){
  local plan_host="${REMOTE_HOST:-}"
  local plan_user="${REMOTE_USER:-}"
  local plan_dir="${REMOTE_PROJECT_DIR:-~/AzerothCore-RealmMaster}"
  printf '\n%b\n' "${BLUE}🧭 Remote Deployment Plan${NC}"
  printf '%b\n' "${YELLOW}├─ Validate build status locally${NC}"
  printf '%b\n' "${YELLOW}└─ Package & sync to ${plan_user}@${plan_host}:${plan_dir}${NC}"
}

maybe_select_deploy_target(){
  if [ "$REMOTE_MODE" -eq 1 ]; then
    return
  fi
  if [ "$ASSUME_YES" -eq 1 ] || [ ! -t 0 ]; then
    return
  fi
  echo
  echo "Select deployment target:"
  echo " 1) Local host (current machine)"
  echo " 2) Remote host (package for SSH deployment)"
  local choice
  read -rp "Choice [1]: " choice
  case "${choice:-1}" in
    2)
      REMOTE_MODE=1
      REMOTE_ARGS_PROVIDED=0
      ;;
    *) ;;
  esac
}
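
# Remote-mode helpers: collect_remote_details() prompts for any SSH settings that
# were not supplied up front, and validate_remote_configuration() rejects
# incomplete or malformed values before anything is packaged for transfer.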
collect_remote_details(){
  if [ "$REMOTE_MODE" -ne 1 ]; then
    return
  fi
  local interactive=0
  if [ -t 0 ] && [ "$ASSUME_YES" -ne 1 ]; then
    interactive=1
  fi
  if [ -z "$REMOTE_HOST" ] && [ "$interactive" -eq 1 ]; then
    while true; do
      read -rp "Remote host (hostname or IP): " REMOTE_HOST
      [ -n "$REMOTE_HOST" ] && break
      echo " Please enter a hostname or IP."
    done
  fi
  if [ -z "$REMOTE_USER" ] && [ "$interactive" -eq 1 ]; then
    local default_user="$USER"
    read -rp "SSH username [${default_user}]: " REMOTE_USER
    REMOTE_USER="${REMOTE_USER:-$default_user}"
  fi
  if [ -z "$REMOTE_USER" ] && [ -n "$USER" ]; then
    REMOTE_USER="$USER"
  fi
  if [ -z "$REMOTE_PORT" ]; then
    REMOTE_PORT="22"
  fi
  if [ "$interactive" -eq 1 ]; then
    local port_input
    read -rp "SSH port [${REMOTE_PORT}]: " port_input
    REMOTE_PORT="${port_input:-$REMOTE_PORT}"
  fi
  if [ "$interactive" -eq 1 ]; then
    local identity_input
    local identity_prompt="SSH identity file (leave blank for default)"
    if [ -n "$REMOTE_IDENTITY" ]; then
      identity_prompt="${identity_prompt} [${REMOTE_IDENTITY}]"
    fi
    read -rp "${identity_prompt}: " identity_input
    [ -n "$identity_input" ] && REMOTE_IDENTITY="$identity_input"
  fi
  if [ -n "$REMOTE_IDENTITY" ]; then
    REMOTE_IDENTITY="${REMOTE_IDENTITY/#\~/$HOME}"
  fi
  if [ -z "$REMOTE_PROJECT_DIR" ]; then
    REMOTE_PROJECT_DIR="~/AzerothCore-RealmMaster"
  fi
  if [ "$interactive" -eq 1 ]; then
    local dir_input
    read -rp "Remote project directory [${REMOTE_PROJECT_DIR}]: " dir_input
    REMOTE_PROJECT_DIR="${dir_input:-$REMOTE_PROJECT_DIR}"
  fi
  if [ "$interactive" -eq 1 ] && [ "$REMOTE_ARGS_PROVIDED" -eq 0 ]; then
    local sync_answer
    read -rp "Sync storage directory to remote host? [Y/n]: " sync_answer
    sync_answer="${sync_answer:-Y}"
    case "${sync_answer,,}" in
      n|no) REMOTE_SKIP_STORAGE=1 ;;
      *) REMOTE_SKIP_STORAGE=0 ;;
    esac
  fi
}

validate_remote_configuration(){
  if [ "$REMOTE_MODE" -ne 1 ]; then
    return
  fi
  if [ -z "$REMOTE_HOST" ]; then
    err "Remote deployment requires a hostname or IP."
    exit 1
  fi
  if [ -z "$REMOTE_USER" ]; then
    err "Remote deployment requires an SSH username."
    exit 1
  fi
  REMOTE_PORT="${REMOTE_PORT:-22}"
  if ! [[ "$REMOTE_PORT" =~ ^[0-9]+$ ]]; then
    err "Invalid SSH port: $REMOTE_PORT"
    exit 1
  fi
  if [ -n "$REMOTE_IDENTITY" ]; then
    REMOTE_IDENTITY="${REMOTE_IDENTITY/#\~/$HOME}"
    if [ ! -f "$REMOTE_IDENTITY" ]; then
      err "Remote identity file not found: $REMOTE_IDENTITY"
      exit 1
    fi
  fi
  if [ -z "$REMOTE_PROJECT_DIR" ]; then
    REMOTE_PROJECT_DIR="~/AzerothCore-RealmMaster"
  fi
  if [ ! -f "$ROOT_DIR/scripts/migrate-stack.sh" ]; then
    err "Migration script not found: $ROOT_DIR/scripts/migrate-stack.sh"
    exit 1
  fi
}

usage(){
  cat <<'USAGE'
Usage: ./deploy.sh [OPTIONS]

Options:
  --yes                Skip confirmation prompts
  --no-watch           Do not tail worldserver logs after deployment
  --remote-host HOST   Package the deployment for a remote host over SSH
  --remote-user USER   SSH username to use on the remote host
  -h, --help           Show this help text
USAGE
}

# Minimal option parsing for the flags referenced elsewhere in this script.
while [ $# -gt 0 ]; do
  case "$1" in
    --yes) ASSUME_YES=1 ;;
    --no-watch) WATCH_LOGS=0 ;;
    --remote-host)
      REMOTE_HOST="$2"
      REMOTE_MODE=1
      REMOTE_ARGS_PROVIDED=1
      shift
      ;;
    --remote-user)
      REMOTE_USER="$2"
      REMOTE_MODE=1
      REMOTE_ARGS_PROVIDED=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      warn "Unrecognized argument: $1"
      ;;
  esac
  shift
done
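
# Illustrative invocations (option names as listed in usage above; the host and
# username below are placeholders):
#
#   ./deploy.sh --yes                # non-interactive local deployment
#   ./deploy.sh --yes --no-watch     # deploy without tailing worldserver logs
#   ./deploy.sh --remote-host realm.example.com --remote-user acore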
-f "$ROOT_DIR/scripts/migrate-stack.sh" ]; then err "Migration script not found: $ROOT_DIR/scripts/migrate-stack.sh" exit 1 fi fi read_env(){ local key="$1" default="${2:-}" local value="" if [ -f "$ENV_PATH" ]; then value="$(grep -E "^${key}=" "$ENV_PATH" | tail -n1 | cut -d'=' -f2- | tr -d '\r')" fi if [ -z "$value" ]; then value="$default" fi echo "$value" } init_compose_files(){ compose_overrides::build_compose_args "$ROOT_DIR" "$ENV_PATH" "$DEFAULT_COMPOSE_FILE" COMPOSE_FILE_ARGS } init_compose_files resolve_local_storage_path(){ local path path="$(read_env STORAGE_PATH_LOCAL "./local-storage")" if [[ "$path" != /* ]]; then path="${path#./}" path="$ROOT_DIR/$path" fi echo "${path%/}" } ensure_modules_dir_writable(){ local base_path="$1" local modules_dir="${base_path%/}/modules" if [ -d "$modules_dir" ] || mkdir -p "$modules_dir" 2>/dev/null; then local uid gid uid="$(id -u)" gid="$(id -g)" if ! chown -R "$uid":"$gid" "$modules_dir" 2>/dev/null; then if command -v docker >/dev/null 2>&1; then local helper_image helper_image="$(read_env ALPINE_IMAGE "alpine:latest")" docker run --rm \ -u 0:0 \ -v "$modules_dir":/modules \ "$helper_image" \ sh -c "chown -R ${uid}:${gid} /modules && chmod -R ug+rwX /modules" >/dev/null 2>&1 || true fi fi chmod -R u+rwX "$modules_dir" 2>/dev/null || true fi } ensure_module_state(){ if [ "$MODULE_STATE_INITIALIZED" -eq 1 ]; then return fi local storage_root storage_root="$(resolve_local_storage_path)" local output_dir="${storage_root}/modules" ensure_modules_dir_writable "$storage_root" if ! python3 "$MODULE_HELPER" --env-path "$ENV_PATH" --manifest "$ROOT_DIR/config/module-manifest.json" generate --output-dir "$output_dir"; then err "Module manifest validation failed. See errors above." fi if [ ! -f "$output_dir/modules.env" ]; then err "modules.env not produced at $output_dir/modules.env" fi # shellcheck disable=SC1090 source "$output_dir/modules.env" MODULE_STATE_INITIALIZED=1 MODULES_COMPILE_LIST=() IFS=' ' read -r -a MODULES_COMPILE_LIST <<< "${MODULES_COMPILE:-}" if [ "${#MODULES_COMPILE_LIST[@]}" -eq 1 ] && [ -z "${MODULES_COMPILE_LIST[0]}" ]; then MODULES_COMPILE_LIST=() fi } resolve_project_name(){ local raw_name="$(read_env COMPOSE_PROJECT_NAME "azerothcore-realmmaster")" local sanitized sanitized="$(echo "$raw_name" | tr '[:upper:]' '[:lower:]')" sanitized="${sanitized// /-}" sanitized="$(echo "$sanitized" | tr -cd 'a-z0-9_-')" if [[ -z "$sanitized" ]]; then sanitized="azerothcore-realmmaster" elif [[ ! 
"$sanitized" =~ ^[a-z0-9] ]]; then sanitized="ac${sanitized}" fi echo "$sanitized" } resolve_project_image(){ local tag="$1" local project_name project_name="$(resolve_project_name)" echo "${project_name}:${tag}" } filter_empty_lines(){ awk ' /^[[:space:]]*$/ { empty_count++ if (empty_count <= 1) print } /[^[:space:]]/ { empty_count = 0 print } ' } compose(){ local project_name project_name="$(resolve_project_name)" # Add --quiet for less verbose output, filter excessive empty lines docker compose --project-name "$project_name" "${COMPOSE_FILE_ARGS[@]}" "$@" | filter_empty_lines } # Build detection logic detect_build_needed(){ local reasons=() # Check sentinel file if modules_need_rebuild; then reasons+=("Module changes detected (sentinel file present)") fi # Check if any C++ modules are enabled but modules-latest images don't exist ensure_module_state local any_cxx_modules=0 if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then any_cxx_modules=1 fi if [ "$any_cxx_modules" = "1" ]; then local authserver_modules_image local worldserver_modules_image authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(resolve_project_image "authserver-modules-latest")")" worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")" if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then reasons+=("C++ modules enabled but authserver modules image $authserver_modules_image is missing") fi if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then reasons+=("C++ modules enabled but worldserver modules image $worldserver_modules_image is missing") fi fi if [ ${#reasons[@]} -gt 0 ]; then printf '%s\n' "${reasons[@]}" fi } stop_runtime_stack(){ info "Stopping runtime stack to avoid container name conflicts" compose \ --profile services-standard \ --profile services-playerbots \ --profile services-modules \ --profile db \ --profile client-data \ --profile client-data-bots \ --profile modules \ down 2>/dev/null || true } # Deployment sentinel management mark_deployment_complete(){ local storage_path storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")" if [[ "$storage_path" != /* ]]; then # Remove leading ./ if present storage_path="${storage_path#./}" storage_path="$ROOT_DIR/$storage_path" fi local sentinel="$storage_path/modules/.last_deployed" if ! mkdir -p "$(dirname "$sentinel")" 2>/dev/null; then warn "Cannot create local-storage directory. Deployment tracking may not work properly." return 0 fi if ! date > "$sentinel" 2>/dev/null; then local sentinel_dir sentinel_dir="$(dirname "$sentinel")" if command -v docker >/dev/null 2>&1; then local helper_image helper_image="$(read_env ALPINE_IMAGE "alpine:latest")" local container_user container_user="$(read_env CONTAINER_USER "$(id -u):$(id -g)")" docker run --rm \ --user "$container_user" \ -v "$sentinel_dir":/sentinel \ "$helper_image" \ sh -c 'date > /sentinel/.last_deployed' >/dev/null 2>&1 || true fi if [ ! -f "$sentinel" ]; then warn "Unable to update deployment marker at $sentinel (permission denied)." 
# Build detection logic
detect_build_needed(){
  local reasons=()
  # Check sentinel file
  if modules_need_rebuild; then
    reasons+=("Module changes detected (sentinel file present)")
  fi
  # Check if any C++ modules are enabled but modules-latest images don't exist
  ensure_module_state
  local any_cxx_modules=0
  if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
    any_cxx_modules=1
  fi
  if [ "$any_cxx_modules" = "1" ]; then
    local authserver_modules_image
    local worldserver_modules_image
    authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(resolve_project_image "authserver-modules-latest")")"
    worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")"
    if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then
      reasons+=("C++ modules enabled but authserver modules image $authserver_modules_image is missing")
    fi
    if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then
      reasons+=("C++ modules enabled but worldserver modules image $worldserver_modules_image is missing")
    fi
  fi
  if [ ${#reasons[@]} -gt 0 ]; then
    printf '%s\n' "${reasons[@]}"
  fi
}

stop_runtime_stack(){
  info "Stopping runtime stack to avoid container name conflicts"
  compose \
    --profile services-standard \
    --profile services-playerbots \
    --profile services-modules \
    --profile db \
    --profile client-data \
    --profile client-data-bots \
    --profile modules \
    down 2>/dev/null || true
}

# Deployment sentinel management
mark_deployment_complete(){
  local storage_path
  storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  if [[ "$storage_path" != /* ]]; then
    # Remove leading ./ if present
    storage_path="${storage_path#./}"
    storage_path="$ROOT_DIR/$storage_path"
  fi
  local sentinel="$storage_path/modules/.last_deployed"
  if ! mkdir -p "$(dirname "$sentinel")" 2>/dev/null; then
    warn "Cannot create local-storage directory. Deployment tracking may not work properly."
    return 0
  fi
  if ! date > "$sentinel" 2>/dev/null; then
    local sentinel_dir
    sentinel_dir="$(dirname "$sentinel")"
    if command -v docker >/dev/null 2>&1; then
      local helper_image
      helper_image="$(read_env ALPINE_IMAGE "alpine:latest")"
      local container_user
      container_user="$(read_env CONTAINER_USER "$(id -u):$(id -g)")"
      docker run --rm \
        --user "$container_user" \
        -v "$sentinel_dir":/sentinel \
        "$helper_image" \
        sh -c 'date > /sentinel/.last_deployed' >/dev/null 2>&1 || true
    fi
    if [ ! -f "$sentinel" ]; then
      warn "Unable to update deployment marker at $sentinel (permission denied)."
      return 0
    fi
  fi
}

modules_need_rebuild(){
  local storage_path
  storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  if [[ "$storage_path" != /* ]]; then
    # Remove leading ./ if present
    storage_path="${storage_path#./}"
    storage_path="$ROOT_DIR/$storage_path"
  fi
  local sentinel="$storage_path/modules/.requires_rebuild"
  [[ -f "$sentinel" ]]
}

# Build prompting logic
prompt_build_if_needed(){
  local build_reasons_output
  build_reasons_output=$(detect_build_needed)
  if [ -z "$build_reasons_output" ]; then
    return 0 # No build needed
  fi
  local build_reasons
  readarray -t build_reasons <<< "$build_reasons_output"
  # Check if auto-rebuild is enabled
  local auto_rebuild
  auto_rebuild="$(read_env AUTO_REBUILD_ON_DEPLOY "0")"
  if [ "$auto_rebuild" = "1" ]; then
    warn "Auto-rebuild enabled, running build process..."
    if (cd "$ROOT_DIR" && ./build.sh --yes); then
      ok "Build completed successfully"
      return 0
    else
      err "Build failed"
      return 1
    fi
  fi
  if [ "$ASSUME_YES" -eq 1 ]; then
    warn "Build required; auto-confirming (--yes)"
    if (cd "$ROOT_DIR" && ./build.sh --yes); then
      ok "Build completed successfully"
      return 0
    else
      err "Build failed"
      return 1
    fi
  fi
  # Interactive prompt
  echo
  warn "Build appears to be required:"
  local reason
  for reason in "${build_reasons[@]}"; do
    warn " • $reason"
  done
  echo
  if [ -t 0 ]; then
    local reply
    read -r -p "Run build now? [y/N]: " reply
    reply="${reply:-n}"
    case "$reply" in
      [Yy]*)
        if (cd "$ROOT_DIR" && ./build.sh --yes); then
          ok "Build completed successfully"
          return 0
        else
          err "Build failed"
          return 1
        fi
        ;;
      *)
        err "Build required but declined. Run './build.sh' manually before deploying or re-run this script."
        return 1
        ;;
    esac
  else
    err "Build required but running non-interactively. Run './build.sh' manually before deploying or re-run this script."
    return 1
  fi
}

determine_profile(){
  if [ -n "$TARGET_PROFILE" ]; then
    echo "$TARGET_PROFILE"
    return
  fi
  local module_playerbots
  local playerbot_enabled
  module_playerbots="$(read_env MODULE_PLAYERBOTS "0")"
  playerbot_enabled="$(read_env PLAYERBOT_ENABLED "0")"
  if [ "$module_playerbots" = "1" ] || [ "$playerbot_enabled" = "1" ]; then
    echo "playerbots"
    return
  fi
  ensure_module_state
  if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
    echo "modules"
    return
  fi
  echo "standard"
}

run_remote_migration(){
  local args=(--host "$REMOTE_HOST" --user "$REMOTE_USER")
  if [ -n "$REMOTE_PORT" ] && [ "$REMOTE_PORT" != "22" ]; then
    args+=(--port "$REMOTE_PORT")
  fi
  if [ -n "$REMOTE_IDENTITY" ]; then
    args+=(--identity "$REMOTE_IDENTITY")
  fi
  if [ -n "$REMOTE_PROJECT_DIR" ]; then
    args+=(--project-dir "$REMOTE_PROJECT_DIR")
  fi
  if [ "$REMOTE_SKIP_STORAGE" -eq 1 ]; then
    args+=(--skip-storage)
  fi
  if [ "$ASSUME_YES" -eq 1 ]; then
    args+=(--yes)
  fi
  (cd "$ROOT_DIR" && ./scripts/migrate-stack.sh "${args[@]}")
}

stage_runtime(){
  local args=(--yes)
  if [ -n "$TARGET_PROFILE" ]; then
    args+=("$TARGET_PROFILE")
  fi
  info "Staging runtime environment via stage-modules.sh ${args[*]}"
  (cd "$ROOT_DIR" && ./scripts/stage-modules.sh "${args[@]}")
}

tail_world_logs(){
  info "Tailing worldserver logs (Ctrl+C to stop)"
  local args=(--follow)
  if [ -n "$WORLD_LOG_SINCE" ]; then
    args+=(--since "$WORLD_LOG_SINCE")
  fi
  local tail_opt="${WORLD_LOG_TAIL:-0}"
  args+=(--tail "$tail_opt")
  if ! docker logs "${args[@]}" ac-worldserver; then
    warn "Worldserver logs unavailable; container may not be running."
  fi
}
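
# Readiness below is judged from the container's Docker health status; the same
# check can be run by hand with:
#
#   docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' ac-worldserver
#
# The watch phase also honors two optional environment variables, for example:
#
#   WORLD_READY_TIMEOUT=300 WORLD_LOG_TAIL=200 ./deploy.sh --yes
#
# (defaults: 180-second timeout, --tail 0)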
wait_for_worldserver_ready(){
  local timeout="${WORLD_READY_TIMEOUT:-180}" start
  start="$(date +%s)"
  info "Waiting for worldserver to become ready (timeout: ${timeout}s)"
  info "First deployment may take 10-15 minutes while client-data is extracted"
  while true; do
    if ! docker ps --format '{{.Names}}' | grep -qx "ac-worldserver"; then
      info "Worldserver container is not running yet; retrying..."
    else
      local health
      health="$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' ac-worldserver 2>/dev/null || echo none)"
      case "$health" in
        healthy)
          WORLD_LOG_SINCE="$(docker inspect --format='{{.State.StartedAt}}' ac-worldserver 2>/dev/null)"
          ok "Worldserver reported healthy"
          return 0
          ;;
        none)
          if docker inspect --format='{{.State.Status}}' ac-worldserver 2>/dev/null | grep -q '^running$'; then
            WORLD_LOG_SINCE="$(docker inspect --format='{{.State.StartedAt}}' ac-worldserver 2>/dev/null)"
            ok "Worldserver running (no healthcheck configured)"
            return 0
          fi
          ;;
        unhealthy)
          info "Worldserver starting up - waiting for client-data to complete..."
          info "This may take several minutes on first deployment while data files are extracted"
          ;;
      esac
    fi
    if [ $(( $(date +%s) - start )) -ge "$timeout" ]; then
      info "Worldserver is still starting up after ${timeout}s. This is normal for first deployments."
      info "Client-data extraction can take 10-15 minutes. Check progress with './status.sh' or container logs."
      return 1
    fi
    sleep 3
  done
}
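
# main(): confirm with the operator, pick a local or remote target, then run the
# staged steps (build check, stack restart, runtime staging, sentinel update)
# and optionally watch worldserver logs until the realm reports ready.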
main(){
  if [ "$ASSUME_YES" -ne 1 ]; then
    if [ -t 0 ]; then
      read -r -p "Proceed with AzerothCore deployment? [y/N]: " reply
      reply="${reply:-n}"
    else
      warn "No --yes flag provided and standard input is not interactive; aborting deployment."
      exit 1
    fi
    case "$reply" in
      [Yy]*) info "Deployment confirmed." ;;
      *) err "Deployment cancelled."; exit 1 ;;
    esac
  else
    info "Auto-confirming deployment (--yes supplied)."
  fi
  show_deployment_header
  maybe_select_deploy_target
  collect_remote_details
  validate_remote_configuration
  if [ "$REMOTE_MODE" -eq 1 ]; then
    local remote_steps=2
    show_remote_plan
    show_step 1 "$remote_steps" "Checking build requirements"
    if ! prompt_build_if_needed; then
      err "Build required but not completed. Remote deployment cancelled."
      exit 1
    fi
    show_step 2 "$remote_steps" "Migrating deployment to $REMOTE_HOST"
    if run_remote_migration; then
      ok "Remote deployment package prepared for $REMOTE_USER@$REMOTE_HOST."
      local remote_dir="${REMOTE_PROJECT_DIR:-~/AzerothCore-RealmMaster}"
      info "Run the following on the remote host to complete deployment:"
      printf ' %bcd %s && ./deploy.sh --yes --no-watch%b\n' "$YELLOW" "$remote_dir" "$NC"
      exit 0
    else
      err "Remote migration failed."
      exit 1
    fi
  fi
  show_step 1 4 "Checking build requirements"
  if ! prompt_build_if_needed; then
    err "Build required but not completed. Deployment cancelled."
    exit 1
  fi
  if [ "$KEEP_RUNNING" -ne 1 ]; then
    show_step 2 4 "Stopping runtime stack"
    stop_runtime_stack
  fi
  show_step 3 4 "Bringing your realm online"
  info "Pulling images and waiting for containers to become healthy; this may take a few minutes on first deploy."
  stage_runtime
  show_step 4 4 "Finalizing deployment"
  mark_deployment_complete
  show_realm_ready
  if [ "$WATCH_LOGS" -eq 1 ]; then
    if wait_for_worldserver_ready; then
      info "Watching your realm come to life (Ctrl+C to stop watching)"
      tail_world_logs
    else
      info "Worldserver still initializing. Client-data extraction may still be in progress."
      info "Use './status.sh' to monitor progress or 'docker logs ac-worldserver' to view startup logs."
    fi
  else
    ok "Realm deployment completed. Use './status.sh' to monitor your realm."
  fi
}

main