Merge pull request #6 from uprightbass360/refactor/extract-build-script

Refactor/extract build script
This commit is contained in:
MuchDev
2025-10-28 18:59:13 -04:00
committed by GitHub
9 changed files with 862 additions and 566 deletions

584
build.sh Executable file
View File

@@ -0,0 +1,584 @@
#!/bin/bash
#
# AzerothCore Build Script
# Handles all module compilation and image building for custom configurations
#
# Fail fast: abort on command errors, unset variables, and failed pipeline stages.
set -euo pipefail
# Absolute directory containing this script; all relative paths resolve against it.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Key=value configuration file consumed by read_env().
ENV_PATH="$ROOT_DIR/.env"
# CLI flag defaults; overridden by the argument parser below.
ASSUME_YES=0          # --yes/-y: auto-confirm all prompts
FORCE_REBUILD=0       # --force: rebuild even if no changes detected
SKIP_SOURCE_SETUP=0   # --skip-source-setup: never invoke setup-source.sh
CUSTOM_SOURCE_PATH="" # --source-path: explicit source checkout location
# Remote-migration options; migration runs only when host and user are both set.
MIGRATE_HOST=""
MIGRATE_USER=""
MIGRATE_PORT="22"
MIGRATE_IDENTITY=""
MIGRATE_PROJECT_DIR=""
MIGRATE_SKIP_STORAGE=0
# ANSI color codes used by the logging helpers below.
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
# Logging helpers: print one colorized line per call (to stdout).
info(){
  printf '%b\n' "${BLUE} $*${NC}"
}
ok(){
  printf '%b\n' "${GREEN}$*${NC}"
}
warn(){
  printf '%b\n' "${YELLOW}⚠️ $*${NC}"
}
err(){
  printf '%b\n' "${RED}$*${NC}"
}
# Print the decorative banner shown at the start of every build run.
show_build_header(){
  local banner="$BLUE"
  printf '\n%b\n' "${banner}🔨 AZEROTHCORE BUILD SYSTEM 🔨${NC}"
  printf '%b\n' "${banner}═══════════════════════════════════${NC}"
  printf '%b\n\n' "${banner}⚒️ Forging Your Custom Realm ⚒️${NC}"
}
# Print CLI usage/help to stdout. The heredoc below is user-facing runtime
# output; keep its option list in sync with the argument parser that follows.
usage(){
cat <<EOF
Usage: $(basename "$0") [options]
Build AzerothCore with custom modules and create deployment-ready images.
Options:
  --yes, -y                 Auto-confirm all prompts
  --force                   Force rebuild even if no changes detected
  --source-path PATH        Custom source repository path
  --skip-source-setup       Skip automatic source repository setup
  --migrate-host HOST       Migrate built images to remote host after build
  --migrate-user USER       SSH username for remote migration
  --migrate-port PORT       SSH port for remote migration (default: 22)
  --migrate-identity PATH   SSH private key for remote migration
  --migrate-project-dir DIR Remote project directory (default: auto-detect)
  --migrate-skip-storage    Skip storage sync during migration
  -h, --help                Show this help
This script handles:
  • Source repository preparation and updates
  • Module staging and configuration
  • AzerothCore compilation with enabled modules
  • Docker image building and tagging
  • Build state management
  • Optional remote migration
Examples:
  ./build.sh                           Interactive build
  ./build.sh --yes                     Auto-confirm build
  ./build.sh --force                   Force rebuild regardless of state
  ./build.sh --yes \\
    --migrate-host prod-server \\
    --migrate-user deploy              Build and migrate to remote server
EOF
}
# ── CLI argument parsing ──────────────────────────────────────────────
# Options that take a value check the value is actually present, so a
# missing argument produces a clear message instead of the bare
# "unbound variable" error that set -u would raise on "$2".
while [[ $# -gt 0 ]]; do
  case "$1" in
    --yes|-y) ASSUME_YES=1; shift;;
    --force) FORCE_REBUILD=1; shift;;
    --source-path)
      [ $# -ge 2 ] || { err "Option $1 requires a value"; exit 1; }
      CUSTOM_SOURCE_PATH="$2"; shift 2;;
    --skip-source-setup) SKIP_SOURCE_SETUP=1; shift;;
    --migrate-host)
      [ $# -ge 2 ] || { err "Option $1 requires a value"; exit 1; }
      MIGRATE_HOST="$2"; shift 2;;
    --migrate-user)
      [ $# -ge 2 ] || { err "Option $1 requires a value"; exit 1; }
      MIGRATE_USER="$2"; shift 2;;
    --migrate-port)
      [ $# -ge 2 ] || { err "Option $1 requires a value"; exit 1; }
      MIGRATE_PORT="$2"; shift 2;;
    --migrate-identity)
      [ $# -ge 2 ] || { err "Option $1 requires a value"; exit 1; }
      MIGRATE_IDENTITY="$2"; shift 2;;
    --migrate-project-dir)
      [ $# -ge 2 ] || { err "Option $1 requires a value"; exit 1; }
      MIGRATE_PROJECT_DIR="$2"; shift 2;;
    --migrate-skip-storage) MIGRATE_SKIP_STORAGE=1; shift;;
    -h|--help) usage; exit 0;;
    *) err "Unknown option: $1"; usage; exit 1;;
  esac
done
# Exit with an error message unless the named command is available on PATH.
require_cmd(){
  if ! command -v "$1" >/dev/null 2>&1; then
    err "Missing required command: $1"
    exit 1
  fi
}
# ── Preflight checks ─────────────────────────────────────────────────
require_cmd docker
# If either migration flag is supplied, both become mandatory and the
# migration helper script must exist before we start a long build.
if [ -n "$MIGRATE_HOST" ] || [ -n "$MIGRATE_USER" ]; then
  [ -n "$MIGRATE_HOST" ] || { err "Migration requires --migrate-host to be specified"; exit 1; }
  [ -n "$MIGRATE_USER" ] || { err "Migration requires --migrate-user to be specified"; exit 1; }
  [ -f "$ROOT_DIR/scripts/migrate-stack.sh" ] || { err "Migration script not found: $ROOT_DIR/scripts/migrate-stack.sh"; exit 1; }
fi
# Look up KEY in the .env file ($ENV_PATH); print its value, or DEFAULT
# when the key is absent/empty. Last occurrence wins; inline "# comments"
# and trailing whitespace are stripped.
read_env(){
  local key="$1" fallback="${2:-}" found=""
  if [ -f "$ENV_PATH" ]; then
    found="$(grep -E "^${key}=" "$ENV_PATH" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r' | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
  fi
  if [ -n "$found" ]; then
    echo "$found"
  else
    echo "$fallback"
  fi
}
# Module detection logic (extracted from deploy.sh)
# .env flags for C++ modules that must be compiled into the server; when any
# of these reads as "1" via read_env(), a source build (on the playerbots
# fork) is required rather than using prebuilt images.
COMPILE_MODULE_VARS=(
MODULE_AOE_LOOT MODULE_LEARN_SPELLS MODULE_FIREWORKS MODULE_INDIVIDUAL_PROGRESSION MODULE_AHBOT MODULE_AUTOBALANCE
MODULE_TRANSMOG MODULE_NPC_BUFFER MODULE_DYNAMIC_XP MODULE_SOLO_LFG MODULE_1V1_ARENA MODULE_PHASED_DUELS
MODULE_BREAKING_NEWS MODULE_BOSS_ANNOUNCER MODULE_ACCOUNT_ACHIEVEMENTS MODULE_AUTO_REVIVE MODULE_GAIN_HONOR_GUARD
MODULE_TIME_IS_TIME MODULE_POCKET_PORTAL MODULE_RANDOM_ENCHANTS MODULE_SOLOCRAFT MODULE_PVP_TITLES MODULE_NPC_BEASTMASTER
MODULE_NPC_ENCHANTER MODULE_INSTANCE_RESET MODULE_LEVEL_GRANT MODULE_ARAC MODULE_ASSISTANT MODULE_REAGENT_BANK
MODULE_CHALLENGE_MODES MODULE_OLLAMA_CHAT MODULE_PLAYER_BOT_LEVEL_BRACKETS MODULE_STATBOOSTER MODULE_DUNGEON_RESPAWN
MODULE_SKELETON_MODULE MODULE_BG_SLAVERYVALLEY MODULE_AZEROTHSHARD MODULE_WORGOBLIN
)
# Return 0 (true) when the build must use the playerbots source fork:
# either the playerbots module itself or any compiled C++ module is enabled.
requires_playerbot_source(){
  local flag
  if [ "$(read_env MODULE_PLAYERBOTS "0")" = "1" ]; then
    return 0
  fi
  for flag in "${COMPILE_MODULE_VARS[@]}"; do
    if [ "$(read_env "$flag" "0")" = "1" ]; then
      return 0
    fi
  done
  return 1
}
# Resolve (and, if necessary, create via setup-source.sh) the AzerothCore
# source checkout. Prints the absolute repository path on stdout; all
# diagnostics must go to stderr because callers capture stdout with
# command substitution — src_dir="$(ensure_source_repo)".
ensure_source_repo(){
  local use_playerbot_source=0
  if requires_playerbot_source; then
    use_playerbot_source=1
  fi
  local local_root
  local_root="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  local_root="${local_root%/}"
  [ -z "$local_root" ] && local_root="."
  local default_source="${local_root}/source/azerothcore"
  if [ "$use_playerbot_source" = "1" ]; then
    default_source="${local_root}/source/azerothcore-playerbots"
  fi
  # --source-path overrides the .env-configured path.
  local src_path
  if [ -n "$CUSTOM_SOURCE_PATH" ]; then
    src_path="$CUSTOM_SOURCE_PATH"
  else
    src_path="$(read_env MODULES_REBUILD_SOURCE_PATH "$default_source")"
  fi
  if [[ "$src_path" != /* ]]; then
    src_path="$ROOT_DIR/$src_path"
  fi
  # Normalize path (extracted from deploy.sh)
  if command -v readlink >/dev/null 2>&1 && [[ -e "$src_path" || -e "$(dirname "$src_path")" ]]; then
    src_path="$(readlink -f "$src_path" 2>/dev/null || echo "$src_path")"
  else
    src_path="$(cd "$ROOT_DIR" && realpath -m "$src_path" 2>/dev/null || echo "$src_path")"
  fi
  src_path="${src_path//\/.\//\/}"
  if [ -d "$src_path/.git" ]; then
    echo "$src_path"
    return
  fi
  if [ "$SKIP_SOURCE_SETUP" = "1" ]; then
    # BUG FIX: redirect to stderr; without >&2 this message was swallowed
    # by the caller's command substitution and the script died silently.
    err "Source repository not found at $src_path and --skip-source-setup specified" >&2
    exit 1
  fi
  warn "AzerothCore source not found at $src_path; running setup-source.sh" >&2
  if ! (cd "$ROOT_DIR" && ./scripts/setup-source.sh) >&2; then
    err "Failed to setup source repository" >&2
    exit 1
  fi
  # Verify the source was actually created
  if [ ! -d "$src_path/.git" ]; then
    err "Source repository setup failed - no git directory at $src_path" >&2
    exit 1
  fi
  echo "$src_path"
}
# Build state detection (extracted from setup.sh and deploy.sh)
# True (exit 0) when the module-staging step left a rebuild sentinel file
# behind in local storage.
modules_need_rebuild(){
  local root
  root="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  [[ "$root" == /* ]] || root="$ROOT_DIR/$root"
  [[ -f "$root/modules/.requires_rebuild" ]]
}
# Collect human-readable reasons why a rebuild is needed, one per line on
# stdout. Prints nothing at all when no rebuild is required.
detect_rebuild_reasons(){
  local reasons=()
  # Reason 1: module staging left the rebuild sentinel behind.
  if modules_need_rebuild; then
    reasons+=("Module changes detected (sentinel file present)")
  fi
  # Reason 2: C++ modules are enabled but the modules-latest images the
  # deployment profile expects are not present locally.
  local any_cxx_modules=0
  local var
  for var in "${COMPILE_MODULE_VARS[@]}"; do
    if [ "$(read_env "$var" "0")" = "1" ]; then
      any_cxx_modules=1
      break
    fi
  done
  if [ "$any_cxx_modules" = "1" ]; then
    local authserver_modules_image
    local worldserver_modules_image
    authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "uprightbass360/azerothcore-wotlk-playerbots:authserver-modules-latest")"
    worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "uprightbass360/azerothcore-wotlk-playerbots:worldserver-modules-latest")"
    if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then
      reasons+=("C++ modules enabled but authserver modules image $authserver_modules_image is missing")
    fi
    if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then
      reasons+=("C++ modules enabled but worldserver modules image $worldserver_modules_image is missing")
    fi
  fi
  # BUG FIX: `printf '%s\n' "${reasons[@]}"` with an EMPTY array still
  # prints one blank line; readarray in main() would then see a single
  # empty "reason" and confirm_build would always think a build is
  # required. Only emit output when there is at least one reason.
  if [ ${#reasons[@]} -gt 0 ]; then
    printf '%s\n' "${reasons[@]}"
  fi
}
# Decide whether to run the build. Arguments are rebuild reason strings.
# Returns 0 to proceed, 1 to skip (no build needed, or user declined).
confirm_build(){
  # ROBUSTNESS FIX: drop empty reason strings so a stray blank line from
  # reason detection can never force the "build required" path by itself.
  local reasons=()
  local entry
  for entry in "$@"; do
    if [ -n "$entry" ]; then
      reasons+=("$entry")
    fi
  done
  if [ ${#reasons[@]} -eq 0 ] && [ "$FORCE_REBUILD" = "0" ]; then
    info "No build required - all images are up to date"
    return 1 # No build needed
  fi
  # Skip duplicate output if called from deploy.sh (reasons already shown)
  local show_reasons=1
  if [ "$ASSUME_YES" -eq 1 ] && [ ${#reasons[@]} -gt 0 ]; then
    show_reasons=0 # deploy.sh already showed the reasons
  fi
  if [ "$show_reasons" -eq 1 ]; then
    echo
    if [ "$FORCE_REBUILD" = "1" ]; then
      warn "Force rebuild requested (--force flag)"
    elif [ ${#reasons[@]} -gt 0 ]; then
      warn "Build appears to be required:"
      local reason
      for reason in "${reasons[@]}"; do
        warn "$reason"
      done
    fi
    echo
  fi
  # Skip prompt if --yes flag is provided
  if [ "$ASSUME_YES" -eq 1 ]; then
    info "Auto-confirming build (--yes supplied)."
    return 0
  fi
  # Interactive prompt
  info "This will rebuild AzerothCore from source with your enabled modules."
  warn "⏱️ This process typically takes 15-45 minutes depending on your system."
  echo
  if [ -t 0 ]; then
    local reply
    read -r -p "Proceed with build? [y/N]: " reply
    reply="${reply:-n}"
    case "$reply" in
      [Yy]*)
        info "Build confirmed."
        return 0
        ;;
      *)
        info "Build cancelled."
        return 1
        ;;
    esac
  else
    warn "Standard input is not interactive; use --yes to auto-confirm."
    return 1
  fi
}
# Module staging logic (extracted from setup.sh)
# Run the one-shot ac-modules container to synchronise module artifacts,
# then tear the helper profiles back down (teardown failures ignored).
sync_modules(){
  local storage_path project_name
  # NOTE(review): storage_path is computed here but not used below — confirm
  # whether it is intentional (kept for behavioral parity).
  storage_path="$(read_env STORAGE_PATH "./storage")"
  [[ "$storage_path" == /* ]] || storage_path="$ROOT_DIR/$storage_path"
  info "Synchronising modules (ac-modules container)"
  project_name="$(resolve_project_name)"
  docker compose --project-name "$project_name" -f "$ROOT_DIR/docker-compose.yml" --profile db --profile modules up ac-modules
  docker compose --project-name "$project_name" -f "$ROOT_DIR/docker-compose.yml" --profile db --profile modules down >/dev/null 2>&1 || true
}
# Derive a valid Docker Compose project name from COMPOSE_PROJECT_NAME:
# lower-case, spaces become dashes, charset restricted to [a-z0-9_-],
# and the result must start with an alphanumeric character.
resolve_project_name(){
  local raw_name sanitized
  # IDIOM FIX: declaration split from assignment — `local v=$(cmd)` masks
  # the command substitution's exit status under set -e.
  raw_name="$(read_env COMPOSE_PROJECT_NAME "acore-compose")"
  sanitized="$(echo "$raw_name" | tr '[:upper:]' '[:lower:]')"
  sanitized="${sanitized// /-}"
  sanitized="$(echo "$sanitized" | tr -cd 'a-z0-9_-')"
  if [[ -z "$sanitized" ]]; then
    sanitized="acore-compose"
  elif [[ ! "$sanitized" =~ ^[a-z0-9] ]]; then
    # Compose requires the name to start with a letter or digit.
    sanitized="ac${sanitized}"
  fi
  echo "$sanitized"
}
# Stage enabled module repositories into the source tree's modules/ dir.
# Exports the MODULE_* toggles plus helper env vars, runs
# scripts/manage-modules.sh inside the source modules directory, then
# restores the environment. Staging failures warn but do not abort.
stage_modules(){
  local src_path="$1"
  local storage_path
  storage_path="$(read_env STORAGE_PATH "./storage")"
  if [[ "$storage_path" != /* ]]; then
    storage_path="$ROOT_DIR/$storage_path"
  fi
  info "Staging modules to source directory: $src_path/modules"
  # Verify source path exists
  if [ ! -d "$src_path" ]; then
    err "Source path does not exist: $src_path"
    return 1
  fi
  local local_modules_dir="${src_path}/modules"
  mkdir -p "$local_modules_dir"
  # Export module toggles so manage-modules.sh can read them from the env.
  local module_vars=(
    MODULE_PLAYERBOTS MODULE_AOE_LOOT MODULE_LEARN_SPELLS MODULE_FIREWORKS MODULE_INDIVIDUAL_PROGRESSION MODULE_AHBOT MODULE_AUTOBALANCE
    MODULE_TRANSMOG MODULE_NPC_BUFFER MODULE_DYNAMIC_XP MODULE_SOLO_LFG MODULE_1V1_ARENA MODULE_PHASED_DUELS
    MODULE_BREAKING_NEWS MODULE_BOSS_ANNOUNCER MODULE_ACCOUNT_ACHIEVEMENTS MODULE_AUTO_REVIVE MODULE_GAIN_HONOR_GUARD
    MODULE_ELUNA MODULE_TIME_IS_TIME MODULE_POCKET_PORTAL MODULE_RANDOM_ENCHANTS MODULE_SOLOCRAFT MODULE_PVP_TITLES
    MODULE_NPC_BEASTMASTER MODULE_NPC_ENCHANTER MODULE_INSTANCE_RESET MODULE_LEVEL_GRANT MODULE_ARAC MODULE_ASSISTANT
    MODULE_REAGENT_BANK MODULE_BLACK_MARKET_AUCTION_HOUSE MODULE_CHALLENGE_MODES MODULE_OLLAMA_CHAT
    MODULE_PLAYER_BOT_LEVEL_BRACKETS MODULE_STATBOOSTER MODULE_DUNGEON_RESPAWN MODULE_SKELETON_MODULE
    MODULE_BG_SLAVERYVALLEY MODULE_AZEROTHSHARD MODULE_WORGOBLIN MODULE_ELUNA_TS
  )
  local module_export_var
  for module_export_var in "${module_vars[@]}"; do
    export "$module_export_var"
  done
  local host_modules_dir="${storage_path}/modules"
  export MODULES_HOST_DIR="$host_modules_dir"
  # Set up local storage path for build sentinel tracking
  local local_storage_path
  local_storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  if [[ "$local_storage_path" != /* ]]; then
    local_storage_path="$ROOT_DIR/$local_storage_path"
  fi
  export LOCAL_STORAGE_SENTINEL_PATH="$local_storage_path/modules/.requires_rebuild"
  # Point git at a throwaway global config so the module script cannot
  # pollute (or be influenced by) the user's real ~/.gitconfig.
  local prev_git_config_global="${GIT_CONFIG_GLOBAL:-}"
  local git_temp_config=""
  if command -v mktemp >/dev/null 2>&1; then
    if ! git_temp_config="$(mktemp)"; then
      git_temp_config=""
    fi
  fi
  if [ -z "$git_temp_config" ]; then
    # Fallback when mktemp is unavailable or failed: truncate a file
    # inside the modules directory instead.
    git_temp_config="$local_modules_dir/.gitconfig.tmp"
    : > "$git_temp_config"
  fi
  export GIT_CONFIG_GLOBAL="$git_temp_config"
  # Run module staging script in local modules directory
  export MODULES_LOCAL_RUN=1
  if [ -n "$host_modules_dir" ]; then
    mkdir -p "$host_modules_dir"
    rm -f "$host_modules_dir/.modules_state" "$host_modules_dir/.requires_rebuild" 2>/dev/null || true
  fi
  if (cd "$local_modules_dir" && bash "$ROOT_DIR/scripts/manage-modules.sh"); then
    ok "Module repositories staged to $local_modules_dir"
    if [ -n "$host_modules_dir" ]; then
      if [ -f "$local_modules_dir/.modules_state" ]; then
        cp "$local_modules_dir/.modules_state" "$host_modules_dir/.modules_state" 2>/dev/null || true
      fi
    fi
  else
    # Deliberately best-effort: report and continue with the build.
    warn "Module staging encountered issues, but continuing with build"
  fi
  # Cleanup.
  # BUG FIX: restore the previous GIT_CONFIG_GLOBAL, or unset it when it
  # was not set before — re-exporting an empty string would make git
  # treat "" as a global config path instead of falling back to default.
  if [ -n "$prev_git_config_global" ]; then
    export GIT_CONFIG_GLOBAL="$prev_git_config_global"
  else
    unset GIT_CONFIG_GLOBAL
  fi
  unset MODULES_LOCAL_RUN
  unset MODULES_HOST_DIR
  # `if` form so a missing temp file cannot leak a non-zero status out of
  # this function (under set -e that would abort the whole build).
  if [ -n "$git_temp_config" ] && [ -f "$git_temp_config" ]; then
    rm -f "$git_temp_config"
  fi
}
# Build execution (extracted from setup.sh)
# Compile AzerothCore plus staged modules via rebuild-with-modules.sh,
# bracketing the build with compose teardowns to free container names.
execute_build(){
  local src_path="$1"
  local compose_file="$src_path/docker-compose.yml"
  if [ ! -d "$src_path" ]; then
    err "Source path does not exist: $src_path"
    return 1
  fi
  if [ ! -f "$compose_file" ]; then
    err "Source docker-compose.yml missing at $compose_file"
    return 1
  fi
  info "Building AzerothCore with modules (this may take a while)"
  docker compose -f "$compose_file" down --remove-orphans >/dev/null 2>&1 || true
  if ! (cd "$ROOT_DIR" && ./scripts/rebuild-with-modules.sh --yes --source "$src_path"); then
    err "Source build failed"
    return 1
  fi
  ok "Source build completed"
  docker compose -f "$compose_file" down --remove-orphans >/dev/null 2>&1 || true
}
# Image tagging (extracted from setup.sh and deploy.sh)
# Retag the built playerbot images with the "-modules-latest" names that
# the deployment profile expects. A missing source image only warns.
tag_module_images(){
  local source_auth source_world target_auth target_world
  source_auth="$(read_env AC_AUTHSERVER_IMAGE_PLAYERBOTS "uprightbass360/azerothcore-wotlk-playerbots:authserver-Playerbot")"
  source_world="$(read_env AC_WORLDSERVER_IMAGE_PLAYERBOTS "uprightbass360/azerothcore-wotlk-playerbots:worldserver-Playerbot")"
  target_auth="$(read_env AC_AUTHSERVER_IMAGE_MODULES "uprightbass360/azerothcore-wotlk-playerbots:authserver-modules-latest")"
  target_world="$(read_env AC_WORLDSERVER_IMAGE_MODULES "uprightbass360/azerothcore-wotlk-playerbots:worldserver-modules-latest")"
  if ! docker image inspect "$source_auth" >/dev/null 2>&1; then
    warn "Source authserver image $source_auth not found; skipping modules tag"
  else
    docker tag "$source_auth" "$target_auth"
    ok "Tagged $target_auth from $source_auth"
  fi
  if ! docker image inspect "$source_world" >/dev/null 2>&1; then
    warn "Source worldserver image $source_world not found; skipping modules tag"
  else
    docker tag "$source_world" "$target_world"
    ok "Tagged $target_world from $source_world"
  fi
}
# Push the built images/storage to a remote host via migrate-stack.sh.
# No-op (returns 0) unless both --migrate-host and --migrate-user were given.
run_migration(){
  if [ -z "$MIGRATE_HOST" ] || [ -z "$MIGRATE_USER" ]; then
    return 0 # No migration requested
  fi
  info "Starting remote migration to $MIGRATE_USER@$MIGRATE_HOST"
  # Assemble migrate-stack.sh arguments; optional flags only when set.
  local migrate_args=(--host "$MIGRATE_HOST" --user "$MIGRATE_USER")
  [ "$MIGRATE_PORT" = "22" ] || migrate_args+=(--port "$MIGRATE_PORT")
  [ -z "$MIGRATE_IDENTITY" ] || migrate_args+=(--identity "$MIGRATE_IDENTITY")
  [ -z "$MIGRATE_PROJECT_DIR" ] || migrate_args+=(--project-dir "$MIGRATE_PROJECT_DIR")
  [ "$MIGRATE_SKIP_STORAGE" != "1" ] || migrate_args+=(--skip-storage)
  [ "$ASSUME_YES" != "1" ] || migrate_args+=(--yes)
  if (cd "$ROOT_DIR" && ./scripts/migrate-stack.sh "${migrate_args[@]}"); then
    ok "Migration completed successfully"
    echo
    info "Remote deployment ready! Run on $MIGRATE_HOST:"
    printf ' %bcd %s && ./deploy.sh --no-watch%b\n' "$YELLOW" "${MIGRATE_PROJECT_DIR:-~/acore-compose}" "$NC"
  else
    warn "Migration failed, but build completed successfully"
    return 1
  fi
}
# Final success banner; the closing line differs when a remote migration ran.
show_build_complete(){
  printf '\n%b\n' "${GREEN}🔨 Build Complete! 🔨${NC}"
  printf '%b\n' "${GREEN}⚒️ Your custom AzerothCore images are ready${NC}"
  local closing="${GREEN}🚀 Ready for deployment with ./deploy.sh${NC}"
  if [ -n "$MIGRATE_HOST" ]; then
    closing="${GREEN}🌐 Remote migration completed${NC}"
  fi
  printf '%b\n\n' "$closing"
}
# Remove the rebuild sentinel so subsequent deploys do not re-prompt for
# a build that just completed.
clear_build_sentinel(){
  local storage_path
  storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  [[ "$storage_path" == /* ]] || storage_path="$ROOT_DIR/$storage_path"
  rm -f "$storage_path/modules/.requires_rebuild" 2>/dev/null || true
}
# Orchestrate the full pipeline: source setup → build detection →
# module sync/staging → compile → tag → (optional) remote migration.
main(){
  show_build_header
  local src_dir
  local rebuild_reasons
  info "Step 1/7: Setting up source repository"
  src_dir="$(ensure_source_repo)"
  info "Step 2/7: Detecting build requirements"
  readarray -t rebuild_reasons < <(detect_rebuild_reasons)
  if ! confirm_build "${rebuild_reasons[@]}"; then
    info "Build cancelled or not required."
    exit 0
  fi
  info "Step 3/7: Syncing modules to container storage"
  sync_modules
  info "Step 4/7: Staging modules to source directory"
  stage_modules "$src_dir"
  info "Step 5/7: Building AzerothCore with modules"
  execute_build "$src_dir"
  info "Step 6/7: Tagging images for deployment"
  tag_module_images
  # Clear build sentinel after successful build
  clear_build_sentinel
  if [ -n "$MIGRATE_HOST" ]; then
    echo
    info "Step 7/7: Migrating images to remote host"
    # NOTE(review): under set -e a run_migration failure (return 1) aborts
    # before show_build_complete, despite the "build completed" warning —
    # confirm this is the intended behavior.
    run_migration
  fi
  show_build_complete
}
main "$@"

335
deploy.sh
View File

@@ -14,7 +14,6 @@ ENV_PATH="$ROOT_DIR/.env"
TARGET_PROFILE=""
WATCH_LOGS=1
KEEP_RUNNING=0
SKIP_REBUILD=0
WORLD_LOG_SINCE=""
ASSUME_YES=0
@@ -36,7 +35,7 @@ err(){ printf '%b\n' "${RED}❌ $*${NC}"; }
show_deployment_header(){
printf '\n%b\n' "${BLUE}⚔️ AZEROTHCORE REALM DEPLOYMENT ⚔️${NC}"
printf '%b\n' "${BLUE}═══════════════════════════════════════${NC}"
printf '%b\n' "${BLUE}═══════════════════════════════════════${NC}"
printf '%b\n\n' "${BLUE}🏰 Bringing Your Realm Online 🏰${NC}"
}
@@ -58,23 +57,20 @@ Usage: $(basename "$0") [options]
Options:
--profile {standard|playerbots|modules} Force target profile (default: auto-detect)
--no-watch Do not tail worldserver logs after staging
--keep-running Do not pre-stop runtime stack before rebuild
--skip-rebuild Skip source rebuild even if modules require it
--yes, -y Auto-confirm deployment and rebuild prompts
--keep-running Do not pre-stop runtime stack
--yes, -y Auto-confirm deployment prompts
--watch-logs Tail worldserver logs even if --no-watch was set earlier
--log-tail LINES Override WORLD_LOG_TAIL (number of log lines to show)
--once Run status checks once (alias for --no-watch)
-h, --help Show this help
This command automates the module workflow: sync modules, rebuild source if needed,
stage the correct compose profile, and optionally watch worldserver logs.
This command automates deployment: sync modules, stage the correct compose profile,
and optionally watch worldserver logs.
Rebuild Detection:
The script automatically detects when a module rebuild is required by checking:
Module changes (sentinel file .requires_rebuild)
C++ modules enabled but modules-latest Docker images missing
Set AUTO_REBUILD_ON_DEPLOY=1 in .env to skip rebuild prompts and auto-rebuild.
Image Requirements:
This script assumes Docker images are already built. If you have custom modules:
Run './build.sh' first to build custom images
Standard AzerothCore images will be pulled automatically
EOF
}
@@ -83,7 +79,6 @@ while [[ $# -gt 0 ]]; do
--profile) TARGET_PROFILE="$2"; shift 2;;
--no-watch) WATCH_LOGS=0; shift;;
--keep-running) KEEP_RUNNING=1; shift;;
--skip-rebuild) SKIP_REBUILD=1; shift;;
--yes|-y) ASSUME_YES=1; shift;;
-h|--help) usage; exit 0;;
*) err "Unknown option: $1"; usage; exit 1;;
@@ -122,85 +117,28 @@ resolve_project_name(){
echo "$sanitized"
}
filter_empty_lines(){
awk '
/^[[:space:]]*$/ {
empty_count++
if (empty_count <= 1) print
}
/[^[:space:]]/ {
empty_count = 0
print
}
'
}
compose(){
local project_name
project_name="$(resolve_project_name)"
docker compose --project-name "$project_name" -f "$COMPOSE_FILE" "$@"
# Add --quiet for less verbose output, filter excessive empty lines
docker compose --project-name "$project_name" -f "$COMPOSE_FILE" "$@" | filter_empty_lines
}
ensure_source_repo(){
local use_playerbot_source=0
if requires_playerbot_source; then
use_playerbot_source=1
fi
local local_root
local_root="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
local_root="${local_root%/}"
[ -z "$local_root" ] && local_root="."
local default_source="${local_root}/source/azerothcore"
if [ "$use_playerbot_source" = "1" ]; then
default_source="${local_root}/source/azerothcore-playerbots"
fi
local src_path
src_path="$(read_env MODULES_REBUILD_SOURCE_PATH "$default_source")"
if [[ "$src_path" != /* ]]; then
src_path="$ROOT_DIR/$src_path"
fi
# Normalize path to remove ./ and resolve to absolute path
# Use readlink -f if available, fall back to realpath, then manual normalization
if command -v readlink >/dev/null 2>&1 && [[ -e "$src_path" || -e "$(dirname "$src_path")" ]]; then
src_path="$(readlink -f "$src_path" 2>/dev/null || echo "$src_path")"
else
src_path="$(cd "$ROOT_DIR" && realpath -m "$src_path" 2>/dev/null || echo "$src_path")"
fi
# Final fallback: manual ./ removal if all else fails
src_path="${src_path//\/.\//\/}"
if [ -d "$src_path/.git" ]; then
echo "$src_path"
return
fi
warn "AzerothCore source not found at $src_path; running setup-source.sh"
(cd "$ROOT_DIR" && ./scripts/setup-source.sh)
echo "$src_path"
}
stop_runtime_stack(){
info "Stopping runtime stack to avoid container name conflicts"
compose \
--profile services-standard \
--profile services-playerbots \
--profile services-modules \
--profile db \
--profile client-data \
--profile client-data-bots \
--profile modules \
down 2>/dev/null || true
}
sync_modules(){
info "Synchronising modules (ac-modules)"
compose --profile db --profile modules up ac-modules
compose --profile db --profile modules down >/dev/null 2>&1 || true
}
modules_need_rebuild(){
local storage_path
storage_path="$(read_env STORAGE_PATH "./storage")"
if [[ "$storage_path" != /* ]]; then
storage_path="$ROOT_DIR/$storage_path"
fi
local sentinel="$storage_path/modules/.requires_rebuild"
[[ -f "$sentinel" ]]
}
check_auto_rebuild_setting(){
local auto_rebuild
auto_rebuild="$(read_env AUTO_REBUILD_ON_DEPLOY "0")"
[[ "$auto_rebuild" = "1" ]]
}
detect_rebuild_reasons(){
# Build detection logic
detect_build_needed(){
local reasons=()
# Check sentinel file
@@ -235,73 +173,102 @@ detect_rebuild_reasons(){
printf '%s\n' "${reasons[@]}"
}
requires_playerbot_source(){
if [ "$(read_env MODULE_PLAYERBOTS "0")" = "1" ]; then
return 0
fi
local var
for var in "${COMPILE_MODULE_VARS[@]}"; do
if [ "$(read_env "$var" "0")" = "1" ]; then
return 0
fi
done
return 1
stop_runtime_stack(){
info "Stopping runtime stack to avoid container name conflicts"
compose \
--profile services-standard \
--profile services-playerbots \
--profile services-modules \
--profile db \
--profile client-data \
--profile client-data-bots \
--profile modules \
down 2>/dev/null || true
}
confirm_rebuild(){
local reasons=("$@")
# Deployment sentinel management
mark_deployment_complete(){
local storage_path
storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$storage_path" != /* ]]; then
storage_path="$ROOT_DIR/$storage_path"
fi
local sentinel="$storage_path/modules/.last_deployed"
mkdir -p "$(dirname "$sentinel")"
date > "$sentinel"
}
if [ ${#reasons[@]} -eq 0 ]; then
return 1 # No rebuild needed
modules_need_rebuild(){
local storage_path
storage_path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$storage_path" != /* ]]; then
storage_path="$ROOT_DIR/$storage_path"
fi
local sentinel="$storage_path/modules/.requires_rebuild"
[[ -f "$sentinel" ]]
}
# Build prompting logic
prompt_build_if_needed(){
local build_reasons_output
build_reasons_output=$(detect_build_needed)
if [ -z "$build_reasons_output" ]; then
return 0 # No build needed
fi
local build_reasons
readarray -t build_reasons <<< "$build_reasons_output"
# Check if auto-rebuild is enabled
local auto_rebuild
auto_rebuild="$(read_env AUTO_REBUILD_ON_DEPLOY "0")"
if [ "$auto_rebuild" = "1" ]; then
warn "Auto-rebuild enabled, running build process..."
if (cd "$ROOT_DIR" && ./build.sh --yes); then
ok "Build completed successfully"
return 0
else
err "Build failed"
return 1
fi
fi
# Interactive prompt
echo
warn "Module rebuild appears to be required:"
warn "Build appears to be required:"
local reason
for reason in "${reasons[@]}"; do
for reason in "${build_reasons[@]}"; do
warn "$reason"
done
echo
# Check auto-rebuild setting
if check_auto_rebuild_setting; then
info "AUTO_REBUILD_ON_DEPLOY is enabled; proceeding with automatic rebuild."
return 0
fi
# Skip prompt if --yes flag is provided
if [ "$ASSUME_YES" -eq 1 ]; then
info "Auto-confirming rebuild (--yes supplied)."
return 0
fi
# Interactive prompt
info "This will rebuild AzerothCore from source with your enabled modules."
warn "⏱️ This process typically takes 15-45 minutes depending on your system."
echo
if [ -t 0 ]; then
local reply
read -r -p "Proceed with module rebuild? [y/N]: " reply
read -r -p "Run build now? [y/N]: " reply
reply="${reply:-n}"
case "$reply" in
[Yy]*)
info "Rebuild confirmed."
if (cd "$ROOT_DIR" && ./build.sh --yes); then
ok "Build completed successfully"
return 0
else
err "Build failed"
return 1
fi
;;
*)
warn "Rebuild declined. You can:"
warn " • Run with --skip-rebuild to deploy without rebuilding"
warn " • Set AUTO_REBUILD_ON_DEPLOY=1 in .env for automatic rebuilds"
warn " • Run './scripts/rebuild-with-modules.sh' manually later"
err "Build required but declined. Run './build.sh' manually before deploying or re-run this script."
return 1
;;
esac
else
warn "Standard input is not interactive; use --yes to auto-confirm or --skip-rebuild to skip."
err "Build required but running non-interactively. Run './build.sh' manually before deploying or re-run this script."
return 1
fi
}
determine_profile(){
if [ -n "$TARGET_PROFILE" ]; then
echo "$TARGET_PROFILE"
@@ -328,53 +295,6 @@ determine_profile(){
echo "standard"
}
rebuild_source(){
local src_dir="$1"
local compose_file="$src_dir/docker-compose.yml"
if [ ! -f "$compose_file" ]; then
warn "Source docker-compose.yml missing at $compose_file; running setup-source.sh"
(cd "$ROOT_DIR" && ./scripts/setup-source.sh)
fi
if [ ! -f "$compose_file" ]; then
err "Source docker-compose.yml missing at $compose_file"
return 1
fi
info "Rebuilding AzerothCore source with modules (this may take a while)"
docker compose -f "$compose_file" down --remove-orphans >/dev/null 2>&1 || true
if (cd "$ROOT_DIR" && ./scripts/rebuild-with-modules.sh --yes); then
ok "Source rebuild completed"
else
err "Source rebuild failed"
return 1
fi
docker compose -f "$compose_file" down --remove-orphans >/dev/null 2>&1 || true
}
tag_module_images(){
local source_auth
local source_world
local target_auth
local target_world
source_auth="$(read_env AC_AUTHSERVER_IMAGE_PLAYERBOTS "uprightbass360/azerothcore-wotlk-playerbots:authserver-Playerbot")"
source_world="$(read_env AC_WORLDSERVER_IMAGE_PLAYERBOTS "uprightbass360/azerothcore-wotlk-playerbots:worldserver-Playerbot")"
target_auth="$(read_env AC_AUTHSERVER_IMAGE_MODULES "uprightbass360/azerothcore-wotlk-playerbots:authserver-modules-latest")"
target_world="$(read_env AC_WORLDSERVER_IMAGE_MODULES "uprightbass360/azerothcore-wotlk-playerbots:worldserver-modules-latest")"
if docker image inspect "$source_auth" >/dev/null 2>&1; then
docker tag "$source_auth" "$target_auth"
ok "Tagged $target_auth from $source_auth"
else
warn "Source authserver image $source_auth not found; skipping modules tag"
fi
if docker image inspect "$source_world" >/dev/null 2>&1; then
docker tag "$source_world" "$target_world"
ok "Tagged $target_world from $source_world"
else
warn "Source worldserver image $source_world not found; skipping modules tag"
fi
}
stage_runtime(){
local args=(--yes)
@@ -401,7 +321,8 @@ tail_world_logs(){
wait_for_worldserver_ready(){
local timeout="${WORLD_READY_TIMEOUT:-180}" start
start="$(date +%s)"
info "Waiting for worldserver to become healthy (timeout: ${timeout}s)"
info "Waiting for worldserver to become ready (timeout: ${timeout}s)"
info "First deployment may take 10-15 minutes while client-data is extracted"
while true; do
if ! docker ps --format '{{.Names}}' | grep -qx "ac-worldserver"; then
info "Worldserver container is not running yet; retrying..."
@@ -422,13 +343,14 @@ wait_for_worldserver_ready(){
fi
;;
unhealthy)
warn "Worldserver healthcheck reports unhealthy; logs recommended"
return 1
info "Worldserver starting up - waiting for client-data to complete..."
info "This may take several minutes on first deployment while data files are extracted"
;;
esac
fi
if [ $(( $(date +%s) - start )) -ge "$timeout" ]; then
warn "Timed out waiting for worldserver health"
info "Worldserver is still starting up after ${timeout}s. This is normal for first deployments."
info "Client-data extraction can take 10-15 minutes. Check progress with './status.sh' or container logs."
return 1
fi
sleep 3
@@ -454,57 +376,27 @@ main(){
show_deployment_header
local src_dir
local resolved_profile
show_step 1 5 "Setting up source repository"
src_dir="$(ensure_source_repo)"
resolved_profile="$(determine_profile)"
show_step 1 4 "Checking build requirements"
if ! prompt_build_if_needed; then
err "Build required but not completed. Deployment cancelled."
exit 1
fi
if [ "$KEEP_RUNNING" -ne 1 ]; then
show_step 2 5 "Stopping runtime stack"
show_step 2 4 "Stopping runtime stack"
stop_runtime_stack
fi
show_step 3 5 "Syncing modules"
sync_modules
local did_rebuild=0
local rebuild_reasons
readarray -t rebuild_reasons < <(detect_rebuild_reasons)
if [ ${#rebuild_reasons[@]} -gt 0 ]; then
if [ "$SKIP_REBUILD" -eq 1 ]; then
warn "Modules require rebuild, but --skip-rebuild was provided:"
local reason
for reason in "${rebuild_reasons[@]}"; do
warn "$reason"
done
warn "Proceeding without rebuild; deployment may fail if modules-latest images are missing."
else
if confirm_rebuild "${rebuild_reasons[@]}"; then
show_step 4 5 "Building realm with modules (this may take 15-45 minutes)"
rebuild_source "$src_dir"
did_rebuild=1
else
err "Rebuild required but declined. Use --skip-rebuild to force deployment without rebuild."
exit 1
fi
fi
else
info "No module rebuild required."
fi
if [ "$did_rebuild" -eq 1 ]; then
tag_module_images
elif [ "$resolved_profile" = "modules" ]; then
tag_module_images
fi
show_step 5 5 "Bringing your realm online"
show_step 3 4 "Bringing your realm online"
info "Pulling images and waiting for containers to become healthy; this may take a few minutes on first deploy."
stage_runtime
show_step 4 4 "Finalizing deployment"
mark_deployment_complete
show_realm_ready
if [ "$WATCH_LOGS" -eq 1 ]; then
@@ -512,7 +404,8 @@ main(){
info "Watching your realm come to life (Ctrl+C to stop watching)"
tail_world_logs
else
warn "Skipping log tail; worldserver not healthy. Use './status.sh --once' or 'docker logs ac-worldserver'."
info "Worldserver still initializing. Client-data extraction may still be in progress."
info "Use './status.sh' to monitor progress or 'docker logs ac-worldserver' to view startup logs."
fi
else
ok "Realm deployment completed. Use './status.sh' to monitor your realm."

View File

@@ -1,195 +0,0 @@
#!/bin/bash
#
# Diagnostic script to identify why client-data extraction fails on Debian
# but works on Ubuntu
#
# Exit on the first unguarded failing command; every check below that is
# allowed to fail is wrapped in an `if`/`&&`/`||` so it does not trip -e.
set -e
# ANSI escape sequences for the coloured log helpers; NC resets colouring.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Coloured status-line helpers.  Each prefixes its arguments with a tag;
# printf '%b' interprets the ANSI escapes held in the colour variables,
# matching the original `echo -e` behaviour.
info() {
  printf '%b\n' "${BLUE}[INFO]${NC} $*"
}
ok() {
  printf '%b\n' "${GREEN}[PASS]${NC} $*"
}
warn() {
  printf '%b\n' "${YELLOW}[WARN]${NC} $*"
}
fail() {
  printf '%b\n' "${RED}[FAIL]${NC} $*"
}
echo "========================================"
echo " Client-Data Extraction Diagnostics"
echo "========================================"
echo ""
# Test 1: System Information
info "Test 1: System Information"
# grep reads os-release directly (the original piped it through a useless cat).
echo " OS: $(grep PRETTY_NAME /etc/os-release | cut -d'"' -f2)"
echo " Kernel: $(uname -r)"
echo " Host date: $(date)"
echo ""
# Test 2: Docker Version
info "Test 2: Docker Version"
# Query the daemon once and reuse the value.  The original used
# `cmd && ok || fail` (ShellCheck SC2015: the fail arm also runs when ok
# itself fails) and then invoked `docker version` a second time just to
# print the value -- even when the first call had already failed.
if docker_server_version=$(docker version --format '{{.Server.Version}}'); then
  ok "Docker installed"
  echo " Docker version: $docker_server_version"
else
  fail "Docker not found"
fi
echo ""
# Test 3: Docker Configuration
# A custom daemon.json (e.g. custom DNS) can explain why extraction behaves
# differently between hosts, so show it when present.
info "Test 3: Docker Configuration"
if [ -f /etc/docker/daemon.json ]; then
  ok "Found custom Docker config"
  echo " Config:"
  # sed reads the file directly (UUOC fix) and indents it for readability.
  sed 's/^/ /' /etc/docker/daemon.json
else
  warn "No custom Docker config found (using defaults)"
fi
echo ""
# Test 4: Host DNS Configuration
info "Test 4: Host DNS Configuration"
echo " Nameservers:"
# grep reads resolv.conf directly (UUOC fix).  The trailing sed keeps the
# pipeline's exit status at 0 even if no nameserver lines match, which
# matters because the script runs under `set -e`.
grep nameserver /etc/resolv.conf | sed 's/^/ /'
echo ""
# Test 5: Container DNS Resolution
# Two separate throwaway containers are used: the first only verifies that
# resolv.conf can be read, the second prints it.  NOTE(review): the second
# run is a fresh container and could in principle see different DNS config
# than the one that passed the check.
info "Test 5: Container DNS Resolution"
echo " Testing DNS inside Ubuntu 22.04 container..."
if docker run --rm ubuntu:22.04 sh -c "cat /etc/resolv.conf" >/dev/null 2>&1; then
docker run --rm ubuntu:22.04 cat /etc/resolv.conf | sed 's/^/ /'
ok "Container DNS configured"
else
fail "Container DNS check failed"
fi
echo ""
# Test 6: Network Connectivity
# ping is not in the base image, so it is installed first.  Because the
# whole `apt-get update && install && ping` chain is one condition, a
# failure of apt itself (e.g. DNS broken) also lands in the warn branch.
info "Test 6: Network Connectivity to Ubuntu Repos"
echo " Pinging archive.ubuntu.com..."
if docker run --rm ubuntu:22.04 sh -c "apt-get update -qq && apt-get install -y iputils-ping >/dev/null 2>&1 && ping -c 2 archive.ubuntu.com" >/dev/null 2>&1; then
ok "Can reach archive.ubuntu.com"
else
warn "Cannot reach archive.ubuntu.com (may be network/DNS issue)"
fi
echo ""
# Test 7: Container Date/Time
# Large host/container clock skew breaks TLS and apt, so compare epoch
# seconds from both sides.
info "Test 7: Container Date/Time Sync"
HOST_DATE=$(date +%s)
# `|| echo ""` keeps a failed docker run from aborting the entire
# diagnostic script: under `set -e`, a failing command substitution fails
# the assignment and would exit here, skipping all remaining tests.
CONTAINER_DATE=$(docker run --rm ubuntu:22.04 date +%s 2>/dev/null || echo "")
if [ -z "$CONTAINER_DATE" ]; then
  warn "Could not read container time (docker run failed); skipping skew check"
else
  DATE_DIFF=$((HOST_DATE - CONTAINER_DATE))
  # ${DATE_DIFF#-} strips a leading minus sign (absolute value); quoted so
  # the integer test stays well-formed (ShellCheck SC2086).
  if [ "${DATE_DIFF#-}" -lt 10 ]; then
    ok "Container time synced (diff: ${DATE_DIFF}s)"
  else
    warn "Container time out of sync (diff: ${DATE_DIFF}s)"
  fi
fi
echo " Host: $(date)"
echo " Container: $(docker run --rm ubuntu:22.04 date)"
echo ""
# Test 8: apt-get update (Default DNS)
# First run is silenced and only its exit status is checked; on failure a
# second run captures the output so the Err:/W:/E: lines can be shown.
info "Test 8: apt-get update with default DNS"
echo " Running apt-get update inside container..."
if docker run --rm ubuntu:22.04 apt-get update >/dev/null 2>&1; then
ok "apt-get update succeeded with default DNS"
else
fail "apt-get update failed with default DNS"
echo " Error output:"
# head -5 limits the noise; the pipeline ends in sed, so a non-matching
# grep does not trip `set -e`.
docker run --rm ubuntu:22.04 apt-get update 2>&1 | grep -E "Err:|W:|E:" | head -5 | sed 's/^/ /'
fi
echo ""
# Test 9: apt-get update (Google DNS)
# Same check but forcing Google's resolvers; success here but failure in
# Test 8 points at the host's DNS configuration.
info "Test 9: apt-get update with Google DNS (8.8.8.8)"
echo " Running apt-get update with --dns 8.8.8.8..."
if docker run --rm --dns 8.8.8.8 ubuntu:22.04 apt-get update >/dev/null 2>&1; then
ok "apt-get update succeeded with Google DNS"
echo " ✓ FIX: Adding dns: [8.8.8.8, 8.8.4.4] to docker-compose.yml should work"
else
fail "apt-get update failed even with Google DNS"
echo " Error output:"
docker run --rm --dns 8.8.8.8 ubuntu:22.04 apt-get update 2>&1 | grep -E "Err:|W:|E:" | head -5 | sed 's/^/ /'
fi
echo ""
# Test 10: wget availability in base image
# The client-data image needs at least one download tool available; probe
# it for each candidate.
info "Test 10: Check if wget/curl exists in client-data image"
IMAGE="uprightbass360/azerothcore-wotlk-playerbots:client-data-Playerbot"
if docker image inspect "$IMAGE" >/dev/null 2>&1; then
  echo " Checking for download tools in $IMAGE..."
  # One loop replaces three copy-pasted if-blocks; messages are unchanged.
  # NOTE(review): `which` may itself be absent in minimal images;
  # `command -v` would be more portable -- confirm before changing output.
  for tool in wget curl aria2c; do
    if docker run --rm "$IMAGE" sh -c "which $tool" 2>/dev/null; then
      ok "$tool found in base image"
    else
      warn "$tool not found in base image"
    fi
  done
else
  warn "Image $IMAGE not found locally"
fi
echo ""
echo ""
# Test 11: GitHub connectivity
info "Test 11: GitHub Connectivity"
echo " Testing connection to github.com..."
if docker run --rm alpine:latest sh -c "apk add --no-cache curl >/dev/null 2>&1 && curl -I https://github.com 2>&1" | grep -q "HTTP/"; then
ok "Can reach github.com"
else
fail "Cannot reach github.com"
fi
echo ""
# Test 12: Download test (small file)
info "Test 12: Download Test (small file from GitHub)"
echo " Attempting to download a small file from GitHub releases..."
TEST_URL="https://github.com/wowgaming/client-data/releases/latest"
if docker run --rm alpine:latest sh -c "apk add --no-cache curl >/dev/null 2>&1 && curl -sL '$TEST_URL' >/dev/null" 2>&1; then
ok "Successfully accessed GitHub releases"
else
fail "Failed to access GitHub releases"
fi
echo ""
# Summary
echo "========================================"
echo " Summary & Recommendations"
echo "========================================"
echo ""
# Provide recommendations based on test results
# NOTE(review): both branches below re-run the slow, network-bound apt-get
# checks from Tests 8/9 instead of reusing their results; if network state
# changed in between, the recommendation can disagree with the test output
# printed above.
if docker run --rm --dns 8.8.8.8 ubuntu:22.04 apt-get update >/dev/null 2>&1; then
# Google DNS works inside a container -> suggest pinning it in compose.
echo "✓ RECOMMENDATION: Add Google DNS to docker-compose.yml"
echo ""
echo "Add this to the ac-client-data-playerbots service in docker-compose.yml:"
echo ""
echo " ac-client-data-playerbots:"
echo " dns:"
echo " - 8.8.8.8"
echo " - 8.8.4.4"
echo " # ... rest of config"
echo ""
elif ! docker run --rm ubuntu:22.04 apt-get update >/dev/null 2>&1; then
# Neither default nor Google DNS works -> fall back to a manual download
# of the client data straight into the docker volume.
echo "⚠ RECOMMENDATION: Use manual download method"
echo ""
echo "The apt-get update is failing even with Google DNS."
echo "Use manual download:"
echo ""
echo " cd /tmp"
echo " wget https://github.com/wowgaming/client-data/releases/download/v17/data.zip"
echo " docker volume create ac-client-data"
echo " docker run --rm -v ac-client-data:/data -v /tmp:/host alpine:latest \\"
echo " sh -c 'apk add --no-cache unzip && cd /data && unzip /host/data.zip'"
echo ""
else
ok "All tests passed - extraction should work"
fi
echo "========================================"

View File

@@ -701,6 +701,8 @@ elif [ -f "/tmp/scripts/manage-modules-sql.sh" ]; then
. /tmp/scripts/manage-modules-sql.sh
else
echo "⚠️ SQL helper not found, skipping module SQL execution"
echo "If you are seeing this during build this is normal"
fi
# Execute SQLs for enabled modules (via helper)
@@ -825,7 +827,14 @@ fi
echo 'Module management complete.'
if [ "$MODULES_LOCAL_RUN" = "1" ]; then
# When running locally, use local-storage for build state tracking
local_storage_path="${LOCAL_STORAGE_SENTINEL_PATH:-}"
if [ -n "$local_storage_path" ]; then
REBUILD_SENTINEL="$local_storage_path"
else
# Fallback to current directory if no path provided (legacy behavior)
REBUILD_SENTINEL="./.requires_rebuild"
fi
else
REBUILD_SENTINEL="/modules/.requires_rebuild"
fi

View File

@@ -18,6 +18,7 @@ Options:
--tarball PATH Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
--storage PATH Remote storage directory (default: <project-dir>/storage)
--skip-storage Do not sync the storage directory
--yes, -y Auto-confirm prompts (for existing deployments)
--help Show this help
EOF_HELP
}
@@ -30,6 +31,7 @@ PROJECT_DIR=""
TARBALL=""
REMOTE_STORAGE=""
SKIP_STORAGE=0
ASSUME_YES=0
while [[ $# -gt 0 ]]; do
case "$1" in
@@ -41,6 +43,7 @@ while [[ $# -gt 0 ]]; do
--tarball) TARBALL="$2"; shift 2;;
--storage) REMOTE_STORAGE="$2"; shift 2;;
--skip-storage) SKIP_STORAGE=1; shift;;
--yes|-y) ASSUME_YES=1; shift;;
--help|-h) usage; exit 0;;
*) echo "Unknown option: $1" >&2; usage; exit 1;;
esac
@@ -74,18 +77,133 @@ run_scp(){
scp "${SCP_OPTS[@]}" "$@"
}
validate_remote_environment(){
  # Pre-flight checks on the migration target host.  Reads the globals
  # HOST, USER, PROJECT_DIR and ASSUME_YES, uses the run_ssh helper, and
  # exits non-zero on any unrecoverable problem.
  echo "⋅ Validating remote environment..."
  # 1. Docker daemon must be reachable (images are loaded there later).
  echo " • Checking Docker daemon..."
  if ! run_ssh "docker info >/dev/null 2>&1"; then
    echo "❌ Docker daemon not running or not accessible on remote host"
    echo " Please ensure Docker is installed and running on $HOST"
    exit 1
  fi
  # 2. Disk space: require at least 5GB free under /tmp.
  # NOTE(review): storage sync also writes under $PROJECT_DIR, which may
  # live on a different filesystem than /tmp -- confirm.
  echo " • Checking disk space..."
  local available_gb
  available_gb=$(run_ssh "df /tmp | tail -1 | awk '{print int(\$4/1024/1024)}'")
  # Validate the remote output before the integer comparison: ssh banners
  # or MOTD noise would otherwise make `[ -lt ]` abort with a syntax error.
  if ! [[ "$available_gb" =~ ^[0-9]+$ ]]; then
    echo "❌ Could not determine free disk space on remote host (got: '$available_gb')"
    exit 1
  fi
  if [ "$available_gb" -lt 5 ]; then
    echo "❌ Insufficient disk space on remote host"
    echo " Available: ${available_gb}GB, Required: 5GB minimum"
    echo " Please free up disk space on $HOST"
    exit 1
  fi
  echo " Available: ${available_gb}GB ✓"
  # 3. Project directory must exist and be writable by the ssh user.
  echo " • Validating project directory permissions..."
  if ! run_ssh "mkdir -p '$PROJECT_DIR' && test -w '$PROJECT_DIR'"; then
    echo "❌ Cannot create or write to project directory: $PROJECT_DIR"
    echo " Please ensure $USER has write permissions to $PROJECT_DIR"
    exit 1
  fi
  # 4. Warn (and prompt unless --yes) when ac-* containers are already
  # running, since migration overwrites the deployment in place.
  echo " • Checking for existing deployment..."
  local running_containers
  running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l")
  if [ "$running_containers" -gt 0 ]; then
    echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
    echo " Migration will overwrite existing deployment"
    if [ "$ASSUME_YES" != "1" ]; then
      read -r -p " Continue with migration? [y/N]: " reply
      case "$reply" in
        [Yy]*) echo " Proceeding with migration..." ;;
        *) echo " Migration cancelled."; exit 1 ;;
      esac
    fi
  fi
  # 5. Bring the remote checkout in sync with origin before deploying.
  echo " • Ensuring remote repository is current..."
  setup_remote_repository
  echo "✅ Remote environment validation complete"
}
setup_remote_repository(){
  # Synchronize the remote checkout in $PROJECT_DIR with origin, cloning
  # fresh when no repository exists yet.  The clone URL is taken from the
  # local checkout's origin remote.  Exits non-zero on failure.
  # Git is required for both the update and the clone path.
  if ! run_ssh "command -v git >/dev/null 2>&1"; then
    echo "❌ Git not found on remote host. Please install git."
    exit 1
  fi
  if run_ssh "test -d '$PROJECT_DIR/.git'"; then
    echo " • Updating existing repository..."
    # Hard-reset to origin's copy of the current branch and drop untracked
    # files so the remote tree exactly matches what will be deployed.
    # NOTE(review): on a detached HEAD, `git rev-parse --abbrev-ref HEAD`
    # prints "HEAD" and the reset would fail -- confirm remote checkouts
    # always stay on a branch.
    run_ssh "cd '$PROJECT_DIR' && git fetch origin && git reset --hard origin/\$(git rev-parse --abbrev-ref HEAD) && git clean -fd"
  else
    echo " • Cloning repository..."
    # Determine the git repository URL from the local repo.
    local repo_url
    repo_url=$(git config --get remote.origin.url 2>/dev/null || echo "")
    if [ -z "$repo_url" ]; then
      echo "❌ Cannot determine repository URL. Please ensure local directory is a git repository."
      exit 1
    fi
    # Safety guard before the destructive remote `rm -rf`: an empty or
    # root PROJECT_DIR (e.g. from an upstream parsing bug) would otherwise
    # wipe the remote filesystem.
    if [ -z "$PROJECT_DIR" ] || [ "$PROJECT_DIR" = "/" ]; then
      echo "❌ Refusing to clone: invalid project directory '$PROJECT_DIR'"
      exit 1
    fi
    run_ssh "rm -rf '$PROJECT_DIR' && git clone '$repo_url' '$PROJECT_DIR'"
  fi
  # deploy.sh is the entry point run afterwards; fail early if the sync
  # somehow did not produce an executable copy of it.
  if ! run_ssh "test -f '$PROJECT_DIR/deploy.sh' && test -x '$PROJECT_DIR/deploy.sh'"; then
    echo "❌ deploy.sh not found or not executable in remote repository"
    exit 1
  fi
  echo " • Repository synchronized ✓"
}
validate_remote_environment
echo "⋅ Exporting module images to $TARBALL"
mkdir -p "$(dirname "$TARBALL")"
IMAGES_TO_SAVE=(
acore/ac-wotlk-worldserver:modules-latest
acore/ac-wotlk-authserver:modules-latest
)
# Check which images are available and collect them
IMAGES_TO_SAVE=()
# Check for custom module images (built by build.sh)
if docker image inspect uprightbass360/azerothcore-wotlk-playerbots:authserver-modules-latest >/dev/null 2>&1; then
IMAGES_TO_SAVE+=(uprightbass360/azerothcore-wotlk-playerbots:authserver-modules-latest)
fi
if docker image inspect uprightbass360/azerothcore-wotlk-playerbots:worldserver-modules-latest >/dev/null 2>&1; then
IMAGES_TO_SAVE+=(uprightbass360/azerothcore-wotlk-playerbots:worldserver-modules-latest)
fi
# Check for pre-compiled playerbots images
if docker image inspect uprightbass360/azerothcore-wotlk-playerbots:worldserver-Playerbot >/dev/null 2>&1; then
IMAGES_TO_SAVE+=(uprightbass360/azerothcore-wotlk-playerbots:worldserver-Playerbot)
fi
if docker image inspect uprightbass360/azerothcore-wotlk-playerbots:authserver-Playerbot >/dev/null 2>&1; then
IMAGES_TO_SAVE+=(uprightbass360/azerothcore-wotlk-playerbots:authserver-Playerbot)
fi
# Check for standard AzerothCore images (fallback)
if docker image inspect acore/ac-wotlk-worldserver:modules-latest >/dev/null 2>&1; then
IMAGES_TO_SAVE+=(acore/ac-wotlk-worldserver:modules-latest)
fi
if docker image inspect acore/ac-wotlk-authserver:modules-latest >/dev/null 2>&1; then
IMAGES_TO_SAVE+=(acore/ac-wotlk-authserver:modules-latest)
fi
if [ ${#IMAGES_TO_SAVE[@]} -eq 0 ]; then
echo "❌ No AzerothCore images found to migrate. Run './build.sh' first or pull standard images."
exit 1
fi
echo "⋅ Found ${#IMAGES_TO_SAVE[@]} images to migrate:"
printf ' • %s\n' "${IMAGES_TO_SAVE[@]}"
docker image save "${IMAGES_TO_SAVE[@]}" > "$TARBALL"
if [[ $SKIP_STORAGE -eq 0 ]]; then
@@ -111,4 +229,4 @@ fi
echo "⋅ Remote prepares completed"
echo "Run on the remote host to deploy:"
echo " cd $PROJECT_DIR && ./deploy.sh --skip-rebuild --no-watch"
echo " cd $PROJECT_DIR && ./deploy.sh --no-watch"

View File

@@ -128,8 +128,13 @@ STORAGE_PATH="$(read_env STORAGE_PATH "./storage")"
if [[ "$STORAGE_PATH" != /* ]]; then
STORAGE_PATH="$PROJECT_DIR/${STORAGE_PATH#./}"
fi
# Build sentinel is tracked in local storage
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi
MODULES_DIR="$STORAGE_PATH/modules"
SENTINEL_FILE="$MODULES_DIR/.requires_rebuild"
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
STORAGE_PATH_ABS="$STORAGE_PATH"
@@ -166,11 +171,11 @@ SHARED_MODULES_DIR="$STORAGE_PATH/modules"
if [ -d "$LOCAL_MODULES_DIR" ]; then
echo "🔧 Using modules from source directory: $LOCAL_MODULES_DIR"
MODULES_DIR="$LOCAL_MODULES_DIR"
SENTINEL_FILE="$LOCAL_MODULES_DIR/.requires_rebuild"
# Build sentinel always stays in local storage for consistency
else
echo "🔧 Using modules from shared storage: $SHARED_MODULES_DIR"
MODULES_DIR="$SHARED_MODULES_DIR"
SENTINEL_FILE="$SHARED_MODULES_DIR/.requires_rebuild"
# Build sentinel always stays in local storage for consistency
fi
SOURCE_COMPOSE="$REBUILD_SOURCE_PATH/docker-compose.yml"

View File

@@ -98,7 +98,13 @@ if [[ "$STORAGE_PATH" != /* ]]; then
STORAGE_PATH="$PROJECT_DIR/$STORAGE_PATH"
fi
MODULES_DIR="$STORAGE_PATH/modules"
SENTINEL_FILE="$MODULES_DIR/.requires_rebuild"
# Build sentinel is in local storage, deployment modules are in shared storage
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
# Define module mappings (from rebuild-with-modules.sh)
declare -A MODULE_REPO_MAP=(

150
setup.sh
View File

@@ -393,7 +393,6 @@ main(){
local CLI_PLAYERBOT_ENABLED=""
local CLI_PLAYERBOT_MAX=""
local CLI_AUTO_REBUILD=0
local CLI_RUN_REBUILD=0
local CLI_MODULES_SOURCE=""
local FORCE_OVERWRITE=0
local CLI_ENABLE_MODULES_RAW=()
@@ -433,7 +432,6 @@ Options:
--playerbot-enabled 0|1 Override PLAYERBOT_ENABLED flag
--playerbot-max-bots N Override PLAYERBOT_MAX_BOTS value
--auto-rebuild-on-deploy Enable automatic rebuild during deploys
--run-rebuild-now Trigger module rebuild after setup completes
--modules-rebuild-source PATH Source checkout used for module rebuilds
--deploy-after Run ./deploy.sh automatically after setup completes
--force Overwrite existing .env without prompting
@@ -581,10 +579,6 @@ EOF
CLI_AUTO_REBUILD=1
shift
;;
--run-rebuild-now)
CLI_RUN_REBUILD=1
shift
;;
--modules-rebuild-source)
[[ $# -ge 2 ]] || { say ERROR "--modules-rebuild-source requires a value"; exit 1; }
CLI_MODULES_SOURCE="$2"; shift 2
@@ -938,7 +932,6 @@ fi
local AUTO_REBUILD_ON_DEPLOY=$CLI_AUTO_REBUILD
local MODULES_REBUILD_SOURCE_PATH_VALUE="${CLI_MODULES_SOURCE}"
local RUN_REBUILD_NOW=$CLI_RUN_REBUILD
local NEEDS_CXX_REBUILD=0
local module_mode_label=""
@@ -1106,26 +1099,22 @@ fi
if [ "$NEEDS_CXX_REBUILD" = "1" ]; then
echo ""
say WARNING "These modules require compiling AzerothCore from source."
if [ "$CLI_RUN_REBUILD" = "1" ]; then
RUN_REBUILD_NOW=1
else
RUN_REBUILD_NOW=$(ask_yn "Run module rebuild immediately?" n)
fi
say INFO "Run './build.sh' to compile your custom modules before deployment."
if [ "$CLI_AUTO_REBUILD" = "1" ]; then
AUTO_REBUILD_ON_DEPLOY=1
else
AUTO_REBUILD_ON_DEPLOY=$(ask_yn "Enable automatic rebuild during future deploys?" "$( [ "$AUTO_REBUILD_ON_DEPLOY" = "1" ] && echo y || echo n )")
fi
if [ "$RUN_REBUILD_NOW" = "1" ] || [ "$AUTO_REBUILD_ON_DEPLOY" = "1" ]; then
if [ -z "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
if [ "$MODULE_PLAYERBOTS" = "1" ]; then
MODULES_REBUILD_SOURCE_PATH_VALUE="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots"
else
MODULES_REBUILD_SOURCE_PATH_VALUE="${LOCAL_STORAGE_ROOT}/source/azerothcore"
fi
say INFO "Using default source path: ${MODULES_REBUILD_SOURCE_PATH_VALUE}"
fi
# Set build sentinel to indicate rebuild is needed
local storage_abs="$STORAGE_PATH_LOCAL"
if [[ "$storage_abs" != /* ]]; then
storage_abs="$(cd "$(dirname "$0")" && pwd)/$storage_abs"
fi
local sentinel="$storage_abs/modules/.requires_rebuild"
mkdir -p "$(dirname "$sentinel")"
touch "$sentinel"
say INFO "Build sentinel created at $sentinel"
fi
local default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore"
@@ -1150,84 +1139,6 @@ fi
# Module staging will be handled directly in the rebuild section below
if [ "$RUN_REBUILD_NOW" = "1" ]; then
local default_source_path="$default_source_rel"
local rebuild_source_path="${MODULES_REBUILD_SOURCE_PATH_VALUE:-$default_source_path}"
MODULES_REBUILD_SOURCE_PATH_VALUE="$rebuild_source_path"
export MODULES_REBUILD_SOURCE_PATH="$MODULES_REBUILD_SOURCE_PATH_VALUE"
if [ ! -f "$rebuild_source_path/docker-compose.yml" ]; then
say INFO "Preparing source repository via scripts/setup-source.sh (progress will stream below)"
if ! ( set -o pipefail; ./scripts/setup-source.sh 2>&1 | while IFS= read -r line; do
say INFO "[setup-source]" "$line"
done ); then
say WARNING "Source setup encountered issues; running interactively."
if ! ./scripts/setup-source.sh; then
say WARNING "Source setup failed; skipping automatic rebuild."
RUN_REBUILD_NOW=0
fi
fi
fi
# Stage modules to local source directory before compilation
if [ "$NEEDS_CXX_REBUILD" = "1" ]; then
say INFO "Staging module repositories to local source directory..."
local local_modules_dir="${rebuild_source_path}/modules"
mkdir -p "$local_modules_dir"
# Export module variables for the script
local module_export_var
for module_export_var in "${KNOWN_MODULE_VARS[@]}"; do
export "$module_export_var"
done
local host_modules_dir="${storage_abs}/modules"
export MODULES_HOST_DIR="$host_modules_dir"
# Prepare isolated git config for the module script so we do not mutate user-level settings
local prev_git_config_global="${GIT_CONFIG_GLOBAL:-}"
local git_temp_config=""
if command -v mktemp >/dev/null 2>&1; then
if ! git_temp_config="$(mktemp)"; then
git_temp_config=""
fi
fi
if [ -z "$git_temp_config" ]; then
git_temp_config="$local_modules_dir/.gitconfig.tmp"
: > "$git_temp_config"
fi
export GIT_CONFIG_GLOBAL="$git_temp_config"
# Run module staging script in local modules directory
# Set environment variable to indicate we're running locally
export MODULES_LOCAL_RUN=1
if [ -n "$host_modules_dir" ]; then
mkdir -p "$host_modules_dir"
rm -f "$host_modules_dir/.modules_state" "$host_modules_dir/.requires_rebuild" 2>/dev/null || true
fi
if (cd "$local_modules_dir" && bash "$SCRIPT_DIR/scripts/manage-modules.sh"); then
say SUCCESS "Module repositories staged to $local_modules_dir"
if [ -n "$host_modules_dir" ]; then
if [ -f "$local_modules_dir/.modules_state" ]; then
cp "$local_modules_dir/.modules_state" "$host_modules_dir/.modules_state" 2>/dev/null || true
fi
fi
else
say WARNING "Module staging encountered issues, but continuing with rebuild"
fi
unset MODULES_LOCAL_RUN
unset MODULES_HOST_DIR
if [ -n "$git_temp_config" ]; then
rm -f "$git_temp_config"
fi
if [ -n "$prev_git_config_global" ]; then
export GIT_CONFIG_GLOBAL="$prev_git_config_global"
else
unset GIT_CONFIG_GLOBAL
fi
fi
fi
# Confirm write
@@ -1481,48 +1392,13 @@ EOF
say SUCCESS ".env written to $ENV_OUT"
show_realm_configured
if [ "$RUN_REBUILD_NOW" = "1" ]; then
echo ""
say HEADER "MODULE REBUILD"
if [ -n "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
local rebuild_args=(--yes --skip-stop)
rebuild_args+=(--source "$MODULES_REBUILD_SOURCE_PATH_VALUE")
if ./scripts/rebuild-with-modules.sh "${rebuild_args[@]}"; then
say SUCCESS "Module rebuild completed"
# Tag the built images as modules-latest so deploy.sh doesn't require another rebuild
if [ "$NEEDS_CXX_REBUILD" = "1" ] || [ "$MODULE_PLAYERBOTS" = "1" ]; then
say INFO "Tagging module images for deployment..."
local source_auth="$AC_AUTHSERVER_IMAGE_PLAYERBOTS_VALUE"
local source_world="$AC_WORLDSERVER_IMAGE_PLAYERBOTS_VALUE"
local target_auth="$AC_AUTHSERVER_IMAGE_MODULES_VALUE"
local target_world="$AC_WORLDSERVER_IMAGE_MODULES_VALUE"
if docker image inspect "$source_auth" >/dev/null 2>&1; then
docker tag "$source_auth" "$target_auth"
say SUCCESS "Tagged $target_auth from $source_auth"
fi
if docker image inspect "$source_world" >/dev/null 2>&1; then
docker tag "$source_world" "$target_world"
say SUCCESS "Tagged $target_world from $source_world"
fi
fi
else
say WARNING "Module rebuild failed; run ./scripts/rebuild-with-modules.sh manually once issues are resolved."
fi
else
say WARNING "Rebuild path was not provided; skipping automatic rebuild."
fi
fi
say INFO "Ready to bring your realm online:"
if [ "$MODULE_PLAYERBOTS" = "1" ]; then
printf ' 🚀 Quick deploy: ./deploy.sh\n'
printf ' 🔧 Manual: docker compose --profile db --profile services-playerbots --profile client-data-bots --profile modules up -d\n'
if [ "$NEEDS_CXX_REBUILD" = "1" ]; then
printf ' 🔨 First, build custom modules: ./build.sh\n'
printf ' 🚀 Then deploy your realm: ./deploy.sh\n'
else
printf ' 🚀 Quick deploy: ./deploy.sh\n'
printf ' 🔧 Manual: docker compose --profile db --profile services-standard --profile client-data --profile modules up -d\n'
fi
if [ "${CLI_DEPLOY_AFTER:-0}" = "1" ]; then

View File

@@ -98,7 +98,7 @@ read_env_value(){
handle_auto_rebuild(){
local storage_path
storage_path="$(read_env_value STORAGE_PATH "./storage")"
storage_path="$(read_env_value STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$storage_path" != /* ]]; then
storage_path="$(dirname "$COMPOSE_FILE")/$storage_path"
fi