1 Commits

Author: uprightbass360 · SHA1: c0aaf8ce96 · Message: chore: sync module manifest · Date: 2026-01-03 09:11:12 +00:00
37 changed files with 2140 additions and 4118 deletions

View File

@@ -1,321 +0,0 @@
# ================================================================================
# AzerothCore RealmMaster - Pre-Built Images Configuration
# ================================================================================
# Use this minimal configuration file to deploy pre-built RealmMaster images
# from Docker Hub. No local building required!
#
# Quick Start:
# 1. Copy this file: cp .env.prebuilt .env
# 2. Set your DOCKERHUB_USERNAME below
# 3. Run: ./deploy.sh
#
# The pre-built images include 32 modules from the RealmMaster profile:
# - Playerbots, Transmog, Solo LFG, Eluna, NPC Buffer, and 27 more
# - See config/module-profiles/RealmMaster.json for full list
# ================================================================================
# =====================
# REQUIRED: Docker Hub Configuration
# =====================
# Set this to your Docker Hub username where the images are published
DOCKERHUB_USERNAME=your-dockerhub-username
# =====================
# Project Configuration
# =====================
COMPOSE_PROJECT_NAME=azerothcore-realmmaster
# =====================
# Module Profile Selection
# =====================
# Choose which module profile build to use:
# - realmmaster: 32 modules (playerbots, transmog, solo-lfg, eluna, etc.) - ✅ Available now (Recommended)
#
# Additional profiles (available soon - will be built on demand):
# - suggested-modules: Alternative suggested module set
# - all-modules: All supported modules
# - playerbots-only: Just playerbots
#
# Note: Only 'realmmaster' images are currently published to Docker Hub.
# Other profiles will be available when built via GitHub Actions workflow.
MODULE_PROFILE=realmmaster
# =====================
# Pre-Built Images from Docker Hub
# =====================
# These images are built nightly with different module profiles
# Using profile-specific tags ensures you get the exact module set you want
AC_AUTHSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:authserver-${MODULE_PROFILE}-latest
AC_WORLDSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:worldserver-${MODULE_PROFILE}-latest
# Alternative: Use date-tagged images for version pinning
# AC_AUTHSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:authserver-${MODULE_PROFILE}-20260109
# AC_WORLDSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:worldserver-${MODULE_PROFILE}-20260109
# Alternative: Use generic latest tags (default: realmmaster profile)
# AC_AUTHSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:authserver-latest
# AC_WORLDSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:worldserver-latest
# Standard images (fallback if modules images not available)
AC_AUTHSERVER_IMAGE=acore/ac-wotlk-authserver:master
AC_WORLDSERVER_IMAGE=acore/ac-wotlk-worldserver:master
# Playerbots images (referenced by docker-compose, even if using modules profile)
AC_AUTHSERVER_IMAGE_PLAYERBOTS=${COMPOSE_PROJECT_NAME}:authserver-playerbots
AC_WORLDSERVER_IMAGE_PLAYERBOTS=${COMPOSE_PROJECT_NAME}:worldserver-playerbots
AC_CLIENT_DATA_IMAGE_PLAYERBOTS=${COMPOSE_PROJECT_NAME}:client-data-playerbots
# Database and client data images
AC_DB_IMPORT_IMAGE=acore/ac-wotlk-db-import:master
AC_CLIENT_DATA_IMAGE=acore/ac-wotlk-client-data:master
# Helper images
ALPINE_IMAGE=alpine:latest
MYSQL_IMAGE=mysql:8.0
# =====================
# Storage Paths
# =====================
STORAGE_PATH=./storage
STORAGE_PATH_LOCAL=./local-storage
STORAGE_CONFIG_PATH=${STORAGE_PATH}/config
STORAGE_LOGS_PATH=${STORAGE_PATH}/logs
STORAGE_MODULES_PATH=${STORAGE_PATH}/modules
STORAGE_LUA_SCRIPTS_PATH=${STORAGE_PATH}/lua_scripts
STORAGE_MODULE_SQL_PATH=${STORAGE_PATH}/module-sql-updates
STORAGE_INSTALL_MARKERS_PATH=${STORAGE_PATH}/install-markers
STORAGE_CLIENT_DATA_PATH=${STORAGE_PATH}/client-data
BACKUP_PATH=${STORAGE_PATH}/backups
# =====================
# Timezone
# =====================
HOST_ZONEINFO_PATH=/usr/share/zoneinfo
TZ=UTC
# =====================
# Networking
# =====================
NETWORK_NAME=azerothcore
NETWORK_SUBNET=172.20.0.0/16
NETWORK_GATEWAY=172.20.0.1
# =====================
# Server Address & Realm
# =====================
# Change this to your server's public IP or domain name
SERVER_ADDRESS=127.0.0.1
REALM_PORT=8215
# =====================
# Ports
# =====================
# Authentication server
AUTH_EXTERNAL_PORT=3784
AUTH_PORT=3724
# World server
WORLD_EXTERNAL_PORT=8215
WORLD_PORT=8085
# SOAP/Remote access
SOAP_EXTERNAL_PORT=7778
SOAP_PORT=7878
# MySQL database (for external access)
MYSQL_EXTERNAL_PORT=64306
# phpMyAdmin web interface
PMA_EXTERNAL_PORT=8081
# Keira3 editor interface
KEIRA3_EXTERNAL_PORT=4201
# =====================
# MySQL Database Configuration
# =====================
MYSQL_IMAGE=mysql:8.0
CONTAINER_MYSQL=ac-mysql
MYSQL_HOST=ac-mysql
MYSQL_PORT=3306
# Security: Change these passwords!
MYSQL_ROOT_PASSWORD=azerothcore123
MYSQL_ROOT_HOST=%
MYSQL_USER=root
# Database names
DB_AUTH_NAME=acore_auth
DB_WORLD_NAME=acore_world
DB_CHARACTERS_NAME=acore_characters
DB_PLAYERBOTS_NAME=acore_playerbots
# Database performance settings
MYSQL_CHARACTER_SET=utf8mb4
MYSQL_COLLATION=utf8mb4_unicode_ci
MYSQL_MAX_CONNECTIONS=1000
MYSQL_INNODB_BUFFER_POOL_SIZE=256M
MYSQL_INNODB_LOG_FILE_SIZE=64M
MYSQL_INNODB_REDO_LOG_CAPACITY=512M
# MySQL tmpfs (RAM disk) for performance
MYSQL_RUNTIME_TMPFS_SIZE=8G
MYSQL_DISABLE_BINLOG=1
# Database connection settings
DB_WAIT_RETRIES=60
DB_WAIT_SLEEP=10
DB_RECONNECT_SECONDS=5
DB_RECONNECT_ATTEMPTS=5
# Database worker threads
DB_LOGIN_WORKER_THREADS=1
DB_WORLD_WORKER_THREADS=1
DB_CHARACTER_WORKER_THREADS=1
DB_LOGIN_SYNCH_THREADS=1
DB_WORLD_SYNCH_THREADS=1
DB_CHARACTER_SYNCH_THREADS=1
# =====================
# Automated Backups
# =====================
BACKUP_RETENTION_DAYS=3
BACKUP_RETENTION_HOURS=6
BACKUP_DAILY_TIME=09:00
BACKUP_INTERVAL_MINUTES=60
BACKUP_EXTRA_DATABASES=
BACKUP_HEALTHCHECK_MAX_MINUTES=1440
BACKUP_HEALTHCHECK_GRACE_SECONDS=4500
# =====================
# Module Configuration (Pre-Built)
# =====================
# These settings tell the system that modules are already built into the images
STACK_IMAGE_MODE=modules
STACK_SOURCE_VARIANT=playerbots
# Key modules enabled (needed for profile detection)
# The RealmMaster profile includes playerbots, so we need this set for deploy.sh to use the correct profile
MODULE_PLAYERBOTS=1
# Note: MODULES_ENABLED_LIST varies by profile - the list below is for the 'realmmaster' profile
# For other profiles, see the corresponding JSON in config/module-profiles/
MODULES_ENABLED_LIST=MODULE_PLAYERBOTS,MODULE_TRANSMOG,MODULE_SOLO_LFG,MODULE_ELUNA,MODULE_AIO,MODULE_NPC_BUFFER,MODULE_NPC_BEASTMASTER,MODULE_SOLOCRAFT,MODULE_1V1_ARENA,MODULE_ACCOUNT_ACHIEVEMENTS,MODULE_ACTIVE_CHAT,MODULE_ARAC,MODULE_ASSISTANT,MODULE_AUTO_REVIVE,MODULE_BLACK_MARKET_AUCTION_HOUSE,MODULE_BOSS_ANNOUNCER,MODULE_BREAKING_NEWS,MODULE_ELUNA_SCRIPTS,MODULE_EVENT_SCRIPTS,MODULE_FIREWORKS,MODULE_GAIN_HONOR_GUARD,MODULE_GLOBAL_CHAT,MODULE_GUILDHOUSE,MODULE_INSTANCE_RESET,MODULE_ITEM_LEVEL_UP,MODULE_LEARN_SPELLS,MODULE_MORPHSUMMON,MODULE_NPC_ENCHANTER,MODULE_NPC_FREE_PROFESSIONS,MODULE_RANDOM_ENCHANTS,MODULE_REAGENT_BANK,MODULE_TIME_IS_TIME
MODULES_CPP_LIST=
MODULES_REQUIRES_CUSTOM_BUILD=0
MODULES_REQUIRES_PLAYERBOT_SOURCE=1
# =====================
# Playerbot Runtime Configuration
# =====================
# Enable/disable playerbots and set population
PLAYERBOT_ENABLED=1
PLAYERBOT_MIN_BOTS=40
PLAYERBOT_MAX_BOTS=200
# =====================
# Client Data
# =====================
# Client data version (auto-detected when blank)
CLIENT_DATA_VERSION=
CLIENT_DATA_PATH=
# =====================
# Server Configuration Preset
# =====================
# Apply a configuration preset during deployment
# Options: none, blizzlike, fast-leveling, hardcore-pvp, casual-pve
SERVER_CONFIG_PRESET=none
# =====================
# Eluna Lua Scripting
# =====================
AC_ELUNA_ENABLED=1
AC_ELUNA_TRACE_BACK=1
AC_ELUNA_AUTO_RELOAD=1
AC_ELUNA_BYTECODE_CACHE=1
AC_ELUNA_SCRIPT_PATH=lua_scripts
AC_ELUNA_REQUIRE_PATHS=
AC_ELUNA_REQUIRE_CPATHS=
AC_ELUNA_AUTO_RELOAD_INTERVAL=1
# =====================
# Container Management
# =====================
CONTAINER_USER=0:0
CONTAINER_DB_IMPORT=ac-db-import
CONTAINER_DB_INIT=ac-db-init
CONTAINER_DB_GUARD=ac-db-guard
CONTAINER_BACKUP=ac-backup
CONTAINER_MODULES=ac-modules
CONTAINER_POST_INSTALL=ac-post-install
# =====================
# Database Guard
# =====================
DB_GUARD_RECHECK_SECONDS=120
DB_GUARD_RETRY_SECONDS=10
DB_GUARD_WAIT_ATTEMPTS=60
DB_GUARD_HEALTH_MAX_AGE=180
DB_GUARD_HEALTHCHECK_INTERVAL=30s
DB_GUARD_HEALTHCHECK_TIMEOUT=10s
DB_GUARD_HEALTHCHECK_RETRIES=5
DB_GUARD_VERIFY_INTERVAL_SECONDS=86400
# =====================
# Health Checks
# =====================
# MySQL health checks
MYSQL_HEALTHCHECK_INTERVAL=20s
MYSQL_HEALTHCHECK_TIMEOUT=15s
MYSQL_HEALTHCHECK_RETRIES=25
MYSQL_HEALTHCHECK_START_PERIOD=120s
# Auth server health checks
AUTH_HEALTHCHECK_INTERVAL=30s
AUTH_HEALTHCHECK_TIMEOUT=10s
AUTH_HEALTHCHECK_RETRIES=3
AUTH_HEALTHCHECK_START_PERIOD=60s
# World server health checks
WORLD_HEALTHCHECK_INTERVAL=30s
WORLD_HEALTHCHECK_TIMEOUT=10s
WORLD_HEALTHCHECK_RETRIES=3
WORLD_HEALTHCHECK_START_PERIOD=120s
# Backup health checks
BACKUP_HEALTHCHECK_INTERVAL=60s
BACKUP_HEALTHCHECK_TIMEOUT=30s
BACKUP_HEALTHCHECK_RETRIES=3
BACKUP_HEALTHCHECK_START_PERIOD=120s
# =====================
# Management Tools
# =====================
# phpMyAdmin configuration
PMA_HOST=ac-mysql
PMA_PORT=3306
PMA_USER=root
PMA_ARBITRARY=1
PMA_ABSOLUTE_URI=
PMA_UPLOAD_LIMIT=300M
PMA_MEMORY_LIMIT=512M
PMA_MAX_EXECUTION_TIME=600
# Keira3 configuration
KEIRA_DATABASE_HOST=ac-mysql
KEIRA_DATABASE_PORT=3306
# =====================
# Compose Overrides
# =====================
# Enable optional compose overrides (set to 1 to enable)
COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED=0
COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED=0
# =====================
# DO NOT MODIFY BELOW (Build-related, not used with pre-built images)
# =====================
AUTO_REBUILD_ON_DEPLOY=0
DB_UPDATES_ALLOWED_MODULES=all
DB_UPDATES_REDUNDANCY=1

View File

@@ -155,7 +155,6 @@ MYSQL_MAX_CONNECTIONS=1000
MYSQL_INNODB_BUFFER_POOL_SIZE=256M
MYSQL_INNODB_LOG_FILE_SIZE=64M
MYSQL_INNODB_REDO_LOG_CAPACITY=512M
# MySQL runs on tmpfs (RAM) for performance, with sync to persistent storage on shutdown
MYSQL_RUNTIME_TMPFS_SIZE=8G
MYSQL_DISABLE_BINLOG=1
MYSQL_CONFIG_DIR=${STORAGE_CONFIG_PATH}/mysql/conf.d
@@ -559,5 +558,3 @@ MODULE_MOD_PYTHON_ENGINE=0
MODULE_WRATH_OF_THE_VANILLA_V2=0
MODULE_DUELS=0
MODULE_WOW_CORE=0
MODULE_CLANCENTAUR=0
MODULE_DELVES=0

View File

@@ -1,248 +0,0 @@
name: Build and Publish
# This workflow builds AzerothCore with configurable module profiles
# and publishes profile-tagged Docker images to Docker Hub for easy deployment.
#
# Default Profile: RealmMaster (32 modules including playerbots, transmog, solo-lfg, eluna, etc.)
# Available Profiles: RealmMaster, suggested-modules, all-modules, playerbots-only, or custom
# Profile Configuration: See config/module-profiles/
# Documentation: See docs/CICD.md
#
# Published Image Tags:
# - authserver-{profile}-latest (e.g., authserver-realmmaster-latest)
# - authserver-{profile}-YYYYMMDD (e.g., authserver-realmmaster-20260109)
# - authserver-latest (generic tag, defaults to RealmMaster)
# - worldserver-{profile}-latest
# - worldserver-{profile}-YYYYMMDD
# - worldserver-latest (generic tag, defaults to RealmMaster)
on:
schedule:
# Run nightly at 2 AM UTC
- cron: '0 2 * * *'
workflow_dispatch:
inputs:
module_profile:
description: 'Module profile to build (e.g., RealmMaster, suggested-modules, all-modules)'
required: false
type: string
default: 'RealmMaster'
force_rebuild:
description: 'Force rebuild even if no changes detected'
required: false
type: boolean
default: false
jobs:
build:
runs-on: ubuntu-latest
timeout-minutes: 120
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Git
run: |
# Configure git for module repository cloning
git config --global user.name "GitHub Actions Bot"
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git --version
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Prepare build environment
env:
TERM: xterm
run: |
# Determine which module profile to use
if [ "${{ github.event_name }}" = "schedule" ]; then
MODULE_PROFILE="RealmMaster"
else
MODULE_PROFILE="${{ github.event.inputs.module_profile }}"
MODULE_PROFILE="${MODULE_PROFILE:-RealmMaster}"
fi
echo "📋 Using module profile: ${MODULE_PROFILE}"
echo "🔧 Running setup.sh to generate proper .env file..."
# Use setup.sh to generate .env with proper configuration
# Benefits of this approach:
# - Uses the same setup logic as local builds (consistency)
# - Handles all path variables correctly (no manual sed patching needed)
# - Automatically determines source variant (standard vs playerbots)
# - Applies module profile and dependencies correctly
# - Centralizes configuration logic in one place (setup.sh)
./setup.sh \
--non-interactive \
--module-config "${MODULE_PROFILE}" \
--deployment-type local \
--force
echo "✅ Environment configuration generated successfully"
# Extract values for GitHub environment
PROJECT_NAME=$(grep '^COMPOSE_PROJECT_NAME=' .env | cut -d'=' -f2 | tr -d '\r' | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')
echo "PROJECT_NAME=${PROJECT_NAME}" >> $GITHUB_ENV
# Store profile name for image tagging (lowercase, replace underscores with hyphens)
PROFILE_TAG=$(echo "${MODULE_PROFILE}" | tr '[:upper:]' '[:lower:]' | tr '_' '-')
echo "PROFILE_TAG=${PROFILE_TAG}" >> $GITHUB_ENV
echo "MODULE_PROFILE=${MODULE_PROFILE}" >> $GITHUB_ENV
# Count enabled modules
MODULE_COUNT=$(grep -c '^MODULE_.*=1' .env || true)
echo "MODULE_COUNT=${MODULE_COUNT}" >> $GITHUB_ENV
# Display configuration summary
echo ""
echo "📊 Build Configuration Summary:"
echo " Project: ${PROJECT_NAME}"
echo " Profile: ${MODULE_PROFILE}"
echo " Profile Tag: ${PROFILE_TAG}"
echo " Modules: ${MODULE_COUNT} enabled"
echo ""
echo "Enabled modules (first 10):"
grep '^MODULE_.*=1' .env | head -10 || true
echo ""
# Show key paths for verification
echo "📂 Key Paths:"
grep '^STORAGE_PATH_LOCAL=' .env || echo " STORAGE_PATH_LOCAL not found"
grep '^MODULES_REBUILD_SOURCE_PATH=' .env || echo " MODULES_REBUILD_SOURCE_PATH not found"
grep '^STACK_SOURCE_VARIANT=' .env || echo " STACK_SOURCE_VARIANT not found"
echo ""
# Verify all Docker images are configured
echo "🐳 Docker Images (that we build and push):"
grep -E '^AC_AUTHSERVER_IMAGE_PLAYERBOTS=' .env || echo " AC_AUTHSERVER_IMAGE_PLAYERBOTS not found"
grep -E '^AC_WORLDSERVER_IMAGE_PLAYERBOTS=' .env || echo " AC_WORLDSERVER_IMAGE_PLAYERBOTS not found"
grep -E '^AC_AUTHSERVER_IMAGE_MODULES=' .env || echo " AC_AUTHSERVER_IMAGE_MODULES not found"
grep -E '^AC_WORLDSERVER_IMAGE_MODULES=' .env || echo " AC_WORLDSERVER_IMAGE_MODULES not found"
- name: Cache Go build cache
uses: actions/cache@v4
with:
path: .gocache
key: ${{ runner.os }}-gocache-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-gocache-
- name: Cache local storage
uses: actions/cache@v4
with:
path: local-storage/source
key: ${{ runner.os }}-source-${{ github.sha }}
restore-keys: |
${{ runner.os }}-source-
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Run build
run: |
# The build.sh script will automatically:
# 1. Generate module state from the enabled modules in .env
# 2. Set up the AzerothCore source repository
# 3. Fetch and clone all enabled module repositories from GitHub
# 4. Stage modules to the source directory
# 5. Compile AzerothCore with all modules
# 6. Tag the resulting Docker images
BUILD_ARGS="--yes"
# Add force flag if manually triggered with force_rebuild
if [ "${{ github.event.inputs.force_rebuild }}" = "true" ]; then
BUILD_ARGS="${BUILD_ARGS} --force"
fi
echo "🔨 Starting build process with ${BUILD_ARGS}..."
echo "This will fetch and build all ${MODULE_COUNT} enabled modules from the ${MODULE_PROFILE} profile"
./build.sh ${BUILD_ARGS}
- name: Tag images for Docker Hub
run: |
DATE_TAG=$(date +%Y%m%d)
# Tag authserver images with profile name
docker tag ${PROJECT_NAME}:authserver-modules-latest \
${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-${PROFILE_TAG}-latest
docker tag ${PROJECT_NAME}:authserver-modules-latest \
${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-${PROFILE_TAG}-${DATE_TAG}
# Also tag as generic 'latest' for backward compatibility
docker tag ${PROJECT_NAME}:authserver-modules-latest \
${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-latest
# Tag worldserver images with profile name
docker tag ${PROJECT_NAME}:worldserver-modules-latest \
${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-${PROFILE_TAG}-latest
docker tag ${PROJECT_NAME}:worldserver-modules-latest \
${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-${PROFILE_TAG}-${DATE_TAG}
# Also tag as generic 'latest' for backward compatibility
docker tag ${PROJECT_NAME}:worldserver-modules-latest \
${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-latest
echo "Tagged images with profile '${PROFILE_TAG}' and date '${DATE_TAG}'"
- name: Push images to Docker Hub
run: |
DATE_TAG=$(date +%Y%m%d)
# Push authserver images (all tags)
docker push ${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-${PROFILE_TAG}-latest
docker push ${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-${PROFILE_TAG}-${DATE_TAG}
docker push ${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-latest
# Push worldserver images (all tags)
docker push ${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-${PROFILE_TAG}-latest
docker push ${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-${PROFILE_TAG}-${DATE_TAG}
docker push ${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-latest
echo "✅ Pushed all image tags to Docker Hub"
- name: Build summary
run: |
DATE_TAG=$(date +%Y%m%d)
echo "## Build Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "✅ Build completed successfully" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Configuration" >> $GITHUB_STEP_SUMMARY
echo "- **Module Profile**: ${MODULE_PROFILE}" >> $GITHUB_STEP_SUMMARY
echo "- **Enabled Modules**: ${MODULE_COUNT}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "<details>" >> $GITHUB_STEP_SUMMARY
echo "<summary>View enabled modules</summary>" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
grep '^MODULE_.*=1' .env | sed 's/=1//' >> $GITHUB_STEP_SUMMARY || true
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
echo "</details>" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Published Images" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "#### Profile-Specific Tags" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-${PROFILE_TAG}-latest\`" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-${PROFILE_TAG}-${DATE_TAG}\`" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-${PROFILE_TAG}-latest\`" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-${PROFILE_TAG}-${DATE_TAG}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "#### Generic Tags (backward compatibility)" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:authserver-latest\`" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ secrets.DOCKERHUB_USERNAME }}/${PROJECT_NAME}:worldserver-latest\`" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,246 +0,0 @@
name: Create Release
on:
workflow_dispatch:
inputs:
version:
description: 'Release version (e.g., v1.0.0)'
required: true
type: string
profile:
description: 'Module profile for this release'
required: false
type: string
default: 'RealmMaster'
prerelease:
description: 'Mark as pre-release'
required: false
type: boolean
default: false
jobs:
create-release:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Prepare release variables
run: |
VERSION="${{ github.event.inputs.version }}"
PROFILE="${{ github.event.inputs.profile }}"
PROFILE_TAG=$(echo "${PROFILE}" | tr '[:upper:]' '[:lower:]' | tr '_' '-')
echo "VERSION=${VERSION}" >> $GITHUB_ENV
echo "PROFILE=${PROFILE}" >> $GITHUB_ENV
echo "PROFILE_TAG=${PROFILE_TAG}" >> $GITHUB_ENV
# Get build date from Docker Hub image (or use current date)
BUILD_DATE=$(date +%Y%m%d)
echo "BUILD_DATE=${BUILD_DATE}" >> $GITHUB_ENV
# Get AzerothCore commit from local-storage if available
if [ -d "local-storage/source/azerothcore-playerbots" ]; then
ACORE_COMMIT=$(cd local-storage/source/azerothcore-playerbots && git rev-parse --short HEAD)
else
ACORE_COMMIT="unknown"
fi
echo "ACORE_COMMIT=${ACORE_COMMIT}" >> $GITHUB_ENV
- name: Read module list from profile
run: |
PROFILE_FILE="config/module-profiles/${PROFILE}.json"
if [ ! -f "$PROFILE_FILE" ]; then
echo "ERROR: Profile file not found: $PROFILE_FILE"
exit 1
fi
# Extract module count
MODULE_COUNT=$(python3 -c "import json; data=json.load(open('$PROFILE_FILE')); print(len(data.get('modules', [])))")
echo "MODULE_COUNT=${MODULE_COUNT}" >> $GITHUB_ENV
# Extract modules for release notes
python3 -c "import json; data=json.load(open('$PROFILE_FILE')); print('\n'.join(['- ' + m for m in data.get('modules', [])]))" > modules.txt
- name: Create deployment package
run: |
PACKAGE_NAME="azerothcore-realmmaster-${VERSION}-${PROFILE_TAG}"
mkdir -p "${PACKAGE_NAME}"
# Copy essential deployment files
cp .env.prebuilt "${PACKAGE_NAME}/.env.prebuilt"
cp docker-compose.yml "${PACKAGE_NAME}/docker-compose.yml"
cp deploy.sh "${PACKAGE_NAME}/deploy.sh"
cp status.sh "${PACKAGE_NAME}/status.sh"
cp cleanup.sh "${PACKAGE_NAME}/cleanup.sh"
cp README.md "${PACKAGE_NAME}/README.md"
# Copy scripts directory
cp -r scripts "${PACKAGE_NAME}/scripts"
# Copy config directory
cp -r config "${PACKAGE_NAME}/config"
# Copy docs directory
cp -r docs "${PACKAGE_NAME}/docs"
# Create a quick start guide specific to this release
{
echo "# Quick Start - AzerothCore RealmMaster ${VERSION}"
echo ""
echo "## Module Profile: ${PROFILE}"
echo "${MODULE_COUNT} modules included"
echo ""
echo "## Docker Images"
echo "This release uses the following pre-built images:"
echo "- \`\${DOCKERHUB_USERNAME}/azerothcore-realmmaster:authserver-${PROFILE_TAG}-${BUILD_DATE}\`"
echo "- \`\${DOCKERHUB_USERNAME}/azerothcore-realmmaster:worldserver-${PROFILE_TAG}-${BUILD_DATE}\`"
echo ""
echo "Or use the latest tags:"
echo "- \`\${DOCKERHUB_USERNAME}/azerothcore-realmmaster:authserver-${PROFILE_TAG}-latest\`"
echo "- \`\${DOCKERHUB_USERNAME}/azerothcore-realmmaster:worldserver-${PROFILE_TAG}-latest\`"
echo ""
echo "## Installation"
echo ""
echo "1. **Edit .env.prebuilt**:"
echo " \`\`\`bash"
echo " nano .env.prebuilt"
echo " # Set: DOCKERHUB_USERNAME=your-dockerhub-username"
echo " \`\`\`"
echo ""
echo "2. **Rename to .env**:"
echo " \`\`\`bash"
echo " mv .env.prebuilt .env"
echo " \`\`\`"
echo ""
echo "3. **Deploy**:"
echo " \`\`\`bash"
echo " chmod +x deploy.sh status.sh cleanup.sh"
echo " ./deploy.sh"
echo " \`\`\`"
echo ""
echo "4. **Check status**:"
echo " \`\`\`bash"
echo " ./status.sh"
echo " \`\`\`"
echo ""
echo "## Documentation"
echo "- [Pre-Built Images Guide](docs/PREBUILT_IMAGES.md)"
echo "- [Getting Started](docs/GETTING_STARTED.md)"
echo "- [Troubleshooting](docs/TROUBLESHOOTING.md)"
echo ""
echo "## Support"
echo "- GitHub Issues: https://github.com/uprightbass360/AzerothCore-RealmMaster/issues"
echo "- AzerothCore Discord: https://discord.gg/gkt4y2x"
} > "${PACKAGE_NAME}/QUICKSTART.md"
# Make scripts executable
chmod +x "${PACKAGE_NAME}/deploy.sh"
chmod +x "${PACKAGE_NAME}/status.sh"
chmod +x "${PACKAGE_NAME}/cleanup.sh"
# Create zip archive
zip -r "${PACKAGE_NAME}.zip" "${PACKAGE_NAME}"
echo "PACKAGE_NAME=${PACKAGE_NAME}" >> $GITHUB_ENV
- name: Generate release notes
run: |
{
echo "# AzerothCore RealmMaster ${VERSION} - ${PROFILE} Profile"
echo ""
echo "## 🎯 Module Profile: ${PROFILE}"
echo "${MODULE_COUNT} modules included"
echo ""
echo "## 📦 Docker Images"
echo ""
echo "Pull these pre-built images from Docker Hub:"
echo ""
echo "**Date-specific (recommended for production)**:"
echo "\`\`\`bash"
echo "docker pull \${DOCKERHUB_USERNAME}/azerothcore-realmmaster:authserver-${PROFILE_TAG}-${BUILD_DATE}"
echo "docker pull \${DOCKERHUB_USERNAME}/azerothcore-realmmaster:worldserver-${PROFILE_TAG}-${BUILD_DATE}"
echo "\`\`\`"
echo ""
echo "**Latest (auto-updated nightly)**:"
echo "\`\`\`bash"
echo "docker pull \${DOCKERHUB_USERNAME}/azerothcore-realmmaster:authserver-${PROFILE_TAG}-latest"
echo "docker pull \${DOCKERHUB_USERNAME}/azerothcore-realmmaster:worldserver-${PROFILE_TAG}-latest"
echo "\`\`\`"
echo ""
echo "## 🚀 Quick Start"
echo ""
echo "\`\`\`bash"
echo "# Download and extract"
echo "wget https://github.com/uprightbass360/AzerothCore-RealmMaster/releases/download/${VERSION}/${PACKAGE_NAME}.zip"
echo "unzip ${PACKAGE_NAME}.zip"
echo "cd ${PACKAGE_NAME}"
echo ""
echo "# Configure Docker Hub username"
echo "nano .env.prebuilt"
echo "# Set: DOCKERHUB_USERNAME=your-dockerhub-username"
echo ""
echo "# Deploy"
echo "mv .env.prebuilt .env"
echo "./deploy.sh"
echo "\`\`\`"
echo ""
echo "Full documentation in \`docs/PREBUILT_IMAGES.md\`"
echo ""
echo "## 📋 Included Modules"
echo ""
cat modules.txt
echo ""
echo "## 📊 Build Information"
echo ""
echo "- **Built**: ${BUILD_DATE}"
echo "- **AzerothCore Commit**: ${ACORE_COMMIT}"
echo "- **Source Variant**: playerbots (for MODULE_PLAYERBOTS support)"
echo "- **Profile**: ${PROFILE}"
echo "- **Module Count**: ${MODULE_COUNT}"
echo ""
echo "## 📖 Documentation"
echo ""
echo "Full documentation available in the \`docs/\` directory of the release package:"
echo "- [Pre-Built Images Guide](https://github.com/uprightbass360/AzerothCore-RealmMaster/blob/${VERSION}/docs/PREBUILT_IMAGES.md)"
echo "- [Getting Started Guide](https://github.com/uprightbass360/AzerothCore-RealmMaster/blob/${VERSION}/docs/GETTING_STARTED.md)"
echo "- [Module Catalog](https://github.com/uprightbass360/AzerothCore-RealmMaster/blob/${VERSION}/docs/MODULES.md)"
echo "- [Troubleshooting](https://github.com/uprightbass360/AzerothCore-RealmMaster/blob/${VERSION}/docs/TROUBLESHOOTING.md)"
echo ""
echo "## 🐛 Known Issues"
echo ""
echo "None at this time. Report issues at: https://github.com/uprightbass360/AzerothCore-RealmMaster/issues"
echo ""
echo "## 💬 Support"
echo ""
echo "- **GitHub Issues**: https://github.com/uprightbass360/AzerothCore-RealmMaster/issues"
echo "- **AzerothCore Discord**: https://discord.gg/gkt4y2x"
echo "- **Documentation**: https://github.com/uprightbass360/AzerothCore-RealmMaster/tree/${VERSION}/docs"
} > release_notes.md
- name: Create GitHub Release
uses: softprops/action-gh-release@v1
with:
tag_name: ${{ env.VERSION }}
name: "RealmMaster ${{ env.VERSION }} - ${{ env.PROFILE }} Profile"
body_path: release_notes.md
files: |
${{ env.PACKAGE_NAME }}.zip
prerelease: ${{ github.event.inputs.prerelease }}
draft: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Release summary
run: |
echo "## Release Created Successfully! 🎉" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Version**: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "**Profile**: ${{ env.PROFILE }}" >> $GITHUB_STEP_SUMMARY
echo "**Modules**: ${{ env.MODULE_COUNT }}" >> $GITHUB_STEP_SUMMARY
echo "**Package**: ${{ env.PACKAGE_NAME }}.zip" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "📦 Release available at:" >> $GITHUB_STEP_SUMMARY
echo "https://github.com/${{ github.repository }}/releases/tag/${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -11,13 +11,11 @@ A complete containerized deployment of AzerothCore WoW 3.3.5a (Wrath of the Lich
- [Quick Start](#quick-start)
- [What You Get](#what-you-get)
- [Getting Started](#getting-started) → **[docs/GETTING_STARTED.md](docs/GETTING_STARTED.md)**
- [Using Pre-Built Images](#using-pre-built-images-no-build-required) → **[docs/PREBUILT_IMAGES.md](docs/PREBUILT_IMAGES.md)**
- [Complete Module Catalog](#complete-module-catalog) → **[docs/MODULES.md](docs/MODULES.md)**
- [Management & Operations](#management--operations) → **[docs/GETTING_STARTED.md](docs/GETTING_STARTED.md)**
- [Advanced Configuration](#advanced-configuration) → **[docs/ADVANCED.md](docs/ADVANCED.md)**
- [Custom NPCs Guide](#custom-npcs-guide) → **[docs/NPCS.md](docs/NPCS.md)**
- [Script Reference](#script-reference) → **[docs/SCRIPTS.md](docs/SCRIPTS.md)**
- [CI/CD & Pre-Built Images](#cicd--pre-built-images) → **[docs/CICD.md](docs/CICD.md)**
- [Troubleshooting](#troubleshooting) → **[docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)**
- [Credits & Next Steps](#credits--next-steps)
@@ -28,7 +26,7 @@ A complete containerized deployment of AzerothCore WoW 3.3.5a (Wrath of the Lich
### Recommendations
- **Docker** with Docker Compose 2
- **16GB+ RAM** and **64GB+ storage**
- **Linux/macOS/WSL2** Fully tested with Ubuntu 24.04; Debian 12 might work, but permissions can require manual intervention
- **Linux/macOS/WSL2** Fully tested with Ubuntu 24.04 and Debian 12
### Three Simple Steps
@@ -47,25 +45,6 @@ cd AzerothCore-RealmMaster
**First deployment takes 30-60 minutes** for database setup and client data download. Subsequent starts are much faster.
### Using Pre-Built Images (No Build Required!)
Skip the build process and deploy with pre-built Docker images:
```bash
# 1. Clone the repository
git clone https://github.com/uprightbass360/AzerothCore-RealmMaster.git
cd AzerothCore-RealmMaster
# 2. Use pre-built configuration
cp .env.prebuilt .env
# 3. Edit .env and set DOCKERHUB_USERNAME
# 4. Deploy
./deploy.sh
```
Pre-built images include the **RealmMaster profile** (32 modules) and are automatically built nightly. See **[docs/PREBUILT_IMAGES.md](docs/PREBUILT_IMAGES.md)** for details.
See [Getting Started](#getting-started) for detailed walkthrough.
## What You Get

View File

@@ -8,10 +8,8 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_PATH="$ROOT_DIR/.env"
DEFAULT_ENV_PATH="$ENV_PATH"
TEMPLATE_PATH="$ROOT_DIR/.env.template"
source "$ROOT_DIR/scripts/bash/project_name.sh"
source "$ROOT_DIR/scripts/bash/lib/common.sh"
# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_PATH" "$TEMPLATE_PATH")"
@@ -19,6 +17,11 @@ ASSUME_YES=0
FORCE_REBUILD=0
SKIP_SOURCE_SETUP=0
CUSTOM_SOURCE_PATH=""
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
info(){ printf '%b\n' "${BLUE} $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}$*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}$*${NC}"; }
show_build_header(){
printf '\n%b\n' "${BLUE}🔨 AZEROTHCORE BUILD SYSTEM 🔨${NC}"
@@ -67,9 +70,39 @@ while [[ $# -gt 0 ]]; do
esac
done
require_cmd(){
command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; }
}
require_cmd docker
require_cmd python3
read_env(){
local key="$1" default="${2:-}"
local value=""
if [ -f "$ENV_PATH" ]; then
value="$(grep -E "^${key}=" "$ENV_PATH" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r' | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
fi
if [ -z "$value" ]; then
value="$default"
fi
echo "$value"
}
update_env_value(){
local key="$1" value="$2" env_file="$ENV_PATH"
[ -n "$env_file" ] || return 0
if [ ! -f "$env_file" ]; then
printf '%s=%s\n' "$key" "$value" >> "$env_file"
return 0
fi
if grep -q "^${key}=" "$env_file"; then
sed -i "s|^${key}=.*|${key}=${value}|" "$env_file"
else
printf '\n%s=%s\n' "$key" "$value" >> "$env_file"
fi
}
MODULE_HELPER="$ROOT_DIR/scripts/python/modules.py"
MODULE_STATE_INITIALIZED=0
declare -a MODULES_COMPILE_LIST=()

View File

@@ -5517,34 +5517,6 @@
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_DELVES",
"name": "Delves",
"repo": "https://github.com/araxiaonline/Delves.git",
"description": "List of the Custom Made Single Player Delves for Araxia Online",
"type": "lua",
"category": "scripting",
"notes": "Discovered via GitHub topic 'azerothcore-module'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
},
{
"key": "MODULE_CLANCENTAUR",
"name": "ClanCentaur",
"repo": "https://github.com/araxiaonline/ClanCentaur.git",
"description": "Custom SQL modifications and patch notes for new faction rewards, reputation items, and unique vendors on the Araxia WoW 3.3.5a server.",
"type": "sql",
"category": "database",
"notes": "Discovered via GitHub topic 'azerothcore-module'",
"status": "active",
"order": 5000,
"requires": [],
"post_install_hooks": [],
"config_cleanup": []
}
]
}

View File

@@ -11,10 +11,8 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
ENV_PATH="$ROOT_DIR/.env"
DEFAULT_ENV_PATH="$ENV_PATH"
TEMPLATE_PATH="$ROOT_DIR/.env.template"
source "$ROOT_DIR/scripts/bash/project_name.sh"
source "$ROOT_DIR/scripts/bash/lib/common.sh"
# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_PATH" "$TEMPLATE_PATH")"
@@ -48,6 +46,12 @@ MODULE_STATE_INITIALIZED=0
declare -a MODULES_COMPILE_LIST=()
declare -a COMPOSE_FILE_ARGS=()
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
info(){ printf '%b\n' "${BLUE} $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}$*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}$*${NC}"; }
show_deployment_header(){
printf '\n%b\n' "${BLUE}⚔️ AZEROTHCORE REALM DEPLOYMENT ⚔️${NC}"
printf '%b\n' "${BLUE}═══════════════════════════════════════${NC}"
@@ -307,6 +311,10 @@ if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ] && [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1
exit 1
fi
require_cmd(){
command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; }
}
require_cmd docker
require_cmd python3
@@ -332,6 +340,18 @@ if [ "$REMOTE_MODE" -eq 1 ]; then
fi
fi
read_env(){
local key="$1" default="${2:-}"
local value=""
if [ -f "$ENV_PATH" ]; then
value="$(grep -E "^${key}=" "$ENV_PATH" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi
if [ -z "$value" ]; then
value="$default"
fi
echo "$value"
}
init_compose_files(){
compose_overrides::build_compose_args "$ROOT_DIR" "$ENV_PATH" "$DEFAULT_COMPOSE_FILE" COMPOSE_FILE_ARGS
}

View File

@@ -26,7 +26,6 @@ services:
MYSQL_INNODB_BUFFER_POOL_SIZE: ${MYSQL_INNODB_BUFFER_POOL_SIZE}
MYSQL_INNODB_LOG_FILE_SIZE: ${MYSQL_INNODB_LOG_FILE_SIZE}
MYSQL_BINLOG_EXPIRE_LOGS_SECONDS: 86400
MYSQL_DISABLE_BINLOG: ${MYSQL_DISABLE_BINLOG}
TZ: "${TZ}"
entrypoint:
- /usr/local/bin/mysql-entrypoint.sh
@@ -51,7 +50,6 @@ services:
- --expire_logs_days=0
- --binlog_expire_logs_seconds=86400
- --binlog_expire_logs_auto_purge=ON
stop_grace_period: 2m
restart: unless-stopped
logging: *logging-default
healthcheck:

View File

@@ -1,321 +0,0 @@
# CI/CD Documentation
This document describes the continuous integration and deployment workflows configured for the AzerothCore RealmMaster project.
## Build and Publish Workflow
The `build-and-publish.yml` workflow automatically builds AzerothCore with your configured modules and publishes Docker images to Docker Hub.
### Trigger Schedule
- **Nightly builds**: Runs automatically at 2 AM UTC every day
- **Manual trigger**: Can be triggered manually via GitHub Actions UI with optional force rebuild
### What It Does
1. **Checks out the repository** - Gets the RealmMaster project code
2. **Sets up Git** - Configures git for module repository cloning
3. **Sets up Docker Buildx** - Enables optimized Docker builds
4. **Logs in to Docker Hub** - Authenticates for image publishing
5. **Prepares the build environment**:
- Runs `./setup.sh --non-interactive --module-config RealmMaster --force`
- Uses the same setup process as local builds (ensures consistency)
- Applies the **RealmMaster module profile** from `config/module-profiles/RealmMaster.json`
- Creates `.env` with proper paths and configured modules (32 modules)
- Automatically selects correct source variant (standard or playerbots)
6. **Caches build artifacts** to speed up subsequent builds:
- Go build cache (`.gocache`)
- Source repository (`local-storage/source`)
7. **Sets up Python 3.11** - Required for module management scripts
8. **Runs `./build.sh --yes`** - This is where the magic happens:
- **Step 1**: Sets up the AzerothCore source repository
- **Step 2**: Detects build requirements
- **Step 3**: Syncs module metadata
- **Step 4**: **Fetches all module repositories** - Automatically clones all 32 enabled module repos from GitHub
- **Step 5**: **Compiles AzerothCore** with all fetched modules integrated
- **Step 6**: Tags the compiled images
9. **Tags images for Docker Hub** - Prepares `latest` and date-based tags
10. **Pushes images to Docker Hub** - Publishes the built images
11. **Generates a build summary** - Shows enabled modules and published images
### Module Fetching Process
The workflow **automatically fetches all module repositories** during the build. Here's how it works:
- The `build.sh` script reads the enabled modules from `.env` (set by the RealmMaster profile)
- For each enabled module, it clones the repository from GitHub (all modules are public repos)
- Module repositories are cloned into the AzerothCore source tree under `modules/`
- Examples of fetched repositories:
- `mod-playerbots` from https://github.com/mod-playerbots/mod-playerbots.git
- `mod-transmog` from https://github.com/azerothcore/mod-transmog.git
- `mod-solo-lfg` from https://github.com/azerothcore/mod-solo-lfg.git
- ...and 29 more
**No manual module setup is required.** The build process handles everything automatically; a simplified sketch of the clone loop follows below.
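This sketch is illustrative only: the real logic lives in `build.sh` together with `scripts/python/modules.py`, and the manifest filename plus its `modules`/`key`/`repo` layout are assumptions based on the manifest diff in this commit.
```bash
#!/usr/bin/env bash
# Hypothetical sketch of the module-fetch step; build.sh and scripts/python/modules.py do the real work.
set -euo pipefail

MANIFEST="config/modules.json"                            # assumed manifest path
SOURCE_TREE="local-storage/source/azerothcore-playerbots" # source checkout used by the build

# For every MODULE_*=1 entry in .env, look up its repo in the manifest and clone it under modules/.
grep -oE '^MODULE_[A-Z0-9_]+=1' .env | cut -d'=' -f1 | while read -r key; do
  repo="$(python3 -c 'import json,sys; m=json.load(open(sys.argv[1])); print(next((e["repo"] for e in m.get("modules", []) if e["key"] == sys.argv[2]), ""))' "$MANIFEST" "$key")"
  [ -n "$repo" ] || continue
  git clone --depth 1 "$repo" "$SOURCE_TREE/modules/$(basename "$repo" .git)"
done
```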
### Published Images
The workflow publishes images with **profile-specific tags** so you know exactly which modules are included:
**Profile-Tagged Images** (recommended):
- `<dockerhub-username>/azerothcore-realmmaster:authserver-realmmaster-latest` ✅ Built nightly
- `<dockerhub-username>/azerothcore-realmmaster:authserver-realmmaster-YYYYMMDD` ✅ Built nightly
- `<dockerhub-username>/azerothcore-realmmaster:worldserver-realmmaster-latest` ✅ Built nightly
- `<dockerhub-username>/azerothcore-realmmaster:worldserver-realmmaster-YYYYMMDD` ✅ Built nightly
**Generic Tags** (backward compatibility, defaults to RealmMaster profile):
- `<dockerhub-username>/azerothcore-realmmaster:authserver-latest` ✅ Built nightly
- `<dockerhub-username>/azerothcore-realmmaster:worldserver-latest` ✅ Built nightly
**Other Profile Tags** (built on-demand via manual workflow trigger):
- `authserver-suggested-modules-latest` - Available when built
- `authserver-all-modules-latest` - Available when built
- `authserver-playerbots-only-latest` - Available when built
**Note**: Only the RealmMaster profile is built automatically on schedule. Other profiles can be built by manually triggering the workflow with different profile names.
## Required GitHub Secrets
To enable the build and publish workflow, you must configure the following secrets in your GitHub repository:
### Setting Up Secrets
1. Go to your GitHub repository
2. Click **Settings** → **Secrets and variables** → **Actions**
3. Click **New repository secret**
4. Add the following secrets:
#### DOCKERHUB_USERNAME
Your Docker Hub username.
**Example**: `yourusername`
#### DOCKERHUB_TOKEN
A Docker Hub access token (recommended) or your Docker Hub password.
**How to create a Docker Hub access token**:
1. Log in to [Docker Hub](https://hub.docker.com/)
2. Click on your username in the top right → **Account Settings**
3. Go to **Security** → **Personal Access Tokens** → **Generate New Token**
4. Give it a description (e.g., "GitHub Actions")
5. Set permissions: **Read & Write**
6. Click **Generate**
7. Copy the token (you won't be able to see it again)
8. Add this token as the `DOCKERHUB_TOKEN` secret in GitHub
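If you prefer the command line, the same secrets can be added with the GitHub CLI; this assumes `gh` is installed, authenticated, and pointed at your fork of the repository:
```bash
# Store the Docker Hub credentials as repository secrets on your fork.
gh secret set DOCKERHUB_USERNAME --repo your-github-username/AzerothCore-RealmMaster --body "your-dockerhub-username"
gh secret set DOCKERHUB_TOKEN    --repo your-github-username/AzerothCore-RealmMaster --body "paste-the-access-token-here"
```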
## Module Configuration
### Default Profile: RealmMaster
The workflow uses the **RealmMaster** module profile by default, which includes 32 carefully selected modules:
- MODULE_PLAYERBOTS - AI-controlled player characters
- MODULE_TRANSMOG - Transmogrification system
- MODULE_SOLO_LFG - Solo dungeon finder
- MODULE_NPC_BUFFER - Buff NPC
- MODULE_ELUNA - Lua scripting engine
- MODULE_AIO - All-in-one interface
- ...and 26 more modules
See the full list in `config/module-profiles/RealmMaster.json`.
### Customizing the Module Profile
To use a different module profile in the CI/CD workflow:
1. **Choose or create a profile** in `config/module-profiles/`:
- `RealmMaster.json` - Default (32 modules)
- `suggested-modules.json` - Alternative suggested set
- `playerbots-only.json` - Just playerbots
- `all-modules.json` - All supported modules
- Create your own JSON file
2. **Trigger the workflow with that profile** (no workflow edits needed): the `module_profile` input of `.github/workflows/build-and-publish.yml` is passed straight to `./setup.sh --module-config`, so any profile JSON in `config/module-profiles/` can be built on demand.
3. **Change the nightly default** (optional) by editing the "Prepare build environment" step, which falls back to RealmMaster for scheduled runs:
```bash
# Scheduled builds currently hardcode the profile:
MODULE_PROFILE="RealmMaster"
# Change this value to build a different profile on the nightly schedule.
```
### Testing Module Profiles Locally
You can test the module profile script locally before committing:
```bash
# List modules that will be enabled
python3 scripts/python/apply_module_profile.py RealmMaster --list-modules
# Apply a profile to create .env
python3 scripts/python/apply_module_profile.py RealmMaster
# Verify the result
grep '^MODULE_.*=1' .env | wc -l
```
## Cache Strategy
The workflow uses GitHub Actions cache to speed up builds:
- **Go build cache**: Cached in `.gocache` directory
- **Source repository**: Cached in `local-storage/source` directory
This significantly reduces build times for subsequent runs.
## Manual Workflow Trigger
To manually trigger the workflow:
1. Go to **Actions** tab in your GitHub repository
2. Click on **Build and Publish** workflow
3. Click **Run workflow**
4. **Choose module profile** (default: RealmMaster):
- Enter profile name (e.g., `RealmMaster`, `suggested-modules`, `all-modules`, `playerbots-only`)
- Profile must exist in `config/module-profiles/`
5. Optionally check **Force rebuild** to rebuild even if no changes detected
6. Click **Run workflow**
The workflow will build with the selected profile and tag images accordingly (e.g., `authserver-realmmaster-latest` for RealmMaster profile).
## Troubleshooting
### Build fails with "missing required command"
The workflow runs on `ubuntu-latest`, which ships with Docker; Python 3.11 is installed by the `setup-python` step. If you see missing command errors, make sure any additional build script dependencies are installed in the workflow before `./build.sh` runs.
### Authentication errors
If you see Docker Hub authentication errors:
- Verify `DOCKERHUB_USERNAME` and `DOCKERHUB_TOKEN` secrets are set correctly
- Ensure the Docker Hub token has **Read & Write** permissions
- Check that the token hasn't expired
### Build timeout
The workflow has a 120-minute timeout. If builds consistently exceed this:
- Consider optimizing the build process
- Check if all module sources are accessible
- Review cache effectiveness
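If builds legitimately need more time (for example the `all-modules` profile), the job timeout can also be raised; a minimal sketch of the relevant workflow fragment:
```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    timeout-minutes: 180   # raised from the 120 minutes this workflow currently uses
```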
## Using Pre-Built Images
After images are published to Docker Hub, users can deploy RealmMaster **without building locally**!
### For End Users
See the complete guide at **[docs/PREBUILT_IMAGES.md](PREBUILT_IMAGES.md)** for step-by-step instructions.
**Quick start for users**:
```bash
# Clone the repository
git clone https://github.com/uprightbass360/AzerothCore-RealmMaster.git
cd AzerothCore-RealmMaster
# Use pre-built configuration
cp .env.prebuilt .env
# Edit .env and set DOCKERHUB_USERNAME=your-dockerhub-username
# Deploy (no build required!)
./deploy.sh
```
### For Developers
To test the published images:
```bash
# Pull latest RealmMaster profile images
docker pull <dockerhub-username>/azerothcore-realmmaster:authserver-realmmaster-latest
docker pull <dockerhub-username>/azerothcore-realmmaster:worldserver-realmmaster-latest
# Or pull specific date-tagged images
docker pull <dockerhub-username>/azerothcore-realmmaster:authserver-realmmaster-20260109
docker pull <dockerhub-username>/azerothcore-realmmaster:worldserver-realmmaster-20260109
# Or use generic latest tags (defaults to RealmMaster profile)
docker pull <dockerhub-username>/azerothcore-realmmaster:authserver-latest
docker pull <dockerhub-username>/azerothcore-realmmaster:worldserver-latest
```
### Pre-Built Configuration File
The `.env.prebuilt` template provides a minimal configuration that:
- References Docker Hub images instead of local builds
- Removes all build-related variables
- Includes only runtime configuration
- Is ready to use with minimal editing (just set DOCKERHUB_USERNAME)
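For example, a minimal non-interactive way to produce a working `.env` (the `sed` pattern assumes the placeholder value shipped in `.env.prebuilt`; GNU sed shown, use `sed -i ''` on macOS):
```bash
cp .env.prebuilt .env
sed -i 's/^DOCKERHUB_USERNAME=.*/DOCKERHUB_USERNAME=your-dockerhub-username/' .env
```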
**Benefits of pre-built images**:
- ✅ Skip 15-45 minute build time
- ✅ No build dependencies required
- ✅ Same 32 RealmMaster modules included
- ✅ Automatic nightly updates available
- ✅ Date-tagged versions for stability
- ✅ Profile-tagged images for clear identification
## Building Multiple Profiles
You can build different module profiles by manually triggering the workflow:
### Example: Build All Modules Profile
1. Go to **Actions** → **Build and Publish**
2. Click **Run workflow**
3. Set **module_profile** to `all-modules`
4. Click **Run workflow**
This will create:
- `authserver-all-modules-latest`
- `authserver-all-modules-YYYYMMDD`
- `worldserver-all-modules-latest`
- `worldserver-all-modules-YYYYMMDD`
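The same run can be started from a terminal instead of the Actions UI; this sketch assumes the GitHub CLI (`gh`) is installed and authenticated, and is executed from inside your clone (otherwise add `--repo <owner>/<repo>`):
```bash
# Queue an on-demand build of the all-modules profile, forcing a rebuild.
gh workflow run build-and-publish.yml -f module_profile=all-modules -f force_rebuild=true

# Follow the run that was just queued.
gh run watch
```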
### Creating Custom Profile Builds
To build a custom profile:
1. **Create profile JSON** in `config/module-profiles/my-custom-profile.json`:
```json
{
"modules": [
"MODULE_PLAYERBOTS",
"MODULE_TRANSMOG",
"MODULE_SOLO_LFG"
],
"label": "My Custom Profile",
"description": "Custom module selection",
"order": 100
}
```
2. **Trigger workflow** with profile name `my-custom-profile`
3. **Images created**:
- `authserver-my-custom-profile-latest`
- `worldserver-my-custom-profile-latest`
### Scheduled Builds
The nightly scheduled build always uses the **RealmMaster** profile. To schedule builds for different profiles, you can:
1. Create additional workflow files (e.g., `.github/workflows/build-all-modules.yml`)
2. Set different cron schedules
3. Hardcode the profile name in the workflow
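One way to do that is sketched below: a small scheduled workflow that simply dispatches the existing Build and Publish workflow with a different `module_profile` input. The file name, cron time, and permissions block are illustrative, not a drop-in file:
```yaml
# .github/workflows/build-all-modules.yml (illustrative sketch)
name: Nightly all-modules build
on:
  schedule:
    - cron: '0 4 * * *'   # 4 AM UTC, offset from the RealmMaster build at 2 AM
jobs:
  dispatch:
    runs-on: ubuntu-latest
    permissions:
      actions: write        # required to create a workflow_dispatch event with GITHUB_TOKEN
    steps:
      - name: Trigger build-and-publish with the all-modules profile
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          gh workflow run build-and-publish.yml \
            --repo "${{ github.repository }}" \
            -f module_profile=all-modules
```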

View File

@@ -187,8 +187,6 @@ Because MySQL stores its hot data in a tmpfs (`/var/lib/mysql-runtime`) while pe
- If **any tables exist**, the script logs `Backup restoration completed successfully` and skips the expensive restore just as before.
- If **no tables are found or the query fails**, the script logs `Restoration marker found, but databases are empty - forcing re-import`, automatically clears the stale marker, and reruns the backup restore + `dbimport` pipeline so services always start with real data.
On graceful shutdown, the MySQL container now syncs the tmpfs datadir back into `/var/lib/mysql-persistent` so a normal restart keeps the latest state. Unclean shutdowns (host reboot, OOM kill) can still lose recent changes, so the backup restore path remains the safety net.
To complement that one-shot safety net, the long-running `ac-db-guard` service now watches the runtime tmpfs. It polls MySQL, and if it ever finds those schemas empty (the usual symptom after a daemon restart), it automatically reruns `db-import-conditional.sh` to rehydrate from the most recent backup before marking itself healthy. All auth/world services now depend on `ac-db-guard`'s health check, guaranteeing that AzerothCore never boots without real tables in memory. The guard also mounts the working SQL tree from `local-storage/source/azerothcore-playerbots/data/sql` into the db containers so that every `dbimport` run uses the exact SQL that matches your checked-out source, even if the Docker image was built earlier.
Because new features sometimes require schema changes even when the databases already contain data, `ac-db-guard` now performs a `dbimport` verification sweep (configurable via `DB_GUARD_VERIFY_INTERVAL_SECONDS`) to proactively apply any outstanding updates from the mounted SQL tree. By default it runs once per bootstrap and then every 24 hours, so the auth/world servers always see the columns/tables expected by their binaries without anyone having to run host scripts manually.
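Conceptually, the guard amounts to a loop like the sketch below. It is greatly simplified: the real implementation lives in the `ac-db-guard` container scripts, and the script paths and SQL query shown here are assumptions.
```bash
#!/usr/bin/env bash
# Simplified sketch of the ac-db-guard loop; not the actual container script.
# Expects MYSQL_HOST and MYSQL_ROOT_PASSWORD from the container environment.
set -euo pipefail

: "${DB_GUARD_RECHECK_SECONDS:=120}"
: "${DB_GUARD_VERIFY_INTERVAL_SECONDS:=86400}"
last_verify=0

tables_in() {  # count tables in a schema
  mysql -h"$MYSQL_HOST" -uroot -p"$MYSQL_ROOT_PASSWORD" -N -e \
    "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='$1'"
}

while true; do
  if [ "$(tables_in acore_world)" -eq 0 ] || [ "$(tables_in acore_auth)" -eq 0 ]; then
    echo "Schemas empty - rehydrating from the most recent backup"
    /scripts/db-import-conditional.sh   # assumed path to the restore/import helper
  elif [ $(( $(date +%s) - last_verify )) -ge "$DB_GUARD_VERIFY_INTERVAL_SECONDS" ]; then
    echo "Running periodic dbimport verification sweep"
    /scripts/run-dbimport.sh            # hypothetical wrapper around dbimport
    last_verify=$(date +%s)
  fi
  sleep "$DB_GUARD_RECHECK_SECONDS"
done
```
Because `last_verify` starts at 0, the first pass with populated schemas runs the verification sweep immediately, matching the once-per-bootstrap behaviour described above.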

View File

@@ -1,336 +0,0 @@
# Deploying Pre-Built RealmMaster Images
This guide explains how to deploy AzerothCore RealmMaster using pre-built Docker images from Docker Hub. **No local building required!**
## What's Included in Pre-Built Images
The pre-built images are automatically built nightly with the **RealmMaster module profile**, which includes **32 carefully selected modules**:
- **MODULE_PLAYERBOTS** - AI-controlled player characters
- **MODULE_TRANSMOG** - Transmogrification system
- **MODULE_SOLO_LFG** - Solo dungeon finder
- **MODULE_ELUNA** - Lua scripting engine
- **MODULE_AIO** - All-in-one interface
- **MODULE_NPC_BUFFER** - Buff NPC
- **MODULE_NPC_BEASTMASTER** - Pet management
- **MODULE_SOLOCRAFT** - Solo dungeon scaling
- **MODULE_1V1_ARENA** - 1v1 arena system
- **MODULE_ACCOUNT_ACHIEVEMENTS** - Account-wide achievements
- ...and 22 more modules!
See `config/module-profiles/RealmMaster.json` for the complete list.
## Prerequisites
- Docker with Docker Compose v2
- 16GB+ RAM
- 64GB+ storage
- Linux/macOS/WSL2
## Quick Start
### 1. Clone the Repository
```bash
git clone https://github.com/uprightbass360/AzerothCore-RealmMaster.git
cd AzerothCore-RealmMaster
```
### 2. Create Configuration File
```bash
# Copy the pre-built images template
cp .env.prebuilt .env
```
### 3. Configure Docker Hub Username
Edit `.env` and set your Docker Hub username:
```bash
# Change this line to your Docker Hub username:
DOCKERHUB_USERNAME=your-dockerhub-username
```
### 4. Optional: Customize Settings
Edit `.env` to customize:
- **Server address**: `SERVER_ADDRESS=your-server-ip`
- **Passwords**: `MYSQL_ROOT_PASSWORD=your-password`
- **Playerbot population**: `PLAYERBOT_MIN_BOTS` and `PLAYERBOT_MAX_BOTS`
- **Server preset**: `SERVER_CONFIG_PRESET=fast-leveling` (or blizzlike, hardcore-pvp, casual-pve)
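For example, a typical small set of overrides in `.env` might look like this (values are illustrative; all of the keys exist in `.env.prebuilt`):
```bash
SERVER_ADDRESS=203.0.113.10          # your public IP or domain
MYSQL_ROOT_PASSWORD=change-me-now    # do not keep the default password
PLAYERBOT_MIN_BOTS=40
PLAYERBOT_MAX_BOTS=120
SERVER_CONFIG_PRESET=fast-leveling   # none, blizzlike, fast-leveling, hardcore-pvp, casual-pve
```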
### 5. Deploy
```bash
./deploy.sh
```
The deployment will:
- Pull pre-built images from Docker Hub
- Set up MySQL database with all module SQL
- Configure client data
- Start all services
**First deployment takes 30-60 minutes** for database setup and client data download.
## Image Tags
The CI/CD workflow publishes images with **profile-specific tags** so you know exactly which modules are included:
### Profile-Tagged Images (Recommended)
Each module profile gets its own tag:
- **`:authserver-realmmaster-latest`** - RealmMaster profile (32 modules)
- **`:worldserver-realmmaster-latest`** - RealmMaster profile (32 modules)
- **`:authserver-realmmaster-YYYYMMDD`** - Date-tagged RealmMaster builds
- **`:worldserver-realmmaster-YYYYMMDD`** - Date-tagged RealmMaster builds
Other profiles (available when built via GitHub Actions):
- **`:authserver-suggested-modules-latest`** - Suggested modules profile (not yet published)
- **`:authserver-all-modules-latest`** - All modules profile (not yet published)
- **`:authserver-playerbots-only-latest`** - Playerbots only (not yet published)
**Note**: Currently only the RealmMaster profile is built nightly. Other profiles can be built on-demand by manually triggering the CI/CD workflow.
### Generic Tags (Backward Compatibility)
- **`:authserver-latest`** - Latest build (defaults to RealmMaster profile)
- **`:worldserver-latest`** - Latest build (defaults to RealmMaster profile)
### Choosing a Profile
In `.env.prebuilt`, set the `MODULE_PROFILE` variable:
```bash
# Choose your profile
MODULE_PROFILE=realmmaster # 32 modules (default, recommended)
# MODULE_PROFILE=suggested-modules # Alternative module set
# MODULE_PROFILE=all-modules # All supported modules
# MODULE_PROFILE=playerbots-only # Just playerbots
# Images automatically reference the selected profile
AC_AUTHSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:authserver-${MODULE_PROFILE}-latest
AC_WORLDSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:worldserver-${MODULE_PROFILE}-latest
```
### Using Date-Tagged Images
To pin to a specific build date, edit `.env`:
```bash
# Set your profile
MODULE_PROFILE=realmmaster
# Pin to a specific date (example: January 9, 2026)
AC_AUTHSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:authserver-${MODULE_PROFILE}-20260109
AC_WORLDSERVER_IMAGE_MODULES=${DOCKERHUB_USERNAME}/${COMPOSE_PROJECT_NAME}:worldserver-${MODULE_PROFILE}-20260109
```
## Differences from Local Build
### What You DON'T Need
When using pre-built images, you **skip**:
- ❌ Running `./setup.sh` (module selection)
- ❌ Running `./build.sh` (compilation)
- ❌ 15-45 minute build time
- ❌ Build dependencies (Go compiler, etc.)
### What's the Same
Everything else works identically:
- ✅ Database setup and migrations
- ✅ Module SQL installation
- ✅ Configuration management
- ✅ Backup system
- ✅ All management commands
- ✅ phpMyAdmin and Keira3 tools
## Verifying Your Deployment
After deployment completes:
### 1. Check Container Status
```bash
./status.sh
```
You should see all services running:
- ✅ ac-mysql
- ✅ ac-authserver
- ✅ ac-worldserver
- ✅ ac-phpmyadmin
- ✅ ac-keira3
### 2. Verify Modules Are Loaded
Check the worldserver logs:
```bash
docker logs ac-worldserver | grep "module"
```
You should see messages about 32 modules being loaded.
### 3. Access Management Tools
- **phpMyAdmin**: http://localhost:8081
- **Keira3**: http://localhost:4201
## Post-Installation
### Create Admin Account
1. Attach to the worldserver container:
```bash
docker attach ac-worldserver
```
2. Create an account and set GM level:
```
account create admin password
account set gmlevel admin 3 -1
```
3. Detach: Press `Ctrl+P` then `Ctrl+Q`
### Configure Client
Edit your WoW 3.3.5a client's `realmlist.wtf`:
```
set realmlist 127.0.0.1
```
(Replace `127.0.0.1` with your server's IP if remote)
## Updating to Latest Images
To update to the latest nightly build:
```bash
# Pull latest images
docker compose pull
# Restart services
docker compose down
docker compose up -d
```
**Note**: Database schema updates will be applied automatically on restart.
## Switching Between Pre-Built and Local Build
### From Pre-Built to Local Build
If you want to customize modules and build locally:
```bash
# Remove pre-built .env
rm .env
# Run interactive setup
./setup.sh
# Build with your custom modules
./build.sh
# Deploy
./deploy.sh
```
### From Local Build to Pre-Built
If you want to use pre-built images instead:
```bash
# Back up your current .env
mv .env .env.custom
# Use pre-built configuration
cp .env.prebuilt .env
# Edit DOCKERHUB_USERNAME in .env
# Deploy
./deploy.sh
```
## Troubleshooting
### Image Pull Errors
**Problem**: `Error response from daemon: manifest not found`
**Solutions**:
1. Verify `DOCKERHUB_USERNAME` is set correctly in `.env`
2. Check that the images exist at: https://hub.docker.com/u/your-username
3. Ensure the CI/CD workflow has run successfully
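To check whether a tag has actually been published before pulling it, you can inspect the manifest directly (on older Docker versions `docker manifest` may need experimental CLI features enabled):
```bash
# Fails with a manifest error if the tag has never been pushed to Docker Hub.
docker manifest inspect your-dockerhub-username/azerothcore-realmmaster:authserver-realmmaster-latest
```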
### Module SQL Not Applied
**Problem**: Modules don't seem to be working
**Solution**: The module SQL is automatically applied during deployment. Check:
```bash
# Verify module SQL staging
ls -la storage/module-sql-updates/
# Check database for module tables
docker exec -it ac-mysql mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e "SHOW TABLES" acore_world | grep -i module
```
### Performance Issues
**Problem**: Server is slow or laggy
**Solutions**:
1. Increase MySQL tmpfs size in `.env`: `MYSQL_RUNTIME_TMPFS_SIZE=16G`
2. Reduce playerbot population: `PLAYERBOT_MAX_BOTS=100`
3. Check system resources: `docker stats`
## Advanced Configuration
### Custom Module Selection
Pre-built images include all RealmMaster modules. To disable specific modules:
1. Edit server configuration files in `storage/config/`
2. Set module enable flags to 0
3. Restart worldserver: `docker compose restart ac-worldserver`
**Note**: You can only disable modules, not add new ones (requires local build).
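As a sketch only (the config file and option names differ per module; check each module's shipped `.conf.dist` for the real key, and treat both the path and `Solocraft.Enable` below as assumptions), disabling one module might look like:
```bash
# Assumed file location and key name - verify against the module's shipped .conf.dist.
sed -i 's/^Solocraft\.Enable = 1/Solocraft.Enable = 0/' storage/config/modules/Solocraft.conf
docker compose restart ac-worldserver
```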
### Server Configuration Presets
Apply configuration presets for different server types:
```bash
# In .env, set one of these presets:
SERVER_CONFIG_PRESET=blizzlike # Authentic WotLK experience (1x rates)
SERVER_CONFIG_PRESET=fast-leveling # 3x XP rates, QoL improvements
SERVER_CONFIG_PRESET=hardcore-pvp # Competitive PvP (1.5x rates)
SERVER_CONFIG_PRESET=casual-pve # Relaxed PvE (2x rates)
```
Restart after changing: `docker compose restart ac-worldserver`
## Getting Help
- **Documentation**: See other guides in `docs/`
- **GitHub Issues**: https://github.com/uprightbass360/AzerothCore-RealmMaster/issues
- **AzerothCore Discord**: https://discord.gg/gkt4y2x
## Next Steps
- [Database Management](DATABASE_MANAGEMENT.md) - Backups, restores, migrations
- [Getting Started Guide](GETTING_STARTED.md) - Detailed walkthrough
- [Troubleshooting](TROUBLESHOOTING.md) - Common issues and solutions
- [Module Catalog](MODULES.md) - Complete list of available modules

View File

@@ -1,213 +0,0 @@
# Release Strategy
This document explains how AzerothCore RealmMaster releases work and what they contain.
## Release Philosophy
Since **Docker images are stored on Docker Hub**, GitHub releases serve as **deployment packages** rather than source distributions. Each release contains everything users need to deploy pre-built images without building from source.
## What's in a Release?
### Release Assets (ZIP Archive)
Each release includes a downloadable `.zip` file containing:
```
azerothcore-realmmaster-v1.0.0-realmmaster.zip
├── .env.prebuilt # Pre-configured for Docker Hub images
├── docker-compose.yml # Service definitions
├── deploy.sh # Deployment script
├── status.sh # Status monitoring
├── cleanup.sh # Cleanup utilities
├── scripts/ # Required Python/Bash scripts
├── config/ # Module manifest and presets
├── docs/ # Complete documentation
├── QUICKSTART.md # Release-specific quick start
└── README.md # Project overview
```
### Release Notes
Each release includes:
- Module profile and count
- Docker Hub image tags (date-specific and latest)
- Quick start instructions
- Complete module list
- Build information (commit, date, source variant)
- Links to documentation
- Known issues
## Release Types
### 1. Profile-Based Releases
Each module profile gets its own release variant:
- **v1.0.0-realmmaster** - RealmMaster profile (32 modules, recommended)
- **v1.0.0-suggested-modules** - Alternative suggested module set
- **v1.0.0-all-modules** - All supported modules
- **v1.0.0-playerbots-only** - Just playerbots
Users choose the release that matches their desired module set.
### 2. Version Numbering
We use semantic versioning:
- **Major** (v1.0.0 → v2.0.0): Breaking changes, major feature additions
- **Minor** (v1.0.0 → v1.1.0): New modules, feature enhancements
- **Patch** (v1.0.0 → v1.0.1): Bug fixes, documentation updates
## Docker Hub Image Tags
Releases reference specific Docker Hub tags:
### Date-Tagged Images (Recommended for Production)
```
uprightbass360/azerothcore-realmmaster:authserver-realmmaster-20260109
uprightbass360/azerothcore-realmmaster:worldserver-realmmaster-20260109
```
- **Immutable**: Never change
- **Stable**: Guaranteed to match the release
- **Recommended**: For production deployments
### Latest Tags (Auto-Updated)
```
uprightbass360/azerothcore-realmmaster:authserver-realmmaster-latest
uprightbass360/azerothcore-realmmaster:worldserver-realmmaster-latest
```
- **Mutable**: Updated nightly by CI/CD
- **Convenient**: Always get the newest build
- **Use case**: Development, testing, staying current
## Creating a Release
### Automated (Recommended)
Use the GitHub Actions workflow:
1. Go to **Actions** → **Create Release**
2. Click **Run workflow**
3. Fill in:
- **Version**: `v1.0.0`
- **Profile**: `RealmMaster` (or other profile)
- **Pre-release**: Check if beta/RC
4. Click **Run workflow**
The workflow automatically:
- Creates deployment package with all files
- Generates release notes with module list
- Uploads ZIP archive as release asset
- Creates GitHub release with proper tags
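The same dispatch can usually be triggered from a terminal with the GitHub CLI; the workflow file name and input names below are assumptions, so match them to the actual workflow definition:
```bash
# Trigger the release workflow without opening the browser (names are assumptions)
gh workflow run create-release.yml -f version=v1.0.0 -f profile=RealmMaster
```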
### Manual
If you need to create a release manually:
```bash
# 1. Tag the release
git tag -a v1.0.0 -m "Release v1.0.0 - RealmMaster Profile"
git push origin v1.0.0
# 2. Create deployment package
./scripts/create-release-package.sh v1.0.0 RealmMaster
# 3. Create GitHub release
# Go to GitHub → Releases → Draft a new release
# - Tag: v1.0.0
# - Title: RealmMaster v1.0.0 - RealmMaster Profile
# - Upload: azerothcore-realmmaster-v1.0.0-realmmaster.zip
# - Add release notes
```
## Release Checklist
Before creating a release:
- [ ] Verify CI/CD build succeeded
- [ ] Test Docker Hub images work correctly
- [ ] Update CHANGELOG.md
- [ ] Update version in documentation if needed
- [ ] Verify all module SQL migrations are included
- [ ] Test deployment on clean system
- [ ] Update known issues section
## For Users: Using a Release
### Quick Start
```bash
# 1. Download release
wget https://github.com/uprightbass360/AzerothCore-RealmMaster/releases/download/v1.0.0/azerothcore-realmmaster-v1.0.0-realmmaster.zip
# 2. Extract
unzip azerothcore-realmmaster-v1.0.0-realmmaster.zip
cd azerothcore-realmmaster-v1.0.0-realmmaster
# 3. Configure
nano .env.prebuilt
# Set: DOCKERHUB_USERNAME=your-dockerhub-username
# 4. Deploy
mv .env.prebuilt .env
./deploy.sh
```
### Upgrading Between Releases
```bash
# 1. Backup your data
./scripts/bash/backup.sh
# 2. Download new release
wget https://github.com/.../releases/download/v1.1.0/...
# 3. Extract to new directory
unzip azerothcore-realmmaster-v1.1.0-realmmaster.zip
# 4. Copy your .env and data
cp old-version/.env new-version/.env
cp -r old-version/storage new-version/storage
# 5. Deploy new version
cd new-version
./deploy.sh
```
## Release Schedule
- **Nightly Builds**: Images built automatically at 2 AM UTC
- **Releases**: Created as needed when significant changes accumulate
- **LTS Releases**: Planned quarterly for long-term support
## Support
- **Release Issues**: https://github.com/uprightbass360/AzerothCore-RealmMaster/issues
- **Documentation**: Included in each release ZIP
- **Discord**: https://discord.gg/gkt4y2x
## FAQ
### Why are images on Docker Hub and not in releases?
Docker images can be 1-2 GB each, and GitHub releases have a 2 GB file-size limit, so release assets should stay lightweight. Docker Hub is designed for hosting images, while GitHub releases are better suited to deployment packages.
### Can I use latest tags in production?
We recommend **date-tagged images** for production (e.g., `authserver-realmmaster-20260109`). Latest tags are updated nightly and may have untested changes.
### How do I know which image version a release uses?
Check the release notes - they include the specific Docker Hub tags (date-stamped) that were tested with that release.
### What if I want to build from source instead?
Clone the repository and use `./setup.sh` + `./build.sh` instead of using pre-built releases. See [GETTING_STARTED.md](GETTING_STARTED.md) for instructions.
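A minimal sketch of that path, using the scripts described elsewhere in this documentation:
```bash
# Build-from-source path (see GETTING_STARTED.md for the full walkthrough)
git clone https://github.com/uprightbass360/AzerothCore-RealmMaster.git
cd AzerothCore-RealmMaster
./setup.sh    # interactive configuration
./build.sh    # build images with your chosen modules
./deploy.sh   # bring the stack up
```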
### Are releases required?
No! You can:
1. **Use releases**: Download ZIP, deploy pre-built images (easiest)
2. **Use nightly images**: Pull latest tags directly from Docker Hub
3. **Build from source**: Clone repo, build locally (most flexible)
Releases are just convenient snapshots for users who want stability.

View File

@@ -96,7 +96,7 @@ Comprehensive cleanup with multiple destruction levels and safety checks.
Starts all configured containers using appropriate profiles.
#### `scripts/bash/stop-containers.sh` - Graceful Shutdown
Stops all containers with proper cleanup and data protection. The MySQL container performs a shutdown-time sync from tmpfs to persistent storage.
Stops all containers with proper cleanup and data protection.
#### `status.sh` - Service Health Monitoring
```bash

View File

@@ -75,7 +75,7 @@ services:
| Upstream Concept | RealmMaster Equivalent | Notes |
| ---------------- | ---------------------- | ----- |
| MySQL container with bind-mounted storage | `ac-mysql` + `ac-storage-init` | Bind mounts live under `storage/` and `local-storage/`; tmpfs keeps runtime data fast and is synced to disk on graceful shutdown. |
| MySQL container with bind-mounted storage | `ac-mysql` + `ac-storage-init` | Bind mounts live under `storage/` and `local-storage/`; tmpfs keeps runtime data fast and is checkpointed to disk automatically. |
| Manual DB import container | `ac-db-import` & `ac-db-init` | Automatically imports schemas or restores from backups; disable by skipping the `db` profile if you truly want manual control. |
| World/Auth servers with optional DBC overrides | `ac-authserver-*` / `ac-worldserver-*` | Profile-based builds cover vanilla, playerbots, and custom module binaries. DBC overrides go into the shared client data mount just like upstream. |
| Client data bind mounts | `ac-client-data-standard` (or `-playerbots`) | Runs `scripts/bash/download-client-data.sh`, caches releases, and mounts them read-only into the worldserver. |

View File

@@ -6,7 +6,6 @@ INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$SCRIPT_DIR"
source "$SCRIPT_DIR/lib/common.sh"
# Load environment defaults if present
if [ -f "$PROJECT_ROOT/.env" ]; then
@@ -64,7 +63,8 @@ Examples:
EOF
}
die(){ fatal "$1"; }
err(){ printf 'Error: %s\n' "$*" >&2; }
die(){ err "$1"; exit 1; }
normalize_token(){
printf '%s' "$1" | tr '[:upper:]' '[:lower:]' | tr -d '[:space:]'

View File

@@ -6,7 +6,15 @@ INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
source "$SCRIPT_DIR/lib/common.sh"
COLOR_RED='\033[0;31m'
COLOR_GREEN='\033[0;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_RESET='\033[0m'
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }
SUPPORTED_DBS=(auth characters world)
declare -A SUPPORTED_SET=()

View File

@@ -6,7 +6,18 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
source "$SCRIPT_DIR/lib/common.sh"
COLOR_RED='\033[0;31m'
COLOR_GREEN='\033[0;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_BLUE='\033[0;34m'
COLOR_CYAN='\033[0;36m'
COLOR_RESET='\033[0m'
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
info(){ printf '%b\n' "${COLOR_CYAN}$*${COLOR_RESET}"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }
MYSQL_PW=""
BACKUP_DIR=""

View File

@@ -449,7 +449,7 @@ if [ -n "$backup_path" ]; then
echo "⚠️ Backup restoration failed, will proceed with fresh database setup"
fi
else
echo " No valid SQL backups found - proceeding with fresh setup"
echo " No valid backups found - proceeding with fresh setup"
echo "$(date): No backup found - fresh setup needed" > "$RESTORE_FAILED_MARKER"
fi

View File

@@ -9,16 +9,35 @@ ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
ENV_FILE="$ROOT_DIR/.env"
TEMPLATE_FILE="$ROOT_DIR/.env.template"
ENV_PATH="$ENV_FILE"
DEFAULT_ENV_PATH="$ENV_FILE"
source "$ROOT_DIR/scripts/bash/project_name.sh"
source "$ROOT_DIR/scripts/bash/lib/common.sh"
# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
source "$ROOT_DIR/scripts/bash/compose_overrides.sh"
declare -a COMPOSE_FILE_ARGS=()
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
info(){ echo -e "${BLUE} $*${NC}"; }
ok(){ echo -e "${GREEN}$*${NC}"; }
warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
err(){ echo -e "${RED}$*${NC}"; }
read_env(){
local key="$1" default="${2:-}" value=""
if [ -f "$ENV_FILE" ]; then
value="$(grep -E "^${key}=" "$ENV_FILE" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi
if [ -z "$value" ]; then
value="$default"
fi
echo "$value"
}
resolve_project_name(){
local raw_name sanitized
raw_name="$(read_env COMPOSE_PROJECT_NAME "$DEFAULT_PROJECT_NAME")"
@@ -41,6 +60,13 @@ show_header(){
echo -e "${BLUE} 📊 Enabling Management UIs 📊${NC}\n"
}
ensure_command(){
if ! command -v "$1" >/dev/null 2>&1; then
err "Required command '$1' not found in PATH."
exit 1
fi
}
ensure_mysql_running(){
local mysql_service="ac-mysql"
local mysql_container
@@ -82,7 +108,7 @@ EOF
exit 0
fi
require_cmd docker
ensure_command docker
docker info >/dev/null 2>&1 || { err "Docker daemon unavailable."; exit 1; }
PROJECT_NAME="$(resolve_project_name)"

View File

@@ -6,7 +6,17 @@ INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/../.." # Go to project root
source "$SCRIPT_DIR/lib/common.sh"
COLOR_RED='\033[0;31m'
COLOR_GREEN='\033[0;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_BLUE='\033[0;34m'
COLOR_RESET='\033[0m'
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
info(){ printf '%b\n' "${COLOR_BLUE}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }
# Source environment variables
if [ -f ".env" ]; then
@@ -270,4 +280,4 @@ if [[ $processed -gt 0 ]]; then
log ""
log "Character imports completed! Processed files moved to $IMPORT_DIR/processed/"
log "You can now log in and access your imported characters."
fi
fi

View File

@@ -127,23 +127,17 @@ read_env() {
}
# Read value from .env.template file (used during setup)
# This is similar to read_env but specifically for template files.
# This is similar to read_env but specifically for template files
#
# Usage:
# get_template_value KEY [TEMPLATE_FILE] [DEFAULT]
# get_template_value KEY [TEMPLATE_FILE]
# value=$(get_template_value "MYSQL_PASSWORD")
# value=$(get_template_value "DOCKER_IMAGE_TAG" ".env.template" "latest")
#
get_template_value() {
local key="$1"
local template_file="${2:-${TEMPLATE_FILE:-${TEMPLATE_PATH:-.env.template}}}"
local fallback="${3:-}"
if [ ! -f "$template_file" ]; then
if [ -n "$fallback" ]; then
echo "$fallback"
return 0
fi
fatal "Template file not found: $template_file"
fi
@@ -153,11 +147,8 @@ get_template_value() {
raw_line=$(grep "^${key}=" "$template_file" 2>/dev/null | head -1)
if [ -z "$raw_line" ]; then
if [ -n "$fallback" ]; then
echo "$fallback"
return 0
fi
fatal "Key '$key' not found in template: $template_file"
err "Key '$key' not found in template: $template_file"
return 1
fi
value="${raw_line#*=}"
@@ -168,10 +159,6 @@ get_template_value() {
value="${BASH_REMATCH[1]}"
fi
if [ -z "$value" ] && [ -n "$fallback" ]; then
value="$fallback"
fi
echo "$value"
}

View File

@@ -11,67 +11,67 @@ if ! command -v "$ORIGINAL_ENTRYPOINT" >/dev/null 2>&1; then
fi
TARGET_SPEC="${MYSQL_RUNTIME_USER:-${CONTAINER_USER:-}}"
target_group_name=""
if [ -n "${TARGET_SPEC:-}" ] && [ "${TARGET_SPEC}" != "0:0" ]; then
if [[ "$TARGET_SPEC" != *:* ]]; then
echo "mysql-entrypoint: Expected MYSQL_RUNTIME_USER/CONTAINER_USER in uid:gid form, got '${TARGET_SPEC}'" >&2
exit 1
fi
IFS=':' read -r TARGET_UID TARGET_GID <<< "$TARGET_SPEC"
if ! [[ "$TARGET_UID" =~ ^[0-9]+$ ]] || ! [[ "$TARGET_GID" =~ ^[0-9]+$ ]]; then
echo "mysql-entrypoint: UID/GID must be numeric (received uid='${TARGET_UID}' gid='${TARGET_GID}')" >&2
exit 1
fi
if ! id mysql >/dev/null 2>&1; then
echo "mysql-entrypoint: mysql user not found in container" >&2
exit 1
fi
current_uid="$(id -u mysql)"
current_gid="$(id -g mysql)"
# Adjust group if needed
if [ "$current_gid" != "$TARGET_GID" ]; then
if groupmod -g "$TARGET_GID" mysql 2>/dev/null; then
target_group_name="mysql"
else
existing_group="$(getent group "$TARGET_GID" | cut -d: -f1 || true)"
if [ -z "$existing_group" ]; then
existing_group="mysql-host"
if ! getent group "$existing_group" >/dev/null 2>&1; then
groupadd -g "$TARGET_GID" "$existing_group"
fi
fi
usermod -g "$existing_group" mysql
target_group_name="$existing_group"
fi
else
target_group_name="$(getent group mysql | cut -d: -f1)"
fi
if [ -z "$target_group_name" ]; then
target_group_name="$(getent group "$TARGET_GID" | cut -d: -f1 || true)"
fi
# Adjust user UID if needed
if [ "$current_uid" != "$TARGET_UID" ]; then
if getent passwd "$TARGET_UID" >/dev/null 2>&1 && [ "$(getent passwd "$TARGET_UID" | cut -d: -f1)" != "mysql" ]; then
echo "mysql-entrypoint: UID ${TARGET_UID} already in use by $(getent passwd "$TARGET_UID" | cut -d: -f1)." >&2
echo "mysql-entrypoint: Please choose a different CONTAINER_USER or adjust the image." >&2
exit 1
fi
usermod -u "$TARGET_UID" mysql
fi
# Ensure group lookup after potential changes
target_group_name="$(getent group "$TARGET_GID" | cut -d: -f1 || echo "$target_group_name")"
else
target_group_name="$(getent group mysql | cut -d: -f1 || echo mysql)"
if [ -z "${TARGET_SPEC:-}" ] || [ "${TARGET_SPEC}" = "0:0" ]; then
exec "$ORIGINAL_ENTRYPOINT" "$@"
fi
if [[ "$TARGET_SPEC" != *:* ]]; then
echo "mysql-entrypoint: Expected MYSQL_RUNTIME_USER/CONTAINER_USER in uid:gid form, got '${TARGET_SPEC}'" >&2
exit 1
fi
IFS=':' read -r TARGET_UID TARGET_GID <<< "$TARGET_SPEC"
if ! [[ "$TARGET_UID" =~ ^[0-9]+$ ]] || ! [[ "$TARGET_GID" =~ ^[0-9]+$ ]]; then
echo "mysql-entrypoint: UID/GID must be numeric (received uid='${TARGET_UID}' gid='${TARGET_GID}')" >&2
exit 1
fi
if ! id mysql >/dev/null 2>&1; then
echo "mysql-entrypoint: mysql user not found in container" >&2
exit 1
fi
current_uid="$(id -u mysql)"
current_gid="$(id -g mysql)"
# Adjust group if needed
target_group_name=""
if [ "$current_gid" != "$TARGET_GID" ]; then
if groupmod -g "$TARGET_GID" mysql 2>/dev/null; then
target_group_name="mysql"
else
existing_group="$(getent group "$TARGET_GID" | cut -d: -f1 || true)"
if [ -z "$existing_group" ]; then
existing_group="mysql-host"
if ! getent group "$existing_group" >/dev/null 2>&1; then
groupadd -g "$TARGET_GID" "$existing_group"
fi
fi
usermod -g "$existing_group" mysql
target_group_name="$existing_group"
fi
else
target_group_name="$(getent group mysql | cut -d: -f1)"
fi
if [ -z "$target_group_name" ]; then
target_group_name="$(getent group "$TARGET_GID" | cut -d: -f1 || true)"
fi
# Adjust user UID if needed
if [ "$current_uid" != "$TARGET_UID" ]; then
if getent passwd "$TARGET_UID" >/dev/null 2>&1 && [ "$(getent passwd "$TARGET_UID" | cut -d: -f1)" != "mysql" ]; then
echo "mysql-entrypoint: UID ${TARGET_UID} already in use by $(getent passwd "$TARGET_UID" | cut -d: -f1)." >&2
echo "mysql-entrypoint: Please choose a different CONTAINER_USER or adjust the image." >&2
exit 1
fi
usermod -u "$TARGET_UID" mysql
fi
# Ensure group lookup after potential changes
target_group_name="$(getent group "$TARGET_GID" | cut -d: -f1 || echo "$target_group_name")"
# Update ownership on relevant directories if they exist
for path in /var/lib/mysql-runtime /var/lib/mysql /var/lib/mysql-persistent /backups; do
if [ -e "$path" ]; then
@@ -79,91 +79,6 @@ for path in /var/lib/mysql-runtime /var/lib/mysql /var/lib/mysql-persistent /bac
fi
done
# Minimal fix: Restore data from persistent storage on startup and sync on shutdown only
RUNTIME_DIR="/var/lib/mysql-runtime"
PERSISTENT_DIR="/var/lib/mysql-persistent"
sync_datadir() {
if [ ! -d "$RUNTIME_DIR" ]; then
echo "⚠️ Runtime directory not found: $RUNTIME_DIR"
return 1
fi
if [ ! -d "$PERSISTENT_DIR" ]; then
echo "⚠️ Persistent directory not found: $PERSISTENT_DIR"
return 1
fi
user_schema_count="$(find "$RUNTIME_DIR" -mindepth 1 -maxdepth 1 -type d \
! -name mysql \
! -name performance_schema \
! -name information_schema \
! -name sys \
! -name "#innodb_temp" \
! -name "#innodb_redo" 2>/dev/null | wc -l | tr -d ' ')"
if [ "${user_schema_count:-0}" -eq 0 ]; then
echo "⚠️ Runtime data appears empty (system schemas only); skipping sync"
return 0
fi
echo "📦 Syncing MySQL data to persistent storage..."
if command -v rsync >/dev/null 2>&1; then
rsync -a --delete \
--exclude='.restore-completed' \
--exclude='.restore-failed' \
--exclude='.import-completed' \
--exclude='backup.sql' \
"$RUNTIME_DIR"/ "$PERSISTENT_DIR"/
else
# Mirror the runtime state while preserving marker files.
find "$PERSISTENT_DIR" -mindepth 1 -maxdepth 1 \
! -name ".restore-completed" \
! -name ".restore-failed" \
! -name ".import-completed" \
! -name "backup.sql" \
-exec rm -rf {} + 2>/dev/null || true
cp -a "$RUNTIME_DIR"/. "$PERSISTENT_DIR"/
fi
chown -R mysql:"$target_group_name" "$PERSISTENT_DIR"
echo "✅ Sync completed"
}
handle_shutdown() {
echo "🔻 Shutdown signal received"
if command -v mysqladmin >/dev/null 2>&1; then
if mysqladmin -h localhost -u root -p"${MYSQL_ROOT_PASSWORD:-}" shutdown 2>/dev/null; then
echo "✅ MySQL shutdown complete"
sync_datadir || true
else
echo "⚠️ mysqladmin shutdown failed; skipping sync to avoid corruption"
fi
else
echo "⚠️ mysqladmin not found; skipping sync"
fi
if [ -n "${child_pid:-}" ] && kill -0 "$child_pid" 2>/dev/null; then
wait "$child_pid" || true
fi
exit 0
}
# Simple startup restoration
if [ -d "$PERSISTENT_DIR" ]; then
# Check for MySQL data files (exclude marker files starting with .)
if find "$PERSISTENT_DIR" -maxdepth 1 -name "*" ! -name ".*" ! -path "$PERSISTENT_DIR" | grep -q .; then
if [ -d "$RUNTIME_DIR" ] && [ -z "$(ls -A "$RUNTIME_DIR" 2>/dev/null)" ]; then
echo "🔄 Restoring MySQL data from persistent storage..."
cp -a "$PERSISTENT_DIR"/* "$RUNTIME_DIR/" 2>/dev/null || true
chown -R mysql:"$target_group_name" "$RUNTIME_DIR"
echo "✅ Data restored from persistent storage"
fi
fi
fi
# Simple approach: restore on startup only
# Data loss window exists but prevents complete loss on restart
trap handle_shutdown TERM INT
disable_binlog="${MYSQL_DISABLE_BINLOG:-}"
if [ "${disable_binlog}" = "1" ]; then
add_skip_flag=1
@@ -178,6 +93,4 @@ if [ "${disable_binlog}" = "1" ]; then
fi
fi
"$ORIGINAL_ENTRYPOINT" "$@" &
child_pid=$!
wait "$child_pid"
exec "$ORIGINAL_ENTRYPOINT" "$@"

View File

@@ -6,7 +6,17 @@ INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
source "$SCRIPT_DIR/lib/common.sh"
COLOR_RED='\033[0;31m'
COLOR_GREEN='\033[0;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_BLUE='\033[0;34m'
COLOR_RESET='\033[0m'
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
info(){ printf '%b\n' "${COLOR_BLUE}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }
MYSQL_PW=""
PDUMP_FILE=""
@@ -331,4 +341,4 @@ log "Import completed successfully!"
log "Characters on account $TARGET_ACCOUNT: $CHARACTER_COUNT"
[[ -n "$BACKUP_FILE" ]] && log "Backup created: $BACKUP_FILE"
info "Character import from pdump completed. You can now log in and play!"
info "Character import from pdump completed. You can now log in and play!"

View File

@@ -8,18 +8,15 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="$PROJECT_DIR/.env"
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
ENV_PATH="$ENV_FILE"
DEFAULT_ENV_PATH="$ENV_FILE"
source "$PROJECT_DIR/scripts/bash/project_name.sh"
source "$PROJECT_DIR/scripts/bash/lib/common.sh"
# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
BLUE="${BLUE:-\033[0;34m}"
GREEN="${GREEN:-\033[0;32m}"
YELLOW="${YELLOW:-\033[1;33m}"
NC="${NC:-\033[0m}"
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
show_rebuild_step(){
local step="$1" total="$2" message="$3"
@@ -38,6 +35,33 @@ Options:
EOF
}
read_env(){
local key="$1" default="$2" env_path="$ENV_FILE" value
if [ -f "$env_path" ]; then
value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi
if [ -z "$value" ]; then
value="${!key:-}"
fi
if [ -z "$value" ]; then
value="$default"
fi
echo "$value"
}
update_env_value(){
local key="$1" value="$2" env_file="$ENV_FILE"
[ -n "$env_file" ] || return 0
if [ ! -f "$env_file" ]; then
printf '%s=%s\n' "$key" "$value" >> "$env_file"
return 0
fi
if grep -q "^${key}=" "$env_file"; then
sed -i "s|^${key}=.*|${key}=${value}|" "$env_file"
else
printf '\n%s=%s\n' "$key" "$value" >> "$env_file"
fi
}
find_image_with_suffix(){
local suffix="$1"
@@ -319,6 +343,20 @@ echo " • Source date: $BUILD_SOURCE_DATE"
# Get image names and tags from .env.template
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
get_template_value() {
local key="$1"
local fallback="$2"
if [ -f "$TEMPLATE_FILE" ]; then
local value
value=$(grep "^${key}=" "$TEMPLATE_FILE" | head -1 | cut -d'=' -f2- | sed 's/^"\(.*\)"$/\1/')
if [[ "$value" =~ ^\$\{[^}]*:-([^}]*)\}$ ]]; then
value="${BASH_REMATCH[1]}"
fi
[ -n "$value" ] && echo "$value" || echo "$fallback"
else
echo "$fallback"
fi
}
strip_tag(){
local image="$1"
@@ -370,13 +408,13 @@ tag_if_exists(){
return 1
}
SOURCE_IMAGE_TAG="$(read_env DOCKER_IMAGE_TAG "$(get_template_value "DOCKER_IMAGE_TAG" "$TEMPLATE_FILE" "master")")"
SOURCE_IMAGE_TAG="$(read_env DOCKER_IMAGE_TAG "$(get_template_value "DOCKER_IMAGE_TAG" "master")")"
[ -z "$SOURCE_IMAGE_TAG" ] && SOURCE_IMAGE_TAG="master"
AUTHSERVER_BASE_REPO="$(strip_tag "$(read_env AC_AUTHSERVER_IMAGE_BASE "$(get_template_value "AC_AUTHSERVER_IMAGE_BASE" "$TEMPLATE_FILE" "acore/ac-wotlk-authserver")")")"
WORLDSERVER_BASE_REPO="$(strip_tag "$(read_env AC_WORLDSERVER_IMAGE_BASE "$(get_template_value "AC_WORLDSERVER_IMAGE_BASE" "$TEMPLATE_FILE" "acore/ac-wotlk-worldserver")")")"
DB_IMPORT_BASE_REPO="$(strip_tag "$(read_env AC_DB_IMPORT_IMAGE_BASE "$(get_template_value "AC_DB_IMPORT_IMAGE_BASE" "$TEMPLATE_FILE" "acore/ac-wotlk-db-import")")")"
CLIENT_DATA_BASE_REPO="$(strip_tag "$(read_env AC_CLIENT_DATA_IMAGE_BASE "$(get_template_value "AC_CLIENT_DATA_IMAGE_BASE" "$TEMPLATE_FILE" "acore/ac-wotlk-client-data")")")"
AUTHSERVER_BASE_REPO="$(strip_tag "$(read_env AC_AUTHSERVER_IMAGE_BASE "$(get_template_value "AC_AUTHSERVER_IMAGE_BASE" "acore/ac-wotlk-authserver")")")"
WORLDSERVER_BASE_REPO="$(strip_tag "$(read_env AC_WORLDSERVER_IMAGE_BASE "$(get_template_value "AC_WORLDSERVER_IMAGE_BASE" "acore/ac-wotlk-worldserver")")")"
DB_IMPORT_BASE_REPO="$(strip_tag "$(read_env AC_DB_IMPORT_IMAGE_BASE "$(get_template_value "AC_DB_IMPORT_IMAGE_BASE" "acore/ac-wotlk-db-import")")")"
CLIENT_DATA_BASE_REPO="$(strip_tag "$(read_env AC_CLIENT_DATA_IMAGE_BASE "$(get_template_value "AC_CLIENT_DATA_IMAGE_BASE" "acore/ac-wotlk-client-data")")")"
BUILT_AUTHSERVER_IMAGE="$AUTHSERVER_BASE_REPO:$SOURCE_IMAGE_TAG"
BUILT_WORLDSERVER_IMAGE="$WORLDSERVER_BASE_REPO:$SOURCE_IMAGE_TAG"

View File

@@ -1,261 +0,0 @@
# CLI parsing for setup.sh
init_cli_defaults() {
CLI_DEPLOYMENT_TYPE=""
CLI_PERMISSION_SCHEME=""
CLI_CUSTOM_UID=""
CLI_CUSTOM_GID=""
CLI_SERVER_ADDRESS=""
CLI_REALM_PORT=""
CLI_AUTH_PORT=""
CLI_SOAP_PORT=""
CLI_MYSQL_PORT=""
CLI_MYSQL_PASSWORD=""
CLI_STORAGE_PATH=""
CLI_BACKUP_DAYS=""
CLI_BACKUP_HOURS=""
CLI_BACKUP_TIME=""
CLI_MODULE_MODE=""
CLI_MODULE_PRESET=""
CLI_PLAYERBOT_ENABLED=""
CLI_PLAYERBOT_MIN=""
CLI_PLAYERBOT_MAX=""
CLI_CONFIG_PRESET=""
FORCE_OVERWRITE=0
CLI_ENABLE_MODULES_RAW=()
}
print_help() {
cat <<'HELP'
Usage: ./setup.sh [options]
Description:
Interactive wizard that generates .env for the
profiles-based compose. Prompts for deployment type, ports, storage,
MySQL credentials, backup retention, and module presets or manual
toggles.
Options:
-h, --help Show this help message and exit
--non-interactive Use defaults/arguments without prompting
--deployment-type TYPE Deployment type: local, lan, or public
--permission-scheme SCHEME Permissions: local, nfs, or custom
--custom-uid UID UID when --permission-scheme=custom
--custom-gid GID GID when --permission-scheme=custom
--server-address ADDRESS Realm/public address
--realm-port PORT Client connection port (default 8215)
--auth-port PORT Authserver external port (default 3784)
--soap-port PORT SOAP external port (default 7778)
--mysql-port PORT MySQL external port (default 64306)
--mysql-password PASSWORD MySQL root password (default azerothcore123)
--storage-path PATH Storage directory
--backup-retention-days N Daily backup retention (default 3)
--backup-retention-hours N Hourly backup retention (default 6)
--backup-daily-time HH Daily backup hour 00-23 (default 09)
--module-mode MODE suggested, playerbots, manual, or none
--module-config NAME Use preset NAME from config/module-profiles/<NAME>.json
--server-config NAME Use server preset NAME from config/presets/<NAME>.conf
--enable-modules LIST Comma-separated module list (MODULE_* or shorthand)
--playerbot-enabled 0|1 Override PLAYERBOT_ENABLED flag
--playerbot-min-bots N Override PLAYERBOT_MIN_BOTS value
--playerbot-max-bots N Override PLAYERBOT_MAX_BOTS value
--force Overwrite existing .env without prompting
HELP
}
parse_cli_args() {
while [[ $# -gt 0 ]]; do
case "$1" in
-h|--help)
print_help
exit 0
;;
--non-interactive)
NON_INTERACTIVE=1
shift
;;
--deployment-type)
[[ $# -ge 2 ]] || { say ERROR "--deployment-type requires a value"; exit 1; }
CLI_DEPLOYMENT_TYPE="$2"; shift 2
;;
--deployment-type=*)
CLI_DEPLOYMENT_TYPE="${1#*=}"; shift
;;
--permission-scheme)
[[ $# -ge 2 ]] || { say ERROR "--permission-scheme requires a value"; exit 1; }
CLI_PERMISSION_SCHEME="$2"; shift 2
;;
--permission-scheme=*)
CLI_PERMISSION_SCHEME="${1#*=}"; shift
;;
--custom-uid)
[[ $# -ge 2 ]] || { say ERROR "--custom-uid requires a value"; exit 1; }
CLI_CUSTOM_UID="$2"; shift 2
;;
--custom-uid=*)
CLI_CUSTOM_UID="${1#*=}"; shift
;;
--custom-gid)
[[ $# -ge 2 ]] || { say ERROR "--custom-gid requires a value"; exit 1; }
CLI_CUSTOM_GID="$2"; shift 2
;;
--custom-gid=*)
CLI_CUSTOM_GID="${1#*=}"; shift
;;
--server-address)
[[ $# -ge 2 ]] || { say ERROR "--server-address requires a value"; exit 1; }
CLI_SERVER_ADDRESS="$2"; shift 2
;;
--server-address=*)
CLI_SERVER_ADDRESS="${1#*=}"; shift
;;
--realm-port)
[[ $# -ge 2 ]] || { say ERROR "--realm-port requires a value"; exit 1; }
CLI_REALM_PORT="$2"; shift 2
;;
--realm-port=*)
CLI_REALM_PORT="${1#*=}"; shift
;;
--auth-port)
[[ $# -ge 2 ]] || { say ERROR "--auth-port requires a value"; exit 1; }
CLI_AUTH_PORT="$2"; shift 2
;;
--auth-port=*)
CLI_AUTH_PORT="${1#*=}"; shift
;;
--soap-port)
[[ $# -ge 2 ]] || { say ERROR "--soap-port requires a value"; exit 1; }
CLI_SOAP_PORT="$2"; shift 2
;;
--soap-port=*)
CLI_SOAP_PORT="${1#*=}"; shift
;;
--mysql-port)
[[ $# -ge 2 ]] || { say ERROR "--mysql-port requires a value"; exit 1; }
CLI_MYSQL_PORT="$2"; shift 2
;;
--mysql-port=*)
CLI_MYSQL_PORT="${1#*=}"; shift
;;
--mysql-password)
[[ $# -ge 2 ]] || { say ERROR "--mysql-password requires a value"; exit 1; }
CLI_MYSQL_PASSWORD="$2"; shift 2
;;
--mysql-password=*)
CLI_MYSQL_PASSWORD="${1#*=}"; shift
;;
--storage-path)
[[ $# -ge 2 ]] || { say ERROR "--storage-path requires a value"; exit 1; }
CLI_STORAGE_PATH="$2"; shift 2
;;
--storage-path=*)
CLI_STORAGE_PATH="${1#*=}"; shift
;;
--backup-retention-days)
[[ $# -ge 2 ]] || { say ERROR "--backup-retention-days requires a value"; exit 1; }
CLI_BACKUP_DAYS="$2"; shift 2
;;
--backup-retention-days=*)
CLI_BACKUP_DAYS="${1#*=}"; shift
;;
--backup-retention-hours)
[[ $# -ge 2 ]] || { say ERROR "--backup-retention-hours requires a value"; exit 1; }
CLI_BACKUP_HOURS="$2"; shift 2
;;
--backup-retention-hours=*)
CLI_BACKUP_HOURS="${1#*=}"; shift
;;
--backup-daily-time)
[[ $# -ge 2 ]] || { say ERROR "--backup-daily-time requires a value"; exit 1; }
CLI_BACKUP_TIME="$2"; shift 2
;;
--backup-daily-time=*)
CLI_BACKUP_TIME="${1#*=}"; shift
;;
--module-mode)
[[ $# -ge 2 ]] || { say ERROR "--module-mode requires a value"; exit 1; }
CLI_MODULE_MODE="$2"; shift 2
;;
--module-mode=*)
CLI_MODULE_MODE="${1#*=}"; shift
;;
--module-config)
[[ $# -ge 2 ]] || { say ERROR "--module-config requires a value"; exit 1; }
CLI_MODULE_PRESET="$2"; shift 2
;;
--module-config=*)
CLI_MODULE_PRESET="${1#*=}"; shift
;;
--server-config)
[[ $# -ge 2 ]] || { say ERROR "--server-config requires a value"; exit 1; }
CLI_CONFIG_PRESET="$2"; shift 2
;;
--server-config=*)
CLI_CONFIG_PRESET="${1#*=}"; shift
;;
--enable-modules)
[[ $# -ge 2 ]] || { say ERROR "--enable-modules requires a value"; exit 1; }
CLI_ENABLE_MODULES_RAW+=("$2"); shift 2
;;
--enable-modules=*)
CLI_ENABLE_MODULES_RAW+=("${1#*=}"); shift
;;
--playerbot-enabled)
[[ $# -ge 2 ]] || { say ERROR "--playerbot-enabled requires 0 or 1"; exit 1; }
CLI_PLAYERBOT_ENABLED="$2"; shift 2
;;
--playerbot-enabled=*)
CLI_PLAYERBOT_ENABLED="${1#*=}"; shift
;;
--playerbot-min-bots)
[[ $# -ge 2 ]] || { say ERROR "--playerbot-min-bots requires a value"; exit 1; }
CLI_PLAYERBOT_MIN="$2"; shift 2
;;
--playerbot-min-bots=*)
CLI_PLAYERBOT_MIN="${1#*=}"; shift
;;
--playerbot-max-bots)
[[ $# -ge 2 ]] || { say ERROR "--playerbot-max-bots requires a value"; exit 1; }
CLI_PLAYERBOT_MAX="$2"; shift 2
;;
--playerbot-max-bots=*)
CLI_PLAYERBOT_MAX="${1#*=}"; shift
;;
--force)
FORCE_OVERWRITE=1
shift
;;
*)
echo "Unknown argument: $1" >&2
echo "Use --help for usage" >&2
exit 1
;;
esac
done
}
apply_cli_module_flags() {
# setup.sh -> scripts/bash/setup/modules.sh (normalize_module_name)
# setup.sh -> scripts/bash/setup/ui.sh (say)
if [ ${#CLI_ENABLE_MODULES_RAW[@]} -gt 0 ]; then
local raw part norm
for raw in "${CLI_ENABLE_MODULES_RAW[@]}"; do
IFS=',' read -ra parts <<<"$raw"
for part in "${parts[@]}"; do
part="${part//[[:space:]]/}"
[ -z "$part" ] && continue
norm="$(normalize_module_name "$part")"
if [ -z "${KNOWN_MODULE_LOOKUP[$norm]}" ]; then
say WARNING "Ignoring unknown module identifier: ${part}"
continue
fi
MODULE_ENABLE_SET["$norm"]=1
done
done
unset raw part norm parts
fi
if [ ${#CLI_ENABLE_MODULES_RAW[@]} -gt 0 ] && [ -z "$CLI_MODULE_MODE" ]; then
CLI_MODULE_MODE="manual"
fi
}

View File

@@ -1,223 +0,0 @@
# Interactive configuration flow for setup.sh
select_deployment_type() {
# setup.sh -> scripts/bash/setup/ui.sh (say)
say HEADER "DEPLOYMENT TYPE"
echo "1) 🏠 Local Development (${DEFAULT_LOCAL_ADDRESS})"
echo "2) 🌐 LAN Server (local network IP) (autodetect)"
echo "3) ☁️ Public Server (domain or public IP) (manual)"
local DEPLOYMENT_TYPE_INPUT="${CLI_DEPLOYMENT_TYPE}"
if [ "$NON_INTERACTIVE" = "1" ] && [ -z "$DEPLOYMENT_TYPE_INPUT" ]; then
DEPLOYMENT_TYPE_INPUT="local"
fi
while true; do
if [ -z "$DEPLOYMENT_TYPE_INPUT" ]; then
read -p "$(echo -e "${YELLOW}🔧 Select deployment type [1-3]: ${NC}")" DEPLOYMENT_TYPE_INPUT
fi
case "${DEPLOYMENT_TYPE_INPUT,,}" in
1|local)
DEPLOYMENT_TYPE=local
;;
2|lan)
DEPLOYMENT_TYPE=lan
;;
3|public)
DEPLOYMENT_TYPE=public
;;
*)
if [ -n "$CLI_DEPLOYMENT_TYPE" ] || [ "$NON_INTERACTIVE" = "1" ]; then
say ERROR "Invalid deployment type: ${DEPLOYMENT_TYPE_INPUT}"
exit 1
fi
say ERROR "Please select 1, 2, or 3"
DEPLOYMENT_TYPE_INPUT=""
continue
;;
esac
break
done
if [ -n "$CLI_DEPLOYMENT_TYPE" ] || [ "$NON_INTERACTIVE" = "1" ]; then
say INFO "Deployment type set to ${DEPLOYMENT_TYPE}."
fi
}
configure_server() {
# setup.sh -> scripts/bash/setup/ui.sh (say, ask, validate_ip, validate_port)
say HEADER "SERVER CONFIGURATION"
if [ -n "$CLI_SERVER_ADDRESS" ]; then
SERVER_ADDRESS="$CLI_SERVER_ADDRESS"
elif [ "$DEPLOYMENT_TYPE" = "local" ]; then
SERVER_ADDRESS=$DEFAULT_LOCAL_ADDRESS
elif [ "$DEPLOYMENT_TYPE" = "lan" ]; then
local LAN_IP
LAN_IP=$(ip route get $ROUTE_DETECTION_IP 2>/dev/null | awk 'NR==1{print $7}')
SERVER_ADDRESS=$(ask "Enter server IP address" "${CLI_SERVER_ADDRESS:-${LAN_IP:-$DEFAULT_FALLBACK_LAN_IP}}" validate_ip)
else
SERVER_ADDRESS=$(ask "Enter server address (IP or domain)" "${CLI_SERVER_ADDRESS:-$DEFAULT_DOMAIN_PLACEHOLDER}" )
fi
REALM_PORT=$(ask "Enter client connection port" "${CLI_REALM_PORT:-$DEFAULT_REALM_PORT}" validate_port)
AUTH_EXTERNAL_PORT=$(ask "Enter auth server port" "${CLI_AUTH_PORT:-$DEFAULT_AUTH_PORT}" validate_port)
SOAP_EXTERNAL_PORT=$(ask "Enter SOAP API port" "${CLI_SOAP_PORT:-$DEFAULT_SOAP_PORT}" validate_port)
MYSQL_EXTERNAL_PORT=$(ask "Enter MySQL external port" "${CLI_MYSQL_PORT:-$DEFAULT_MYSQL_PORT}" validate_port)
}
choose_permission_scheme() {
# setup.sh -> scripts/bash/setup/ui.sh (say, ask, validate_number)
say HEADER "PERMISSION SCHEME"
local CURRENT_UID CURRENT_GID CURRENT_USER_PAIR CURRENT_USER_NAME CURRENT_GROUP_NAME
CURRENT_UID="$(id -u 2>/dev/null || echo 1000)"
CURRENT_GID="$(id -g 2>/dev/null || echo 1000)"
CURRENT_USER_NAME="$(id -un 2>/dev/null || echo user)"
CURRENT_GROUP_NAME="$(id -gn 2>/dev/null || echo users)"
CURRENT_USER_PAIR="${CURRENT_UID}:${CURRENT_GID}"
echo "1) 🏠 Local Root (${PERMISSION_LOCAL_USER})"
echo "2) 🗂️ Current User (${CURRENT_USER_NAME}:${CURRENT_GROUP_NAME}${CURRENT_USER_PAIR})"
echo "3) ⚙️ Custom"
local PERMISSION_SCHEME_INPUT="${CLI_PERMISSION_SCHEME}"
if [ "$NON_INTERACTIVE" = "1" ] && [ -z "$PERMISSION_SCHEME_INPUT" ]; then
PERMISSION_SCHEME_INPUT="local"
fi
while true; do
if [ -z "$PERMISSION_SCHEME_INPUT" ]; then
read -p "$(echo -e "${YELLOW}🔧 Select permission scheme [1-3]: ${NC}")" PERMISSION_SCHEME_INPUT
fi
case "${PERMISSION_SCHEME_INPUT,,}" in
1|local)
CONTAINER_USER="$PERMISSION_LOCAL_USER"
PERMISSION_SCHEME_NAME="local"
;;
2|nfs|user)
CONTAINER_USER="$CURRENT_USER_PAIR"
PERMISSION_SCHEME_NAME="user"
;;
3|custom)
local uid gid
uid="${CLI_CUSTOM_UID:-$(ask "Enter PUID (user id)" $DEFAULT_CUSTOM_UID validate_number)}"
gid="${CLI_CUSTOM_GID:-$(ask "Enter PGID (group id)" $DEFAULT_CUSTOM_GID validate_number)}"
CONTAINER_USER="${uid}:${gid}"
PERMISSION_SCHEME_NAME="custom"
;;
*)
if [ -n "$CLI_PERMISSION_SCHEME" ] || [ "$NON_INTERACTIVE" = "1" ]; then
say ERROR "Invalid permission scheme: ${PERMISSION_SCHEME_INPUT}"
exit 1
fi
say ERROR "Please select 1, 2, or 3"
PERMISSION_SCHEME_INPUT=""
continue
;;
esac
break
done
if [ -n "$CLI_PERMISSION_SCHEME" ] || [ "$NON_INTERACTIVE" = "1" ]; then
say INFO "Permission scheme set to ${PERMISSION_SCHEME_NAME:-$PERMISSION_SCHEME_INPUT}."
fi
}
configure_database() {
# setup.sh -> scripts/bash/setup/ui.sh (say, ask)
say HEADER "DATABASE CONFIGURATION"
MYSQL_ROOT_PASSWORD=$(ask "Enter MySQL root password" "${CLI_MYSQL_PASSWORD:-$DEFAULT_MYSQL_PASSWORD}")
}
configure_storage() {
# setup.sh -> scripts/bash/setup/ui.sh (say, ask)
say HEADER "STORAGE CONFIGURATION"
if [ -n "$CLI_STORAGE_PATH" ]; then
STORAGE_PATH="$CLI_STORAGE_PATH"
elif [ "$NON_INTERACTIVE" = "1" ]; then
if [ "$DEPLOYMENT_TYPE" = "local" ]; then
STORAGE_PATH=$DEFAULT_LOCAL_STORAGE
else
STORAGE_PATH=$DEFAULT_MOUNT_STORAGE
fi
else
echo "1) 💾 ${DEFAULT_LOCAL_STORAGE} (local)"
echo "2) 🌐 ${DEFAULT_NFS_STORAGE} (NFS)"
echo "3) 📁 Custom"
while true; do
read -p "$(echo -e "${YELLOW}🔧 Select storage option [1-3]: ${NC}")" s
case "$s" in
1) STORAGE_PATH=$DEFAULT_LOCAL_STORAGE; break;;
2) STORAGE_PATH=$DEFAULT_NFS_STORAGE; break;;
3) STORAGE_PATH=$(ask "Enter custom storage path" "$DEFAULT_MOUNT_STORAGE"); break;;
*) say ERROR "Please select 1, 2, or 3";;
esac
done
fi
say INFO "Storage path set to ${STORAGE_PATH}"
}
configure_backups() {
# setup.sh -> scripts/bash/setup/ui.sh (say, ask, validate_number)
say HEADER "BACKUP CONFIGURATION"
BACKUP_RETENTION_DAYS=$(ask "Daily backups retention (days)" "${CLI_BACKUP_DAYS:-$DEFAULT_BACKUP_DAYS}" validate_number)
BACKUP_RETENTION_HOURS=$(ask "Hourly backups retention (hours)" "${CLI_BACKUP_HOURS:-$DEFAULT_BACKUP_HOURS}" validate_number)
BACKUP_DAILY_TIME=$(ask "Daily backup hour (00-23, UTC)" "${CLI_BACKUP_TIME:-$DEFAULT_BACKUP_TIME}" validate_number)
}
select_server_preset() {
# setup.sh -> scripts/bash/setup/ui.sh (say, ask)
if [ "$ENABLE_CONFIG_PRESETS" = "1" ]; then
say HEADER "SERVER CONFIGURATION PRESET"
if [ -n "$CLI_CONFIG_PRESET" ]; then
SERVER_CONFIG_PRESET="$CLI_CONFIG_PRESET"
say INFO "Using preset from command line: $SERVER_CONFIG_PRESET"
return 0
fi
declare -A CONFIG_PRESET_NAMES=()
declare -A CONFIG_PRESET_DESCRIPTIONS=()
declare -A CONFIG_MENU_INDEX=()
local config_dir="$SCRIPT_DIR/config/presets"
local menu_index=1
echo "Choose a server configuration preset:"
# setup.sh -> scripts/python/parse-config-presets.py (preset metadata)
if [ -x "$SCRIPT_DIR/scripts/python/parse-config-presets.py" ] && [ -d "$config_dir" ]; then
while IFS=$'\t' read -r preset_key preset_name preset_desc; do
[ -n "$preset_key" ] || continue
CONFIG_PRESET_NAMES["$preset_key"]="$preset_name"
CONFIG_PRESET_DESCRIPTIONS["$preset_key"]="$preset_desc"
CONFIG_MENU_INDEX[$menu_index]="$preset_key"
echo "$menu_index) $preset_name"
echo " $preset_desc"
menu_index=$((menu_index + 1))
done < <(python3 "$SCRIPT_DIR/scripts/python/parse-config-presets.py" list --presets-dir "$config_dir")
else
# Fallback if parser script not available
CONFIG_MENU_INDEX[1]="none"
CONFIG_PRESET_NAMES["none"]="Default (No Preset)"
CONFIG_PRESET_DESCRIPTIONS["none"]="Use default AzerothCore settings"
echo "1) Default (No Preset)"
echo " Use default AzerothCore settings without any modifications"
fi
local max_config_option=$((menu_index - 1))
if [ "$NON_INTERACTIVE" = "1" ]; then
SERVER_CONFIG_PRESET="none"
say INFO "Non-interactive mode: Using default configuration preset"
return 0
fi
while true; do
read -p "$(echo -e "${YELLOW}🎯 Select server configuration [1-$max_config_option]: ${NC}")" choice
if [[ "$choice" =~ ^[0-9]+$ ]] && [ "$choice" -ge 1 ] && [ "$choice" -le "$max_config_option" ]; then
SERVER_CONFIG_PRESET="${CONFIG_MENU_INDEX[$choice]}"
local chosen_name="${CONFIG_PRESET_NAMES[$SERVER_CONFIG_PRESET]}"
say INFO "Selected: $chosen_name"
break
else
say ERROR "Please select a number between 1 and $max_config_option"
fi
done
else
# Config presets disabled - use default
SERVER_CONFIG_PRESET="none"
say INFO "Server configuration presets disabled - using default settings"
fi
}

View File

@@ -1,170 +0,0 @@
# Setup defaults and template-backed constants for setup.sh
# setup.sh -> scripts/bash/lib/common.sh (shared helpers)
source "$SCRIPT_DIR/scripts/bash/lib/common.sh"
# Feature Flags
# Set to 0 to disable server configuration preset selection
ENABLE_CONFIG_PRESETS="${ENABLE_CONFIG_PRESETS:-0}"
sanitize_project_name(){
project_name::sanitize "$1"
}
resolve_project_image_tag(){
local project="$1" tag="$2"
echo "${project}:${tag}"
}
declare -A TEMPLATE_VALUE_MAP=(
[DEFAULT_MYSQL_PASSWORD]=MYSQL_ROOT_PASSWORD
[DEFAULT_REALM_PORT]=WORLD_EXTERNAL_PORT
[DEFAULT_AUTH_PORT]=AUTH_EXTERNAL_PORT
[DEFAULT_SOAP_PORT]=SOAP_EXTERNAL_PORT
[DEFAULT_MYSQL_PORT]=MYSQL_EXTERNAL_PORT
[DEFAULT_PLAYERBOT_MIN]=PLAYERBOT_MIN_BOTS
[DEFAULT_PLAYERBOT_MAX]=PLAYERBOT_MAX_BOTS
[DEFAULT_LOCAL_STORAGE]=STORAGE_PATH
[DEFAULT_BACKUP_PATH]=BACKUP_PATH
[DEFAULT_COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED]=COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED
[DEFAULT_COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED]=COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED
[PERMISSION_LOCAL_USER]=DEFAULT_PERMISSION_LOCAL_USER
[PERMISSION_NFS_USER]=DEFAULT_PERMISSION_NFS_USER
[DEFAULT_CUSTOM_UID]=DEFAULT_CUSTOM_UID
[DEFAULT_CUSTOM_GID]=DEFAULT_CUSTOM_GID
[DEFAULT_LOCAL_ADDRESS]=SERVER_ADDRESS
[DEFAULT_BACKUP_DAYS]=BACKUP_RETENTION_DAYS
[DEFAULT_BACKUP_HOURS]=BACKUP_RETENTION_HOURS
[DEFAULT_BACKUP_TIME]=BACKUP_DAILY_TIME
[DEFAULT_BACKUP_HEALTHCHECK_MAX_MINUTES]=BACKUP_HEALTHCHECK_MAX_MINUTES
[DEFAULT_BACKUP_HEALTHCHECK_GRACE_SECONDS]=BACKUP_HEALTHCHECK_GRACE_SECONDS
[DEFAULT_NFS_STORAGE]=DEFAULT_NFS_STORAGE_PATH
[DEFAULT_MOUNT_STORAGE]=DEFAULT_MOUNT_STORAGE_PATH
[DEFAULT_MYSQL_IMAGE]=MYSQL_IMAGE
[DEFAULT_AC_DB_IMPORT_IMAGE]=AC_DB_IMPORT_IMAGE
[DEFAULT_AC_AUTHSERVER_IMAGE]=AC_AUTHSERVER_IMAGE
[DEFAULT_AC_WORLDSERVER_IMAGE]=AC_WORLDSERVER_IMAGE
[DEFAULT_AC_CLIENT_DATA_IMAGE]=AC_CLIENT_DATA_IMAGE
[DEFAULT_DOCKER_IMAGE_TAG]=DOCKER_IMAGE_TAG
[DEFAULT_AUTHSERVER_IMAGE_BASE]=AC_AUTHSERVER_IMAGE_BASE
[DEFAULT_WORLDSERVER_IMAGE_BASE]=AC_WORLDSERVER_IMAGE_BASE
[DEFAULT_DB_IMPORT_IMAGE_BASE]=AC_DB_IMPORT_IMAGE_BASE
[DEFAULT_CLIENT_DATA_IMAGE_BASE]=AC_CLIENT_DATA_IMAGE_BASE
[DEFAULT_AUTH_IMAGE_PLAYERBOTS]=AC_AUTHSERVER_IMAGE_PLAYERBOTS
[DEFAULT_WORLD_IMAGE_PLAYERBOTS]=AC_WORLDSERVER_IMAGE_PLAYERBOTS
[DEFAULT_CLIENT_DATA_IMAGE_PLAYERBOTS]=AC_CLIENT_DATA_IMAGE_PLAYERBOTS
[DEFAULT_AUTH_IMAGE_MODULES]=AC_AUTHSERVER_IMAGE_MODULES
[DEFAULT_WORLD_IMAGE_MODULES]=AC_WORLDSERVER_IMAGE_MODULES
[DEFAULT_ALPINE_GIT_IMAGE]=ALPINE_GIT_IMAGE
[DEFAULT_ALPINE_IMAGE]=ALPINE_IMAGE
[DEFAULT_DB_AUTH_NAME]=DB_AUTH_NAME
[DEFAULT_DB_WORLD_NAME]=DB_WORLD_NAME
[DEFAULT_DB_CHARACTERS_NAME]=DB_CHARACTERS_NAME
[DEFAULT_DB_PLAYERBOTS_NAME]=DB_PLAYERBOTS_NAME
[DEFAULT_CONTAINER_MYSQL]=CONTAINER_MYSQL
[DEFAULT_CONTAINER_DB_IMPORT]=CONTAINER_DB_IMPORT
[DEFAULT_CONTAINER_DB_INIT]=CONTAINER_DB_INIT
[DEFAULT_CONTAINER_BACKUP]=CONTAINER_BACKUP
[DEFAULT_CONTAINER_MODULES]=CONTAINER_MODULES
[DEFAULT_CONTAINER_POST_INSTALL]=CONTAINER_POST_INSTALL
[DEFAULT_COMPOSE_PROJECT_NAME]=COMPOSE_PROJECT_NAME
[DEFAULT_CLIENT_DATA_PATH]=CLIENT_DATA_PATH
[DEFAULT_CLIENT_DATA_CACHE_PATH]=CLIENT_DATA_CACHE_PATH
[DEFAULT_CLIENT_DATA_VERSION]=CLIENT_DATA_VERSION
[DEFAULT_NETWORK_NAME]=NETWORK_NAME
[DEFAULT_NETWORK_SUBNET]=NETWORK_SUBNET
[DEFAULT_NETWORK_GATEWAY]=NETWORK_GATEWAY
[DEFAULT_MYSQL_CHARACTER_SET]=MYSQL_CHARACTER_SET
[DEFAULT_MYSQL_COLLATION]=MYSQL_COLLATION
[DEFAULT_MYSQL_MAX_CONNECTIONS]=MYSQL_MAX_CONNECTIONS
[DEFAULT_MYSQL_INNODB_BUFFER_POOL_SIZE]=MYSQL_INNODB_BUFFER_POOL_SIZE
[DEFAULT_MYSQL_INNODB_LOG_FILE_SIZE]=MYSQL_INNODB_LOG_FILE_SIZE
[DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY]=MYSQL_INNODB_REDO_LOG_CAPACITY
[DEFAULT_MYSQL_RUNTIME_TMPFS_SIZE]=MYSQL_RUNTIME_TMPFS_SIZE
[DEFAULT_MYSQL_DISABLE_BINLOG]=MYSQL_DISABLE_BINLOG
[DEFAULT_MYSQL_CONFIG_DIR]=MYSQL_CONFIG_DIR
[DEFAULT_MYSQL_HOST]=MYSQL_HOST
[DEFAULT_DB_WAIT_RETRIES]=DB_WAIT_RETRIES
[DEFAULT_DB_WAIT_SLEEP]=DB_WAIT_SLEEP
[DEFAULT_DB_RECONNECT_SECONDS]=DB_RECONNECT_SECONDS
[DEFAULT_DB_RECONNECT_ATTEMPTS]=DB_RECONNECT_ATTEMPTS
[DEFAULT_DB_UPDATES_ALLOWED_MODULES]=DB_UPDATES_ALLOWED_MODULES
[DEFAULT_DB_UPDATES_REDUNDANCY]=DB_UPDATES_REDUNDANCY
[DEFAULT_DB_LOGIN_WORKER_THREADS]=DB_LOGIN_WORKER_THREADS
[DEFAULT_DB_WORLD_WORKER_THREADS]=DB_WORLD_WORKER_THREADS
[DEFAULT_DB_CHARACTER_WORKER_THREADS]=DB_CHARACTER_WORKER_THREADS
[DEFAULT_DB_LOGIN_SYNCH_THREADS]=DB_LOGIN_SYNCH_THREADS
[DEFAULT_DB_WORLD_SYNCH_THREADS]=DB_WORLD_SYNCH_THREADS
[DEFAULT_DB_CHARACTER_SYNCH_THREADS]=DB_CHARACTER_SYNCH_THREADS
[DEFAULT_HOST_ZONEINFO_PATH]=HOST_ZONEINFO_PATH
[DEFAULT_ELUNA_SCRIPT_PATH]=AC_ELUNA_SCRIPT_PATH
[DEFAULT_PMA_EXTERNAL_PORT]=PMA_EXTERNAL_PORT
[DEFAULT_PMA_UPLOAD_LIMIT]=PMA_UPLOAD_LIMIT
[DEFAULT_PMA_MEMORY_LIMIT]=PMA_MEMORY_LIMIT
[DEFAULT_PMA_MAX_EXECUTION_TIME]=PMA_MAX_EXECUTION_TIME
[DEFAULT_KEIRA3_EXTERNAL_PORT]=KEIRA3_EXTERNAL_PORT
[DEFAULT_PMA_USER]=PMA_USER
[DEFAULT_PMA_ARBITRARY]=PMA_ARBITRARY
[DEFAULT_PMA_ABSOLUTE_URI]=PMA_ABSOLUTE_URI
[DEFAULT_AUTH_INTERNAL_PORT]=AUTH_PORT
[DEFAULT_WORLD_INTERNAL_PORT]=WORLD_PORT
[DEFAULT_SOAP_INTERNAL_PORT]=SOAP_PORT
[DEFAULT_MYSQL_INTERNAL_PORT]=MYSQL_PORT
[DEFAULT_TZ]=TZ
[DEFAULT_MYSQL_ROOT_HOST]=MYSQL_ROOT_HOST
[DEFAULT_MYSQL_USER]=MYSQL_USER
[DEFAULT_ELUNA_ENABLED]=AC_ELUNA_ENABLED
[DEFAULT_ELUNA_TRACE_BACK]=AC_ELUNA_TRACE_BACK
[DEFAULT_ELUNA_AUTO_RELOAD]=AC_ELUNA_AUTO_RELOAD
[DEFAULT_ELUNA_BYTECODE_CACHE]=AC_ELUNA_BYTECODE_CACHE
[DEFAULT_ELUNA_AUTO_RELOAD_INTERVAL]=AC_ELUNA_AUTO_RELOAD_INTERVAL
[DEFAULT_ELUNA_REQUIRE_PATHS]=AC_ELUNA_REQUIRE_PATHS
[DEFAULT_ELUNA_REQUIRE_CPATHS]=AC_ELUNA_REQUIRE_CPATHS
[DEFAULT_MODULE_ELUNA]=MODULE_ELUNA
)
for __template_var in "${!TEMPLATE_VALUE_MAP[@]}"; do
__template_key="${TEMPLATE_VALUE_MAP[$__template_var]}"
__template_value="$(get_template_value "${__template_key}")"
printf -v "${__template_var}" '%s' "${__template_value}"
readonly "${__template_var}"
done
unset __template_var __template_key __template_value
# Static values
readonly DEFAULT_FALLBACK_LAN_IP="192.168.1.100"
readonly DEFAULT_DOMAIN_PLACEHOLDER="your-domain.com"
# Module preset names (not in template)
readonly DEFAULT_PRESET_SUGGESTED="suggested-modules"
readonly DEFAULT_PRESET_PLAYERBOTS="suggested-modules-playerbots"
# Health check configuration (loaded via loop)
readonly -a HEALTHCHECK_KEYS=(
MYSQL_HEALTHCHECK_INTERVAL
MYSQL_HEALTHCHECK_TIMEOUT
MYSQL_HEALTHCHECK_RETRIES
MYSQL_HEALTHCHECK_START_PERIOD
AUTH_HEALTHCHECK_INTERVAL
AUTH_HEALTHCHECK_TIMEOUT
AUTH_HEALTHCHECK_RETRIES
AUTH_HEALTHCHECK_START_PERIOD
WORLD_HEALTHCHECK_INTERVAL
WORLD_HEALTHCHECK_TIMEOUT
WORLD_HEALTHCHECK_RETRIES
WORLD_HEALTHCHECK_START_PERIOD
BACKUP_HEALTHCHECK_INTERVAL
BACKUP_HEALTHCHECK_TIMEOUT
BACKUP_HEALTHCHECK_RETRIES
BACKUP_HEALTHCHECK_START_PERIOD
)
for __hc_key in "${HEALTHCHECK_KEYS[@]}"; do
__hc_value="$(get_template_value "${__hc_key}")"
printf -v "DEFAULT_${__hc_key}" '%s' "$__hc_value"
readonly "DEFAULT_${__hc_key}"
done
unset __hc_key __hc_value
# Route detection IP (not in template)
readonly ROUTE_DETECTION_IP="1.1.1.1"

View File

@@ -1,300 +0,0 @@
# .env rendering helpers for setup.sh
setup_write_env() {
local ENV_OUT="$(dirname "$0")/.env"
if [ -f "$ENV_OUT" ]; then
say WARNING ".env already exists at $(realpath "$ENV_OUT" 2>/dev/null || echo "$ENV_OUT"). It will be overwritten."
local cont
if [ "$FORCE_OVERWRITE" = "1" ]; then
cont=1
else
cont=$(ask_yn "Continue and overwrite?" n)
fi
[ "$cont" = "1" ] || { say ERROR "Aborted"; exit 1; }
fi
DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME}
HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH}
MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY}
MYSQL_RUNTIME_TMPFS_SIZE=${MYSQL_RUNTIME_TMPFS_SIZE:-$DEFAULT_MYSQL_RUNTIME_TMPFS_SIZE}
COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED=${COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED:-$DEFAULT_COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED}
COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED=${COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED:-$DEFAULT_COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED}
MYSQL_DISABLE_BINLOG=${MYSQL_DISABLE_BINLOG:-$DEFAULT_MYSQL_DISABLE_BINLOG}
MYSQL_CONFIG_DIR=${MYSQL_CONFIG_DIR:-$DEFAULT_MYSQL_CONFIG_DIR}
CLIENT_DATA_PATH=${CLIENT_DATA_PATH:-$DEFAULT_CLIENT_DATA_PATH}
BACKUP_HEALTHCHECK_MAX_MINUTES=${BACKUP_HEALTHCHECK_MAX_MINUTES:-$DEFAULT_BACKUP_HEALTHCHECK_MAX_MINUTES}
BACKUP_HEALTHCHECK_GRACE_SECONDS=${BACKUP_HEALTHCHECK_GRACE_SECONDS:-$DEFAULT_BACKUP_HEALTHCHECK_GRACE_SECONDS}
DB_WAIT_RETRIES=${DB_WAIT_RETRIES:-$DEFAULT_DB_WAIT_RETRIES}
DB_WAIT_SLEEP=${DB_WAIT_SLEEP:-$DEFAULT_DB_WAIT_SLEEP}
DB_RECONNECT_SECONDS=${DB_RECONNECT_SECONDS:-$DEFAULT_DB_RECONNECT_SECONDS}
DB_RECONNECT_ATTEMPTS=${DB_RECONNECT_ATTEMPTS:-$DEFAULT_DB_RECONNECT_ATTEMPTS}
DB_UPDATES_ALLOWED_MODULES=${DB_UPDATES_ALLOWED_MODULES:-$DEFAULT_DB_UPDATES_ALLOWED_MODULES}
DB_UPDATES_REDUNDANCY=${DB_UPDATES_REDUNDANCY:-$DEFAULT_DB_UPDATES_REDUNDANCY}
DB_LOGIN_WORKER_THREADS=${DB_LOGIN_WORKER_THREADS:-$DEFAULT_DB_LOGIN_WORKER_THREADS}
DB_WORLD_WORKER_THREADS=${DB_WORLD_WORKER_THREADS:-$DEFAULT_DB_WORLD_WORKER_THREADS}
DB_CHARACTER_WORKER_THREADS=${DB_CHARACTER_WORKER_THREADS:-$DEFAULT_DB_CHARACTER_WORKER_THREADS}
DB_LOGIN_SYNCH_THREADS=${DB_LOGIN_SYNCH_THREADS:-$DEFAULT_DB_LOGIN_SYNCH_THREADS}
DB_WORLD_SYNCH_THREADS=${DB_WORLD_SYNCH_THREADS:-$DEFAULT_DB_WORLD_SYNCH_THREADS}
DB_CHARACTER_SYNCH_THREADS=${DB_CHARACTER_SYNCH_THREADS:-$DEFAULT_DB_CHARACTER_SYNCH_THREADS}
MYSQL_HEALTHCHECK_INTERVAL=${MYSQL_HEALTHCHECK_INTERVAL:-$DEFAULT_MYSQL_HEALTHCHECK_INTERVAL}
MYSQL_HEALTHCHECK_TIMEOUT=${MYSQL_HEALTHCHECK_TIMEOUT:-$DEFAULT_MYSQL_HEALTHCHECK_TIMEOUT}
MYSQL_HEALTHCHECK_RETRIES=${MYSQL_HEALTHCHECK_RETRIES:-$DEFAULT_MYSQL_HEALTHCHECK_RETRIES}
MYSQL_HEALTHCHECK_START_PERIOD=${MYSQL_HEALTHCHECK_START_PERIOD:-$DEFAULT_MYSQL_HEALTHCHECK_START_PERIOD}
AUTH_HEALTHCHECK_INTERVAL=${AUTH_HEALTHCHECK_INTERVAL:-$DEFAULT_AUTH_HEALTHCHECK_INTERVAL}
AUTH_HEALTHCHECK_TIMEOUT=${AUTH_HEALTHCHECK_TIMEOUT:-$DEFAULT_AUTH_HEALTHCHECK_TIMEOUT}
AUTH_HEALTHCHECK_RETRIES=${AUTH_HEALTHCHECK_RETRIES:-$DEFAULT_AUTH_HEALTHCHECK_RETRIES}
AUTH_HEALTHCHECK_START_PERIOD=${AUTH_HEALTHCHECK_START_PERIOD:-$DEFAULT_AUTH_HEALTHCHECK_START_PERIOD}
WORLD_HEALTHCHECK_INTERVAL=${WORLD_HEALTHCHECK_INTERVAL:-$DEFAULT_WORLD_HEALTHCHECK_INTERVAL}
WORLD_HEALTHCHECK_TIMEOUT=${WORLD_HEALTHCHECK_TIMEOUT:-$DEFAULT_WORLD_HEALTHCHECK_TIMEOUT}
WORLD_HEALTHCHECK_RETRIES=${WORLD_HEALTHCHECK_RETRIES:-$DEFAULT_WORLD_HEALTHCHECK_RETRIES}
WORLD_HEALTHCHECK_START_PERIOD=${WORLD_HEALTHCHECK_START_PERIOD:-$DEFAULT_WORLD_HEALTHCHECK_START_PERIOD}
for hc_key in "${HEALTHCHECK_KEYS[@]}"; do
default_var="DEFAULT_${hc_key}"
printf -v "$hc_key" '%s' "${!hc_key:-${!default_var}}"
done
unset hc_key default_var
MODULE_ELUNA=${MODULE_ELUNA:-$DEFAULT_MODULE_ELUNA}
BACKUP_PATH=${BACKUP_PATH:-$DEFAULT_BACKUP_PATH}
local project_image_prefix
project_image_prefix="$(sanitize_project_name "$DEFAULT_PROJECT_NAME")"
if [ "$STACK_IMAGE_MODE" = "playerbots" ]; then
AC_AUTHSERVER_IMAGE_PLAYERBOTS_VALUE="$(resolve_project_image_tag "$project_image_prefix" "authserver-playerbots")"
AC_WORLDSERVER_IMAGE_PLAYERBOTS_VALUE="$(resolve_project_image_tag "$project_image_prefix" "worldserver-playerbots")"
AC_DB_IMPORT_IMAGE_VALUE="$(resolve_project_image_tag "$project_image_prefix" "db-import-playerbots")"
AC_CLIENT_DATA_IMAGE_PLAYERBOTS_VALUE="$(resolve_project_image_tag "$project_image_prefix" "client-data-playerbots")"
else
AC_AUTHSERVER_IMAGE_PLAYERBOTS_VALUE="$DEFAULT_AUTH_IMAGE_PLAYERBOTS"
AC_WORLDSERVER_IMAGE_PLAYERBOTS_VALUE="$DEFAULT_WORLD_IMAGE_PLAYERBOTS"
AC_DB_IMPORT_IMAGE_VALUE="$DEFAULT_AC_DB_IMPORT_IMAGE"
AC_CLIENT_DATA_IMAGE_PLAYERBOTS_VALUE="$DEFAULT_CLIENT_DATA_IMAGE_PLAYERBOTS"
fi
AC_AUTHSERVER_IMAGE_MODULES_VALUE="$(resolve_project_image_tag "$project_image_prefix" "authserver-modules-latest")"
AC_WORLDSERVER_IMAGE_MODULES_VALUE="$(resolve_project_image_tag "$project_image_prefix" "worldserver-modules-latest")"
{
cat <<EOF
# Generated by setup.sh
# Compose overrides (set to 1 to include matching file under compose-overrides/)
# mysql-expose.yml -> exposes MySQL externally via COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED
# worldserver-debug-logging.yml -> raises log verbosity via COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED
COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED=$COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED
COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED=$COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED
COMPOSE_PROJECT_NAME=$DEFAULT_PROJECT_NAME
STORAGE_PATH=$STORAGE_PATH
STORAGE_PATH_LOCAL=$LOCAL_STORAGE_ROOT
STORAGE_CONFIG_PATH=$(get_template_value "STORAGE_CONFIG_PATH")
STORAGE_LOGS_PATH=$(get_template_value "STORAGE_LOGS_PATH")
STORAGE_MODULES_PATH=$(get_template_value "STORAGE_MODULES_PATH")
STORAGE_LUA_SCRIPTS_PATH=$(get_template_value "STORAGE_LUA_SCRIPTS_PATH")
STORAGE_MODULES_META_PATH=$(get_template_value "STORAGE_MODULES_META_PATH")
STORAGE_MODULE_SQL_PATH=$(get_template_value "STORAGE_MODULE_SQL_PATH")
STORAGE_INSTALL_MARKERS_PATH=$(get_template_value "STORAGE_INSTALL_MARKERS_PATH")
STORAGE_CLIENT_DATA_PATH=$(get_template_value "STORAGE_CLIENT_DATA_PATH")
STORAGE_LOCAL_SOURCE_PATH=$(get_template_value "STORAGE_LOCAL_SOURCE_PATH")
BACKUP_PATH=$BACKUP_PATH
TZ=$DEFAULT_TZ
# Database
MYSQL_IMAGE=$DEFAULT_MYSQL_IMAGE
MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD
MYSQL_ROOT_HOST=$DEFAULT_MYSQL_ROOT_HOST
MYSQL_USER=$DEFAULT_MYSQL_USER
MYSQL_PORT=$DEFAULT_MYSQL_INTERNAL_PORT
MYSQL_EXTERNAL_PORT=$MYSQL_EXTERNAL_PORT
MYSQL_DISABLE_BINLOG=${MYSQL_DISABLE_BINLOG:-$DEFAULT_MYSQL_DISABLE_BINLOG}
MYSQL_CONFIG_DIR=${MYSQL_CONFIG_DIR:-$DEFAULT_MYSQL_CONFIG_DIR}
MYSQL_CHARACTER_SET=$DEFAULT_MYSQL_CHARACTER_SET
MYSQL_COLLATION=$DEFAULT_MYSQL_COLLATION
MYSQL_MAX_CONNECTIONS=$DEFAULT_MYSQL_MAX_CONNECTIONS
MYSQL_INNODB_BUFFER_POOL_SIZE=$DEFAULT_MYSQL_INNODB_BUFFER_POOL_SIZE
MYSQL_INNODB_LOG_FILE_SIZE=$DEFAULT_MYSQL_INNODB_LOG_FILE_SIZE
MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY}
MYSQL_RUNTIME_TMPFS_SIZE=${MYSQL_RUNTIME_TMPFS_SIZE:-$DEFAULT_MYSQL_RUNTIME_TMPFS_SIZE}
MYSQL_HOST=$DEFAULT_MYSQL_HOST
DB_WAIT_RETRIES=$DB_WAIT_RETRIES
DB_WAIT_SLEEP=$DB_WAIT_SLEEP
DB_AUTH_NAME=$DEFAULT_DB_AUTH_NAME
DB_WORLD_NAME=$DEFAULT_DB_WORLD_NAME
DB_CHARACTERS_NAME=$DEFAULT_DB_CHARACTERS_NAME
DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME}
AC_DB_IMPORT_IMAGE=$AC_DB_IMPORT_IMAGE_VALUE
# Database Import Settings
DB_RECONNECT_SECONDS=$DB_RECONNECT_SECONDS
DB_RECONNECT_ATTEMPTS=$DB_RECONNECT_ATTEMPTS
DB_UPDATES_ALLOWED_MODULES=$DB_UPDATES_ALLOWED_MODULES
DB_UPDATES_REDUNDANCY=$DB_UPDATES_REDUNDANCY
DB_LOGIN_WORKER_THREADS=$DB_LOGIN_WORKER_THREADS
DB_WORLD_WORKER_THREADS=$DB_WORLD_WORKER_THREADS
DB_CHARACTER_WORKER_THREADS=$DB_CHARACTER_WORKER_THREADS
DB_LOGIN_SYNCH_THREADS=$DB_LOGIN_SYNCH_THREADS
DB_WORLD_SYNCH_THREADS=$DB_WORLD_SYNCH_THREADS
DB_CHARACTER_SYNCH_THREADS=$DB_CHARACTER_SYNCH_THREADS
# Services (images)
AC_AUTHSERVER_IMAGE=$DEFAULT_AC_AUTHSERVER_IMAGE
AC_WORLDSERVER_IMAGE=$DEFAULT_AC_WORLDSERVER_IMAGE
AC_AUTHSERVER_IMAGE_PLAYERBOTS=${AC_AUTHSERVER_IMAGE_PLAYERBOTS_VALUE}
AC_WORLDSERVER_IMAGE_PLAYERBOTS=${AC_WORLDSERVER_IMAGE_PLAYERBOTS_VALUE}
AC_AUTHSERVER_IMAGE_MODULES=${AC_AUTHSERVER_IMAGE_MODULES_VALUE}
AC_WORLDSERVER_IMAGE_MODULES=${AC_WORLDSERVER_IMAGE_MODULES_VALUE}
# Client data images
AC_CLIENT_DATA_IMAGE=$DEFAULT_AC_CLIENT_DATA_IMAGE
AC_CLIENT_DATA_IMAGE_PLAYERBOTS=$AC_CLIENT_DATA_IMAGE_PLAYERBOTS_VALUE
CLIENT_DATA_CACHE_PATH=$DEFAULT_CLIENT_DATA_CACHE_PATH
CLIENT_DATA_PATH=$CLIENT_DATA_PATH
# Build artifacts
DOCKER_IMAGE_TAG=$DEFAULT_DOCKER_IMAGE_TAG
AC_AUTHSERVER_IMAGE_BASE=$DEFAULT_AUTHSERVER_IMAGE_BASE
AC_WORLDSERVER_IMAGE_BASE=$DEFAULT_WORLDSERVER_IMAGE_BASE
AC_DB_IMPORT_IMAGE_BASE=$DEFAULT_DB_IMPORT_IMAGE_BASE
AC_CLIENT_DATA_IMAGE_BASE=$DEFAULT_CLIENT_DATA_IMAGE_BASE
# Container user
CONTAINER_USER=$CONTAINER_USER
# Containers
CONTAINER_MYSQL=$DEFAULT_CONTAINER_MYSQL
CONTAINER_DB_IMPORT=$DEFAULT_CONTAINER_DB_IMPORT
CONTAINER_DB_INIT=$DEFAULT_CONTAINER_DB_INIT
CONTAINER_DB_GUARD=$(get_template_value "CONTAINER_DB_GUARD")
CONTAINER_BACKUP=$DEFAULT_CONTAINER_BACKUP
CONTAINER_MODULES=$DEFAULT_CONTAINER_MODULES
CONTAINER_POST_INSTALL=$DEFAULT_CONTAINER_POST_INSTALL
# Database Guard Defaults
DB_GUARD_RECHECK_SECONDS=$(get_template_value "DB_GUARD_RECHECK_SECONDS")
DB_GUARD_RETRY_SECONDS=$(get_template_value "DB_GUARD_RETRY_SECONDS")
DB_GUARD_WAIT_ATTEMPTS=$(get_template_value "DB_GUARD_WAIT_ATTEMPTS")
DB_GUARD_HEALTH_MAX_AGE=$(get_template_value "DB_GUARD_HEALTH_MAX_AGE")
DB_GUARD_HEALTHCHECK_INTERVAL=$(get_template_value "DB_GUARD_HEALTHCHECK_INTERVAL")
DB_GUARD_HEALTHCHECK_TIMEOUT=$(get_template_value "DB_GUARD_HEALTHCHECK_TIMEOUT")
DB_GUARD_HEALTHCHECK_RETRIES=$(get_template_value "DB_GUARD_HEALTHCHECK_RETRIES")
DB_GUARD_VERIFY_INTERVAL_SECONDS=$(get_template_value "DB_GUARD_VERIFY_INTERVAL_SECONDS")
# Module SQL staging
STAGE_PATH_MODULE_SQL=$(get_template_value "STAGE_PATH_MODULE_SQL")
# Modules rebuild source path
MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH
# SQL Source Overlay
SOURCE_DIR=$(get_template_value "SOURCE_DIR")
AC_SQL_SOURCE_PATH=$(get_template_value "AC_SQL_SOURCE_PATH")
# Ports
AUTH_EXTERNAL_PORT=$AUTH_EXTERNAL_PORT
AUTH_PORT=$DEFAULT_AUTH_INTERNAL_PORT
WORLD_EXTERNAL_PORT=$REALM_PORT
WORLD_PORT=$DEFAULT_WORLD_INTERNAL_PORT
SOAP_EXTERNAL_PORT=$SOAP_EXTERNAL_PORT
SOAP_PORT=$DEFAULT_SOAP_INTERNAL_PORT
# Realm
SERVER_ADDRESS=$SERVER_ADDRESS
REALM_PORT=$REALM_PORT
# Backups
BACKUP_RETENTION_DAYS=$BACKUP_RETENTION_DAYS
BACKUP_RETENTION_HOURS=$BACKUP_RETENTION_HOURS
BACKUP_DAILY_TIME=$BACKUP_DAILY_TIME
BACKUP_INTERVAL_MINUTES=$(get_template_value "BACKUP_INTERVAL_MINUTES")
BACKUP_EXTRA_DATABASES=$(get_template_value "BACKUP_EXTRA_DATABASES")
BACKUP_HEALTHCHECK_MAX_MINUTES=$BACKUP_HEALTHCHECK_MAX_MINUTES
BACKUP_HEALTHCHECK_GRACE_SECONDS=$BACKUP_HEALTHCHECK_GRACE_SECONDS
EOF
echo
echo "# Modules"
for module_key in "${MODULE_KEYS[@]}"; do
local module_value="${!module_key:-0}"
# Only write enabled modules (value=1) to .env
if [ "$module_value" = "1" ]; then
printf "%s=%s\n" "$module_key" "$module_value"
fi
done
cat <<EOF
# Client data
CLIENT_DATA_VERSION=${CLIENT_DATA_VERSION:-$DEFAULT_CLIENT_DATA_VERSION}
# Server configuration
SERVER_CONFIG_PRESET=$SERVER_CONFIG_PRESET
# Playerbot runtime
PLAYERBOT_ENABLED=$PLAYERBOT_ENABLED
PLAYERBOT_MIN_BOTS=$PLAYERBOT_MIN_BOTS
PLAYERBOT_MAX_BOTS=$PLAYERBOT_MAX_BOTS
STACK_IMAGE_MODE=$STACK_IMAGE_MODE
STACK_SOURCE_VARIANT=$STACK_SOURCE_VARIANT
MODULES_ENABLED_LIST=$MODULES_ENABLED_LIST
MODULES_CPP_LIST=$MODULES_CPP_LIST
MODULES_REQUIRES_CUSTOM_BUILD=$MODULES_REQUIRES_CUSTOM_BUILD
MODULES_REQUIRES_PLAYERBOT_SOURCE=$MODULES_REQUIRES_PLAYERBOT_SOURCE
# Eluna
AC_ELUNA_ENABLED=$DEFAULT_ELUNA_ENABLED
AC_ELUNA_TRACE_BACK=$DEFAULT_ELUNA_TRACE_BACK
AC_ELUNA_AUTO_RELOAD=$DEFAULT_ELUNA_AUTO_RELOAD
AC_ELUNA_BYTECODE_CACHE=$DEFAULT_ELUNA_BYTECODE_CACHE
AC_ELUNA_SCRIPT_PATH=$DEFAULT_ELUNA_SCRIPT_PATH
AC_ELUNA_REQUIRE_PATHS=$DEFAULT_ELUNA_REQUIRE_PATHS
AC_ELUNA_REQUIRE_CPATHS=$DEFAULT_ELUNA_REQUIRE_CPATHS
AC_ELUNA_AUTO_RELOAD_INTERVAL=$DEFAULT_ELUNA_AUTO_RELOAD_INTERVAL
# Tools
PMA_HOST=$DEFAULT_CONTAINER_MYSQL
PMA_PORT=$DEFAULT_MYSQL_INTERNAL_PORT
PMA_USER=$DEFAULT_PMA_USER
PMA_EXTERNAL_PORT=$DEFAULT_PMA_EXTERNAL_PORT
PMA_ARBITRARY=$DEFAULT_PMA_ARBITRARY
PMA_ABSOLUTE_URI=$DEFAULT_PMA_ABSOLUTE_URI
PMA_UPLOAD_LIMIT=$DEFAULT_PMA_UPLOAD_LIMIT
PMA_MEMORY_LIMIT=$DEFAULT_PMA_MEMORY_LIMIT
PMA_MAX_EXECUTION_TIME=$DEFAULT_PMA_MAX_EXECUTION_TIME
KEIRA3_EXTERNAL_PORT=$DEFAULT_KEIRA3_EXTERNAL_PORT
KEIRA_DATABASE_HOST=$DEFAULT_CONTAINER_MYSQL
KEIRA_DATABASE_PORT=$DEFAULT_MYSQL_INTERNAL_PORT
# Health checks
EOF
for hc_key in "${HEALTHCHECK_KEYS[@]}"; do
printf "%s=%s\n" "$hc_key" "${!hc_key}"
done
cat <<EOF
# Networking
NETWORK_NAME=$DEFAULT_NETWORK_NAME
NETWORK_SUBNET=$DEFAULT_NETWORK_SUBNET
NETWORK_GATEWAY=$DEFAULT_NETWORK_GATEWAY
# Storage helpers
HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH}
# Helper images
ALPINE_GIT_IMAGE=$DEFAULT_ALPINE_GIT_IMAGE
ALPINE_IMAGE=$DEFAULT_ALPINE_IMAGE
EOF
} > "$ENV_OUT"
local staging_modules_dir="${LOCAL_STORAGE_ROOT_ABS}/modules"
mkdir -p "$staging_modules_dir"
local module_state_string=""
for module_state_var in "${MODULE_KEYS[@]}"; do
local module_value="${!module_state_var:-0}"
module_state_string+="${module_state_var}=${module_value}|"
done
printf '%s' "$module_state_string" > "${staging_modules_dir}/.modules_state"
if [ "$NEEDS_CXX_REBUILD" != "1" ]; then
rm -f "${staging_modules_dir}/.requires_rebuild" 2>/dev/null || true
fi
say SUCCESS ".env written to $ENV_OUT"
}
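For reference, the .modules_state marker written just above is a single pipe-delimited line that records every module key with its 0/1 value; a minimal sketch of its shape (module names illustrative, trailing pipe intentional):

MODULE_PLAYERBOTS=1|MODULE_ELUNA=1|MODULE_AHBOT=0|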


@@ -1,451 +0,0 @@
# Module selection workflow for setup.sh
setup_select_modules() {
local MODE_SELECTION=""
local MODE_PRESET_NAME=""
declare -A MODULE_PRESET_CONFIGS=()
declare -A MODULE_PRESET_LABELS=()
declare -A MODULE_PRESET_DESCRIPTIONS=()
declare -A MODULE_PRESET_ORDER=()
local CONFIG_DIR="$SCRIPT_DIR/config/module-profiles"
if [ ! -x "$MODULE_PROFILES_HELPER" ]; then
say ERROR "Profile helper not found or not executable at $MODULE_PROFILES_HELPER"
exit 1
fi
if [ -d "$CONFIG_DIR" ]; then
while IFS=$'\t' read -r preset_name preset_modules preset_label preset_desc preset_order; do
[ -n "$preset_name" ] || continue
MODULE_PRESET_CONFIGS["$preset_name"]="$preset_modules"
MODULE_PRESET_LABELS["$preset_name"]="$preset_label"
MODULE_PRESET_DESCRIPTIONS["$preset_name"]="$preset_desc"
MODULE_PRESET_ORDER["$preset_name"]="${preset_order:-10000}"
done < <(python3 "$MODULE_PROFILES_HELPER" list "$CONFIG_DIR")
fi
local missing_presets=0
for required_preset in "$DEFAULT_PRESET_SUGGESTED" "$DEFAULT_PRESET_PLAYERBOTS"; do
if [ -z "${MODULE_PRESET_CONFIGS[$required_preset]:-}" ]; then
say ERROR "Missing module preset config/module-profiles/${required_preset}.json"
missing_presets=1
fi
done
if [ "$missing_presets" -eq 1 ]; then
exit 1
fi
if [ -n "$CLI_MODULE_PRESET" ]; then
if [ -n "${MODULE_PRESET_CONFIGS[$CLI_MODULE_PRESET]:-}" ]; then
MODE_SELECTION="preset"
MODE_PRESET_NAME="$CLI_MODULE_PRESET"
else
say ERROR "Unknown module preset: $CLI_MODULE_PRESET"
exit 1
fi
fi
if [ -n "$MODE_SELECTION" ] && [ "$MODE_SELECTION" != "preset" ]; then
MODE_PRESET_NAME=""
fi
if [ -n "$CLI_MODULE_MODE" ]; then
case "${CLI_MODULE_MODE,,}" in
1|suggested) MODE_SELECTION=1 ;;
2|playerbots) MODE_SELECTION=2 ;;
3|manual) MODE_SELECTION=3 ;;
4|none) MODE_SELECTION=4 ;;
*) say ERROR "Invalid module mode: ${CLI_MODULE_MODE}"; exit 1 ;;
esac
if [ "$MODE_SELECTION" = "1" ]; then
MODE_PRESET_NAME="$DEFAULT_PRESET_SUGGESTED"
elif [ "$MODE_SELECTION" = "2" ]; then
MODE_PRESET_NAME="$DEFAULT_PRESET_PLAYERBOTS"
fi
fi
if [ -z "$MODE_SELECTION" ] && [ ${#MODULE_ENABLE_SET[@]} -gt 0 ]; then
MODE_SELECTION=3
fi
if [ ${#MODULE_ENABLE_SET[@]} -gt 0 ] && [ -n "$MODE_SELECTION" ] && [ "$MODE_SELECTION" != "3" ] && [ "$MODE_SELECTION" != "4" ]; then
say INFO "Switching module preset to manual to honor --enable-modules list."
MODE_SELECTION=3
fi
if [ "$MODE_SELECTION" = "4" ] && [ ${#MODULE_ENABLE_SET[@]} -gt 0 ]; then
say ERROR "--enable-modules cannot be used together with module-mode=none."
exit 1
fi
if [ "$MODE_SELECTION" = "preset" ] && [ -n "$CLI_MODULE_PRESET" ]; then
MODE_PRESET_NAME="$CLI_MODULE_PRESET"
fi
# Function to determine source branch for a preset
get_preset_source_branch() {
local preset_name="$1"
local preset_modules="${MODULE_PRESET_CONFIGS[$preset_name]:-}"
# Check if playerbots module is in the preset
if [[ "$preset_modules" == *"MODULE_PLAYERBOTS"* ]]; then
echo "azerothcore-playerbots"
else
echo "azerothcore-wotlk"
fi
}
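# Illustrative behaviour of get_preset_source_branch (hypothetical preset entries, not the shipped profiles):
#   MODULE_PRESET_CONFIGS[example-bots]="MODULE_PLAYERBOTS,MODULE_ELUNA"
#   get_preset_source_branch example-bots    # -> azerothcore-playerbots
#   MODULE_PRESET_CONFIGS[example-plain]="MODULE_ELUNA,MODULE_AIO"
#   get_preset_source_branch example-plain   # -> azerothcore-wotlk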
# Module config
say HEADER "MODULE PRESET"
printf " %s) %s\n" "1" "⭐ Suggested Modules"
printf " %s (%s)\n" "Baseline solo-friendly quality of life mix" "azerothcore-wotlk"
printf " %s) %s\n" "2" "🤖 Playerbots + Suggested modules"
printf " %s (%s)\n" "Suggested stack plus playerbots enabled" "azerothcore-playerbots"
printf " %s) %s\n" "3" "⚙️ Manual selection"
printf " %s (%s)\n" "Choose individual modules manually" "(depends on modules)"
printf " %s) %s\n" "4" "🚫 No modules"
printf " %s (%s)\n" "Pure AzerothCore with no modules" "azerothcore-wotlk"
local menu_index=5
declare -A MENU_PRESET_INDEX=()
local -a ORDERED_PRESETS=()
for preset_name in "${!MODULE_PRESET_CONFIGS[@]}"; do
if [ "$preset_name" = "$DEFAULT_PRESET_SUGGESTED" ] || [ "$preset_name" = "$DEFAULT_PRESET_PLAYERBOTS" ]; then
continue
fi
local order="${MODULE_PRESET_ORDER[$preset_name]:-10000}"
ORDERED_PRESETS+=("$(printf '%05d::%s' "$order" "$preset_name")")
done
if [ ${#ORDERED_PRESETS[@]} -gt 0 ]; then
IFS=$'\n' ORDERED_PRESETS=($(printf '%s\n' "${ORDERED_PRESETS[@]}" | sort))
fi
for entry in "${ORDERED_PRESETS[@]}"; do
local preset_name="${entry#*::}"
[ -n "${MODULE_PRESET_CONFIGS[$preset_name]:-}" ] || continue
local pretty_name preset_desc
if [ -n "${MODULE_PRESET_LABELS[$preset_name]:-}" ]; then
pretty_name="${MODULE_PRESET_LABELS[$preset_name]}"
else
pretty_name=$(echo "$preset_name" | tr '_-' ' ' | awk '{for(i=1;i<=NF;i++){$i=toupper(substr($i,1,1)) substr($i,2)}}1')
fi
preset_desc="${MODULE_PRESET_DESCRIPTIONS[$preset_name]:-No description available}"
local source_branch
source_branch=$(get_preset_source_branch "$preset_name")
printf " %s) %s\n" "$menu_index" "$pretty_name"
printf " %s (%s)\n" "$preset_desc" "$source_branch"
MENU_PRESET_INDEX[$menu_index]="$preset_name"
menu_index=$((menu_index + 1))
done
local max_option=$((menu_index - 1))
if [ "$NON_INTERACTIVE" = "1" ] && [ -z "$MODE_SELECTION" ]; then
MODE_SELECTION=1
fi
if [ -z "$MODE_SELECTION" ]; then
local selection_input
while true; do
read -p "$(echo -e "${YELLOW}🔧 Select module configuration [1-${max_option}]: ${NC}")" selection_input
if [[ "$selection_input" =~ ^[0-9]+$ ]] && [ "$selection_input" -ge 1 ] && [ "$selection_input" -le "$max_option" ]; then
if [ -n "${MENU_PRESET_INDEX[$selection_input]:-}" ]; then
MODE_SELECTION="preset"
MODE_PRESET_NAME="${MENU_PRESET_INDEX[$selection_input]}"
else
MODE_SELECTION="$selection_input"
fi
break
fi
say ERROR "Please select a number between 1 and ${max_option}"
done
else
if [ "$MODE_SELECTION" = "preset" ]; then
say INFO "Module preset set to ${MODE_PRESET_NAME}."
else
say INFO "Module preset set to ${MODE_SELECTION}."
fi
fi
local AC_AUTHSERVER_IMAGE_PLAYERBOTS_VALUE="$DEFAULT_AUTH_IMAGE_PLAYERBOTS"
local AC_WORLDSERVER_IMAGE_PLAYERBOTS_VALUE="$DEFAULT_WORLD_IMAGE_PLAYERBOTS"
local AC_AUTHSERVER_IMAGE_MODULES_VALUE="$DEFAULT_AUTH_IMAGE_MODULES"
local AC_WORLDSERVER_IMAGE_MODULES_VALUE="$DEFAULT_WORLD_IMAGE_MODULES"
local AC_CLIENT_DATA_IMAGE_PLAYERBOTS_VALUE="$DEFAULT_CLIENT_DATA_IMAGE_PLAYERBOTS"
local AC_DB_IMPORT_IMAGE_VALUE="$DEFAULT_AC_DB_IMPORT_IMAGE"
local mod_var
for mod_var in "${!MODULE_ENABLE_SET[@]}"; do
if [ -n "${KNOWN_MODULE_LOOKUP[$mod_var]:-}" ]; then
printf -v "$mod_var" '%s' "1"
fi
done
auto_enable_module_dependencies
ensure_module_platforms
if [ "${MODULE_OLLAMA_CHAT:-0}" = "1" ] && [ "${MODULE_PLAYERBOTS:-0}" != "1" ]; then
say INFO "Automatically enabling MODULE_PLAYERBOTS for MODULE_OLLAMA_CHAT."
MODULE_PLAYERBOTS=1
MODULE_ENABLE_SET["MODULE_PLAYERBOTS"]=1
fi
declare -A DISABLED_MODULE_REASONS=(
[MODULE_AHBOT]="Requires the upstream Addmod_ahbotScripts symbol (fails to link)"
[MODULE_LEVEL_GRANT]="QuestCountLevel module relies on removed ConfigMgr APIs and fails to build"
)
PLAYERBOT_ENABLED=0
PLAYERBOT_MIN_BOTS="${DEFAULT_PLAYERBOT_MIN:-40}"
PLAYERBOT_MAX_BOTS="${DEFAULT_PLAYERBOT_MAX:-40}"
NEEDS_CXX_REBUILD=0
local module_mode_label=""
if [ "$MODE_SELECTION" = "1" ]; then
MODE_PRESET_NAME="$DEFAULT_PRESET_SUGGESTED"
apply_module_preset "${MODULE_PRESET_CONFIGS[$DEFAULT_PRESET_SUGGESTED]}"
local preset_label="${MODULE_PRESET_LABELS[$DEFAULT_PRESET_SUGGESTED]:-Suggested Modules}"
module_mode_label="preset 1 (${preset_label})"
elif [ "$MODE_SELECTION" = "2" ]; then
MODE_PRESET_NAME="$DEFAULT_PRESET_PLAYERBOTS"
apply_module_preset "${MODULE_PRESET_CONFIGS[$DEFAULT_PRESET_PLAYERBOTS]}"
local preset_label="${MODULE_PRESET_LABELS[$DEFAULT_PRESET_PLAYERBOTS]:-Playerbots + Suggested}"
module_mode_label="preset 2 (${preset_label})"
elif [ "$MODE_SELECTION" = "3" ]; then
MODE_PRESET_NAME=""
say INFO "Answer y/n for each module (organized by category)"
for key in "${!DISABLED_MODULE_REASONS[@]}"; do
say WARNING "${key#MODULE_}: ${DISABLED_MODULE_REASONS[$key]}"
done
local -a selection_keys=("${MODULE_KEYS_SORTED[@]}")
if [ ${#selection_keys[@]} -eq 0 ]; then
selection_keys=("${MODULE_KEYS[@]}")
fi
# Define category display order and titles
local -a category_order=(
"automation" "quality-of-life" "gameplay-enhancement" "npc-service"
"pvp" "progression" "economy" "social" "account-wide"
"customization" "scripting" "admin" "premium" "minigame"
"content" "rewards" "developer" "database" "tooling" "uncategorized"
)
declare -A category_titles=(
["automation"]="🤖 Automation"
["quality-of-life"]="✨ Quality of Life"
["gameplay-enhancement"]="⚔️ Gameplay Enhancement"
["npc-service"]="🏪 NPC Services"
["pvp"]="⚡ PvP"
["progression"]="📈 Progression"
["economy"]="💰 Economy"
["social"]="👥 Social"
["account-wide"]="👤 Account-Wide"
["customization"]="🎨 Customization"
["scripting"]="📜 Scripting"
["admin"]="🔧 Admin Tools"
["premium"]="💎 Premium/VIP"
["minigame"]="🎮 Mini-Games"
["content"]="🏰 Content"
["rewards"]="🎁 Rewards"
["developer"]="🛠️ Developer Tools"
["database"]="🗄️ Database"
["tooling"]="🔨 Tooling"
["uncategorized"]="📦 Miscellaneous"
)
declare -A processed_categories=()
render_category() {
local cat="$1"
local module_list="${modules_by_category[$cat]:-}"
[ -n "$module_list" ] || return 0
local has_valid_modules=0
local -a module_array
IFS=' ' read -ra module_array <<< "$module_list"
for key in "${module_array[@]}"; do
[ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue
local status_lc="${MODULE_STATUS_MAP[$key],,}"
if [ -z "$status_lc" ] || [ "$status_lc" = "active" ]; then
has_valid_modules=1
break
fi
done
[ "$has_valid_modules" = "1" ] || return 0
local cat_title="${category_titles[$cat]:-$cat}"
printf '\n%b\n' "${BOLD}${CYAN}═══ ${cat_title} ═══${NC}"
local first_in_cat=1
for key in "${module_array[@]}"; do
[ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue
local status_lc="${MODULE_STATUS_MAP[$key],,}"
if [ -n "$status_lc" ] && [ "$status_lc" != "active" ]; then
local reason="${MODULE_BLOCK_REASON_MAP[$key]:-Blocked in manifest}"
say WARNING "${key#MODULE_} is blocked: ${reason}"
printf -v "$key" '%s' "0"
continue
fi
if [ "$first_in_cat" -ne 1 ]; then
printf '\n'
fi
first_in_cat=0
local prompt_label
prompt_label="$(module_display_name "$key")"
if [ "${MODULE_NEEDS_BUILD_MAP[$key]}" = "1" ]; then
prompt_label="${prompt_label} (requires build)"
fi
local description="${MODULE_DESCRIPTION_MAP[$key]:-}"
if [ -n "$description" ]; then
printf '%b\n' "${BLUE} ${MODULE_NAME_MAP[$key]:-$key}: ${description}${NC}"
fi
local special_message="${MODULE_SPECIAL_MESSAGE_MAP[$key]:-}"
if [ -n "$special_message" ]; then
printf '%b\n' "${MAGENTA}💡 ${special_message}${NC}"
fi
local repo="${MODULE_REPO_MAP[$key]:-}"
if [ -n "$repo" ]; then
printf '%b\n' "${GREEN}🔗 ${repo}${NC}"
fi
local default_answer
default_answer="$(module_default "$key")"
local response
response=$(ask_yn "$prompt_label" "$default_answer")
if [ "$response" = "1" ]; then
printf -v "$key" '%s' "1"
else
printf -v "$key" '%s' "0"
fi
done
processed_categories["$cat"]=1
}
# Group modules by category using arrays
declare -A modules_by_category
local key
for key in "${selection_keys[@]}"; do
[ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue
local category="${MODULE_CATEGORY_MAP[$key]:-uncategorized}"
if [ -z "${modules_by_category[$category]:-}" ]; then
modules_by_category[$category]="$key"
else
modules_by_category[$category]="${modules_by_category[$category]} $key"
fi
done
# Process modules by category (ordered, then any new categories)
local cat
for cat in "${category_order[@]}"; do
render_category "$cat"
done
for cat in "${!modules_by_category[@]}"; do
[ -n "${processed_categories[$cat]:-}" ] && continue
render_category "$cat"
done
module_mode_label="preset 3 (Manual)"
elif [ "$MODE_SELECTION" = "4" ]; then
for key in "${MODULE_KEYS[@]}"; do
printf -v "$key" '%s' "0"
done
module_mode_label="preset 4 (No modules)"
elif [ "$MODE_SELECTION" = "preset" ]; then
local preset_modules="${MODULE_PRESET_CONFIGS[$MODE_PRESET_NAME]}"
if [ -n "$preset_modules" ]; then
apply_module_preset "$preset_modules"
say INFO "Applied preset '${MODE_PRESET_NAME}'."
else
say WARNING "Preset '${MODE_PRESET_NAME}' did not contain any module selections."
fi
local preset_label="${MODULE_PRESET_LABELS[$MODE_PRESET_NAME]:-$MODE_PRESET_NAME}"
module_mode_label="preset (${preset_label})"
fi
auto_enable_module_dependencies
ensure_module_platforms
if [ -n "$CLI_PLAYERBOT_ENABLED" ]; then
if [[ "$CLI_PLAYERBOT_ENABLED" != "0" && "$CLI_PLAYERBOT_ENABLED" != "1" ]]; then
say ERROR "--playerbot-enabled must be 0 or 1"
exit 1
fi
PLAYERBOT_ENABLED="$CLI_PLAYERBOT_ENABLED"
fi
if [ -n "$CLI_PLAYERBOT_MIN" ]; then
if ! [[ "$CLI_PLAYERBOT_MIN" =~ ^[0-9]+$ ]]; then
say ERROR "--playerbot-min-bots must be numeric"
exit 1
fi
PLAYERBOT_MIN_BOTS="$CLI_PLAYERBOT_MIN"
fi
if [ -n "$CLI_PLAYERBOT_MAX" ]; then
if ! [[ "$CLI_PLAYERBOT_MAX" =~ ^[0-9]+$ ]]; then
say ERROR "--playerbot-max-bots must be numeric"
exit 1
fi
PLAYERBOT_MAX_BOTS="$CLI_PLAYERBOT_MAX"
fi
if [ "$MODULE_PLAYERBOTS" = "1" ]; then
if [ -z "$CLI_PLAYERBOT_ENABLED" ]; then
PLAYERBOT_ENABLED=1
fi
PLAYERBOT_MIN_BOTS=$(ask "Minimum concurrent playerbots" "${CLI_PLAYERBOT_MIN:-$DEFAULT_PLAYERBOT_MIN}" validate_number)
PLAYERBOT_MAX_BOTS=$(ask "Maximum concurrent playerbots" "${CLI_PLAYERBOT_MAX:-$DEFAULT_PLAYERBOT_MAX}" validate_number)
fi
if [ -n "$PLAYERBOT_MIN_BOTS" ] && [ -n "$PLAYERBOT_MAX_BOTS" ]; then
if [ "$PLAYERBOT_MAX_BOTS" -lt "$PLAYERBOT_MIN_BOTS" ]; then
say WARNING "Playerbot max bots ($PLAYERBOT_MAX_BOTS) lower than min ($PLAYERBOT_MIN_BOTS); adjusting max to match min."
PLAYERBOT_MAX_BOTS="$PLAYERBOT_MIN_BOTS"
fi
fi
for mod_var in "${MODULE_KEYS[@]}"; do
if [ "${MODULE_NEEDS_BUILD_MAP[$mod_var]}" = "1" ]; then
eval "value=\${$mod_var:-0}"
if [ "$value" = "1" ]; then
NEEDS_CXX_REBUILD=1
break
fi
fi
done
local enabled_module_keys=()
local enabled_cpp_module_keys=()
for mod_var in "${MODULE_KEYS[@]}"; do
eval "value=\${$mod_var:-0}"
if [ "$value" = "1" ]; then
enabled_module_keys+=("$mod_var")
if [ "${MODULE_NEEDS_BUILD_MAP[$mod_var]}" = "1" ]; then
enabled_cpp_module_keys+=("$mod_var")
fi
fi
done
MODULES_ENABLED_LIST=""
MODULES_CPP_LIST=""
if [ ${#enabled_module_keys[@]} -gt 0 ]; then
MODULES_ENABLED_LIST="$(IFS=','; printf '%s' "${enabled_module_keys[*]}")"
fi
if [ ${#enabled_cpp_module_keys[@]} -gt 0 ]; then
MODULES_CPP_LIST="$(IFS=','; printf '%s' "${enabled_cpp_module_keys[*]}")"
fi
# Determine source variant based ONLY on playerbots module
STACK_SOURCE_VARIANT="core"
if [ "$MODULE_PLAYERBOTS" = "1" ] || [ "$PLAYERBOT_ENABLED" = "1" ]; then
STACK_SOURCE_VARIANT="playerbots"
fi
# Determine image mode based on source variant and build requirements
STACK_IMAGE_MODE="standard"
if [ "$STACK_SOURCE_VARIANT" = "playerbots" ]; then
STACK_IMAGE_MODE="playerbots"
elif [ "$NEEDS_CXX_REBUILD" = "1" ]; then
STACK_IMAGE_MODE="modules"
fi
MODULES_REQUIRES_CUSTOM_BUILD="$NEEDS_CXX_REBUILD"
MODULES_REQUIRES_PLAYERBOT_SOURCE="0"
if [ "$STACK_SOURCE_VARIANT" = "playerbots" ]; then
MODULES_REQUIRES_PLAYERBOT_SOURCE="1"
fi
export NEEDS_CXX_REBUILD
MODULE_MODE_LABEL="$module_mode_label"
}
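As a usage sketch only: the CLI_* variables consumed above are populated by setup.sh, whose option parsing is not part of this diff. The playerbot and module flags named in the error messages suggest invocations along these lines (the module-mode flag name and the comma-separated --enable-modules syntax are inferred and may differ):

./setup.sh --playerbot-enabled 1 --playerbot-min-bots 20 --playerbot-max-bots 40
./setup.sh --enable-modules mod-eluna,mod-aio    # forces manual selection (preset 3)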


@@ -1,244 +0,0 @@
# Module metadata and helpers for setup.sh
# setup.sh -> scripts/bash/lib/common.sh (shared helpers)
source "$SCRIPT_DIR/scripts/bash/lib/common.sh"
normalize_module_name(){
local mod="$1"
mod="${mod^^}"
mod="${mod//-/_}"
mod="${mod//./_}"
mod="${mod// /_}"
if [[ "$mod" = MOD_* ]]; then
mod="${mod#MOD_}"
fi
if [[ "$mod" != MODULE_* ]]; then
mod="MODULE_${mod}"
fi
echo "$mod"
}
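# A minimal sketch of the normalisation above (illustrative inputs):
#   normalize_module_name "mod-eluna"    # -> MODULE_ELUNA
#   normalize_module_name "module_aio"   # -> MODULE_AIO
#   normalize_module_name "Playerbots"   # -> MODULE_PLAYERBOTS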
declare -A MODULE_ENABLE_SET=()
module_default(){
local key="$1"
if [ "${MODULE_ENABLE_SET[$key]:-0}" = "1" ]; then
echo y
return
fi
local current
eval "current=\${$key:-${MODULE_DEFAULT_VALUES[$key]:-0}}"
if [ "$current" = "1" ]; then
echo y
else
echo n
fi
}
apply_module_preset(){
local preset_list="$1"
local IFS=','
for item in $preset_list; do
local mod="${item//[[:space:]]/}"
[ -z "$mod" ] && continue
if [ -n "${KNOWN_MODULE_LOOKUP[$mod]:-}" ]; then
printf -v "$mod" '%s' "1"
else
say WARNING "Preset references unknown module $mod"
fi
done
}
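# Illustrative calls: entries must match manifest keys; unknown names only produce a warning.
#   apply_module_preset "MODULE_ELUNA, MODULE_AIO"   # sets MODULE_ELUNA=1 and MODULE_AIO=1
#   apply_module_preset "MODULE_NOT_REAL"            # -> WARNING: Preset references unknown module MODULE_NOT_REAL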
# ==============================
# Module metadata / defaults
# ==============================
MODULE_MANIFEST_PATH="$SCRIPT_DIR/config/module-manifest.json"
MODULE_MANIFEST_HELPER="$SCRIPT_DIR/scripts/python/setup_manifest.py"
MODULE_PROFILES_HELPER="$SCRIPT_DIR/scripts/python/setup_profiles.py"
ENV_TEMPLATE_FILE="$SCRIPT_DIR/.env.template"
declare -a MODULE_KEYS=()
declare -a MODULE_KEYS_SORTED=()
declare -A MODULE_NAME_MAP=()
declare -A MODULE_TYPE_MAP=()
declare -A MODULE_STATUS_MAP=()
declare -A MODULE_BLOCK_REASON_MAP=()
declare -A MODULE_NEEDS_BUILD_MAP=()
declare -A MODULE_REQUIRES_MAP=()
declare -A MODULE_NOTES_MAP=()
declare -A MODULE_DESCRIPTION_MAP=()
declare -A MODULE_CATEGORY_MAP=()
declare -A MODULE_SPECIAL_MESSAGE_MAP=()
declare -A MODULE_REPO_MAP=()
declare -A MODULE_DEFAULT_VALUES=()
declare -A KNOWN_MODULE_LOOKUP=()
declare -A ENV_TEMPLATE_VALUES=()
MODULE_METADATA_INITIALIZED=0
load_env_template_values() {
local template_file="$ENV_TEMPLATE_FILE"
if [ ! -f "$template_file" ]; then
echo "ERROR: .env.template file not found at $template_file" >&2
exit 1
fi
while IFS= read -r raw_line || [ -n "$raw_line" ]; do
local line="${raw_line%%#*}"
line="${line%%$'\r'}"
line="$(echo "$line" | sed 's/[[:space:]]*$//')"
[ -n "$line" ] || continue
[[ "$line" == *=* ]] || continue
local key="${line%%=*}"
local value="${line#*=}"
key="$(echo "$key" | sed 's/[[:space:]]//g')"
value="$(echo "$value" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
[ -n "$key" ] || continue
ENV_TEMPLATE_VALUES["$key"]="$value"
done < "$template_file"
}
load_module_manifest_metadata() {
if [ ! -f "$MODULE_MANIFEST_PATH" ]; then
echo "ERROR: Module manifest not found at $MODULE_MANIFEST_PATH" >&2
exit 1
fi
if [ ! -x "$MODULE_MANIFEST_HELPER" ]; then
echo "ERROR: Manifest helper not found or not executable at $MODULE_MANIFEST_HELPER" >&2
exit 1
fi
require_cmd python3
mapfile -t MODULE_KEYS < <(
python3 "$MODULE_MANIFEST_HELPER" keys "$MODULE_MANIFEST_PATH"
)
if [ ${#MODULE_KEYS[@]} -eq 0 ]; then
echo "ERROR: No modules defined in manifest $MODULE_MANIFEST_PATH" >&2
exit 1
fi
while IFS=$'\t' read -r key name needs_build module_type status block_reason requires notes description category special_message repo; do
[ -n "$key" ] || continue
# Convert placeholder back to empty string
[ "$block_reason" = "-" ] && block_reason=""
[ "$requires" = "-" ] && requires=""
[ "$notes" = "-" ] && notes=""
[ "$description" = "-" ] && description=""
[ "$category" = "-" ] && category=""
[ "$special_message" = "-" ] && special_message=""
[ "$repo" = "-" ] && repo=""
MODULE_NAME_MAP["$key"]="$name"
MODULE_NEEDS_BUILD_MAP["$key"]="$needs_build"
MODULE_TYPE_MAP["$key"]="$module_type"
MODULE_STATUS_MAP["$key"]="$status"
MODULE_BLOCK_REASON_MAP["$key"]="$block_reason"
MODULE_REQUIRES_MAP["$key"]="$requires"
MODULE_NOTES_MAP["$key"]="$notes"
MODULE_DESCRIPTION_MAP["$key"]="$description"
MODULE_CATEGORY_MAP["$key"]="$category"
MODULE_SPECIAL_MESSAGE_MAP["$key"]="$special_message"
MODULE_REPO_MAP["$key"]="$repo"
KNOWN_MODULE_LOOKUP["$key"]=1
done < <(python3 "$MODULE_MANIFEST_HELPER" metadata "$MODULE_MANIFEST_PATH")
mapfile -t MODULE_KEYS_SORTED < <(
python3 "$MODULE_MANIFEST_HELPER" sorted-keys "$MODULE_MANIFEST_PATH"
)
}
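# For reference, each metadata row read above is one tab-separated line with twelve fields
# (key, name, needs_build, type, status, block_reason, requires, notes, description,
# category, special_message, repo); empty fields arrive as "-" and are blanked by the loop.
# A hypothetical example row (tabs shown as \t):
#   MODULE_EXAMPLE\tExample Module\t1\tcpp\tactive\t-\tMODULE_ELUNA\t-\tAdds an example feature\tquality-of-life\t-\thttps://example.org/mod-example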
initialize_module_defaults() {
if [ "$MODULE_METADATA_INITIALIZED" = "1" ]; then
return
fi
load_env_template_values
load_module_manifest_metadata
for key in "${MODULE_KEYS[@]}"; do
if [ -z "${ENV_TEMPLATE_VALUES[$key]+_}" ]; then
echo "ERROR: .env.template missing default value for ${key}" >&2
exit 1
fi
local default="${ENV_TEMPLATE_VALUES[$key]}"
MODULE_DEFAULT_VALUES["$key"]="$default"
printf -v "$key" '%s' "$default"
done
MODULE_METADATA_INITIALIZED=1
}
reset_modules_to_defaults() {
for key in "${MODULE_KEYS[@]}"; do
printf -v "$key" '%s' "${MODULE_DEFAULT_VALUES[$key]}"
done
}
module_display_name() {
local key="$1"
local name="${MODULE_NAME_MAP[$key]:-$key}"
local note="${MODULE_NOTES_MAP[$key]}"
if [ -n "$note" ]; then
echo "${name} - ${note}"
else
echo "$name"
fi
}
auto_enable_module_dependencies() {
local changed=1
while [ "$changed" -eq 1 ]; do
changed=0
for key in "${MODULE_KEYS[@]}"; do
local enabled
eval "enabled=\${$key:-0}"
[ "$enabled" = "1" ] || continue
local requires_csv="${MODULE_REQUIRES_MAP[$key]}"
IFS=',' read -r -a deps <<< "${requires_csv}"
for dep in "${deps[@]}"; do
dep="${dep//[[:space:]]/}"
[ -n "$dep" ] || continue
[ -n "${KNOWN_MODULE_LOOKUP[$dep]:-}" ] || continue
local dep_value
eval "dep_value=\${$dep:-0}"
if [ "$dep_value" != "1" ]; then
say INFO "Automatically enabling ${dep#MODULE_} (required by ${key#MODULE_})."
printf -v "$dep" '%s' "1"
MODULE_ENABLE_SET["$dep"]=1
changed=1
fi
done
done
done
}
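# Illustrative dependency pull-in (MODULE_EXAMPLE is hypothetical):
#   MODULE_REQUIRES_MAP[MODULE_EXAMPLE]="MODULE_ELUNA"; MODULE_EXAMPLE=1; MODULE_ELUNA=0
#   auto_enable_module_dependencies
#   # -> INFO "Automatically enabling ELUNA (required by EXAMPLE)." and MODULE_ELUNA is set to 1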
ensure_module_platforms() {
local needs_platform=0
local key
for key in "${MODULE_KEYS[@]}"; do
case "$key" in
MODULE_ELUNA|MODULE_AIO) continue ;;
esac
local value
eval "value=\${$key:-0}"
if [ "$value" = "1" ]; then
needs_platform=1
break
fi
done
if [ "$needs_platform" != "1" ]; then
return 0
fi
local platform
for platform in MODULE_ELUNA MODULE_AIO; do
[ -n "${KNOWN_MODULE_LOOKUP[$platform]:-}" ] || continue
local platform_value
eval "platform_value=\${$platform:-0}"
if [ "$platform_value" != "1" ]; then
local platform_name="${MODULE_NAME_MAP[$platform]:-${platform#MODULE_}}"
say INFO "Automatically enabling ${platform_name} to support selected modules."
printf -v "$platform" '%s' "1"
MODULE_ENABLE_SET["$platform"]=1
fi
done
return 0
}


@@ -1,112 +0,0 @@
# Summary, path setup, and output helpers for setup.sh
print_summary() {
# setup.sh -> scripts/bash/setup/ui.sh (say)
local SUMMARY_MODE_TEXT="$MODULE_MODE_LABEL"
if [ -z "$SUMMARY_MODE_TEXT" ]; then
SUMMARY_MODE_TEXT="${CLI_MODULE_MODE:-}"
fi
say HEADER "SUMMARY"
printf " %-18s %s\n" "Server Address:" "$SERVER_ADDRESS"
printf " %-18s Realm:%s Auth:%s SOAP:%s MySQL:%s\n" "Ports:" "$REALM_PORT" "$AUTH_EXTERNAL_PORT" "$SOAP_EXTERNAL_PORT" "$MYSQL_EXTERNAL_PORT"
printf " %-18s %s\n" "Storage Path:" "$STORAGE_PATH"
printf " %-18s %s\n" "Container User:" "$CONTAINER_USER"
printf " %-18s Daily %s:00 UTC, keep %sd/%sh\n" "Backups:" "$BACKUP_DAILY_TIME" "$BACKUP_RETENTION_DAYS" "$BACKUP_RETENTION_HOURS"
printf " %-18s %s\n" "Modules images:" "$DEFAULT_AUTH_IMAGE_MODULES | $DEFAULT_WORLD_IMAGE_MODULES"
printf " %-18s %s\n" "Modules preset:" "$SUMMARY_MODE_TEXT"
printf " %-18s %s\n" "Playerbot Min Bots:" "$PLAYERBOT_MIN_BOTS"
printf " %-18s %s\n" "Playerbot Max Bots:" "$PLAYERBOT_MAX_BOTS"
printf " %-18s" "Enabled Modules:"
local enabled_modules=()
for module_var in "${MODULE_KEYS[@]}"; do
eval "value=\${$module_var:-0}"
if [ "$value" = "1" ]; then
enabled_modules+=("${module_var#MODULE_}")
fi
done
if [ ${#enabled_modules[@]} -eq 0 ]; then
printf " none\n"
else
printf "\n"
for module in "${enabled_modules[@]}"; do
printf " • %s\n" "$module"
done
fi
if [ "$NEEDS_CXX_REBUILD" = "1" ]; then
printf " %-18s detected (source rebuild required)\n" "C++ modules:"
fi
}
configure_local_storage_paths() {
LOCAL_STORAGE_ROOT="${STORAGE_PATH_LOCAL:-./local-storage}"
LOCAL_STORAGE_ROOT="${LOCAL_STORAGE_ROOT%/}"
[ -z "$LOCAL_STORAGE_ROOT" ] && LOCAL_STORAGE_ROOT="."
LOCAL_STORAGE_ROOT_ABS="$LOCAL_STORAGE_ROOT"
if [[ "$LOCAL_STORAGE_ROOT_ABS" != /* ]]; then
LOCAL_STORAGE_ROOT_ABS="$SCRIPT_DIR/${LOCAL_STORAGE_ROOT_ABS#./}"
fi
LOCAL_STORAGE_ROOT_ABS="${LOCAL_STORAGE_ROOT_ABS%/}"
STORAGE_PATH_LOCAL="$LOCAL_STORAGE_ROOT"
export STORAGE_PATH STORAGE_PATH_LOCAL
local module_export_var
for module_export_var in "${MODULE_KEYS[@]}"; do
export "$module_export_var"
done
}
handle_rebuild_sentinel() {
# setup.sh -> scripts/bash/setup/ui.sh (say)
if [ "$NEEDS_CXX_REBUILD" != "1" ]; then
return 0
fi
echo ""
say WARNING "These modules require compiling AzerothCore from source."
say INFO "Run './build.sh' to compile your custom modules before deployment."
local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild"
mkdir -p "$(dirname "$sentinel")"
if touch "$sentinel" 2>/dev/null; then
say INFO "Build sentinel created at $sentinel"
return 0
fi
say WARNING "Could not create build sentinel at $sentinel (permissions/ownership); forcing with sudo..."
if command -v sudo >/dev/null 2>&1; then
if sudo mkdir -p "$(dirname "$sentinel")" \
&& sudo chown -R "$(id -u):$(id -g)" "$(dirname "$sentinel")" \
&& sudo touch "$sentinel"; then
say INFO "Build sentinel created at $sentinel (after fixing ownership)"
else
say ERROR "Failed to force build sentinel creation at $sentinel. Fix permissions and rerun setup."
exit 1
fi
else
say ERROR "Cannot force build sentinel creation (sudo unavailable). Fix permissions on $(dirname "$sentinel") and rerun setup."
exit 1
fi
}
set_rebuild_source_path() {
local default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore"
if [ "$STACK_SOURCE_VARIANT" = "playerbots" ]; then
default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots"
fi
# Persist rebuild source path for downstream build scripts
MODULES_REBUILD_SOURCE_PATH="$default_source_rel"
}
print_final_next_steps() {
say INFO "Ready to bring your realm online:"
if [ "$NEEDS_CXX_REBUILD" = "1" ]; then
printf ' 🔨 First, build custom modules: ./build.sh\n'
printf ' 🚀 Then deploy your realm: ./deploy.sh\n'
else
printf ' 🚀 Quick deploy: ./deploy.sh\n'
fi
}


@@ -1,153 +0,0 @@
# Setup UI and prompting helpers for setup.sh
# setup.sh -> scripts/bash/lib/common.sh (shared colors/logging)
if [ -n "${SCRIPT_DIR:-}" ] && [ -f "$SCRIPT_DIR/scripts/bash/lib/common.sh" ]; then
source "$SCRIPT_DIR/scripts/bash/lib/common.sh"
fi
# Extra UI colors not in common.sh
MAGENTA='\033[0;35m'
BOLD='\033[1m'
: "${RED:=\033[0;31m}"
: "${GREEN:=\033[0;32m}"
: "${YELLOW:=\033[1;33m}"
: "${BLUE:=\033[0;34m}"
: "${CYAN:=\033[0;36m}"
: "${NC:=\033[0m}"
say(){
local t=$1
shift
case "$t" in
INFO) echo -e "${BLUE} $*${NC}";;
SUCCESS) echo -e "${GREEN}$*${NC}";;
WARNING) echo -e "${YELLOW}⚠️ $*${NC}";;
ERROR) echo -e "${RED}$*${NC}";;
HEADER) echo -e "\n${MAGENTA}=== $* ===${NC}";;
esac
}
validate_ip(){ [[ $1 =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}$ ]]; }
validate_port(){ [[ $1 =~ ^[0-9]+$ ]] && [ $1 -ge 1 ] && [ $1 -le 65535 ]; }
validate_number(){ [[ $1 =~ ^[0-9]+$ ]]; }
ask(){
local prompt="$1"; local def="$2"; local validator="$3"; local v
while true; do
if [ "$NON_INTERACTIVE" = "1" ]; then
v="$def"
else
if [ -n "$def" ]; then
read -p "$(echo -e "${YELLOW}🔧 ${prompt} [${def}]: ${NC}")" v; v=${v:-$def}
else
read -p "$(echo -e "${YELLOW}🔧 ${prompt}: ${NC}")" v
fi
fi
if [ -z "$v" ] && [ "$NON_INTERACTIVE" = "1" ]; then
say ERROR "Non-interactive mode requires a value for '${prompt}'."
exit 1
fi
if [ -z "$validator" ] || $validator "$v"; then
echo "$v"
return 0
fi
if [ "$NON_INTERACTIVE" = "1" ]; then
say ERROR "Invalid value '${v}' provided for '${prompt}' in non-interactive mode."
exit 1
fi
say ERROR "Invalid input. Please try again."
done
}
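# Typical usage of ask (the validator argument is optional; the defaults shown here are
# illustrative, not the project's real defaults). In non-interactive mode the default is used as-is:
#   SERVER_ADDRESS=$(ask "Server address" "127.0.0.1" validate_ip)
#   REALM_PORT=$(ask "Realm port" "8085" validate_port)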
ask_yn(){
local p="$1"; local d="$2"; local v
if [ "$NON_INTERACTIVE" = "1" ]; then
if [ "$d" = "y" ]; then
echo 1
else
echo 0
fi
return 0
fi
while true; do
if [ "$d" = "y" ]; then
read -p "$(echo -e "${YELLOW}🔧 ${p} [Y/n]: ${NC}")" v; v=${v:-y}
else
read -p "$(echo -e "${YELLOW}🔧 ${p} [y/N]: ${NC}")" v; v=${v:-n}
fi
case "$v" in
[Yy]*) echo 1; return 0;;
[Nn]*) echo 0; return 0;;
esac
say ERROR "Please answer y or n"
done
}
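# ask_yn echoes 1 for yes and 0 for no; the default answer is applied in non-interactive mode.
# Illustrative check (whether setup actually prompts for this is not shown here):
#   if [ "$(ask_yn "Enable phpMyAdmin" "n")" = "1" ]; then echo "enabled"; fi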
show_wow_header() {
if [ -t 1 ] && command -v clear >/dev/null 2>&1; then
clear >/dev/null 2>&1 || true
fi
echo -e "${RED}"
cat <<'ASCII'
##
### :*
##### .**#
###### ***##
****###* *****##.
******##- ******###.
.*********###= ********###
************##### #****###:+* ********####
***********+****##########**********##**# ********#####
********=+***********######**********######*#**+*******###+
-+*****=**************#######*******####**#####**##*****####-
++**++****************#########**####***####***#####****####:
:++*******************#*******####*****#****######***##*****#######
*= -++++++******************************###**********###******######
.+***. :++++++++***************************#+*#*-*******************#**+
++*****= =+++=+++***************************+**###**************++*#####*
-++*****+++- -=++++++++*********+++++**###**+++=+*###**+*********##+++*+++##
+++*********+++=-=+++++++++****+++***+++++*####***+++**=**#*==***#####*++***+*+
+++++***********++=-=++++++++*++****=++*++*#######**+=-=+****+*#########***==+*#*
=+++++++*****++++===-++++++++=+++++=++*+=-+#**#**=####****#**+-+**************##*
++++++++++++++======++++++++=====+++++=-+++*+##########*****==*######*****####
+++++++=++++++====++++++++++========---++++*****#######**==***#*******####*
++===++++++++=====+++++++=+++:::--:::.++++++*****####**+=**************#
=+++++=: =+=====-+++++++++++++++++++++==+++--==----:-++++++****####****+=+*+*******:
++++++++++++++++==+++++++++++++++++++++=+=-===-----:+++++++++**+++****####***+++
=++++++++++++++++++++++++++++++++++++=++++======----==+++++++=+************:
:++++++++++++++=+++++++++++++++++++======-------:-====+****************.
=----=+++-==++++++*******++++++++++++++===============****************=
-=---==-=====--+++++++++++++++++++++++++++===+++++++********++#***#++******
+++++========+=====----++++++++++++++++===+++++===--=**********+=++*++********
+++==========-=============-----:-=++=====+++++++++++++++=-=***********+*********
==----=+===+=================+++++++++++++++++++++++++=-********************
.======++++++===============---:::::==++++++++++++++++++++++=**********++*******:
+++==--::-=+++++++++++++========+===--=+- :::=-=++++++++++++++++++++++ +*****++**+***
.-----::::-=++++++++++++++++++==::-----++. :=+++++++++++++++++++*..-+*********=
:=+++++++++++++++++==.:--===-+++++++++++**++++++:::-********
++++++++++++++++++=+++++++++++++**+++++*****==******
.++++++++++++=-:.-+++++++++***++++************+
+++=========:.=+=-::++*****+*************
-++++++++==+: ..::=-. ..::::=********
.+========+==+++==========---::-+*-
++++++++++++=======-======
++++++++++++++======++
-=======++++++:
...
:::. :::::::::.,:::::: :::::::.. ... :::::::::::: :: .: .,-::::: ... :::::::.. .,::::::
;;`;; '`````;;;;;;;'''' ;;;;``;;;; .;;;;;;;.;;;;;;;;'''',;; ;;, ,;;;'````' .;;;;;;;. ;;;;``;;;; ;;;;''''
,[[ '[[, .n[[' [[cccc [[[,/[[[' ,[[ \[[, [[ ,[[[,,,[[[ [[[ ,[[ \[[,[[[,/[[[' [[cccc
c$$$cc$$$c ,$$P" $$"""" $$$$$$c $$$, $$$ $$ "$$$"""$$$ $$$ $$$, $$$$$$$$$c $$""""
888 888,,888bo,_ 888oo,__ 888b "88bo,"888,_ _,88P 88, 888 "88o`88bo,__,o,"888,_ _,88P888b "88bo,888oo,__
YMM ""` `""*UMM """"YUMMMMMMM "W" "YMMMMMP" MMM MMM YMM "YUMMMMMP" "YMMMMMP" MMMM "W" """"\MMM
ASCII
echo -e "${NC}"
}
show_realm_configured(){
echo -e "\n${GREEN}⚔️ Your realm configuration has been forged! ⚔️${NC}"
echo -e "${GREEN}🏰 Ready to deploy your World of Warcraft server${NC}"
echo -e "${GREEN}🗡️ May your realm bring epic adventures!${NC}\n"
}


@@ -4,13 +4,18 @@ set -e
# Simple profile-aware deploy + health check for profiles-verify/docker-compose.yml
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
info(){ echo -e "${BLUE} $*${NC}"; }
ok(){ echo -e "${GREEN}$*${NC}"; }
warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
err(){ echo -e "${RED}$*${NC}"; }
PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml"
ENV_FILE=""
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
source "$PROJECT_DIR/scripts/bash/project_name.sh"
source "$PROJECT_DIR/scripts/bash/compose_overrides.sh"
source "$PROJECT_DIR/scripts/bash/lib/common.sh"
PROFILES=(db services-standard client-data modules tools)
SKIP_DEPLOY=false
QUICK=false
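# The PROFILES list above maps onto Docker Compose profiles. A hedged sketch of the kind of
# command the helper ultimately drives (exact flags and env handling live later in the script,
# outside this hunk):
#   docker compose --env-file "$ENV_FILE" --profile db --profile services-standard \
#     --profile client-data --profile modules --profile tools up -d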


@@ -33,8 +33,6 @@ if [ -z "${APPLY_SENDTRAINERLIST_PATCH:-}" ]; then
else
APPLY_SENDTRAINERLIST_PATCH="${APPLY_SENDTRAINERLIST_PATCH}"
fi
# Override keyword patch: always apply (C++11 best practice)
APPLY_OVERRIDE_PATCH="${APPLY_OVERRIDE_PATCH:-1}"
if [ -z "$MODULE_DIR" ] || [ ! -d "$MODULE_DIR" ]; then
echo "❌ mod-ale-patches: Invalid module directory: $MODULE_DIR"
@@ -70,44 +68,6 @@ apply_movepath_patch() {
fi
}
# Apply override keyword patch
apply_override_patch() {
local found_files=()
# Search for .cpp and .h files that need override keyword
while IFS= read -r -d '' file; do
if grep -l 'void OnPlayerLogin(Player\* player)' "$file" >/dev/null 2>&1; then
found_files+=("$file")
fi
done < <(find "$MODULE_DIR" -name "*.cpp" -o -name "*.h" -print0)
if [ ${#found_files[@]} -eq 0 ]; then
echo " ✅ No files need override keyword fix"
return 0
fi
local patch_count=0
for file in "${found_files[@]}"; do
# Check if OnPlayerLogin exists without override keyword
if grep -q 'void OnPlayerLogin(Player\* player) {' "$file" && ! grep -q 'void OnPlayerLogin(Player\* player) override {' "$file"; then
if sed -i 's/void OnPlayerLogin(Player\* player) {/void OnPlayerLogin(Player* player) override {/' "$file"; then
echo " ✅ Applied override keyword fix to $(basename "$file")"
patch_count=$((patch_count + 1))
else
echo " ❌ Failed to apply override keyword fix to $(basename "$file")"
return 2
fi
fi
done
if [ $patch_count -eq 0 ]; then
echo " ✅ Override keyword fix already present"
else
echo " ✅ Applied override keyword fix to $patch_count file(s)"
fi
return 0
}
# Apply SendTrainerList compatibility patch
apply_sendtrainerlist_patch() {
local target_file="$MODULE_DIR/src/LuaEngine/methods/PlayerMethods.h"
@@ -117,10 +77,10 @@ apply_sendtrainerlist_patch() {
return 1
fi
# Check if the buggy code exists (with ->GetGUID())
if grep -q 'player->GetSession()->SendTrainerList(obj->GetGUID());' "$target_file"; then
# Apply the fix by casting to Creature* instead of using GetGUID()
if sed -i 's/player->GetSession()->SendTrainerList(obj->GetGUID());/if (Creature* creature = obj->ToCreature()) player->GetSession()->SendTrainerList(creature);/' "$target_file"; then
# Check if the buggy code exists (without GetGUID())
if grep -q 'player->GetSession()->SendTrainerList(obj);' "$target_file"; then
# Apply the fix by adding ->GetGUID()
if sed -i 's/player->GetSession()->SendTrainerList(obj);/player->GetSession()->SendTrainerList(obj->GetGUID());/' "$target_file"; then
echo " ✅ Applied SendTrainerList compatibility fix"
return 0
else
@@ -135,11 +95,6 @@ apply_sendtrainerlist_patch() {
# Apply all patches
patch_count=0
if [ "$APPLY_OVERRIDE_PATCH" = "1" ]; then
if apply_override_patch; then
patch_count=$((patch_count + 1))
fi
fi
if [ "$APPLY_MOVEPATH_PATCH" = "1" ]; then
if apply_movepath_patch; then
patch_count=$((patch_count + 1))


@@ -1,161 +0,0 @@
#!/usr/bin/env python3
"""
Apply a module profile to .env file for CI/CD builds.
This script reads a module profile JSON and enables the specified modules
in the .env file, ready for automated builds.
"""
import argparse
import json
import sys
from pathlib import Path
from typing import List, Set
def load_profile(profile_path: Path) -> List[str]:
"""Load module list from a profile JSON file."""
try:
with open(profile_path, 'r') as f:
data = json.load(f)
except FileNotFoundError:
print(f"ERROR: Profile not found: {profile_path}", file=sys.stderr)
sys.exit(1)
except json.JSONDecodeError as e:
print(f"ERROR: Invalid JSON in profile: {e}", file=sys.stderr)
sys.exit(1)
modules = data.get('modules', [])
if not isinstance(modules, list):
print("ERROR: 'modules' must be a list in profile JSON", file=sys.stderr)
sys.exit(1)
return [m.strip() for m in modules if m.strip()]
def read_env_template(template_path: Path) -> List[str]:
"""Read the .env.template file."""
try:
with open(template_path, 'r') as f:
return f.readlines()
except FileNotFoundError:
print(f"ERROR: Template not found: {template_path}", file=sys.stderr)
sys.exit(1)
def apply_profile_to_env(template_lines: List[str], enabled_modules: Set[str]) -> List[str]:
"""
Process template lines and enable specified modules.
Sets MODULE_* variables to 1 if they're in enabled_modules, otherwise keeps template value.
"""
output_lines = []
for line in template_lines:
stripped = line.strip()
# Check if this is a MODULE_ variable line
if stripped.startswith('MODULE_') and '=' in stripped:
# Extract the module name (before the =)
module_name = stripped.split('=')[0].strip()
if module_name in enabled_modules:
# Enable this module
output_lines.append(f"{module_name}=1\n")
else:
# Keep original line (usually =0 or commented)
output_lines.append(line)
else:
# Not a module line, keep as-is
output_lines.append(line)
return output_lines
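# Illustrative behaviour of apply_profile_to_env (values are examples only):
#   "MODULE_ELUNA=0"  with "MODULE_ELUNA"  in enabled_modules    -> "MODULE_ELUNA=1\n"
#   "MODULE_AHBOT=0"  with "MODULE_AHBOT" not in enabled_modules -> line kept verbatim
#   comments and non-module settings                             -> passed through unchanged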
def write_env_file(env_path: Path, lines: List[str]):
"""Write the processed lines to .env file."""
try:
with open(env_path, 'w') as f:
f.writelines(lines)
print(f"✅ Applied profile to {env_path}")
except IOError as e:
print(f"ERROR: Failed to write .env file: {e}", file=sys.stderr)
sys.exit(1)
def main():
parser = argparse.ArgumentParser(
description='Apply a module profile to .env file for automated builds'
)
parser.add_argument(
'profile',
help='Name of the profile (e.g., RealmMaster) or path to profile JSON'
)
parser.add_argument(
'--env-template',
default='.env.template',
help='Path to .env.template file (default: .env.template)'
)
parser.add_argument(
'--env-output',
default='.env',
help='Path to output .env file (default: .env)'
)
parser.add_argument(
'--profiles-dir',
default='config/module-profiles',
help='Directory containing profile JSON files (default: config/module-profiles)'
)
parser.add_argument(
'--list-modules',
action='store_true',
help='List modules that will be enabled and exit'
)
args = parser.parse_args()
# Resolve profile path
profile_path = Path(args.profile)
if not profile_path.exists():
# Try treating it as a profile name
profile_path = Path(args.profiles_dir) / f"{args.profile}.json"
if not profile_path.exists():
print(f"ERROR: Profile not found: {args.profile}", file=sys.stderr)
print(f" Tried: {Path(args.profile)}", file=sys.stderr)
print(f" Tried: {profile_path}", file=sys.stderr)
sys.exit(1)
# Load the profile
print(f"📋 Loading profile: {profile_path.name}")
enabled_modules = set(load_profile(profile_path))
if args.list_modules:
print(f"\nModules to be enabled ({len(enabled_modules)}):")
for module in sorted(enabled_modules):
print(f"{module}")
return
print(f"✓ Found {len(enabled_modules)} modules in profile")
# Read template
template_path = Path(args.env_template)
template_lines = read_env_template(template_path)
# Apply profile
output_lines = apply_profile_to_env(template_lines, enabled_modules)
# Write output
env_path = Path(args.env_output)
write_env_file(env_path, output_lines)
print(f"✓ Profile '{profile_path.stem}' applied successfully")
print(f"\nEnabled modules:")
for module in sorted(enabled_modules)[:10]: # Show first 10
print(f"{module}")
if len(enabled_modules) > 10:
print(f" ... and {len(enabled_modules) - 10} more")
if __name__ == '__main__':
main()
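A hedged usage sketch for this helper: the on-disk filename is not shown in this diff, so the apply_module_profile.py name and scripts/python/ path below are assumptions; the positional argument and flags match the argparse definitions above.

python3 scripts/python/apply_module_profile.py RealmMaster --list-modules
python3 scripts/python/apply_module_profile.py config/module-profiles/RealmMaster.json --env-template .env.template --env-output .env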

setup.sh (1934 changed lines): diff suppressed because it is too large.