feat: add ac-db-guard watchdog, move MySQL data to the mysql-data named volume, and drop the module SQL ledger

uprightbass360
2025-11-20 02:11:24 -05:00
parent 9deff01441
commit 5f7bdcb7e7
25 changed files with 1502 additions and 777 deletions


@@ -45,10 +45,33 @@ DEFAULT_MOUNT_STORAGE_PATH=/mnt/azerothcore-data
# =====================
CONTAINER_DB_IMPORT=ac-db-import
CONTAINER_DB_INIT=ac-db-init
CONTAINER_DB_GUARD=ac-db-guard
CONTAINER_BACKUP=ac-backup
CONTAINER_MODULES=ac-modules
CONTAINER_POST_INSTALL=ac-post-install
# =====================
# Database Guard Defaults
# =====================
DB_GUARD_RECHECK_SECONDS=120
DB_GUARD_RETRY_SECONDS=10
DB_GUARD_WAIT_ATTEMPTS=60
DB_GUARD_HEALTH_MAX_AGE=180
DB_GUARD_HEALTHCHECK_INTERVAL=30s
DB_GUARD_HEALTHCHECK_TIMEOUT=10s
DB_GUARD_HEALTHCHECK_RETRIES=5
DB_GUARD_VERIFY_INTERVAL_SECONDS=86400
# =====================
# Module SQL staging
# =====================
MODULE_SQL_STAGE_PATH=${STORAGE_PATH_LOCAL}/module-sql-updates
# =====================
# SQL Source Overlay
# =====================
AC_SQL_SOURCE_PATH=${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql
# =====================
# Images
# =====================

.gitignore (vendored)

@@ -15,4 +15,6 @@ scripts/__pycache__/
.env
package-lock.json
package.json
todo.md
.gocache/
.module-ledger/

module SQL ledger (deleted; each line is <database-scope>|<module>|<base_filename>|<hash>)

@@ -1,86 +0,0 @@
db-auth|mod-system-vip|auth_vip|f781de4da4fba99242296181e22e28fc6b2e38e0
db-auth|mod-transmog|acore_cms_subscriptions|c804b0e88c9c91b742716b7a847a3d0ce0c3fb9d
db-characters|mod-arena-replay|replayarena_savedgames|3ca9e8cb2a0c6e088551972375cc0df629588822
db-characters|mod-arena-replay|replayarena|3d9bf3f52fe06662698c91263a98c3212ee21876
db-characters|mod-guildhouse|2024_04_07_guildhouse|5dca40fd889cdea761eaf4d0bb645e01ab867f01
db-characters|mod-keep-out|mko_map_exploit|552afe6b73d636027ff32f8bec5f8b19311e1c14
db-characters|mod-morphsummon|morphsummon_ddl|6517a1f6dcfcdba6751c522e2774d950130c22a4
db-characters|mod-npc-beastmaster|track_tamed_pets|1e1eced65e59444ab316663ea4bd570a86b64af1
db-characters|mod-npc-talent-template|npc_talent_template_data_1_80_pvp_s6|0a5a2348a61fc432dbece4a9af8ab8aadc3dbcbb
db-characters|mod-npc-talent-template|npc_talent_template_data_2_70_pve_t6|b25a265545ffc623813a7552d7fd12f54c0c295e
db-characters|mod-npc-talent-template|npc_talent_template|3974cff297e416f544df2165fc4e745768a463bf
db-characters|mod-ollama-chat|2025_03_30_personalities|78c8b634af1667b21766179b6ffbbc016fea2b45
db-characters|mod-ollama-chat|2025_05_30_personalities|57a17e0d383c394935727e8eccee3b3f78a982eb
db-characters|mod-ollama-chat|2025_05_31_personality_template|119148f5036b9ee47a2146e1e1b694497339ce81
db-characters|mod-ollama-chat|2025_06_14_chat_history|75d4ad09b0fefc42bbe710d67dcf489adffccbbe
db-characters|mod-ollama-chat|2025_07_24_sentiment_tracking|66f118bc1594ce4dda6e06589549fe04429bc28f
db-characters|mod-ollama-chat|2025_11_01_personality_manual_only|abbe4c501e58589f28c72476559bf6b6b8d200e4
db-characters|mod-player-bot-level-brackets|2025_07_31_bot_level_brackets_guild_tracker|a61d3f82a66d2c2b985af20d8b3adf0524514dd8
db-characters|mod-playerbots|playerbots_arena_team_names|b138b117bf7a9ad78dc6eb39e06a314684992d3d
db-characters|mod-playerbots|playerbots_guild_names|ffba9d76f83dcd66ee9432b60496b9ce36034b6f
db-characters|mod-playerbots|playerbots_names|3ab14f4cc46475193d3feb6f59173a4968be802b
db-characters|mod-premium|mod_character_premium|0ab728ae41aa7742c10355df323701cb4c34717a
db-characters|mod-reagent-bank|create_table|8a13d5973a5dbc5b5e3024ec4467ccc331e71736
db-characters|mod-resurrection-scroll|mod_ressurection_scroll|715d24ca46efd6327648bce4fd2a9492ffe33557
db-characters|mod-reward-played-time|reward_system|d04c8e1e3e053d480f0ddfd79d12437ba43c36ad
db-characters|mod-solocraft|mod_solo_craft|3f28a230d561df88d55e3255f35c9403fa4ab99a
db-characters|mod-transmog|trasmorg|3b229fd50da650ef50afdbb21fedfbb5a0e24f6d
db-characters|mod-war-effort|wareffort_setup|ac92fd409872e994f3fecd4cc25c8c48cb59e0b3
db-characters|mod-zone-difficulty|zone_difficulty_char_tables|2a39a74da6cf4cee9091d325778632e06b428a71
db-characters|mod-zone-difficulty|zone_difficulty_completion_logs|6fb609271e3d2b7b0c5be74350ddf634d348cdb2
db-world|mod-1v1-arena|1v1_Battlemaster|75a070d3795a99dd0c98416912ea3c479b9311af
db-world|mod-aoe-loot|aoe_loot_acore_string|f5c4cb3d0cb43edbd598171e5be859e3d659522e
db-world|mod-arac|arac|025553c939b88c841976f902c104b8626dd2ecb3
db-world|mod-arena-replay|ArenaReplayWorld|8506f875a4e4c3d8f64a7990240ecc17f65babd6
db-world|mod-assistant|mod_assistant|58c230a8242ea743e4f447e1fb3e2c9c1f846e6a
db-world|mod-global-chat|acworld.GlobalChat|609ade0af83a833e58d8982cdb4701c2c0f8ee9b
db-world|mod-guildhouse|2024_04_07_00_creatures_objects|bf3e65f2fc7fb83cc686cd7b9a41f8ba916c4f2d
db-world|mod-guildhouse|2024_04_07_01_guildhouse_spawns|22b77f976e9994b2bebd52e07bd54ffe31c646be
db-world|mod-guildhouse|2024_04_07_02_innkeeper|41aaa59701ef3fe6fc54d0d26e20e152bbf921db
db-world|mod-instance-reset|mod_instance_reset_2024_03_14|c77d478c8359e1bccb69c93c422b95882d8ce3f2
db-world|mod-item-level-up|mod_levelitem|7becc9bf5a63efdd7aad1065738164544be397e2
db-world|mod-keep-out|mko_map_lock|beab3dc731b7e4a9f34a5afdd0eeffb3f649f51c
db-world|mod-morphsummon|morphsummon|6649b89b7f7289fbb338109ede6104db03e4511d
db-world|mod-npc-beastmaster|beastmaster_tames_inserts|3a7ba9e972a3fefc119d4de900c6294242932096
db-world|mod-npc-beastmaster|beastmaster_tames|a2e40f6baa6d86e3fd03e2f4dbbad571a997090b
db-world|mod-npc-beastmaster|npc_beastmaster|c3ca84592e27d9a39daa978042b5b372c52a55a4
db-world|mod-npc-buffer|npc_buffer|8dd892be87417f5dad950be69332f80032b8310b
db-world|mod-npc-enchanter|npc_enchanter|ef7055ed79f0759e67ef2d9325d9f050f2ce1a04
db-world|mod-npc-free-professions|mod_npc_free_professions_01|64c7080c00b674b9a7d795027fcb9c49fea00d8e
db-world|mod-npc-talent-template|npc_talent_template_command|b69b04c4b51470c666e38303457674a94b95ffaa
db-world|mod-npc-talent-template|npc_talent_template_data|77b3352f090cec47d89283fd7d332bf416c920ae
db-world|mod-playerbots|charsections_dbc|1419fc755de287ead249f28260d415398deecea9
db-world|mod-playerbots|emotetextsound_dbc|da8d68f9171609f0a3b73991f05ebbd52ce99566
db-world|mod-playerbots|playerbots_rpg_races|886990a2470606b2f2e09a9c2c946c42befa44d6
db-world|mod-premium|2023_08_11_04|5f89f04dd43e7b7b0a22678a2f2b31f158317b35
db-world|mod-premium|mod_premium_item_9017|93c951753184881295ef64b46448d89eae600b52
db-world|mod-promotion-azerothcore|promotion_rewards_Azerothcore_creature|e39efa874725789c99c8e86b73ac5671f054ca5b
db-world|mod-promotion-azerothcore|text_npc|66996471e9e83f21123559acb9d5d62b61848900
db-world|mod-random-enchants|item_enchatment_random_tiers|7dfe329125822db82f815b10e4256c078833f088
db-world|mod-reagent-bank|reagent_bank_NPC|be563dc8d8e243c9f43d300e6274fadd4421e56d
db-world|mod-solocraft|mod_solo_craft|fc1555c2150d9f7a1ec1d70778db95f5da371cba
db-world|mod-system-vip|item_creatures_template|92141e12eb0d8da5bb00a572da12f1d7b0a679f1
db-world|mod-tic-tac-toe|tic_tac_toe_world|f4c1fa407de3e246303c02dee42a8e140612cdd9
db-world|mod-transmog|trasm_world_NPC|69f55bb4d9376471af4e02065b90db666b89e85e
db-world|mod-transmog|trasm_world_VendorItems|0846fd392ef0a7fd4cc28b8b90db73ed649a4138
db-world|mod-transmog|trasm_world_texts|20bafe51a2b0c4c3a305e4ee018184c33f7ebacf
db-world|mod-war-effort|quests|9dcd49ab44054db721d3b2b9a6876d1d3f6516fd
db-world|mod-war-effort|warevent|96d4cbb9624f4f05784182f4942706d7e9eca2b1
db-world|mod-weekend-xp|mod-weekend-xp-texts|3216d75b9b88a944d059c7c99c1ee01c3b4f4d5e
db-world|mod-worgoblin|worgoblin|9019ee82ebfe8feee9b80a58ca134f742de940f3
db-world|mod-zone-difficulty|zone_difficulty_disallowed_buffs|0d72a2e773c15839191f32aa4b80a4bb3f74735f
db-world|mod-zone-difficulty|zone_difficulty_info_content|628567f62e3ddba197a537a5506b386d69e5c852
db-world|mod-zone-difficulty|zone_difficulty_info|2b9737c50639ae1f3f006d69b83507c1979d9374
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_ai_cavernsoftime|2498ee172737b6f4452cf8edbb3a0c957b0002ea
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_ai_gruul_magtheridon|63221f8519419d2ffaf41ddd4876229afedbdbe8
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_ai|d5134847c312b0c4907715ebb58f8ff7704e3f3e
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_creatureoverrides|d245ce0ad3aae1bcfa84576d3f3932430de349e7
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_creatures|0b46ddc0acddd4faeb29e51bada7b53882a76d78
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_instance_data|db1cc3993e1393c33074ed3a20dbe2ce481f837e
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_rewards_hyjal|f5b24bd6478500482cb48edb0941cd9722c9c82e
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_rewards_za|5c05e73d2d93acba35daef37afb035c5c9bb78ea
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_rewards|f71d780bdd72758f51e0d155e89aba027448d903
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_swp_rewards|61c991edacb3fa53c069e6ecde76b3368882c482
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_t5_rewards|7d2cc8b2a2194f4908c4b54419b3394a162d5438
db-world|mod-zone-difficulty|zone_difficulty_mythicmode_t6_rewards|377c2adfc7d5ff9153f95cb7e400a89564407dbe
db-world|mod-zone-difficulty|zone_difficulty_spelloverrides|e1af6796f982c1c5c26a1b03962d2a76b40acf49


@@ -23,7 +23,7 @@ services:
      - /usr/local/bin/mysql-entrypoint.sh
    volumes:
      - ./scripts/bash/mysql-entrypoint.sh:/usr/local/bin/mysql-entrypoint.sh:ro
-     - ${STORAGE_PATH_LOCAL}/mysql-data:/var/lib/mysql-persistent
+     - mysql-data:/var/lib/mysql-persistent
      - ${BACKUP_PATH}:/backups
      - ${HOST_ZONEINFO_PATH}:/usr/share/zoneinfo:ro
      - ${MYSQL_CONFIG_DIR:-${STORAGE_PATH}/config/mysql/conf.d}:/etc/mysql/conf.d
@@ -65,7 +65,9 @@ services:
    volumes:
      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
      - ${STORAGE_PATH}/logs:/azerothcore/logs
-     - ${STORAGE_PATH_LOCAL}/mysql-data:/var/lib/mysql-persistent
+     - ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
+     - ${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
+     - mysql-data:/var/lib/mysql-persistent
      - ${STORAGE_PATH}/modules:/modules
      - ${BACKUP_PATH}:/backups
      - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
@@ -99,6 +101,71 @@ services:
        /tmp/db-import-conditional.sh
    restart: "no"
  ac-db-guard:
    profiles: ["db"]
    image: ${AC_DB_IMPORT_IMAGE}
    container_name: ${CONTAINER_DB_GUARD}
    user: "${CONTAINER_USER}"
    userns_mode: "keep-id"
    depends_on:
      ac-mysql:
        condition: service_healthy
      ac-storage-init:
        condition: service_completed_successfully
      ac-db-import:
        condition: service_completed_successfully
    networks:
      - azerothcore
    volumes:
      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
      - ${STORAGE_PATH}/logs:/azerothcore/logs
      - ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
      - ${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
      - mysql-data:/var/lib/mysql-persistent
      - ${STORAGE_PATH}/modules:/modules
      - ${BACKUP_PATH}:/backups
      - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
      - ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro
      - ./scripts/bash/db-guard.sh:/tmp/db-guard.sh:ro
    environment:
      AC_DATA_DIR: "/azerothcore/data"
      AC_LOGS_DIR: "/azerothcore/logs"
      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
      AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
      AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
      CONTAINER_MYSQL: ${CONTAINER_MYSQL}
      MYSQL_PORT: ${MYSQL_PORT}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      DB_AUTH_NAME: ${DB_AUTH_NAME}
      DB_WORLD_NAME: ${DB_WORLD_NAME}
      DB_CHARACTERS_NAME: ${DB_CHARACTERS_NAME}
      DB_PLAYERBOTS_NAME: ${DB_PLAYERBOTS_NAME}
      DB_GUARD_RECHECK_SECONDS: ${DB_GUARD_RECHECK_SECONDS}
      DB_GUARD_RETRY_SECONDS: ${DB_GUARD_RETRY_SECONDS}
      DB_GUARD_WAIT_ATTEMPTS: ${DB_GUARD_WAIT_ATTEMPTS}
    entrypoint:
      - /bin/bash
      - -c
      - |
        chmod +x /tmp/db-import-conditional.sh /tmp/restore-and-stage.sh 2>/dev/null || true
        exec /bin/bash /tmp/db-guard.sh
    restart: unless-stopped
    healthcheck:
      test:
        - "CMD"
        - "sh"
        - "-c"
        - >
          file=/tmp/db-guard.ready;
          [ -f "$${file}" ] || exit 1;
          now=$$(date +%s);
          mod=$$(stat -c %Y "$${file}" 2>/dev/null) || exit 1;
          [ $$(( now - mod )) -lt ${DB_GUARD_HEALTH_MAX_AGE} ] || exit 1
      interval: ${DB_GUARD_HEALTHCHECK_INTERVAL}
      timeout: ${DB_GUARD_HEALTHCHECK_TIMEOUT}
      retries: ${DB_GUARD_HEALTHCHECK_RETRIES}
  ac-db-init:
    profiles: ["db"]
    image: ${MYSQL_IMAGE}
@@ -108,7 +175,7 @@ services:
      ac-db-import:
        condition: service_completed_successfully
    volumes:
-     - ${STORAGE_PATH_LOCAL}/mysql-data:/var/lib/mysql-persistent
+     - mysql-data:/var/lib/mysql-persistent
      - ${BACKUP_PATH}:/backups
    networks:
      - azerothcore
@@ -237,7 +304,7 @@ services:
profiles: ["client-data", "client-data-bots"] profiles: ["client-data", "client-data-bots"]
image: ${ALPINE_IMAGE} image: ${ALPINE_IMAGE}
container_name: ac-volume-init container_name: ac-volume-init
user: "0:0" user: "${CONTAINER_USER}"
volumes: volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
- client-data-cache:/cache - client-data-cache:/cache
@@ -245,11 +312,16 @@ services:
      - sh
      - -c
      - |
-       mkdir -p /azerothcore/data
-       echo "🔧 Fixing Docker volume permissions..."
-       chown -R ${CONTAINER_USER} /azerothcore/data /cache
-       chmod -R 755 /azerothcore/data /cache
-       echo "✅ Docker volume permissions fixed"
+       mkdir -p /azerothcore/data /cache
+       if [ "$(id -u)" -eq 0 ]; then
+         echo "🔧 Normalizing client-data volume ownership..."
+         chown -R ${CONTAINER_USER} /azerothcore/data /cache
+         chmod -R 755 /azerothcore/data /cache
+         echo "✅ Docker volume permissions fixed"
+       else
+         echo "  Running as $(id -u):$(id -g); skipping ownership changes."
+       fi
+       echo "📦 Client data volumes ready"
    restart: "no"
    networks:
      - azerothcore
@@ -258,7 +330,7 @@ services:
profiles: ["db", "modules"] profiles: ["db", "modules"]
image: ${ALPINE_IMAGE} image: ${ALPINE_IMAGE}
container_name: ac-storage-init container_name: ac-storage-init
user: "0:0" user: "${CONTAINER_USER}"
volumes: volumes:
- ${STORAGE_PATH}:/storage-root - ${STORAGE_PATH}:/storage-root
- ${STORAGE_PATH_LOCAL}:/local-storage-root - ${STORAGE_PATH_LOCAL}:/local-storage-root
@@ -270,11 +342,15 @@ services:
        mkdir -p /storage-root/config /storage-root/logs /storage-root/modules /storage-root/lua_scripts /storage-root/install-markers
        mkdir -p /storage-root/config/mysql/conf.d
        mkdir -p /storage-root/client-data
-       mkdir -p /storage-root/backups /local-storage-root/mysql-data
+       mkdir -p /storage-root/backups
        # Fix ownership of root directories and all contents
-       chown -R ${CONTAINER_USER} /storage-root /local-storage-root
-       chmod -R 755 /storage-root /local-storage-root
-       echo "✅ Storage permissions initialized"
+       if [ "$(id -u)" -eq 0 ]; then
+         chown -R ${CONTAINER_USER} /storage-root /local-storage-root
+         chmod -R 755 /storage-root /local-storage-root
+         echo "✅ Storage permissions initialized"
+       else
+         echo "  Running as $(id -u):$(id -g); assuming host permissions are already correct."
+       fi
    restart: "no"
    networks:
      - azerothcore
@@ -333,18 +409,19 @@ services:
      - sh
      - -c
      - |
-       echo "📦 Installing 7z for faster extraction..."
-       apt-get update -qq && apt-get install -y p7zip-full
-       mkdir -p /cache
-       if [ -f /tmp/scripts/bash/download-client-data.sh ]; then
-         chmod +x /tmp/scripts/bash/download-client-data.sh 2>/dev/null || true
-         bash /tmp/scripts/bash/download-client-data.sh
-         echo "🔧 Fixing ownership of extracted files..."
-         chown -R ${CONTAINER_USER} /azerothcore/data
-         echo "✅ Client data extraction and ownership setup complete"
-       else
-         echo "No local client-data script"
-       fi
+       echo "📦 Installing 7z + gosu for client data extraction..."
+       apt-get update -qq && apt-get install -y p7zip-full gosu
+       gosu ${CONTAINER_USER} bash -c '
+         set -e
+         mkdir -p /cache
+         if [ -f /tmp/scripts/bash/download-client-data.sh ]; then
+           chmod +x /tmp/scripts/bash/download-client-data.sh 2>/dev/null || true
+           bash /tmp/scripts/bash/download-client-data.sh
+           echo "✅ Client data extraction completed under UID $(id -u)"
+         else
+           echo "No local client-data script"
+         fi
+       '
    restart: "no"
    networks:
      - azerothcore
@@ -450,8 +527,8 @@ services:
    depends_on:
      ac-mysql:
        condition: service_healthy
-     ac-db-import:
-       condition: service_completed_successfully
+     ac-db-guard:
+       condition: service_healthy
      ac-db-init:
        condition: service_completed_successfully
    environment:
@@ -486,8 +563,8 @@ services:
    depends_on:
      ac-mysql:
        condition: service_healthy
-     ac-db-import:
-       condition: service_completed_successfully
+     ac-db-guard:
+       condition: service_healthy
      ac-db-init:
        condition: service_completed_successfully
    environment:
@@ -525,6 +602,8 @@ services:
        condition: service_healthy
      ac-client-data-playerbots:
        condition: service_completed_successfully
      ac-db-guard:
        condition: service_healthy
    environment:
      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
      AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
@@ -578,6 +657,8 @@ services:
        condition: service_healthy
      ac-client-data-standard:
        condition: service_completed_successfully
      ac-db-guard:
        condition: service_healthy
    environment:
      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
      AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
@@ -648,11 +729,10 @@ services:
    command:
      - -c
      - |
-       apk add --no-cache curl bash git python3
-       (chmod +x /tmp/scripts/bash/manage-modules.sh /tmp/scripts/bash/manage-modules-sql.sh 2>/dev/null || true) && /tmp/scripts/bash/manage-modules.sh
-       # Fix permissions after module operations
-       chown -R ${CONTAINER_USER} /modules /azerothcore/env/dist/etc 2>/dev/null || true
-       chmod -R 755 /modules /azerothcore/env/dist/etc 2>/dev/null || true
+       apk add --no-cache curl bash git python3 su-exec
+       chmod +x /tmp/scripts/bash/manage-modules.sh /tmp/scripts/bash/manage-modules-sql.sh 2>/dev/null || true
+       echo "🔐 Running module manager as ${CONTAINER_USER}"
+       su-exec ${CONTAINER_USER} /bin/sh -c 'set -e; cd /modules && /tmp/scripts/bash/manage-modules.sh'
    restart: "no"
    networks:
      - azerothcore
@@ -697,14 +777,10 @@ services:
      - sh
      - -c
      - |
-       apk add --no-cache bash curl docker-cli
-       chown -R ${CONTAINER_USER} /azerothcore/config /install-markers 2>/dev/null || true
-       chmod -R 755 /azerothcore/config /install-markers 2>/dev/null || true
-       echo "📥 Running local auto-post-install script..."
-       (chmod +x /tmp/scripts/bash/auto-post-install.sh 2>/dev/null || true) && bash /tmp/scripts/bash/auto-post-install.sh
-       # Fix permissions for all files created during post-install
-       chown -R ${CONTAINER_USER} /azerothcore/config /install-markers 2>/dev/null || true
-       chmod -R 755 /azerothcore/config /install-markers 2>/dev/null || true
+       apk add --no-cache bash curl docker-cli su-exec
+       chmod +x /tmp/scripts/bash/auto-post-install.sh 2>/dev/null || true
+       echo "📥 Running post-install as ${CONTAINER_USER}"
+       su-exec ${CONTAINER_USER} bash /tmp/scripts/bash/auto-post-install.sh
    restart: "no"
    networks:
      - azerothcore
@@ -774,6 +850,8 @@ services:
volumes:
  client-data-cache:
    driver: local
  mysql-data:
    driver: local
networks:
  azerothcore:


@@ -122,11 +122,11 @@ flowchart TB
- **Worldserver debug logging**  Need extra verbosity temporarily? Flip `COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED=1` to include `compose-overrides/worldserver-debug-logging.yml`, which bumps `AC_LOG_LEVEL` across all worldserver profiles. Turn it back off once you're done to avoid noisy logs.
- **Binary logging toggle**  `MYSQL_DISABLE_BINLOG=1` appends `--skip-log-bin` via the MySQL wrapper entrypoint to keep disk churn low (and match Playerbot guidance). Flip the flag to `0` to re-enable binlogs for debugging or replication.
- **Drop-in configs**  Any `.cnf` placed in `${STORAGE_PATH}/config/mysql/conf.d` (exposed via `MYSQL_CONFIG_DIR`) is mounted into `/etc/mysql/conf.d`. Use this to add custom tunables or temporarily override the binlog setting without touching the image.
-- **Forcing a fresh database import**  The MySQL data volume (`local-storage/mysql-data`) tracks whether a restore/import completed via the sentinel file `.restore-completed`. The import workflow now double-checks the live MySQL runtime before trusting that sentinel, and automatically logs `Restoration marker found, but databases are empty - forcing re-import` (while deleting the stale marker) if it detects an empty tmpfs. Manual cleanup is only needed when you intentionally want to rerun the import; in that case delete the sentinel and run `docker compose run --rm ac-db-import` or the full `./scripts/bash/stage-modules.sh`. Leave the sentinel alone during normal operations so the import job doesn't wipe existing data on every start.
+- **Forcing a fresh database import**  MySQL's persistent files (and the `.restore-*` sentinels) now live inside the Docker volume `mysql-data` at `/var/lib/mysql-persistent`. The import workflow still double-checks the live runtime before trusting those markers, logging `Restoration marker found, but databases are empty - forcing re-import` if the tmpfs is empty. When you intentionally need to rerun the import, delete the sentinel with `docker run --rm -v mysql-data:/var/lib/mysql-persistent alpine sh -c 'rm -f /var/lib/mysql-persistent/.restore-completed'` and then execute `docker compose run --rm ac-db-import` or `./scripts/bash/stage-modules.sh`. Leave the sentinel alone during normal operations so the import job doesn't wipe existing data on every start.
-- **Module-driven SQL migration**  Module code is staged through the `ac-modules` service and `scripts/bash/manage-modules.sh`, while SQL payloads are copied into the running `ac-worldserver` container by `scripts/bash/stage-modules.sh`. The staging script maintains a ledger at `storage/modules/.modules-meta/module-sql-ledger.txt` (mirrored in the container) so identical SQL files aren't copied twice, and it prunes any staged update that's already recorded in the database `updates` table. If you ever need to force a re-stage, delete that ledger file and rerun the script. Always trigger module/deploy workflows via these scripts rather than copying repositories manually; this keeps C++ builds, Lua assets, and SQL migrations synchronized with the database state.
+- **Module-driven SQL migration**  Module code is staged through the `ac-modules` service and `scripts/bash/manage-modules.sh`, while SQL payloads are copied into the running `ac-worldserver` container by `scripts/bash/stage-modules.sh`. Every run clears `/azerothcore/data/sql/updates/{db_world,db_characters,db_auth}` and recopies all enabled module SQL files with deterministic names (see the sketch below), letting AzerothCore's built-in updater decide what to apply. Always trigger module/deploy workflows via these scripts rather than copying repositories manually; this keeps C++ builds, Lua assets, and SQL migrations synchronized with the database state.
### Restore-aware module SQL
-When a backup successfully restores, the `ac-db-import` container automatically executes `scripts/bash/restore-and-stage.sh`. The helper refreshes the module SQL ledger in shared storage (using the snapshot stored alongside the backup when available, or rebuilding it from the modules directory) and writes a `.restore-prestaged` marker so the next `./scripts/bash/stage-modules.sh` run knows to repopulate `/azerothcore/data/sql/updates/*` before the worldserver boots. The staging script now recopies every module SQL file with deterministic names, letting AzerothCore's built-in updater decide whether an individual script should run while leaving already-applied files in place so the server never complains about missing history. If the snapshot is missing (legacy backup) the helper simply rebuilds the ledger and still sets the flag, so the runtime staging pass behaves exactly the same.
+When a backup successfully restores, the `ac-db-import` container automatically executes `scripts/bash/restore-and-stage.sh`, which simply drops `storage/modules/.modules-meta/.restore-prestaged`. The next `./scripts/bash/stage-modules.sh --yes` clears any previously staged files and recopies every enabled module SQL file before the worldserver boots. AzerothCore's auto-updater then scans `/azerothcore/data/sql/updates/*`, applies any scripts that aren't recorded in the `updates` tables yet, and skips the rest—without ever complaining about missing history files.
## Compose Overrides
@@ -168,15 +168,16 @@ To tweak MySQL settings, place `.cnf` snippets in `storage/config/mysql/conf.d`.
**Local Storage** (`STORAGE_PATH_LOCAL` - default: `./local-storage`)
```
local-storage/
-├── mysql-data/                  # MySQL persistent data (tmpfs runtime + persistent snapshot)
├── client-data-cache/           # Downloaded WoW client data archives
├── source/                      # AzerothCore source repository (created during builds)
│   └── azerothcore-playerbots/  # Playerbot fork (when playerbots enabled)
└── images/                      # Exported Docker images for remote deployment
```
Local storage now only hosts build artifacts, cached downloads, and helper images; the database files have moved into a dedicated Docker volume.
-**Docker Volume**
+**Docker Volumes**
-- `client-data-cache` - Temporary storage for client data downloads
+- `client-data-cache`  Temporary storage for client data downloads
+- `mysql-data`  MySQL persistent data + `.restore-*` sentinels (`/var/lib/mysql-persistent`)
This separation ensures database and build artifacts stay on fast local storage while configuration, modules, and backups can be shared across hosts via NFS.
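Because the database files now live in a named volume instead of a host directory, inspection happens through the Docker CLI. Two standard commands (note that Compose may prefix the volume with your project name, e.g. `<project>_mysql-data`):

```bash
# Where does Docker keep the volume on this host?
docker volume inspect mysql-data --format '{{ .Mountpoint }}'

# Snapshot the volume contents to a tarball in the current directory (example path)
docker run --rm -v mysql-data:/var/lib/mysql-persistent -v "$PWD":/out alpine \
  tar czf /out/mysql-data-snapshot.tar.gz -C /var/lib/mysql-persistent .
```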


@@ -181,16 +181,20 @@ The system automatically detects and restores backups on first startup:
### Restore Safety Checks & Sentinels
-Because MySQL stores its hot data in a tmpfs (`/var/lib/mysql-runtime`) while persisting only backups and status markers under `local-storage/mysql-data`, it is possible for the runtime data to be wiped (for example, after a host reboot) while the sentinel `.restore-completed` file still claims the databases are ready. To prevent the worldserver and authserver from entering restart loops, the `ac-db-import` workflow now performs an explicit sanity check before trusting those markers:
+Because MySQL stores its hot data in a tmpfs (`/var/lib/mysql-runtime`) while persisting the durable files inside the Docker volume `mysql-data` (mounted at `/var/lib/mysql-persistent`), it is possible for the runtime data to be wiped (for example, after a host reboot) while the sentinel `.restore-completed` file still claims the databases are ready. To prevent the worldserver and authserver from entering restart loops, the `ac-db-import` workflow now performs an explicit sanity check before trusting those markers:
- The import script queries MySQL for the combined table count across `acore_auth`, `acore_world`, and `acore_characters`.
- If **any tables exist**, the script logs `Backup restoration completed successfully` and skips the expensive restore just as before.
- If **no tables are found or the query fails**, the script logs `Restoration marker found, but databases are empty - forcing re-import`, automatically clears the stale marker, and reruns the backup restore + `dbimport` pipeline so services always start with real data.
To complement that one-shot safety net, the long-running `ac-db-guard` service now watches the runtime tmpfs. It polls MySQL, and if it ever finds those schemas empty (the usual symptom after a daemon restart), it automatically reruns `db-import-conditional.sh` to rehydrate from the most recent backup before marking itself healthy. All auth/world services now depend on `ac-db-guard`'s health check, guaranteeing that AzerothCore never boots without real tables in memory. The guard also mounts the working SQL tree from `local-storage/source/azerothcore-playerbots/data/sql` into the db containers so that every `dbimport` run uses the exact SQL that matches your checked-out source, even if the Docker image was built earlier.
Because new features sometimes require schema changes even when the databases already contain data, `ac-db-guard` now performs a `dbimport` verification sweep (configurable via `DB_GUARD_VERIFY_INTERVAL_SECONDS`) to proactively apply any outstanding updates from the mounted SQL tree. By default it runs once per bootstrap and then every 24 hours, so the auth/world servers always see the columns/tables expected by their binaries without anyone having to run host scripts manually.
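The emptiness probe both the importer and the guard rely on reduces to one `information_schema` query; a hand-runnable equivalent (the inline password and client flags are shown for illustration):

```bash
docker exec ac-mysql mysql -uroot -p"${MYSQL_ROOT_PASSWORD}" -N -B -e \
  "SELECT COUNT(*) FROM information_schema.tables \
   WHERE table_schema IN ('acore_auth', 'acore_world', 'acore_characters');"
# A result of 0 is what makes the guard rerun db-import-conditional.sh
```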
Manual intervention is only required if you intentionally want to force a fresh import despite having data. In that scenario:

1. Stop the stack: `docker compose down`
-2. Delete the sentinel: `rm -f local-storage/mysql-data/.restore-completed`
+2. Delete the sentinel inside the volume: `docker run --rm -v mysql-data:/var/lib/mysql-persistent alpine sh -c 'rm -f /var/lib/mysql-persistent/.restore-completed'`
3. Run `docker compose run --rm ac-db-import`

See [docs/ADVANCED.md#database-hardening](ADVANCED.md#database-hardening) for more background on the tmpfs/persistent split and why the sentinel exists, and review [docs/TROUBLESHOOTING.md](TROUBLESHOOTING.md#database-connection-issues) for quick steps when the automation logs the warning above.
@@ -412,26 +416,13 @@ SOURCE /path/to/your/file.sql;
docker exec -i ac-mysql mysql -uroot -pPASSWORD acore_world < yourfile.sql
```
-### Module SQL Ledger & Deduplication
+### Module SQL Staging

-`./scripts/bash/stage-modules.sh` now keeps a lightweight ledger at `storage/modules/.modules-meta/module-sql-ledger.txt` (also mounted inside containers at `/azerothcore/modules/.modules-meta/module-sql-ledger.txt`). Each staged SQL file is recorded as:
-```
-<database-scope>|<module>|<base_filename>|<hash>
-```
-When the script runs again it hashes every module SQL file and skips any entry whose `(db, module, filename)` already matches with the same hash. This prevents re-copying identical SQL after a backup restore and stops worldserver from reapplying inserts that already exist in the database. If a database restore is detected (`local-storage/mysql-data/.restore-completed` changed), the ledger is automatically reset so every module SQL file is recopied exactly once. The ledger is automatically updated anytime a file changes so only the modified SQL is restaged.
-The stage script also cross-checks MySQL's `updates` table before copying files and prunes any staged file whose identifier already exists there. That means even if a file gets stuck in `/azerothcore/data/sql/updates/<db>` (e.g., after an interrupted run), it is removed before worldserver starts if the database already recorded it.
+`./scripts/bash/stage-modules.sh` recopies every enabled module SQL file into `/azerothcore/data/sql/updates/{db_world,db_characters,db_auth}` each time it runs. Files are named deterministically (`MODULE_mod-name_file.sql`) and left on disk permanently. AzerothCore's auto-updater consults the `updates` tables to decide whether a script needs to run; if it already ran, the entry in `updates` prevents a reapply, but leaving the file in place avoids “missing history” warnings and provides a clear audit trail.
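To check what the updater has already recorded (and will therefore skip), you can query the `updates` table directly. A sketch that assumes the stock AzerothCore `updates` columns (`name`, `state`, `timestamp`):

```bash
docker exec -it ac-mysql mysql -uroot -p \
  -e "SELECT name, state, timestamp FROM acore_world.updates \
      WHERE name LIKE 'MODULE_%' ORDER BY timestamp DESC;"
```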
### Restore-Time SQL Reconciliation
-During a backup restore the `ac-db-import` service now runs `scripts/bash/restore-and-stage.sh`, which consolidates the old restore workflow with module SQL staging. Every backup created by the scheduler now includes a snapshot of the module ledger at `module-sql-ledger.txt` (for example `storage/backups/hourly/20250101_120000/module-sql-ledger.txt`). The restore script:
-- Refreshes `storage/modules/.modules-meta/module-sql-ledger.txt` using the snapshot bundled with the backup (or rebuilds it from the modules directory if the snapshot is missing).
-- Writes `storage/modules/.modules-meta/.restore-prestaged` to signal that the next `./scripts/bash/stage-modules.sh` run must repopulate `/azerothcore/data/sql/updates/*` before worldserver comes online.
-The staging script now recopies every module SQL file—regardless of whether it has already been applied—using deterministic names like `MODULE_mod-npc-buffer_npc_buffer.sql`. AzerothCore's built-in updater consults the `updates` tables to decide what should actually run, so already-applied files remain on disk purely to keep history intact and avoid “file missing” warnings. If a legacy backup doesn't contain the ledger snapshot the helper simply rebuilds it and still sets the flag, so the runtime staging pass behaves the same. Run `rm -f storage/modules/.modules-meta/module-sql-ledger.txt` and rerun `./scripts/bash/stage-modules.sh --yes` if you intentionally need to reseed the ledger from scratch.
+During a backup restore the `ac-db-import` service now runs `scripts/bash/restore-and-stage.sh`, which simply drops `storage/modules/.modules-meta/.restore-prestaged`. On the next `./scripts/bash/stage-modules.sh --yes`, the script sees the flag, clears any previously staged files, and recopies every enabled SQL file before worldserver boots. Because the files are always present, AzerothCore's updater has the complete history it needs to apply or skip scripts correctly—no hash/ledger bookkeeping required.
This marker-driven workflow means restoring a new backup automatically replays any newly added module SQL while avoiding duplicate inserts for modules that were already present. See **[docs/ADVANCED.md](ADVANCED.md)** for a deeper look at the marker workflow and container responsibilities.
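The handshake itself is tiny; a minimal sketch of both sides, using the file names from the docs above (the surrounding logic is assumed, not copied from the scripts):

```bash
# restore-and-stage.sh side: record that a restore just happened
touch storage/modules/.modules-meta/.restore-prestaged

# stage-modules.sh side: a restore flag forces a clean restage
meta=storage/modules/.modules-meta
if [ -f "$meta/.restore-prestaged" ]; then
  rm -f /azerothcore/data/sql/updates/db_world/MODULE_*.sql \
        /azerothcore/data/sql/updates/db_characters/MODULE_*.sql \
        /azerothcore/data/sql/updates/db_auth/MODULE_*.sql
  rm -f "$meta/.restore-prestaged"
fi
# ...then recopy every enabled module SQL file as usual
```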
@@ -440,16 +431,12 @@ This snapshot-driven workflow means restoring a new backup automatically replays
If you intentionally need to reapply all module SQL (for example after manually cleaning tables):

1. Stop services: `docker compose down`
-2. Remove the SQL ledger so the next run rehashes everything:
-   ```bash
-   rm -f storage/modules/.modules-meta/module-sql-ledger.txt
-   ```
-3. (Optional) Drop the relevant records from the `updates` table if you want AzerothCore to rerun them, e.g.:
+2. (Optional) Drop the relevant records from the `updates` table if you want AzerothCore to rerun them, e.g.:
   ```bash
   docker exec -it ac-mysql mysql -uroot -p \
     -e "DELETE FROM acore_characters.updates WHERE name LIKE '%MODULE_mod-ollama-chat%';"
   ```
-4. Run `./scripts/bash/stage-modules.sh --yes`
+3. Run `./scripts/bash/stage-modules.sh --yes`

Only perform step 2 if you understand the impact—deleting entries causes worldserver to execute those SQL scripts again on next startup.


@@ -52,8 +52,8 @@ docker exec ac-mysql mysql -u root -p -e "SELECT 1;"
# Forcing a fresh import (if schema missing/invalid)
# 1. Stop the stack
docker compose down

-# 2. Remove the sentinel created after a successful restore
-sudo rm -f local-storage/mysql-data/.restore-completed
+# 2. Remove the sentinel created after a successful restore (inside the docker volume)
+docker run --rm -v mysql-data:/var/lib/mysql-persistent alpine sh -c 'rm -f /var/lib/mysql-persistent/.restore-completed'

# 3. Re-run the import pipeline (either stand-alone or via stage-modules)
docker compose run --rm ac-db-import
# or
@@ -61,6 +61,16 @@ docker compose run --rm ac-db-import
#
# See docs/ADVANCED.md#database-hardening for details on the sentinel workflow and why it's required.
**Permission denied writing to local-storage or storage**
```bash
# Reset ownership/permissions on the shared directories
./scripts/bash/repair-storage-permissions.sh
```
> This script reuses the same helper container as the staging workflow to `chown`
> `storage/`, `local-storage/`, and module metadata paths back to the current
> host UID/GID so tools like `scripts/python/modules.py` can regenerate
> `modules.env` without manual intervention.
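For reference, the repair is roughly equivalent to the following one-liner (an approximation; the actual script may use different paths and flags):

```bash
# Hand back ownership of the shared trees to the invoking host user
docker run --rm -v "$PWD/storage":/storage -v "$PWD/local-storage":/local-storage \
  alpine chown -R "$(id -u):$(id -g)" /storage /local-storage
```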
# Check database initialization
docker logs ac-db-init
docker logs ac-db-import
@@ -77,31 +87,18 @@ docker logs ac-worldserver
# 2. Remove the staged SQL file that keeps replaying:
docker exec ac-worldserver rm /azerothcore/data/sql/updates/<db>/<filename>.sql

-# 3. (Optional) Clean the module SQL ledger so staging rehashes everything
-rm -f storage/modules/.modules-meta/module-sql-ledger.txt
-# 4. Re-run the staging workflow
+# 3. Re-run the staging workflow
./scripts/bash/stage-modules.sh --yes

-# 5. Restart the worldserver container
+# 4. Restart the worldserver container
docker compose restart ac-worldserver-playerbots  # or the profile you use

-# See docs/DATABASE_MANAGEMENT.md#module-sql-management for details on the ledger
-# and docs/ADVANCED.md#restore-aware-module-sql for the import workflow.
+# See docs/DATABASE_MANAGEMENT.md#module-sql-management for details on the workflow.
```
**Legacy backup missing module SQL snapshot**
-New backups include `module-sql-ledger.txt` which lets `ac-db-import` automatically restage only the SQL that didn't ship with the backup. If you restored an older backup you'll see `No module SQL snapshot found ...` in the import logs and no extra SQL will be staged. That's intentional to avoid duplicate inserts.
-1. Decide if you really need to restage modules (for example you know new modules were added after the backup was taken).
-2. Remove the host ledger so the next run copies every SQL file:
-   ```bash
-   rm -f storage/modules/.modules-meta/module-sql-ledger.txt
-   ```
-3. Rerun `./scripts/bash/stage-modules.sh --yes` to restage and restart the stack.
-After you take a new backup the snapshot will exist and future restores won't need this manual step.
+Legacy backups behave the same as new ones now—just rerun `./scripts/bash/stage-modules.sh --yes` after a restore and the updater will apply whatever the database still needs.
**Source rebuild issues**
```bash


@@ -44,7 +44,7 @@ services:
    image: ${MYSQL_IMAGE}
    container_name: ac-mysql
    volumes:
-     - ${STORAGE_PATH_LOCAL}/mysql-data:/var/lib/mysql-persistent
+     - mysql-data:/var/lib/mysql-persistent
      - ${HOST_ZONEINFO_PATH}:/usr/share/zoneinfo:ro
    command:
      - mysqld
@@ -65,6 +65,7 @@ services:
    volumes:
      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
      - ${STORAGE_PATH}/logs:/azerothcore/logs
      - mysql-data:/var/lib/mysql-persistent
```
> **Tip:** Need custom bind mounts for DBC overrides like in the upstream doc? Add them to `${STORAGE_PATH}/client-data` or mount extra read-only paths under the `ac-worldserver-*` service. RealmMaster already downloads `data.zip` via `ac-client-data-*` containers, so you can drop additional files beside the cached dataset.
@@ -82,6 +83,23 @@ services:
For a full architecture diagram, cross-reference [README → Architecture Overview](../README.md#architecture-overview).
### Storage / Bind Mount Map
| Host Path | Mounted In | Purpose / Notes |
|-----------|------------|-----------------|
| `${STORAGE_PATH}/config` | `ac-authserver-*`, `ac-worldserver-*`, `ac-db-import`, `ac-db-guard`, `ac-post-install` | Holds `authserver.conf`, `worldserver.conf`, `dbimport.conf`, and module configs. Generated from the `.dist` templates during `setup.sh` / `auto-post-install.sh`. |
| `${STORAGE_PATH}/logs` | `ac-worldserver-*`, `ac-authserver-*`, `ac-db-import`, `ac-db-guard` | Persistent server logs (mirrors upstream `logs/` bind mount). |
| `${STORAGE_PATH}/modules` | `ac-worldserver-*`, `ac-db-import`, `ac-db-guard`, `ac-modules` | Cloned module repositories live here. `ac-modules` / `stage-modules.sh` sync this tree. |
| `${STORAGE_PATH}/lua_scripts` | `ac-worldserver-*` | Custom Lua scripts (same structure as upstream `lua_scripts`). |
| `${STORAGE_PATH}/backups` | `ac-db-import`, `ac-backup`, `ac-mysql` (via `mysql-data` volume) | Automatic hourly/daily SQL dumps. `ac-db-import` restores from here on cold start. |
| `${STORAGE_PATH}/client-data` | `ac-client-data-*`, `ac-worldserver-*`, `ac-authserver-*` | Cached `Data.zip` plus optional DBC/maps/vmaps overrides. Equivalent to mounting `data` in the original instructions. |
| `${STORAGE_PATH}/module-sql-updates` *(host literal path only used when you override the default)* | *(legacy, see below)* | Prior to this update, this path stayed under `storage/`. It now defaults to `${STORAGE_PATH_LOCAL}/module-sql-updates` so it can sit on a writable share even if `storage/` is NFS read-only. |
| `${STORAGE_PATH_LOCAL}/module-sql-updates` | `ac-db-import`, `ac-db-guard` (mounted as `/modules-sql`) | **New:** `stage-modules.sh` copies every staged `MODULE_*.sql` into this directory. The guard and importer copy from `/modules-sql` into `/azerothcore/data/sql/updates/*` before running `dbimport`, so historical module SQL is preserved across container rebuilds. |
| `${STORAGE_PATH_LOCAL}/client-data-cache` | `ac-client-data-*` | Download cache for `Data.zip`. Keeps the upstream client-data instructions intact. |
| `${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql` | `ac-db-import`, `ac-db-guard` | Mounted read-only so dbimport always sees the checked-out SQL (matches the upstream “mount the source tree” advice). |
| `mysql-data` (named volume) | `ac-mysql`, `ac-db-import`, `ac-db-init`, `ac-backup` | Stores the persistent InnoDB files. Runtime tmpfs lives inside the container, just like the original guides “tmpfs + bind mount” pattern. |
> Hosting storage over NFS/SMB? Point `STORAGE_PATH` at your read-only export and keep `STORAGE_PATH_LOCAL` on a writable tier for caches (`client-data-cache`, `module-sql-updates`, etc.). `stage-modules.sh` and `repair-storage-permissions.sh` respect those split paths.
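For that split, the relevant `.env` entries might look like this (the mount points are examples, not shipped defaults):

```bash
# Shared tier: NFS export, may be mounted read-only on workers
STORAGE_PATH=/mnt/nfs/azerothcore
# Writable local tier for caches, module SQL staging, and build artifacts
STORAGE_PATH_LOCAL=/var/lib/azerothcore-local
MODULE_SQL_STAGE_PATH=${STORAGE_PATH_LOCAL}/module-sql-updates
```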
## Familiar Workflow Using RealmMaster Commands


@@ -100,7 +100,14 @@ else
        # Skip core config files (already handled)
        case "$filename" in
-         authserver.conf|worldserver.conf|dbimport.conf)
+         authserver.conf|worldserver.conf)
+           continue
+           ;;
+         dbimport.conf)
+           if [ ! -f "$conffile" ] || grep -q "Updates.ExceptionShutdownDelay" "$conffile"; then
+             echo "  📝 Creating/refreshing $filename from $(basename "$file")"
+             cp "$file" "$conffile"
+           fi
            continue
            ;;
        esac
@@ -140,6 +147,14 @@ else
sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/worldserver.conf || true sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/worldserver.conf || true
sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/worldserver.conf || true sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/worldserver.conf || true
sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/worldserver.conf || true sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/worldserver.conf || true
if [ -f "/azerothcore/config/dbimport.conf" ]; then
sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^PlayerbotsDatabaseInfo *=.*|PlayerbotsDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_PLAYERBOTS_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^MySQLExecutable *=.*|MySQLExecutable = \"/usr/bin/mysql\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^TempDir *=.*|TempDir = \"/azerothcore/env/dist/temp\"|" /azerothcore/config/dbimport.conf || true
fi
update_playerbots_conf /azerothcore/config/modules/playerbots.conf
update_playerbots_conf /azerothcore/config/modules/playerbots.conf.dist


@@ -165,14 +165,6 @@ EOF
EOF
    fi

-   # Capture module SQL ledger snapshot if available
-   local ledger_src="/modules-meta/module-sql-ledger.txt"
-   if [ -f "$ledger_src" ]; then
-     cp "$ledger_src" "$target_dir/module-sql-ledger.txt"
-   else
-     log "  Module SQL ledger not found (modules/meta missing); snapshot not included in this backup"
-   fi

    # Create completion marker to indicate backup is finished
    touch "$target_dir/.backup_complete"

scripts/bash/db-guard.sh (new file, 178 lines)

@@ -0,0 +1,178 @@
#!/bin/bash
# Continuously ensure the MySQL runtime tmpfs contains the restored data.
# If the runtime tables are missing (for example after a host reboot),
# automatically rerun db-import-conditional to hydrate from backups.
set -euo pipefail
log(){ echo "🛡️ [db-guard] $*"; }
warn(){ echo "⚠️ [db-guard] $*" >&2; }
err(){ echo "❌ [db-guard] $*" >&2; }
MYSQL_HOST="${CONTAINER_MYSQL:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_PASS="${MYSQL_ROOT_PASSWORD:-root}"
IMPORT_SCRIPT="${DB_GUARD_IMPORT_SCRIPT:-/tmp/db-import-conditional.sh}"
RECHECK_SECONDS="${DB_GUARD_RECHECK_SECONDS:-120}"
RETRY_SECONDS="${DB_GUARD_RETRY_SECONDS:-10}"
WAIT_ATTEMPTS="${DB_GUARD_WAIT_ATTEMPTS:-60}"
VERIFY_INTERVAL="${DB_GUARD_VERIFY_INTERVAL_SECONDS:-0}"
VERIFY_FILE="${DB_GUARD_VERIFY_FILE:-/tmp/db-guard.last-verify}"
HEALTH_FILE="${DB_GUARD_HEALTH_FILE:-/tmp/db-guard.ready}"
STATUS_FILE="${DB_GUARD_STATUS_FILE:-/tmp/db-guard.status}"
ERROR_FILE="${DB_GUARD_ERROR_FILE:-/tmp/db-guard.error}"
MODULE_SQL_HOST_PATH="${MODULE_SQL_HOST_PATH:-/modules-sql}"
declare -a DB_SCHEMAS=()
for var in DB_AUTH_NAME DB_WORLD_NAME DB_CHARACTERS_NAME DB_PLAYERBOTS_NAME; do
value="${!var:-}"
if [ -n "$value" ]; then
DB_SCHEMAS+=("$value")
fi
done
if [ -n "${DB_GUARD_EXTRA_DATABASES:-}" ]; then
IFS=',' read -ra extra <<< "${DB_GUARD_EXTRA_DATABASES}"
for db in "${extra[@]}"; do
if [ -n "${db// }" ]; then
DB_SCHEMAS+=("${db// }")
fi
done
fi
if [ "${#DB_SCHEMAS[@]}" -eq 0 ]; then
DB_SCHEMAS=(acore_auth acore_world acore_characters)
fi
SCHEMA_LIST_SQL="$(printf "'%s'," "${DB_SCHEMAS[@]}")"
SCHEMA_LIST_SQL="${SCHEMA_LIST_SQL%,}"
mark_ready(){
mkdir -p "$(dirname "$HEALTH_FILE")" 2>/dev/null || true
printf '%s\t%s\n' "$(date -Iseconds)" "$*" | tee "$STATUS_FILE" >/dev/null
: > "$ERROR_FILE"
printf '%s\n' "$*" > "$HEALTH_FILE"
}
mark_unhealthy(){
printf '%s\t%s\n' "$(date -Iseconds)" "$*" | tee "$ERROR_FILE" >&2
rm -f "$HEALTH_FILE" 2>/dev/null || true
}
wait_for_mysql(){
local attempts="$WAIT_ATTEMPTS"
while [ "$attempts" -gt 0 ]; do
if MYSQL_PWD="$MYSQL_PASS" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -e "SELECT 1" >/dev/null 2>&1; then
return 0
fi
attempts=$((attempts - 1))
sleep "$RETRY_SECONDS"
done
return 1
}
table_count(){
local query="SELECT COUNT(*) FROM information_schema.tables WHERE table_schema IN (${SCHEMA_LIST_SQL});"
MYSQL_PWD="$MYSQL_PASS" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -N -B -e "$query"
}
rehydrate(){
if [ ! -x "$IMPORT_SCRIPT" ]; then
err "Import script not found at ${IMPORT_SCRIPT}"
return 1
fi
"$IMPORT_SCRIPT"
}
ensure_dbimport_conf(){
local conf="/azerothcore/env/dist/etc/dbimport.conf"
local dist="${conf}.dist"
if [ ! -f "$conf" ] && [ -f "$dist" ]; then
cp "$dist" "$conf"
fi
mkdir -p /azerothcore/env/dist/temp
}
sync_host_stage_files(){
local host_root="${MODULE_SQL_HOST_PATH}"
[ -d "$host_root" ] || return 0
for dir in db_world db_characters db_auth db_playerbots; do
local src="$host_root/$dir"
local dest="/azerothcore/data/sql/updates/$dir"
mkdir -p "$dest"
rm -f "$dest"/MODULE_*.sql >/dev/null 2>&1 || true
if [ -d "$src" ]; then
cp -a "$src"/MODULE_*.sql "$dest"/ >/dev/null 2>&1 || true
fi
done
}
dbimport_verify(){
local bin_dir="/azerothcore/env/dist/bin"
ensure_dbimport_conf
sync_host_stage_files
if [ ! -x "${bin_dir}/dbimport" ]; then
warn "dbimport binary not found at ${bin_dir}/dbimport"
return 1
fi
log "Running dbimport verification sweep..."
if (cd "$bin_dir" && ./dbimport); then
log "dbimport verification finished successfully"
return 0
fi
warn "dbimport verification reported issues - review dbimport logs"
return 1
}
maybe_run_verification(){
if [ "${VERIFY_INTERVAL}" -lt 0 ]; then
return 0
fi
local now last_run=0
now="$(date +%s)"
if [ -f "$VERIFY_FILE" ]; then
last_run="$(cat "$VERIFY_FILE" 2>/dev/null || echo 0)"
if [ "$VERIFY_INTERVAL" -eq 0 ]; then
return 0
fi
if [ $((now - last_run)) -lt "${VERIFY_INTERVAL}" ]; then
return 0
fi
fi
if dbimport_verify; then
echo "$now" > "$VERIFY_FILE"
else
warn "dbimport verification failed; will retry in ${VERIFY_INTERVAL}s"
fi
}
log "Watching MySQL (${MYSQL_HOST}:${MYSQL_PORT}) for ${#DB_SCHEMAS[@]} schemas: ${DB_SCHEMAS[*]}"
while true; do
if ! wait_for_mysql; then
mark_unhealthy "MySQL is unreachable after ${WAIT_ATTEMPTS} attempts"
sleep "$RETRY_SECONDS"
continue
fi
count="$(table_count 2>/dev/null || echo "")"
if [ -n "$count" ]; then
if [ "$count" -gt 0 ] 2>/dev/null; then
mark_ready "Detected ${count} tables across tracked schemas"
maybe_run_verification
sleep "$RECHECK_SECONDS"
continue
fi
fi
warn "No tables detected across ${DB_SCHEMAS[*]}; running rehydrate workflow..."
if rehydrate; then
log "Rehydrate complete - rechecking tables"
sleep 5
continue
fi
mark_unhealthy "Rehydrate workflow failed - retrying in ${RETRY_SECONDS}s"
sleep "$RETRY_SECONDS"
done
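The guard only writes and removes marker files; a minimal sketch of a healthcheck command that could consume them (the /tmp path here is an assumption, the real HEALTH_FILE location is set earlier in this script):
# Hypothetical healthcheck: healthy only when the marker exists and is under 3 minutes old.
HEALTH_FILE="${HEALTH_FILE:-/tmp/db-guard.healthy}"
test -f "$HEALTH_FILE" && test -n "$(find "$HEALTH_FILE" -mmin -3 2>/dev/null)"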


@@ -63,6 +63,33 @@ verify_databases_populated() {
return 1
}
wait_for_mysql(){
local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
local mysql_port="${MYSQL_PORT:-3306}"
local mysql_user="${MYSQL_USER:-root}"
local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
local max_attempts=30
local delay=2
while [ $max_attempts -gt 0 ]; do
if MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -e "SELECT 1" >/dev/null 2>&1; then
return 0
fi
max_attempts=$((max_attempts - 1))
sleep "$delay"
done
echo "❌ Unable to connect to MySQL at ${mysql_host}:${mysql_port} after multiple attempts"
return 1
}
ensure_dbimport_conf(){
local conf="/azerothcore/env/dist/etc/dbimport.conf"
local dist="${conf}.dist"
if [ ! -f "$conf" ] && [ -f "$dist" ]; then
cp "$dist" "$conf"
fi
mkdir -p /azerothcore/env/dist/temp
}
case "${1:-}" in case "${1:-}" in
-h|--help) -h|--help)
print_help print_help
@@ -79,6 +106,11 @@ esac
echo "🔧 Conditional AzerothCore Database Import" echo "🔧 Conditional AzerothCore Database Import"
echo "========================================" echo "========================================"
if ! wait_for_mysql; then
echo "❌ MySQL service is unavailable; aborting database import"
exit 1
fi
# Restoration status markers - use writable location
RESTORE_STATUS_DIR="/var/lib/mysql-persistent"
MARKER_STATUS_DIR="/tmp"
@@ -325,24 +357,7 @@ if [ -n "$backup_path" ]; then
return 0
fi
# Create dbimport config for verification
echo "📝 Creating dbimport configuration for verification..."
mkdir -p /azerothcore/env/dist/etc
TEMP_DIR="/azerothcore/env/dist/temp"
mkdir -p "$TEMP_DIR"
MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
Updates.EnableDatabases = 15
Updates.AutoSetup = 1
TempDir = "${TEMP_DIR}"
MySQLExecutable = "${MYSQL_EXECUTABLE}"
Updates.AllowedModules = "all"
SourceDirectory = "/azerothcore"
EOF
ensure_dbimport_conf
cd /azerothcore/env/dist/bin
echo "🔄 Running dbimport to apply any missing updates..."
@@ -416,30 +431,7 @@ CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COL
SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; } SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
echo "✅ Fresh databases created - proceeding with schema import" echo "✅ Fresh databases created - proceeding with schema import"
echo "📝 Creating dbimport configuration..." ensure_dbimport_conf
mkdir -p /azerothcore/env/dist/etc
TEMP_DIR="/azerothcore/env/dist/temp"
mkdir -p "$TEMP_DIR"
MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
Updates.EnableDatabases = 15
Updates.AutoSetup = 1
TempDir = "${TEMP_DIR}"
MySQLExecutable = "${MYSQL_EXECUTABLE}"
Updates.AllowedModules = "all"
LoginDatabase.WorkerThreads = 1
LoginDatabase.SynchThreads = 1
WorldDatabase.WorkerThreads = 1
WorldDatabase.SynchThreads = 1
CharacterDatabase.WorkerThreads = 1
CharacterDatabase.SynchThreads = 1
SourceDirectory = "/azerothcore"
Updates.ExceptionShutdownDelay = 10000
EOF
echo "🚀 Running database import..." echo "🚀 Running database import..."
cd /azerothcore/env/dist/bin cd /azerothcore/env/dist/bin
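Should the copied dbimport.conf.dist ever need host-specific overrides, values can be appended after ensure_dbimport_conf runs; a sketch (key names come from the inline template removed above, the connection values are placeholders):
cat >> /azerothcore/env/dist/etc/dbimport.conf <<'EOF'
LoginDatabaseInfo = "ac-mysql;3306;root;changeme;acore_auth"
Updates.EnableDatabases = 15
EOF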


@@ -21,6 +21,8 @@ fi
STORAGE_PATH="${STORAGE_PATH:-./storage}" STORAGE_PATH="${STORAGE_PATH:-./storage}"
STORAGE_PATH_LOCAL="${STORAGE_PATH_LOCAL:-./local-storage}" STORAGE_PATH_LOCAL="${STORAGE_PATH_LOCAL:-./local-storage}"
BACKUP_ROOT="${STORAGE_PATH}/backups" BACKUP_ROOT="${STORAGE_PATH}/backups"
MYSQL_DATA_VOLUME_NAME="${MYSQL_DATA_VOLUME_NAME:-mysql-data}"
ALPINE_IMAGE="${ALPINE_IMAGE:-alpine:latest}"
shopt -s nullglob
sql_files=("$IMPORT_DIR"/*.sql "$IMPORT_DIR"/*.sql.gz)
@@ -32,7 +34,25 @@ if [ ! -d "$IMPORT_DIR" ] || [ ${#sql_files[@]} -eq 0 ]; then
fi
# Exit if backup system already has databases restored
if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
has_restore_marker(){
# Prefer Docker volume marker (post-migration), fall back to legacy host path
if command -v docker >/dev/null 2>&1; then
if docker volume inspect "$MYSQL_DATA_VOLUME_NAME" >/dev/null 2>&1; then
if docker run --rm \
-v "${MYSQL_DATA_VOLUME_NAME}:/var/lib/mysql-persistent" \
"$ALPINE_IMAGE" \
sh -c 'test -f /var/lib/mysql-persistent/.restore-completed' >/dev/null 2>&1; then
return 0
fi
fi
fi
if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
return 0
fi
return 1
}
if has_restore_marker; then
echo "✅ Database already restored - skipping import" echo "✅ Database already restored - skipping import"
exit 0 exit 0
fi fi
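The same marker probe can be run by hand to see which path the script will take (volume and image names mirror the defaults above):
docker run --rm -v mysql-data:/var/lib/mysql-persistent alpine:latest \
sh -c 'test -f /var/lib/mysql-persistent/.restore-completed' \
&& echo "restore marker present" || echo "no restore marker"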


@@ -75,13 +75,6 @@ for db in "${dbs[@]}"; do
echo "[manual] ✅ ${db}" echo "[manual] ✅ ${db}"
done done
ledger_src="/modules-meta/module-sql-ledger.txt"
if [ -f "${ledger_src}" ]; then
cp "${ledger_src}" "${TARGET_DIR}/module-sql-ledger.txt"
else
echo "[manual] Module SQL ledger not found; snapshot not included"
fi
size="$(du -sh "${TARGET_DIR}" | cut -f1)" size="$(du -sh "${TARGET_DIR}" | cut -f1)"
cat > "${TARGET_DIR}/manifest.json" <<EOF cat > "${TARGET_DIR}/manifest.json" <<EOF
{ {


@@ -0,0 +1,139 @@
#!/bin/bash
# Normalize permissions across storage/ and local-storage/ so host processes
# (and CI tools) can read/write module metadata without manual chown.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="$PROJECT_ROOT/.env"
TEMPLATE_FILE="$PROJECT_ROOT/.env.template"
usage(){
cat <<'EOF'
Usage: repair-storage-permissions.sh [options]
Ensures common storage directories are writable by the current host user.
Options:
--path <dir> Additional directory to fix (can be passed multiple times)
--silent Reduce output (only errors/warnings)
-h, --help Show this help message
EOF
}
read_env(){
local key="$1" default="$2" env_path="$ENV_FILE" value=""
if [ -f "$env_path" ]; then
value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi
if [ -z "$value" ] && [ -f "$TEMPLATE_FILE" ]; then
value="$(grep -E "^${key}=" "$TEMPLATE_FILE" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi
if [ -z "$value" ]; then
value="$default"
fi
printf '%s\n' "$value"
}
silent=0
declare -a extra_paths=()
while [ $# -gt 0 ]; do
case "$1" in
--path)
shift
[ $# -gt 0 ] || { echo "Missing value for --path" >&2; exit 1; }
extra_paths+=("$1")
;;
--silent)
silent=1
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown option: $1" >&2
usage
exit 1
;;
esac
shift
done
log(){ [ "$silent" -eq 1 ] || echo "$*"; }
warn(){ echo "⚠️ $*" >&2; }
resolve_path(){
local path="$1"
if [[ "$path" != /* ]]; then
path="${path#./}"
path="$PROJECT_ROOT/$path"
fi
printf '%s\n' "$(cd "$(dirname "$path")" 2>/dev/null && pwd 2>/dev/null)/$(basename "$path")"
}
ensure_host_writable(){
local target="$1"
[ -n "$target" ] || return 0
mkdir -p "$target" 2>/dev/null || true
[ -d "$target" ] || { warn "Path not found: $target"; return 0; }
local uid gid
uid="$(id -u)"
gid="$(id -g)"
if chown -R "$uid":"$gid" "$target" 2>/dev/null; then
:
elif command -v docker >/dev/null 2>&1; then
local helper_image
helper_image="$(read_env ALPINE_IMAGE "alpine:latest")"
if ! docker run --rm -u 0:0 -v "$target":/workspace "$helper_image" \
sh -c "chown -R ${uid}:${gid} /workspace" >/dev/null 2>&1; then
warn "Failed to adjust ownership for $target"
return 1
fi
else
warn "Cannot adjust ownership for $target (docker unavailable)"
return 1
fi
chmod -R ug+rwX "$target" 2>/dev/null || true
return 0
}
STORAGE_PATH="$(read_env STORAGE_PATH "./storage")"
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
declare -a targets=(
"$STORAGE_PATH"
"$STORAGE_PATH/modules"
"$STORAGE_PATH/modules/.modules-meta"
"$STORAGE_PATH/backups"
"$STORAGE_PATH/logs"
"$STORAGE_PATH/lua_scripts"
"$STORAGE_PATH/install-markers"
"$STORAGE_PATH/client-data"
"$STORAGE_PATH/config"
"$LOCAL_STORAGE_PATH"
"$LOCAL_STORAGE_PATH/modules"
"$LOCAL_STORAGE_PATH/client-data-cache"
"$LOCAL_STORAGE_PATH/source"
"$LOCAL_STORAGE_PATH/images"
)
targets+=("${extra_paths[@]}")
declare -A seen=()
for raw in "${targets[@]}"; do
[ -n "$raw" ] || continue
resolved="$(resolve_path "$raw")"
if [ -n "${seen[$resolved]:-}" ]; then
continue
fi
seen["$resolved"]=1
log "🔧 Fixing permissions for $resolved"
ensure_host_writable "$resolved"
done
log "✅ Storage permissions refreshed"


@@ -1,103 +1,22 @@
#!/bin/bash
# Refresh the module SQL ledger after a database restore so the runtime staging
# flow knows exactly which files to copy into /azerothcore/data/sql/updates/*.
set -euo pipefail
info(){ echo "🔧 [restore-stage] $*"; }
warn(){ echo "⚠️ [restore-stage] $*" >&2; }
MODULES_DIR="${MODULES_DIR:-/modules}"
RESTORE_SOURCE_DIR="${RESTORE_SOURCE_DIR:-}"
MODULES_META_DIR="${MODULES_DIR}/.modules-meta"
LEDGER_FILE="${MODULES_META_DIR}/module-sql-ledger.txt"
RESTORE_FLAG="${MODULES_META_DIR}/.restore-prestaged"
SNAPSHOT_FILE=""
ensure_modules_dir(){
if [ ! -d "$MODULES_DIR" ]; then
warn "Modules directory not found at ${MODULES_DIR}; skipping restore-time staging prep."
exit 0
fi
}
hash_sql_file(){
local sql_file="$1"
if command -v sha1sum >/dev/null 2>&1; then
sha1sum "$sql_file" | awk '{print $1}'
elif command -v md5sum >/dev/null 2>&1; then
md5sum "$sql_file" | awk '{print $1}'
else
return 1
fi
}
collect_sql_files(){
local db_type="$1" legacy="$2"
local -a patterns=(
"$MODULES_DIR"/*/data/sql/"$db_type"/*.sql
"$MODULES_DIR"/*/data/sql/"$db_type"/base/*.sql
"$MODULES_DIR"/*/data/sql/"$db_type"/updates/*.sql
"$MODULES_DIR"/*/data/sql/"$legacy"/*.sql
"$MODULES_DIR"/*/data/sql/"$legacy"/base/*.sql
"$MODULES_DIR"/*/data/sql/"$legacy"/updates/*.sql
)
declare -A seen=()
local -a files=()
for pattern in "${patterns[@]}"; do
for path in $pattern; do
[ -f "$path" ] || continue
if [ -z "${seen[$path]:-}" ]; then
seen["$path"]=1
files+=("$path")
fi
done
done
if [ ${#files[@]} -eq 0 ]; then
return 0
fi
printf '%s\n' "${files[@]}" | sort
}
rebuild_ledger(){
local tmp_file
tmp_file="$(mktemp)"
for db_type in db-world db-characters db-auth; do
local legacy=""
case "$db_type" in
db-world) legacy="world" ;;
db-characters) legacy="characters" ;;
db-auth) legacy="auth" ;;
esac
while IFS= read -r sql_file; do
[ -n "$sql_file" ] || continue
[ -f "$sql_file" ] || continue
local module_name base_name hash
module_name="$(echo "$sql_file" | sed 's|.*/modules/||' | cut -d'/' -f1)"
base_name="$(basename "$sql_file" .sql)"
if ! hash="$(hash_sql_file "$sql_file")"; then
continue
fi
printf '%s|%s|%s|%s\n' "$db_type" "$module_name" "$base_name" "$hash" >> "$tmp_file"
done < <(collect_sql_files "$db_type" "$legacy")
done
sort -u "$tmp_file" > "$LEDGER_FILE"
rm -f "$tmp_file"
}
ensure_modules_dir
mkdir -p "$MODULES_META_DIR" 2>/dev/null || true
if [ -n "$RESTORE_SOURCE_DIR" ] && [ -f "${RESTORE_SOURCE_DIR}/module-sql-ledger.txt" ]; then
SNAPSHOT_FILE="${RESTORE_SOURCE_DIR}/module-sql-ledger.txt"
info "Snapshot found in backup (${SNAPSHOT_FILE}); syncing to host ledger."
cp "$SNAPSHOT_FILE" "$LEDGER_FILE"
else
warn "Module SQL snapshot not found in backup; rebuilding ledger from module sources."
rebuild_ledger
fi
touch "$RESTORE_FLAG"
echo "restore_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" > "$RESTORE_FLAG"
info "Ledger ready at ${LEDGER_FILE}; runtime staging will copy SQL before worldserver starts."
info "Flagged ${RESTORE_FLAG} to force staging on next ./scripts/bash/stage-modules.sh run."
#!/bin/bash
# Refresh the module metadata after a database restore so runtime staging knows
# to re-copy SQL files.
set -euo pipefail
info(){ echo "🔧 [restore-stage] $*"; }
warn(){ echo "⚠️ [restore-stage] $*" >&2; }
MODULES_DIR="${MODULES_DIR:-/modules}"
MODULES_META_DIR="${MODULES_DIR}/.modules-meta"
RESTORE_FLAG="${MODULES_META_DIR}/.restore-prestaged"
if [ ! -d "$MODULES_DIR" ]; then
warn "Modules directory not found at ${MODULES_DIR}; skipping restore-time staging prep."
exit 0
fi
mkdir -p "$MODULES_META_DIR" 2>/dev/null || true
touch "$RESTORE_FLAG"
echo "restore_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" > "$RESTORE_FLAG"
info "Flagged ${RESTORE_FLAG} to force staging on next ./scripts/bash/stage-modules.sh run."


@@ -40,72 +40,7 @@ ensure_host_writable(){
}
seed_sql_ledger_if_needed(){
local sentinel="$1" ledger="$2"
mkdir -p "$(dirname "$ledger")" 2>/dev/null || true
local need_seed=0
local reason=""
if [ ! -f "$ledger" ] || [ ! -s "$ledger" ]; then
need_seed=1
reason="Module SQL ledger missing; rebuilding."
elif [ -f "$sentinel" ] && [ "$sentinel" -nt "$ledger" ]; then
need_seed=1
reason="Database restore detected; seeding module SQL ledger."
fi
if [ "$need_seed" -ne 1 ]; then
touch "$ledger" 2>/dev/null || true
return 0
fi
echo "${reason}"
local tmp_file="${ledger}.tmp"
> "$tmp_file"
shopt -s nullglob
for db_type in db-world db-characters db-auth; do
local legacy_name=""
case "$db_type" in
db-world) legacy_name="world" ;;
db-characters) legacy_name="characters" ;;
db-auth) legacy_name="auth" ;;
esac
local search_paths=(
"$MODULES_DIR"/*/data/sql/"$db_type"
"$MODULES_DIR"/*/data/sql/"$db_type"/base
"$MODULES_DIR"/*/data/sql/"$db_type"/updates
"$MODULES_DIR"/*/data/sql/"$legacy_name"
"$MODULES_DIR"/*/data/sql/"$legacy_name"/base
)
for module_dir in "${search_paths[@]}"; do
for sql_file in "$module_dir"/*.sql; do
[ -e "$sql_file" ] || continue
local module_name
module_name="$(echo "$sql_file" | sed 's|.*/modules/||' | cut -d'/' -f1)"
local base_name
base_name="$(basename "$sql_file" .sql)"
local hash_cmd=""
if command -v sha1sum >/dev/null 2>&1; then
hash_cmd="sha1sum"
elif command -v md5sum >/dev/null 2>&1; then
hash_cmd="md5sum"
fi
local file_hash=""
if [ -n "$hash_cmd" ]; then
file_hash=$($hash_cmd "$sql_file" | awk '{print $1}')
fi
[ -n "$file_hash" ] || continue
printf '%s|%s|%s|%s\n' "$db_type" "$module_name" "$base_name" "$file_hash" >> "$tmp_file"
done
done
done
shopt -u nullglob
sort -u "$tmp_file" > "$ledger"
rm -f "$tmp_file"
: # No-op; ledger removed
}
sync_local_staging(){
@@ -323,11 +258,20 @@ if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH" LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi fi
LOCAL_STORAGE_PATH="$(canonical_path "$LOCAL_STORAGE_PATH")" LOCAL_STORAGE_PATH="$(canonical_path "$LOCAL_STORAGE_PATH")"
STORAGE_PATH_LOCAL="$LOCAL_STORAGE_PATH"
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
MODULES_META_DIR="$STORAGE_PATH/modules/.modules-meta"
MODULES_SQL_LEDGER_HOST="$MODULES_META_DIR/module-sql-ledger.txt"
RESTORE_PRESTAGED_FLAG="$MODULES_META_DIR/.restore-prestaged"
MODULES_ENABLED_FILE="$MODULES_META_DIR/modules-enabled.txt"
MODULE_SQL_STAGE_PATH="$(read_env MODULE_SQL_STAGE_PATH "$STORAGE_PATH/module-sql-updates")"
MODULE_SQL_STAGE_PATH="$(eval "echo \"$MODULE_SQL_STAGE_PATH\"")"
if [[ "$MODULE_SQL_STAGE_PATH" != /* ]]; then
MODULE_SQL_STAGE_PATH="$PROJECT_DIR/$MODULE_SQL_STAGE_PATH"
fi
MODULE_SQL_STAGE_PATH="$(canonical_path "$MODULE_SQL_STAGE_PATH")"
mkdir -p "$MODULE_SQL_STAGE_PATH"
ensure_host_writable "$MODULE_SQL_STAGE_PATH"
HOST_STAGE_HELPER_IMAGE="$(read_env ALPINE_IMAGE "alpine:latest")"
declare -A ENABLED_MODULES=()
@@ -353,6 +297,9 @@ module_is_enabled(){
return 1
}
# Load the enabled module list (if present) so staging respects disabled modules.
load_enabled_modules
# Define module mappings (from rebuild-with-modules.sh)
declare -A MODULE_REPO_MAP=(
[MODULE_AOE_LOOT]=mod-aoe-loot
@@ -474,8 +421,6 @@ sync_local_staging
echo "🎬 Staging services with profile: services-$TARGET_PROFILE" echo "🎬 Staging services with profile: services-$TARGET_PROFILE"
echo "⏳ Pulling images and starting containers; this can take several minutes on first run." echo "⏳ Pulling images and starting containers; this can take several minutes on first run."
load_enabled_modules
# Stop any currently running services
echo "🛑 Stopping current services..."
docker compose \
@@ -496,6 +441,36 @@ case "$TARGET_PROFILE" in
esac
# Stage module SQL to core updates directory (after containers start)
host_stage_clear(){
docker run --rm \
-v "$MODULE_SQL_STAGE_PATH":/host-stage \
"$HOST_STAGE_HELPER_IMAGE" \
sh -c 'find /host-stage -type f -name "MODULE_*.sql" -delete' >/dev/null 2>&1 || true
}
host_stage_reset_dir(){
local dir="$1"
docker run --rm \
-v "$MODULE_SQL_STAGE_PATH":/host-stage \
"$HOST_STAGE_HELPER_IMAGE" \
sh -c "mkdir -p /host-stage/$dir && rm -f /host-stage/$dir/MODULE_*.sql" >/dev/null 2>&1 || true
}
copy_to_host_stage(){
local file_path="$1"
local core_dir="$2"
local target_name="$3"
local src_dir
src_dir="$(dirname "$file_path")"
local base_name
base_name="$(basename "$file_path")"
docker run --rm \
-v "$MODULE_SQL_STAGE_PATH":/host-stage \
-v "$src_dir":/src \
"$HOST_STAGE_HELPER_IMAGE" \
sh -c "mkdir -p /host-stage/$core_dir && cp \"/src/$base_name\" \"/host-stage/$core_dir/$target_name\"" >/dev/null 2>&1
}
stage_module_sql_to_core() {
show_staging_step "Module SQL Staging" "Preparing module database updates"
@@ -524,6 +499,7 @@ stage_module_sql_to_core() {
fi
echo "📦 Staging module SQL files to core updates directory..."
host_stage_clear
# Create core updates directories inside container
docker exec ac-worldserver bash -c "
@@ -536,13 +512,10 @@ stage_module_sql_to_core() {
local staged_count=0
local total_skipped=0
local total_failed=0
local RESTORE_SENTINEL="$LOCAL_STORAGE_PATH/mysql-data/.restore-completed"
ensure_host_writable "$MODULES_META_DIR"
seed_sql_ledger_if_needed "$RESTORE_SENTINEL" "$MODULES_SQL_LEDGER_HOST"
docker exec ac-worldserver bash -c "find /azerothcore/data/sql/updates -name '*_MODULE_*.sql' -delete" >/dev/null 2>&1 || true docker exec ac-worldserver bash -c "find /azerothcore/data/sql/updates -name '*_MODULE_*.sql' -delete" >/dev/null 2>&1 || true
shopt -s nullglob shopt -s nullglob
for db_type in db-world db-characters db-auth; do for db_type in db-world db-characters db-auth db-playerbots; do
local core_dir="" local core_dir=""
local legacy_name="" local legacy_name=""
case "$db_type" in case "$db_type" in
@@ -558,9 +531,14 @@ stage_module_sql_to_core() {
core_dir="db_auth" core_dir="db_auth"
legacy_name="auth" legacy_name="auth"
;; ;;
db-playerbots)
core_dir="db_playerbots"
legacy_name="playerbots"
;;
esac
docker exec ac-worldserver bash -c "mkdir -p /azerothcore/data/sql/updates/$core_dir" >/dev/null 2>&1 || true
host_stage_reset_dir "$core_dir"
local counter=0
local skipped=0
@@ -602,28 +580,15 @@ stage_module_sql_to_core() {
continue
fi
local hash_cmd=""
if command -v sha1sum >/dev/null 2>&1; then
hash_cmd="sha1sum"
elif command -v md5sum >/dev/null 2>&1; then
hash_cmd="md5sum"
fi
local file_hash=""
if [ -n "$hash_cmd" ]; then
file_hash=$($hash_cmd "$sql_file" | awk '{print $1}')
fi
local ledger_key="$db_type|$module_name|$base_name"
local target_name="MODULE_${module_name}_${base_name}.sql"
if ! copy_to_host_stage "$sql_file" "$core_dir" "$target_name"; then
echo " ❌ Failed to copy to host staging: $module_name/$db_type/$(basename "$sql_file")"
failed=$((failed + 1))
continue
fi
if docker cp "$sql_file" "ac-worldserver:/azerothcore/data/sql/updates/$core_dir/$target_name" >/dev/null; then if docker cp "$sql_file" "ac-worldserver:/azerothcore/data/sql/updates/$core_dir/$target_name" >/dev/null; then
echo " ✓ Staged $module_name/$db_type/$(basename "$sql_file")" echo " ✓ Staged $module_name/$db_type/$(basename "$sql_file")"
counter=$((counter + 1)) counter=$((counter + 1))
if [ -n "$file_hash" ]; then
local tmp_file="${MODULES_SQL_LEDGER_HOST}.tmp"
grep -Fv "${ledger_key}|" "$MODULES_SQL_LEDGER_HOST" > "$tmp_file" 2>/dev/null || true
printf '%s|%s\n' "$ledger_key" "$file_hash" >> "$tmp_file"
mv "$tmp_file" "$MODULES_SQL_LEDGER_HOST" 2>/dev/null || true
fi
else
echo " ❌ Failed to copy: $module_name/$(basename "$sql_file")"
failed=$((failed + 1))
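As a concrete illustration of the naming scheme, staging one world-database file from mod-aoe-loot (the source path here is hypothetical) would amount to:
copy_to_host_stage "/path/to/modules/mod-aoe-loot/data/sql/db-world/base.sql" "db_world" "MODULE_mod-aoe-loot_base.sql"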

scripts/bash/statusjson.sh (new executable file)

@@ -0,0 +1,293 @@
#!/usr/bin/env python3
import json
import os
import re
import socket
import subprocess
import time
from pathlib import Path
PROJECT_DIR = Path(__file__).resolve().parents[2]
ENV_FILE = PROJECT_DIR / ".env"
def load_env():
env = {}
if ENV_FILE.exists():
for line in ENV_FILE.read_text().splitlines():
if not line or line.strip().startswith('#'):
continue
if '=' not in line:
continue
key, val = line.split('=', 1)
val = val.split('#', 1)[0].strip()
env[key.strip()] = val
return env
def read_env(env, key, default=""):
return env.get(key, default)
def docker_exists(name):
result = subprocess.run([
"docker", "ps", "-a", "--format", "{{.Names}}"
], capture_output=True, text=True)
names = set(result.stdout.split())
return name in names
def docker_inspect(name, template):
try:
result = subprocess.run([
"docker", "inspect", f"--format={template}", name
], capture_output=True, text=True, check=True)
return result.stdout.strip()
except subprocess.CalledProcessError:
return ""
def service_snapshot(name, label):
status = "missing"
health = "none"
started = ""
image = ""
exit_code = ""
if docker_exists(name):
status = docker_inspect(name, "{{.State.Status}}") or status
health = docker_inspect(name, "{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}") or health
started = docker_inspect(name, "{{.State.StartedAt}}") or ""
image = docker_inspect(name, "{{.Config.Image}}") or ""
exit_code = docker_inspect(name, "{{.State.ExitCode}}") or "0"
return {
"name": name,
"label": label,
"status": status,
"health": health,
"started_at": started,
"image": image,
"exit_code": exit_code,
}
def port_reachable(port):
if not port:
return False
try:
port = int(port)
except ValueError:
return False
try:
with socket.create_connection(("127.0.0.1", port), timeout=1):
return True
except OSError:
return False
def module_list(env):
# Load module manifest
manifest_path = PROJECT_DIR / "config" / "module-manifest.json"
manifest_map = {}
if manifest_path.exists():
try:
manifest_data = json.loads(manifest_path.read_text())
for mod in manifest_data.get("modules", []):
manifest_map[mod["key"]] = mod
except Exception:
pass
modules = []
pattern = re.compile(r"^MODULE_([A-Z0-9_]+)=1$")
if ENV_FILE.exists():
for line in ENV_FILE.read_text().splitlines():
m = pattern.match(line.strip())
if m:
key = "MODULE_" + m.group(1)
raw = m.group(1).lower().replace('_', ' ')
title = raw.title()
# Look up manifest info
mod_info = manifest_map.get(key, {})
modules.append({
"name": title,
"key": key,
"description": mod_info.get("description", "No description available"),
"category": mod_info.get("category", "unknown"),
"type": mod_info.get("type", "unknown")
})
return modules
def dir_info(path):
p = Path(path)
exists = p.exists()
size = "--"
if exists:
try:
result = subprocess.run(
["du", "-sh", str(p)],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
text=True,
check=False,
)
if result.stdout:
size = result.stdout.split()[0]
except Exception:
size = "--"
return {"path": str(p), "exists": exists, "size": size}
def volume_info(name, fallback=None):
candidates = [name]
if fallback:
candidates.append(fallback)
for cand in candidates:
result = subprocess.run(["docker", "volume", "inspect", cand], capture_output=True, text=True)
if result.returncode == 0:
try:
data = json.loads(result.stdout)[0]
return {
"name": cand,
"exists": True,
"mountpoint": data.get("Mountpoint", "-")
}
except Exception:
pass
return {"name": name, "exists": False, "mountpoint": "-"}
def expand_path(value, env):
storage = read_env(env, "STORAGE_PATH", "./storage")
local_storage = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
value = value.replace('${STORAGE_PATH}', storage)
value = value.replace('${STORAGE_PATH_LOCAL}', local_storage)
return value
def mysql_query(env, database, query):
password = read_env(env, "MYSQL_ROOT_PASSWORD")
user = read_env(env, "MYSQL_USER", "root")
if not password or not database:
return 0
cmd = [
"docker", "exec", "ac-mysql",
"mysql", "-N", "-B",
f"-u{user}", f"-p{password}", database,
"-e", query
]
try:
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
value = result.stdout.strip().splitlines()[-1]
return int(value)
except Exception:
return 0
def user_stats(env):
db_auth = read_env(env, "DB_AUTH_NAME", "acore_auth")
db_characters = read_env(env, "DB_CHARACTERS_NAME", "acore_characters")
accounts = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account;")
online = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE online = 1;")
active = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")
characters = mysql_query(env, db_characters, "SELECT COUNT(*) FROM characters;")
return {
"accounts": accounts,
"online": online,
"characters": characters,
"active7d": active,
}
def docker_stats():
"""Get CPU and memory stats for running containers"""
try:
result = subprocess.run([
"docker", "stats", "--no-stream", "--no-trunc",
"--format", "{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}"
], capture_output=True, text=True, check=True, timeout=4)
stats = {}
for line in result.stdout.strip().splitlines():
parts = line.split('\t')
if len(parts) == 4:
name, cpu, mem_usage, mem_perc = parts
# Parse CPU percentage (e.g., "0.50%" -> 0.50)
cpu_val = cpu.replace('%', '').strip()
try:
cpu_float = float(cpu_val)
except ValueError:
cpu_float = 0.0
# Parse memory percentage
mem_perc_val = mem_perc.replace('%', '').strip()
try:
mem_perc_float = float(mem_perc_val)
except ValueError:
mem_perc_float = 0.0
stats[name] = {
"cpu": cpu_float,
"memory": mem_usage.strip(),
"memory_percent": mem_perc_float
}
return stats
except Exception:
return {}
def main():
env = load_env()
project = read_env(env, "COMPOSE_PROJECT_NAME", "acore-compose")
network = read_env(env, "NETWORK_NAME", "azerothcore")
services = [
("ac-mysql", "MySQL"),
("ac-backup", "Backup"),
("ac-volume-init", "Volume Init"),
("ac-storage-init", "Storage Init"),
("ac-db-init", "DB Init"),
("ac-db-import", "DB Import"),
("ac-authserver", "Auth Server"),
("ac-worldserver", "World Server"),
("ac-client-data", "Client Data"),
("ac-modules", "Module Manager"),
("ac-post-install", "Post Install"),
("ac-phpmyadmin", "phpMyAdmin"),
("ac-keira3", "Keira3"),
]
service_data = [service_snapshot(name, label) for name, label in services]
port_entries = [
{"name": "Auth", "port": read_env(env, "AUTH_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "AUTH_EXTERNAL_PORT"))},
{"name": "World", "port": read_env(env, "WORLD_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "WORLD_EXTERNAL_PORT"))},
{"name": "SOAP", "port": read_env(env, "SOAP_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "SOAP_EXTERNAL_PORT"))},
{"name": "MySQL", "port": read_env(env, "MYSQL_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "MYSQL_EXTERNAL_PORT")) if read_env(env, "COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED", "0") == "1" else False},
{"name": "phpMyAdmin", "port": read_env(env, "PMA_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "PMA_EXTERNAL_PORT"))},
{"name": "Keira3", "port": read_env(env, "KEIRA3_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "KEIRA3_EXTERNAL_PORT"))},
]
storage_path = expand_path(read_env(env, "STORAGE_PATH", "./storage"), env)
local_storage_path = expand_path(read_env(env, "STORAGE_PATH_LOCAL", "./local-storage"), env)
client_data_path = expand_path(read_env(env, "CLIENT_DATA_PATH", f"{storage_path}/client-data"), env)
storage_info = {
"storage": dir_info(storage_path),
"local_storage": dir_info(local_storage_path),
"client_data": dir_info(client_data_path),
"modules": dir_info(os.path.join(storage_path, "modules")),
"local_modules": dir_info(os.path.join(local_storage_path, "modules")),
}
volumes = {
"client_cache": volume_info(f"{project}_client-data-cache"),
"mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"),
}
data = {
"timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"project": project,
"network": network,
"services": service_data,
"ports": port_entries,
"modules": module_list(env),
"storage": storage_info,
"volumes": volumes,
"users": user_stats(env),
"stats": docker_stats(),
}
print(json.dumps(data))
if __name__ == "__main__":
main()
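Because the script emits a single JSON document on stdout, other tooling can consume it directly; for example, assuming jq is installed:
./scripts/bash/statusjson.sh | jq -r '.services[] | "\(.label): \(.status)/\(.health)"'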


@@ -69,6 +69,12 @@ section_header "Phase 1 Integration Test Suite"
info "Project root: $PROJECT_ROOT" info "Project root: $PROJECT_ROOT"
info "Test started: $(date)" info "Test started: $(date)"
# Ensure storage directories are writable before generating module state
if [ -x "$PROJECT_ROOT/scripts/bash/repair-storage-permissions.sh" ]; then
info "Normalizing storage permissions"
"$PROJECT_ROOT/scripts/bash/repair-storage-permissions.sh" --silent || true
fi
# Test 1: Verify .env exists
test_header "Environment Configuration Check"
if [ -f .env ]; then
@@ -273,11 +279,10 @@ fi
# Test 11: Restore + Module Staging Automation
test_header "Restore + Module Staging Automation"
if grep -q "restore-and-stage.sh" docker-compose.yml && \
grep -q ".restore-prestaged" scripts/bash/restore-and-stage.sh && \
grep -q "module-sql-ledger" scripts/bash/restore-and-stage.sh; then
ok "restore-and-stage.sh wired into compose, refreshes ledger snapshot, and flags staging"
else
err "restore-and-stage.sh missing compose wiring or ledger/flag handling"
fi
if grep -q "restore-and-stage.sh" docker-compose.yml && \
grep -q ".restore-prestaged" scripts/bash/restore-and-stage.sh; then
ok "restore-and-stage.sh wired into compose and flags stage-modules to recopy SQL"
else
err "restore-and-stage.sh missing compose wiring or flag handling"
fi
# Test 12: Docker Compose configuration check

scripts/go/go.mod (new file)

@@ -0,0 +1,10 @@
module acore-compose/statusdash
go 1.22.2
require (
github.com/gizak/termui/v3 v3.1.0 // indirect
github.com/mattn/go-runewidth v0.0.2 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect
)

scripts/go/go.sum (new file)

@@ -0,0 +1,8 @@
github.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc=
github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY=
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=

scripts/go/statusdash.go (new file)

@@ -0,0 +1,373 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os/exec"
"strings"
"time"
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
)
type Service struct {
Name string `json:"name"`
Label string `json:"label"`
Status string `json:"status"`
Health string `json:"health"`
StartedAt string `json:"started_at"`
Image string `json:"image"`
ExitCode string `json:"exit_code"`
}
type ContainerStats struct {
CPU float64 `json:"cpu"`
Memory string `json:"memory"`
MemoryPercent float64 `json:"memory_percent"`
}
type Port struct {
Name string `json:"name"`
Port string `json:"port"`
Reachable bool `json:"reachable"`
}
type DirInfo struct {
Path string `json:"path"`
Exists bool `json:"exists"`
Size string `json:"size"`
}
type VolumeInfo struct {
Name string `json:"name"`
Exists bool `json:"exists"`
Mountpoint string `json:"mountpoint"`
}
type UserStats struct {
Accounts int `json:"accounts"`
Online int `json:"online"`
Characters int `json:"characters"`
Active7d int `json:"active7d"`
}
type Module struct {
Name string `json:"name"`
Key string `json:"key"`
Description string `json:"description"`
Category string `json:"category"`
Type string `json:"type"`
}
type Snapshot struct {
Timestamp string `json:"timestamp"`
Project string `json:"project"`
Network string `json:"network"`
Services []Service `json:"services"`
Ports []Port `json:"ports"`
Modules []Module `json:"modules"`
Storage map[string]DirInfo `json:"storage"`
Volumes map[string]VolumeInfo `json:"volumes"`
Users UserStats `json:"users"`
Stats map[string]ContainerStats `json:"stats"`
}
func runSnapshot() (*Snapshot, error) {
cmd := exec.Command("./scripts/bash/statusjson.sh")
output, err := cmd.Output()
if err != nil {
return nil, err
}
snap := &Snapshot{}
if err := json.Unmarshal(output, snap); err != nil {
return nil, err
}
return snap, nil
}
func buildServicesTable(s *Snapshot) *TableNoCol {
table := NewTableNoCol()
rows := [][]string{{"Service", "Status", "Health", "CPU%", "Memory"}}
for _, svc := range s.Services {
cpu := "-"
mem := "-"
if stats, ok := s.Stats[svc.Name]; ok {
cpu = fmt.Sprintf("%.1f", stats.CPU)
mem = strings.Split(stats.Memory, " / ")[0] // Just show used, not total
}
// Combine health with exit code for stopped containers
health := svc.Health
if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
}
rows = append(rows, []string{svc.Label, svc.Status, health, cpu, mem})
}
table.Rows = rows
table.RowSeparator = false
table.Border = true
table.Title = "Services"
return table
}
func buildPortsTable(s *Snapshot) *TableNoCol {
table := NewTableNoCol()
rows := [][]string{{"Port", "Number", "Reachable"}}
for _, p := range s.Ports {
state := "down"
if p.Reachable {
state = "up"
}
rows = append(rows, []string{p.Name, p.Port, state})
}
table.Rows = rows
table.RowSeparator = true
table.Border = true
table.Title = "Ports"
return table
}
func buildModulesList(s *Snapshot) *widgets.List {
list := widgets.NewList()
list.Title = fmt.Sprintf("Modules (%d)", len(s.Modules))
rows := make([]string, len(s.Modules))
for i, mod := range s.Modules {
rows[i] = mod.Name
}
list.Rows = rows
list.WrapText = false
list.Border = true
list.BorderStyle = ui.NewStyle(ui.ColorCyan)
list.SelectedRowStyle = ui.NewStyle(ui.ColorCyan)
return list
}
func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
var b strings.Builder
fmt.Fprintf(&b, "STORAGE:\n")
entries := []struct {
Key string
Label string
}{
{"storage", "Storage"},
{"local_storage", "Local Storage"},
{"client_data", "Client Data"},
{"modules", "Modules"},
{"local_modules", "Local Modules"},
}
for _, item := range entries {
info, ok := s.Storage[item.Key]
if !ok {
continue
}
mark := "○"
if info.Exists {
mark = "●"
}
fmt.Fprintf(&b, " %-15s %s %s (%s)\n", item.Label, mark, info.Path, info.Size)
}
par := widgets.NewParagraph()
par.Title = "Storage"
par.Text = b.String()
par.Border = true
par.BorderStyle = ui.NewStyle(ui.ColorYellow)
return par
}
func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
var b strings.Builder
fmt.Fprintf(&b, "VOLUMES:\n")
entries := []struct {
Key string
Label string
}{
{"client_cache", "Client Cache"},
{"mysql_data", "MySQL Data"},
}
for _, item := range entries {
info, ok := s.Volumes[item.Key]
if !ok {
continue
}
mark := "○"
if info.Exists {
mark = "●"
}
fmt.Fprintf(&b, " %-13s %s %s\n", item.Label, mark, info.Mountpoint)
}
par := widgets.NewParagraph()
par.Title = "Volumes"
par.Text = b.String()
par.Border = true
par.BorderStyle = ui.NewStyle(ui.ColorYellow)
return par
}
func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
servicesTable := buildServicesTable(s)
for i := 1; i < len(servicesTable.Rows); i++ {
if servicesTable.RowStyles == nil {
servicesTable.RowStyles = make(map[int]ui.Style)
}
state := strings.ToLower(servicesTable.Rows[i][1])
switch state {
case "running", "healthy":
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
case "restarting", "unhealthy":
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
case "exited":
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
default:
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorWhite)
}
}
portsTable := buildPortsTable(s)
for i := 1; i < len(portsTable.Rows); i++ {
if portsTable.RowStyles == nil {
portsTable.RowStyles = make(map[int]ui.Style)
}
if portsTable.Rows[i][2] == "up" {
portsTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
} else {
portsTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
}
}
modulesList := buildModulesList(s)
if selectedModule >= 0 && selectedModule < len(modulesList.Rows) {
modulesList.SelectedRow = selectedModule
}
helpPar := widgets.NewParagraph()
helpPar.Title = "Controls"
helpPar.Text = " ↓ : Down\n ↑ : Up"
helpPar.Border = true
helpPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
moduleInfoPar := widgets.NewParagraph()
moduleInfoPar.Title = "Module Info"
if selectedModule >= 0 && selectedModule < len(s.Modules) {
mod := s.Modules[selectedModule]
moduleInfoPar.Text = fmt.Sprintf("%s\n\nCategory: %s\nType: %s", mod.Description, mod.Category, mod.Type)
} else {
moduleInfoPar.Text = "Select a module to view info"
}
moduleInfoPar.Border = true
moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
storagePar := buildStorageParagraph(s)
storagePar.Border = true
storagePar.BorderStyle = ui.NewStyle(ui.ColorYellow)
storagePar.PaddingLeft = 1
storagePar.PaddingRight = 1
volumesPar := buildVolumesParagraph(s)
header := widgets.NewParagraph()
header.Text = fmt.Sprintf("Project: %s\nNetwork: %s\nUpdated: %s", s.Project, s.Network, s.Timestamp)
header.Border = true
usersPar := widgets.NewParagraph()
usersPar.Text = fmt.Sprintf("USERS:\n Accounts: %d\n Online: %d\n Characters: %d\n Active 7d: %d", s.Users.Accounts, s.Users.Online, s.Users.Characters, s.Users.Active7d)
usersPar.Border = true
grid := ui.NewGrid()
termWidth, termHeight := ui.TerminalDimensions()
grid.SetRect(0, 0, termWidth, termHeight)
grid.Set(
ui.NewRow(0.18,
ui.NewCol(0.6, header),
ui.NewCol(0.4, usersPar),
),
ui.NewRow(0.42,
ui.NewCol(0.6, servicesTable),
ui.NewCol(0.4, portsTable),
),
ui.NewRow(0.40,
ui.NewCol(0.25, modulesList),
ui.NewCol(0.15,
ui.NewRow(0.30, helpPar),
ui.NewRow(0.70, moduleInfoPar),
),
ui.NewCol(0.6,
ui.NewRow(0.55,
ui.NewCol(1.0, storagePar),
),
ui.NewRow(0.45,
ui.NewCol(1.0, volumesPar),
),
),
),
)
ui.Render(grid)
return modulesList, grid
}
func main() {
if err := ui.Init(); err != nil {
log.Fatalf("failed to init termui: %v", err)
}
defer ui.Close()
snapshot, err := runSnapshot()
if err != nil {
log.Fatalf("failed to fetch snapshot: %v", err)
}
selectedModule := 0
modulesWidget, currentGrid := renderSnapshot(snapshot, selectedModule)
snapCh := make(chan *Snapshot, 1)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for range ticker.C {
snap, err := runSnapshot()
if err != nil {
log.Printf("snapshot error: %v", err)
continue
}
select {
case snapCh <- snap:
default:
}
}
}()
events := ui.PollEvents()
for {
select {
case e := <-events:
switch e.ID {
case "q", "<C-c>":
return
case "<Down>", "j":
if selectedModule < len(snapshot.Modules)-1 {
selectedModule++
modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
}
case "<Up>", "k":
if selectedModule > 0 {
selectedModule--
modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
}
case "<Resize>":
modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
continue
}
if modulesWidget != nil {
if selectedModule >= 0 && selectedModule < len(modulesWidget.Rows) {
modulesWidget.SelectedRow = selectedModule
}
}
if currentGrid != nil {
ui.Render(currentGrid)
}
case snap := <-snapCh:
snapshot = snap
if selectedModule >= len(snapshot.Modules) {
selectedModule = len(snapshot.Modules) - 1
if selectedModule < 0 {
selectedModule = 0
}
}
modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
}
}
}

scripts/go/table_nocol.go (new file)

@@ -0,0 +1,101 @@
package main
import (
"image"
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
)
// TableNoCol is a modified table widget that doesn't draw column separators
type TableNoCol struct {
widgets.Table
}
func NewTableNoCol() *TableNoCol {
t := &TableNoCol{}
t.Table = *widgets.NewTable()
return t
}
// Draw overrides the default Draw to skip column separators
func (self *TableNoCol) Draw(buf *ui.Buffer) {
self.Block.Draw(buf)
if len(self.Rows) == 0 {
return
}
self.ColumnResizer()
columnWidths := self.ColumnWidths
if len(columnWidths) == 0 {
columnCount := len(self.Rows[0])
columnWidth := self.Inner.Dx() / columnCount
for i := 0; i < columnCount; i++ {
columnWidths = append(columnWidths, columnWidth)
}
}
yCoordinate := self.Inner.Min.Y
// draw rows
for i := 0; i < len(self.Rows) && yCoordinate < self.Inner.Max.Y; i++ {
row := self.Rows[i]
colXCoordinate := self.Inner.Min.X
rowStyle := self.TextStyle
// get the row style if one exists
if style, ok := self.RowStyles[i]; ok {
rowStyle = style
}
if self.FillRow {
blankCell := ui.NewCell(' ', rowStyle)
buf.Fill(blankCell, image.Rect(self.Inner.Min.X, yCoordinate, self.Inner.Max.X, yCoordinate+1))
}
// draw row cells
for j := 0; j < len(row); j++ {
col := ui.ParseStyles(row[j], rowStyle)
// draw row cell
if len(col) > columnWidths[j] || self.TextAlignment == ui.AlignLeft {
for _, cx := range ui.BuildCellWithXArray(col) {
k, cell := cx.X, cx.Cell
if k == columnWidths[j] || colXCoordinate+k == self.Inner.Max.X {
cell.Rune = ui.ELLIPSES
buf.SetCell(cell, image.Pt(colXCoordinate+k-1, yCoordinate))
break
} else {
buf.SetCell(cell, image.Pt(colXCoordinate+k, yCoordinate))
}
}
} else if self.TextAlignment == ui.AlignCenter {
xCoordinateOffset := (columnWidths[j] - len(col)) / 2
stringXCoordinate := xCoordinateOffset + colXCoordinate
for _, cx := range ui.BuildCellWithXArray(col) {
k, cell := cx.X, cx.Cell
buf.SetCell(cell, image.Pt(stringXCoordinate+k, yCoordinate))
}
} else if self.TextAlignment == ui.AlignRight {
stringXCoordinate := ui.MinInt(colXCoordinate+columnWidths[j], self.Inner.Max.X) - len(col)
for _, cx := range ui.BuildCellWithXArray(col) {
k, cell := cx.X, cx.Cell
buf.SetCell(cell, image.Pt(stringXCoordinate+k, yCoordinate))
}
}
colXCoordinate += columnWidths[j] + 1
}
// SKIP drawing vertical separators - this is the key change
yCoordinate++
// draw horizontal separator
horizontalCell := ui.NewCell(ui.HORIZONTAL_LINE, self.Block.BorderStyle)
if self.RowSeparator && yCoordinate < self.Inner.Max.Y && i != len(self.Rows)-1 {
buf.Fill(horizontalCell, image.Rect(self.Inner.Min.X, yCoordinate, self.Inner.Max.X, yCoordinate+1))
yCoordinate++
}
}
}


@@ -1733,8 +1733,6 @@ EOF
local staging_modules_dir="${LOCAL_STORAGE_ROOT_ABS}/modules"
mkdir -p "$staging_modules_dir"
local local_mysql_data_dir="${LOCAL_STORAGE_ROOT_ABS}/mysql-data"
mkdir -p "$local_mysql_data_dir"
local module_state_string=""
for module_state_var in "${MODULE_KEYS[@]}"; do

status.sh

@@ -1,375 +1,79 @@
#!/bin/bash
# ac-compose condensed realm status view
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$SCRIPT_DIR"
ENV_FILE="$PROJECT_DIR/.env"
cd "$PROJECT_DIR"
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; CYAN='\033[0;36m'; BLUE='\033[0;34m'; NC='\033[0m'
WATCH_MODE=true
LOG_LINES=5
SHOW_LOGS=false
while [[ $# -gt 0 ]]; do
case "$1" in
--watch|-w) WATCH_MODE=true; shift;;
--once) WATCH_MODE=false; shift;;
--logs|-l) SHOW_LOGS=true; shift;;
--lines) LOG_LINES="$2"; shift 2;;
-h|--help)
cat <<EOF
ac-compose realm status
Usage: $0 [options]
-w, --watch Continuously refresh every 3s (default)
--once Show a single snapshot then exit
-l, --logs Show trailing logs for each service
--lines N Number of log lines when --logs is used (default 5)
EOF
exit 0;;
*) echo "Unknown option: $1" >&2; exit 1;;
esac
done
command -v docker >/dev/null 2>&1 || { echo "Docker CLI not found" >&2; exit 1; }
docker info >/dev/null 2>&1 || { echo "Docker daemon unavailable" >&2; exit 1; }
read_env(){
local key="$1" value=""
if [ -f "$ENV_FILE" ]; then
value="$(grep -E "^${key}=" "$ENV_FILE" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r' | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
fi
echo "$value"
}
PROJECT_NAME="$(read_env COMPOSE_PROJECT_NAME)"
NETWORK_NAME="$(read_env NETWORK_NAME)"
AUTH_PORT="$(read_env AUTH_EXTERNAL_PORT)"
WORLD_PORT="$(read_env WORLD_EXTERNAL_PORT)"
SOAP_PORT="$(read_env SOAP_EXTERNAL_PORT)"
MYSQL_PORT="$(read_env MYSQL_EXTERNAL_PORT)"
MYSQL_EXPOSE_OVERRIDE="$(read_env COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED "$(read_env MYSQL_EXPOSE_PORT "0")")"
PMA_PORT="$(read_env PMA_EXTERNAL_PORT)"
KEIRA_PORT="$(read_env KEIRA3_EXTERNAL_PORT)"
ELUNA_ENABLED="$(read_env AC_ELUNA_ENABLED)"
container_exists(){
docker ps -a --format '{{.Names}}' | grep -qx "$1"
}
container_running(){
docker ps --format '{{.Names}}' | grep -qx "$1"
}
is_one_shot(){
case "$1" in
ac-db-import|ac-db-init|ac-modules|ac-post-install|ac-client-data|ac-client-data-playerbots)
return 0;;
*)
return 1;;
esac
}
format_state(){
local status="$1" health="$2" started="$3" exit_code="$4"
local started_fmt
if [ -n "$started" ] && [[ "$started" != "--:--:--" ]]; then
started_fmt="$(date -d "$started" '+%H:%M:%S' 2>/dev/null || echo "")"
if [ -z "$started_fmt" ]; then
started_fmt="$(echo "$started" | cut -c12-19)"
fi
[ -z "$started_fmt" ] && started_fmt="--:--:--"
else
started_fmt="--:--:--"
fi
case "$status" in
running)
local desc="running (since $started_fmt)" colour="$GREEN"
if [ "$health" = "healthy" ]; then
desc="healthy (since $started_fmt)"
elif [ "$health" = "none" ]; then
desc="running (since $started_fmt)"
else
desc="$health (since $started_fmt)"; colour="$YELLOW"
[ "$health" = "unhealthy" ] && colour="$RED"
fi
echo "${colour}|● ${desc}"
;;
exited)
local colour="$YELLOW"
[ "$exit_code" != "0" ] && colour="$RED"
echo "${colour}|○ exited (code $exit_code)"
;;
restarting)
echo "${YELLOW}|● restarting"
;;
created)
echo "${CYAN}|○ created"
;;
*)
echo "${RED}|○ $status"
;;
esac
}
short_image(){
local img="$1"
if [[ "$img" != */* ]]; then
echo "$img"
return
fi
local repo="${img%%/*}"
local rest="${img#*/}"
local name="${rest%%:*}"
local tag="${img##*:}"
local has_tag="true"
[[ "$img" != *":"* ]] && has_tag="false"
local last="${name##*/}"
if [ "$has_tag" = "true" ]; then
if [[ "$tag" =~ ^[0-9] ]] || [ "$tag" = "latest" ]; then
echo "$repo/$last"
else
echo "$repo/$tag"
fi
else
echo "$repo/$last"
fi
}
print_service(){
local container="$1" label="$2"
if container_exists "$container"; then
local status health started exit_code image
status="$(docker inspect --format='{{.State.Status}}' "$container" 2>/dev/null || echo "unknown")"
health="$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' "$container" 2>/dev/null || echo "none")"
started="$(docker inspect --format='{{.State.StartedAt}}' "$container" 2>/dev/null | cut -c12-19 2>/dev/null || echo "--:--:--")"
exit_code="$(docker inspect --format='{{.State.ExitCode}}' "$container" 2>/dev/null || echo "?")"
image="$(docker inspect --format='{{.Config.Image}}' "$container" 2>/dev/null || echo "-")"
local state_info colour text
if [ "$status" = "exited" ] && is_one_shot "$container"; then
local finished
finished="$(docker inspect --format='{{.State.FinishedAt}}' "$container" 2>/dev/null | cut -c12-19 2>/dev/null || echo "--:--:--")"
if [ "$exit_code" = "0" ]; then
state_info="${GREEN}|○ completed (at $finished)"
else
state_info="${RED}|○ failed (code $exit_code)"
fi
else
state_info="$(format_state "$status" "$health" "$started" "$exit_code")"
fi
colour="${state_info%%|*}"
text="${state_info#*|}"
printf "%-20s %-15s %b%-30s%b %s\n" "$label" "$container" "$colour" "$text" "$NC" "$(short_image "$image")"
if [ "$SHOW_LOGS" = true ]; then
docker logs "$container" --tail "$LOG_LINES" 2>/dev/null | sed 's/^/ /' || printf " (no logs available)\n"
fi
else
printf "%-20s %-15s %b%-30s%b %s\n" "$label" "$container" "$RED" "○ missing" "$NC" "-"
fi
}
module_summary_list(){
if [ ! -f "$ENV_FILE" ]; then
echo "(env not found)"
return
fi
local module_vars
module_vars="$(grep -E '^MODULE_[A-Z_]+=1' "$ENV_FILE" 2>/dev/null | cut -d'=' -f1)"
if [ -n "$module_vars" ]; then
while IFS= read -r mod; do
[ -z "$mod" ] && continue
local pretty="${mod#MODULE_}"
pretty="$(echo "$pretty" | tr '[:upper:]' '[:lower:]' | tr '_' ' ' | sed 's/\b\w/\U&/g')"
printf "%s\n" "$pretty"
done <<< "$module_vars"
else
echo "none"
fi
if container_running "ac-worldserver"; then
local playerbot="disabled"
local module_playerbots
module_playerbots="$(read_env MODULE_PLAYERBOTS)"
if [ "$module_playerbots" = "1" ]; then
playerbot="enabled"
if docker inspect --format='{{.State.Status}}' ac-worldserver 2>/dev/null | grep -q "running"; then
playerbot="running"
fi
fi
local eluna="disabled"
[ "$ELUNA_ENABLED" = "1" ] && eluna="running"
# echo "RUNTIME: playerbots $playerbot | eluna $eluna"
fi
}
render_module_ports(){
local modules_raw="$1" ports_raw="$2" net_line="$3"
mapfile -t modules <<< "$modules_raw"
mapfile -t ports_lines <<< "$ports_raw"
local ports=()
for idx in "${!ports_lines[@]}"; do
local line="${ports_lines[$idx]}"
if [ "$idx" -eq 0 ]; then
continue
fi
line="$(echo "$line" | sed 's/^[[:space:]]*//')"
[ -z "$line" ] && continue
ports+=("$line")
done
if [ -n "$net_line" ]; then
ports+=("DOCKER NET: ${net_line##*: }")
fi
local rows="${#modules[@]}"
if [ "${#ports[@]}" -gt "$rows" ]; then
rows="${#ports[@]}"
fi
printf " %-52s %s\n" "MODULES:" "PORTS:"
for ((i=0; i<rows; i++)); do
local left="${modules[i]:-}"
local right="${ports[i]:-}"
if [ -n "$left" ]; then
left="$left"
fi
local port_column=""
if [[ "$right" == DOCKER\ NET:* ]]; then
port_column=" $right"
elif [ -n "$right" ]; then
port_column=" $right"
fi
printf " %-50s %s\n" "$left" "$port_column"
done
}
user_stats(){
if ! container_running "ac-mysql"; then
echo -e "USERS: ${RED}Database offline${NC}"
return
fi
local mysql_pw db_auth db_characters
mysql_pw="$(read_env MYSQL_ROOT_PASSWORD)"
db_auth="$(read_env DB_AUTH_NAME)"
db_characters="$(read_env DB_CHARACTERS_NAME)"
if [ -z "$mysql_pw" ] || [ -z "$db_auth" ] || [ -z "$db_characters" ]; then
echo -e "USERS: ${YELLOW}Missing MySQL configuration in .env${NC}"
return
fi
local exec_mysql
exec_mysql(){
local database="$1" query="$2"
docker exec ac-mysql mysql -N -B -u root -p"${mysql_pw}" "$database" -e "$query" 2>/dev/null | tail -n1
}
local account_total account_online character_total last_week
account_total="$(exec_mysql "$db_auth" "SELECT COUNT(*) FROM account;")"
account_online="$(exec_mysql "$db_auth" "SELECT COUNT(*) FROM account WHERE online = 1;")"
character_total="$(exec_mysql "$db_characters" "SELECT COUNT(*) FROM characters;")"
last_week="$(exec_mysql "$db_auth" "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")"
[[ -z "$account_total" ]] && account_total="0"
[[ -z "$account_online" ]] && account_online="0"
[[ -z "$character_total" ]] && character_total="0"
[[ -z "$last_week" ]] && last_week="0"
printf "USERS: Accounts %b%s%b | Online %b%s%b | Characters %b%s%b | Active 7d %b%s%b\n" \
"$GREEN" "$account_total" "$NC" \
"$YELLOW" "$account_online" "$NC" \
"$CYAN" "$character_total" "$NC" \
"$BLUE" "$last_week" "$NC"
}
ports_summary(){
local names=("Auth" "World" "SOAP" "MySQL" "phpMyAdmin" "Keira3")
local ports=("$AUTH_PORT" "$WORLD_PORT" "$SOAP_PORT" "$MYSQL_PORT" "$PMA_PORT" "$KEIRA_PORT")
printf "PORTS:\n"
for i in "${!names[@]}"; do
local svc="${names[$i]}"
local port="${ports[$i]}"
if [ "$svc" = "MySQL" ] && [ "${MYSQL_EXPOSE_OVERRIDE}" != "1" ]; then
printf " %-10s %-6s %b○%b not exposed\n" "$svc" "--" "$CYAN" "$NC"
continue
fi
if [ -z "$port" ]; then
printf " %-10s %-6s %b○%b not set\n" "$svc" "--" "$YELLOW" "$NC"
continue
fi
if timeout 1 bash -c "</dev/tcp/127.0.0.1/${port}" >/dev/null 2>&1; then
if [ "$svc" = "MySQL" ]; then
printf " %-10s %-6s %b●%b reachable %b!note%b exposed\n" "$svc" "$port" "$GREEN" "$NC" "$YELLOW" "$NC"
else
printf " %-10s %-6s %b●%b reachable\n" "$svc" "$port" "$GREEN" "$NC"
fi
else
printf " %-10s %-6s %b○%b unreachable\n" "$svc" "$port" "$RED" "$NC"
fi
done
}
network_summary(){
if [ -z "$NETWORK_NAME" ]; then
echo "DOCKER NET: not set"
return
fi
if docker network ls --format '{{.Name}}' | grep -qx "$NETWORK_NAME"; then
echo "DOCKER NET: $NETWORK_NAME"
else
echo "DOCKER NET: missing ($NETWORK_NAME)"
fi
}
show_realm_status_header(){
echo -e "${BLUE}🏰 REALM STATUS DASHBOARD 🏰${NC}"
echo -e "${BLUE}═══════════════════════════${NC}"
}
render_snapshot(){
#show_realm_status_header
printf "TIME %s PROJECT %s\n\n" "$(date '+%Y-%m-%d %H:%M:%S')" "$PROJECT_NAME"
user_stats
printf "%-20s %-15s %-28s %s\n" "SERVICE" "CONTAINER" "STATE" "IMAGE"
printf "%-20s %-15s %-28s %s\n" "--------------------" "---------------" "----------------------------" "------------------------------"
print_service ac-mysql "MySQL"
print_service ac-backup "Backup"
print_service ac-db-init "DB Init"
print_service ac-db-import "DB Import"
print_service ac-authserver "Auth Server"
print_service ac-worldserver "World Server"
print_service ac-client-data "Client Data"
print_service ac-modules "Module Manager"
print_service ac-post-install "Post Install"
print_service ac-phpmyadmin "phpMyAdmin"
print_service ac-keira3 "Keira3"
echo ""
local module_block ports_block net_line
module_block="$(module_summary_list)"
ports_block="$(ports_summary)"
net_line="$(network_summary)"
render_module_ports "$module_block" "$ports_block" "$net_line"
}
display_snapshot(){
local tmp
tmp="$(mktemp)"
render_snapshot >"$tmp"
clear 2>/dev/null || printf '\033[2J\033[H'
cat "$tmp"
rm -f "$tmp"
}
if [ "$WATCH_MODE" = true ]; then
while true; do
display_snapshot
sleep 3
done
else
display_snapshot
fi
#!/bin/bash
# Wrapper that ensures the statusdash TUI is built before running.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$SCRIPT_DIR"
BINARY_PATH="$PROJECT_DIR/statusdash"
SOURCE_DIR="$PROJECT_DIR/scripts/go"
CACHE_DIR="$PROJECT_DIR/.gocache"
usage() {
cat <<EOF
statusdash wrapper
Usage: $0 [options] [-- statusdash-args]
Options:
--rebuild Force rebuilding the statusdash binary
-h, --help Show this help text
All arguments after '--' are passed directly to the statusdash binary.
Go must be installed locally to build statusdash (https://go.dev/doc/install).
EOF
}
force_rebuild=0
statusdash_args=()
while [[ $# -gt 0 ]]; do
case "$1" in
--rebuild)
force_rebuild=1
shift
;;
-h|--help)
usage
exit 0
;;
--)
shift
statusdash_args+=("$@")
break
;;
*)
statusdash_args+=("$1")
shift
;;
esac
done
ensure_go() {
if ! command -v go >/dev/null 2>&1; then
cat >&2 <<'ERR'
Go toolchain not found.
statusdash requires Go to build. Install Go from https://go.dev/doc/install and retry.
ERR
exit 1
fi
}
build_statusdash() {
ensure_go
mkdir -p "$CACHE_DIR"
echo "Building statusdash..."
(
cd "$SOURCE_DIR"
GOCACHE="$CACHE_DIR" go build -o "$BINARY_PATH" .
)
}
if [[ $force_rebuild -eq 1 ]]; then
rm -f "$BINARY_PATH"
fi
if [[ ! -x "$BINARY_PATH" ]]; then
build_statusdash
fi
exec "$BINARY_PATH" "${statusdash_args[@]}"