mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git
synced 2026-01-13 00:58:34 +00:00

setup hardening

@@ -65,7 +65,7 @@ DB_GUARD_VERIFY_INTERVAL_SECONDS=86400
 # =====================
 # Module SQL staging
 # =====================
-MODULE_SQL_STAGE_PATH=${STORAGE_PATH_LOCAL}/module-sql-updates
+STAGE_PATH_MODULE_SQL=${STORAGE_PATH_LOCAL}/module-sql-updates

 # =====================
 # SQL Source Overlay
@@ -180,6 +180,7 @@ DB_CHARACTER_SYNCH_THREADS=1
 BACKUP_RETENTION_DAYS=3
 BACKUP_RETENTION_HOURS=6
 BACKUP_DAILY_TIME=09
+BACKUP_INTERVAL_MINUTES=60
 # Optional comma/space separated schemas to include in automated backups
 BACKUP_EXTRA_DATABASES=
 BACKUP_HEALTHCHECK_MAX_MINUTES=1440
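
Note: the staging-path variable is renamed from MODULE_SQL_STAGE_PATH to STAGE_PATH_MODULE_SQL, so an older .env that still sets the old name will silently fall back to the default. A purely illustrative compatibility shim for a wrapper script (not something this commit adds):

# Illustrative only: prefer the new variable name, fall back to the legacy
# one, then to the documented default location.
STAGE_PATH_MODULE_SQL="${STAGE_PATH_MODULE_SQL:-${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH_LOCAL:-./local-storage}/module-sql-updates}}"
echo "Module SQL staging directory: ${STAGE_PATH_MODULE_SQL}"
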

7  build.sh

@@ -137,11 +137,18 @@ generate_module_state(){

  # Check if blocked modules were detected in warnings
  if echo "$validation_output" | grep -q "is blocked:"; then
+    # Gather blocked module keys for display
+    local blocked_modules
+    blocked_modules=$(echo "$validation_output" | grep -oE 'MODULE_[A-Za-z0-9_]+' | sort -u | tr '\n' ' ')
+
    # Blocked modules detected - show warning and ask for confirmation
    echo
    warn "════════════════════════════════════════════════════════════════"
    warn "⚠️ BLOCKED MODULES DETECTED ⚠️"
    warn "════════════════════════════════════════════════════════════════"
+    if [ -n "$blocked_modules" ]; then
+      warn "Affected modules: ${blocked_modules}"
+    fi
    warn "Some enabled modules are marked as blocked due to compatibility"
    warn "issues. These modules will be SKIPPED during the build process."
    warn ""
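
For illustration only, this is what the new grep pipeline extracts from some hypothetical validator output (the exact wording of the "is blocked:" messages is an assumption here):

# Hypothetical validation output; only the MODULE_* tokens matter.
validation_output='MODULE_AHBOT is blocked: incompatible with current core
MODULE_ELUNA is blocked: awaiting rebase
MODULE_AHBOT is blocked: duplicate warning'
echo "$validation_output" | grep -oE 'MODULE_[A-Za-z0-9_]+' | sort -u | tr '\n' ' '
# prints: MODULE_AHBOT MODULE_ELUNA
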

@@ -1,4 +1,11 @@
 name: ${COMPOSE_PROJECT_NAME}
+
+x-logging: &logging-default
+  driver: json-file
+  options:
+    max-size: "10m"
+    max-file: "3"
+
 services:
   # =====================
   # Database Layer (db)
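
A quick way to confirm the new x-logging anchor expands the way the later "logging: *logging-default" references expect is to render the merged configuration. A rough check, assuming it is run from the project root with a populated .env:

# Show the resolved logging options for each service after anchor expansion.
docker compose config | grep -B2 -A4 'max-size'
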
@@ -40,8 +47,7 @@ services:
       - --innodb-log-file-size=${MYSQL_INNODB_LOG_FILE_SIZE}
       - --innodb-redo-log-capacity=${MYSQL_INNODB_REDO_LOG_CAPACITY}
     restart: unless-stopped
-    logging:
-      driver: "json-file"
+    logging: *logging-default
     healthcheck:
       test: ["CMD", "sh", "-c", "mysqladmin ping -h localhost -u ${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} --silent || exit 1"]
       interval: ${MYSQL_HEALTHCHECK_INTERVAL}
@@ -68,11 +74,12 @@ services:
       - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
       - ${STORAGE_PATH}/logs:/azerothcore/logs
       - ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
-      - ${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
+      - ${STAGE_PATH_MODULE_SQL:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
       - mysql-data:/var/lib/mysql-persistent
       - ${STORAGE_PATH}/modules:/modules
       - ${BACKUP_PATH}:/backups
       - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
+      - ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
       - ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro
     environment:
       AC_DATA_DIR: "/azerothcore/data"
@@ -132,11 +139,12 @@ services:
       - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
       - ${STORAGE_PATH}/logs:/azerothcore/logs
       - ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
-      - ${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
+      - ${STAGE_PATH_MODULE_SQL:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
       - mysql-data:/var/lib/mysql-persistent
       - ${STORAGE_PATH}/modules:/modules
       - ${BACKUP_PATH}:/backups
       - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
+      - ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
       - ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro
       - ./scripts/bash/db-guard.sh:/tmp/db-guard.sh:ro
     environment:
@@ -326,7 +334,7 @@ services:
     profiles: ["client-data", "client-data-bots"]
     image: ${ALPINE_IMAGE}
     container_name: ac-volume-init
-    user: "${CONTAINER_USER}"
+    user: "0:0"
     volumes:
       - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
       - client-data-cache:/cache
@@ -356,22 +364,66 @@ services:
     volumes:
       - ${STORAGE_PATH}:/storage-root
       - ${STORAGE_PATH_LOCAL}:/local-storage-root
+      - ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
     command:
       - sh
       - -c
       - |
-        echo "🔧 Initializing storage directories with container user ownership..."
+        echo "🔧 Initializing storage directories with proper permissions..."
         mkdir -p /storage-root/config /storage-root/logs /storage-root/modules /storage-root/lua_scripts /storage-root/install-markers
         mkdir -p /storage-root/config/mysql/conf.d
         mkdir -p /storage-root/client-data
         mkdir -p /storage-root/backups
-        mkdir -p /storage-root/module-sql-updates
-        mkdir -p /storage-root/data
-        mkdir -p /storage-root/temp
-        mkdir -p /local-storage-root || true
-        chown -R ${CONTAINER_USER} /storage-root /local-storage-root 2>/dev/null || true
-        chmod -R 755 /storage-root /local-storage-root 2>/dev/null || true
-        echo "✅ Storage directories ready (owned by ${CONTAINER_USER})"
+        # Copy core AzerothCore config template files (.dist) to config directory
+        echo "📄 Copying AzerothCore configuration templates..."
+        SOURCE_DIR="${SOURCE_DIR:-/local-storage-root/source/azerothcore-playerbots}"
+        if [ ! -d "$SOURCE_DIR" ] && [ -d "/local-storage-root/source/azerothcore-wotlk" ]; then
+          SOURCE_DIR="/local-storage-root/source/azerothcore-wotlk"
+        fi
+
+        # Seed dbimport.conf with a shared helper (fallback to a simple copy if missing)
+        if [ -f "/tmp/seed-dbimport-conf.sh" ]; then
+          echo "🧩 Seeding dbimport.conf"
+          DBIMPORT_CONF_DIR="/storage-root/config" \
+          DBIMPORT_SOURCE_ROOT="$SOURCE_DIR" \
+          sh -c '. /tmp/seed-dbimport-conf.sh && seed_dbimport_conf' || true
+        else
+          if [ -f "$SOURCE_DIR/src/tools/dbimport/dbimport.conf.dist" ]; then
+            cp -n "$SOURCE_DIR/src/tools/dbimport/dbimport.conf.dist" /storage-root/config/ 2>/dev/null || true
+            if [ ! -f "/storage-root/config/dbimport.conf" ]; then
+              cp "$SOURCE_DIR/src/tools/dbimport/dbimport.conf.dist" /storage-root/config/dbimport.conf
+              echo " ✓ Created dbimport.conf"
+            fi
+          fi
+        fi
+
+        # Copy authserver.conf.dist
+        if [ -f "$SOURCE_DIR/env/dist/etc/authserver.conf.dist" ]; then
+          cp -n "$SOURCE_DIR/env/dist/etc/authserver.conf.dist" /storage-root/config/ 2>/dev/null || true
+          if [ ! -f "/storage-root/config/authserver.conf" ]; then
+            cp "$SOURCE_DIR/env/dist/etc/authserver.conf.dist" /storage-root/config/authserver.conf
+            echo " ✓ Created authserver.conf"
+          fi
+        fi
+
+        # Copy worldserver.conf.dist
+        if [ -f "$SOURCE_DIR/env/dist/etc/worldserver.conf.dist" ]; then
+          cp -n "$SOURCE_DIR/env/dist/etc/worldserver.conf.dist" /storage-root/config/ 2>/dev/null || true
+          if [ ! -f "/storage-root/config/worldserver.conf" ]; then
+            cp "$SOURCE_DIR/env/dist/etc/worldserver.conf.dist" /storage-root/config/worldserver.conf
+            echo " ✓ Created worldserver.conf"
+          fi
+        fi
+        mkdir -p /storage-root/config/temp
+        # Fix ownership of root directories and all contents
+        if [ "$(id -u)" -eq 0 ]; then
+          chown -R ${CONTAINER_USER} /storage-root /local-storage-root
+          chmod -R 755 /storage-root /local-storage-root
+          echo "✅ Storage permissions initialized"
+        else
+          echo "ℹ️ Running as $(id -u):$(id -g); assuming host permissions are already correct."
+        fi
     restart: "no"
     networks:
       - azerothcore
@@ -400,8 +452,6 @@ services:
       - -c
       - |
         mkdir -p /cache
-        chown -R ${CONTAINER_USER} /azerothcore/data /cache 2>/dev/null || true
-        chmod -R 755 /azerothcore/data /cache 2>/dev/null || true
         if [ -f /tmp/scripts/bash/download-client-data.sh ]; then
           chmod +x /tmp/scripts/bash/download-client-data.sh 2>/dev/null || true
           bash /tmp/scripts/bash/download-client-data.sh
@@ -434,9 +484,6 @@ services:
       - |
         echo "📦 Installing 7z + gosu for client data extraction..."
         apt-get update -qq && apt-get install -y p7zip-full gosu
-        echo "🔧 Normalizing client-data/cache ownership..."
-        chown -R ${CONTAINER_USER} /azerothcore/data /cache 2>/dev/null || true
-        chmod -R 755 /azerothcore/data /cache 2>/dev/null || true
         gosu ${CONTAINER_USER} bash -c '
           set -e
           mkdir -p /cache
@@ -478,8 +525,7 @@ services:
     ports:
       - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
     restart: unless-stopped
-    logging:
-      driver: "json-file"
+    logging: *logging-default
     networks:
       - azerothcore
     volumes:
@@ -534,8 +580,7 @@ services:
       - ${STORAGE_PATH}/modules:/azerothcore/modules
       - ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
     restart: unless-stopped
-    logging:
-      driver: "json-file"
+    logging: *logging-default
     networks:
       - azerothcore
     cap_add: ["SYS_NICE"]
@@ -573,11 +618,7 @@ services:
     ports:
       - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
     restart: unless-stopped
-    logging:
-      driver: json-file
-      options:
-        max-size: "10m"
-        max-file: "3"
+    logging: *logging-default
     networks:
       - azerothcore
     volumes:
@@ -613,8 +654,7 @@ services:
     ports:
       - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
     restart: unless-stopped
-    logging:
-      driver: "json-file"
+    logging: *logging-default
     networks:
       - azerothcore
     volumes:
@@ -672,8 +712,7 @@ services:
       - ${STORAGE_PATH}/modules:/azerothcore/modules
       - ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
     restart: unless-stopped
-    logging:
-      driver: "json-file"
+    logging: *logging-default
     networks:
       - azerothcore
     cap_add: ["SYS_NICE"]
@@ -730,11 +769,7 @@ services:
       - "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
       - "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
     restart: unless-stopped
-    logging:
-      driver: json-file
-      options:
-        max-size: "10m"
-        max-file: "3"
+    logging: *logging-default
     cap_add: ["SYS_NICE"]
     healthcheck:
       test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
@@ -823,8 +858,10 @@ services:
      - |
        apk add --no-cache bash curl docker-cli su-exec
        chmod +x /tmp/scripts/bash/auto-post-install.sh 2>/dev/null || true
-       echo "📥 Running post-install as ${CONTAINER_USER}"
-       su-exec ${CONTAINER_USER} bash /tmp/scripts/bash/auto-post-install.sh
+       echo "📥 Running post-install as root (testing mode)"
+       mkdir -p /install-markers
+       chown -R ${CONTAINER_USER} /azerothcore/config /install-markers 2>/dev/null || true
+       bash /tmp/scripts/bash/auto-post-install.sh
     restart: "no"
     networks:
       - azerothcore
@@ -881,8 +918,7 @@ services:
       timeout: 10s
       retries: 3
       start_period: 40s
-    logging:
-      driver: "json-file"
+    logging: *logging-default
     security_opt:
       - no-new-privileges:true
     networks:

@@ -24,6 +24,34 @@ STATUS_FILE="${DB_GUARD_STATUS_FILE:-/tmp/db-guard.status}"
 ERROR_FILE="${DB_GUARD_ERROR_FILE:-/tmp/db-guard.error}"
 MODULE_SQL_HOST_PATH="${MODULE_SQL_HOST_PATH:-/modules-sql}"

+SEED_CONF_SCRIPT="${SEED_DBIMPORT_CONF_SCRIPT:-/tmp/seed-dbimport-conf.sh}"
+if [ -f "$SEED_CONF_SCRIPT" ]; then
+  # shellcheck source=/dev/null
+  . "$SEED_CONF_SCRIPT"
+elif ! command -v seed_dbimport_conf >/dev/null 2>&1; then
+  seed_dbimport_conf(){
+    local conf="/azerothcore/env/dist/etc/dbimport.conf"
+    local dist="${conf}.dist"
+    mkdir -p "$(dirname "$conf")"
+    [ -f "$conf" ] && return 0
+    if [ -f "$dist" ]; then
+      cp "$dist" "$conf"
+    else
+      warn "dbimport.conf missing and no dist available; writing minimal defaults"
+      cat > "$conf" <<EOF
+LoginDatabaseInfo = "localhost;3306;root;root;acore_auth"
+WorldDatabaseInfo = "localhost;3306;root;root;acore_world"
+CharacterDatabaseInfo = "localhost;3306;root;root;acore_characters"
+PlayerbotsDatabaseInfo = "localhost;3306;root;root;acore_playerbots"
+EnableDatabases = 15
+Updates.AutoSetup = 1
+MySQLExecutable = "/usr/bin/mysql"
+TempDir = "/azerothcore/env/dist/etc/temp"
+EOF
+    fi
+  }
+fi
+
 declare -a DB_SCHEMAS=()
 for var in DB_AUTH_NAME DB_WORLD_NAME DB_CHARACTERS_NAME DB_PLAYERBOTS_NAME; do
   value="${!var:-}"
@@ -85,15 +113,6 @@ rehydrate(){
     "$IMPORT_SCRIPT"
 }

-ensure_dbimport_conf(){
-  local conf="/azerothcore/env/dist/etc/dbimport.conf"
-  local dist="${conf}.dist"
-  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
-    cp "$dist" "$conf"
-  fi
-  mkdir -p /azerothcore/env/dist/temp
-}
-
 sync_host_stage_files(){
   local host_root="${MODULE_SQL_HOST_PATH}"
   [ -d "$host_root" ] || return 0
@@ -110,7 +129,7 @@ sync_host_stage_files(){

 dbimport_verify(){
   local bin_dir="/azerothcore/env/dist/bin"
-  ensure_dbimport_conf
+  seed_dbimport_conf
   sync_host_stage_files
   if [ ! -x "${bin_dir}/dbimport" ]; then
     warn "dbimport binary not found at ${bin_dir}/dbimport"

@@ -81,15 +81,6 @@ wait_for_mysql(){
     return 1
 }

-ensure_dbimport_conf(){
-  local conf="/azerothcore/env/dist/etc/dbimport.conf"
-  local dist="${conf}.dist"
-  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
-    cp "$dist" "$conf"
-  fi
-  mkdir -p /azerothcore/env/dist/temp
-}
-
 case "${1:-}" in
   -h|--help)
     print_help
@@ -106,6 +97,34 @@ esac
 echo "🔧 Conditional AzerothCore Database Import"
 echo "========================================"

+SEED_CONF_SCRIPT="${SEED_DBIMPORT_CONF_SCRIPT:-/tmp/seed-dbimport-conf.sh}"
+if [ -f "$SEED_CONF_SCRIPT" ]; then
+  # shellcheck source=/dev/null
+  . "$SEED_CONF_SCRIPT"
+elif ! command -v seed_dbimport_conf >/dev/null 2>&1; then
+  seed_dbimport_conf(){
+    local conf="/azerothcore/env/dist/etc/dbimport.conf"
+    local dist="${conf}.dist"
+    mkdir -p "$(dirname "$conf")"
+    [ -f "$conf" ] && return 0
+    if [ -f "$dist" ]; then
+      cp "$dist" "$conf"
+    else
+      echo "⚠️ dbimport.conf missing and no dist available; using localhost defaults" >&2
+      cat > "$conf" <<EOF
+LoginDatabaseInfo = "localhost;3306;root;root;acore_auth"
+WorldDatabaseInfo = "localhost;3306;root;root;acore_world"
+CharacterDatabaseInfo = "localhost;3306;root;root;acore_characters"
+PlayerbotsDatabaseInfo = "localhost;3306;root;root;acore_playerbots"
+EnableDatabases = 15
+Updates.AutoSetup = 1
+MySQLExecutable = "/usr/bin/mysql"
+TempDir = "/azerothcore/env/dist/etc/temp"
+EOF
+    fi
+  }
+fi
+
 if ! wait_for_mysql; then
   echo "❌ MySQL service is unavailable; aborting database import"
   exit 1
@@ -158,6 +177,8 @@ echo "🔧 Starting database import process..."

 echo "🔍 Checking for backups to restore..."

+# Allow tolerant scanning; re-enable -e after search.
+set +e
 # Define backup search paths in priority order
 BACKUP_SEARCH_PATHS=(
     "/backups"
@@ -253,13 +274,16 @@ if [ -z "$backup_path" ]; then
     # Check for manual backups (*.sql files)
     if [ -z "$backup_path" ]; then
         echo "🔍 Checking for manual backup files..."
-        latest_manual=$(ls -1t "$BACKUP_DIRS"/*.sql 2>/dev/null | head -n 1)
-        if [ -n "$latest_manual" ] && [ -f "$latest_manual" ]; then
-            echo "📦 Found manual backup: $(basename "$latest_manual")"
-            if timeout 10 head -20 "$latest_manual" >/dev/null 2>&1; then
-                echo "✅ Valid manual backup file: $(basename "$latest_manual")"
-                backup_path="$latest_manual"
-                break
+        latest_manual=""
+        if ls "$BACKUP_DIRS"/*.sql >/dev/null 2>&1; then
+            latest_manual=$(ls -1t "$BACKUP_DIRS"/*.sql | head -n 1)
+            if [ -n "$latest_manual" ] && [ -f "$latest_manual" ]; then
+                echo "📦 Found manual backup: $(basename "$latest_manual")"
+                if timeout 10 head -20 "$latest_manual" >/dev/null 2>&1; then
+                    echo "✅ Valid manual backup file: $(basename "$latest_manual")"
+                    backup_path="$latest_manual"
+                    break
+                fi
             fi
         fi
     fi
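
The reworked manual-backup check only runs `ls` when the glob actually matches, so an empty backup directory cannot abort the script while errexit is active. The same pattern in isolation (the path is a placeholder):

# Only list and pick the newest dump when *.sql files actually exist.
if ls /backups/*.sql >/dev/null 2>&1; then
  latest=$(ls -1t /backups/*.sql | head -n 1)
  echo "newest manual backup: $latest"
else
  echo "no manual *.sql backups found"
fi
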
@@ -272,6 +296,7 @@ if [ -z "$backup_path" ]; then
     done
 fi

+set -e
 echo "🔄 Final backup path result: '$backup_path'"
 if [ -n "$backup_path" ]; then
     echo "📦 Found backup: $(basename "$backup_path")"
@@ -357,7 +382,7 @@ if [ -n "$backup_path" ]; then
         return 0
     fi

-    ensure_dbimport_conf
+    seed_dbimport_conf

     cd /azerothcore/env/dist/bin
     echo "🔄 Running dbimport to apply any missing updates..."
@@ -424,23 +449,73 @@ fi

 echo "🗄️ Creating fresh AzerothCore databases..."
 mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} -e "
-CREATE DATABASE IF NOT EXISTS ${DB_AUTH_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
-CREATE DATABASE IF NOT EXISTS ${DB_WORLD_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
-CREATE DATABASE IF NOT EXISTS ${DB_CHARACTERS_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
-CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+DROP DATABASE IF EXISTS ${DB_AUTH_NAME};
+DROP DATABASE IF EXISTS ${DB_WORLD_NAME};
+DROP DATABASE IF EXISTS ${DB_CHARACTERS_NAME};
+DROP DATABASE IF EXISTS ${DB_PLAYERBOTS_NAME:-acore_playerbots};
+CREATE DATABASE ${DB_AUTH_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+CREATE DATABASE ${DB_WORLD_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+CREATE DATABASE ${DB_CHARACTERS_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+CREATE DATABASE ${DB_PLAYERBOTS_NAME:-acore_playerbots} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
 SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
 echo "✅ Fresh databases created - proceeding with schema import"

-ensure_dbimport_conf
-
 echo "🚀 Running database import..."
 cd /azerothcore/env/dist/bin
+seed_dbimport_conf
+
+maybe_run_base_import(){
+    local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
+    local mysql_port="${MYSQL_PORT:-3306}"
+    local mysql_user="${MYSQL_USER:-root}"
+    local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
+
+    import_dir(){
+        local db="$1" dir="$2"
+        [ -d "$dir" ] || return 0
+        echo "🔧 Importing base schema for ${db} from $(basename "$dir")..."
+        for f in $(ls "$dir"/*.sql 2>/dev/null | LC_ALL=C sort); do
+            MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" "$db" < "$f" >/dev/null 2>&1 || true
+        done
+    }
+
+    needs_import(){
+        local db="$1"
+        local count
+        count="$(MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -N -B -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='${db}';" 2>/dev/null || echo 0)"
+        [ "${count:-0}" -eq 0 ] && return 0
+        local updates
+        updates="$(MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -N -B -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='${db}' AND table_name='updates';" 2>/dev/null || echo 0)"
+        [ "${updates:-0}" -eq 0 ]
+    }
+
+    if needs_import "${DB_WORLD_NAME:-acore_world}"; then
+        import_dir "${DB_WORLD_NAME:-acore_world}" "/azerothcore/data/sql/base/db_world"
+    fi
+    if needs_import "${DB_AUTH_NAME:-acore_auth}"; then
+        import_dir "${DB_AUTH_NAME:-acore_auth}" "/azerothcore/data/sql/base/db_auth"
+    fi
+    if needs_import "${DB_CHARACTERS_NAME:-acore_characters}"; then
+        import_dir "${DB_CHARACTERS_NAME:-acore_characters}" "/azerothcore/data/sql/base/db_characters"
+    fi
+}
+
+maybe_run_base_import
 if ./dbimport; then
     echo "✅ Database import completed successfully!"
-    echo "$(date): Database import completed successfully" > "$RESTORE_STATUS_DIR/.import-completed" || echo "$(date): Database import completed successfully" > "$MARKER_STATUS_DIR/.import-completed"
+    import_marker_msg="$(date): Database import completed successfully"
+    if [ -w "$RESTORE_STATUS_DIR" ]; then
+        echo "$import_marker_msg" > "$RESTORE_STATUS_DIR/.import-completed"
+    elif [ -w "$MARKER_STATUS_DIR" ]; then
+        echo "$import_marker_msg" > "$MARKER_STATUS_DIR/.import-completed" 2>/dev/null || true
+    fi
 else
     echo "❌ Database import failed!"
-    echo "$(date): Database import failed" > "$RESTORE_STATUS_DIR/.import-failed" || echo "$(date): Database import failed" > "$MARKER_STATUS_DIR/.import-failed"
+    if [ -w "$RESTORE_STATUS_DIR" ]; then
+        echo "$(date): Database import failed" > "$RESTORE_STATUS_DIR/.import-failed"
+    elif [ -w "$MARKER_STATUS_DIR" ]; then
+        echo "$(date): Database import failed" > "$MARKER_STATUS_DIR/.import-failed" 2>/dev/null || true
+    fi
     exit 1
 fi

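
Roughly, needs_import treats a schema as needing a base import when it has no tables at all, or when it has tables but no `updates` bookkeeping table yet. The same two probes can be run by hand; the host, port, user and schema name below are placeholders for your own environment:

# 1) total table count for the schema; 2) presence of the `updates` table.
MYSQL_PWD="$MYSQL_ROOT_PASSWORD" mysql -h ac-mysql -P 3306 -u root -N -B \
  -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='acore_world';"
MYSQL_PWD="$MYSQL_ROOT_PASSWORD" mysql -h ac-mysql -P 3306 -u root -N -B \
  -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='acore_world' AND table_name='updates';"
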

@@ -213,6 +213,23 @@ ensure_host_writable "$LOCAL_STORAGE_ROOT"
 TARBALL="${TARBALL:-${LOCAL_STORAGE_ROOT}/images/acore-modules-images.tar}"
 ensure_host_writable "$(dirname "$TARBALL")"

+# Resolve module SQL staging paths (local and remote)
+resolve_path_relative_to_project(){
+  local path="$1" root="$2"
+  if [[ "$path" != /* ]]; then
+    # drop leading ./ if present
+    path="${path#./}"
+    path="${root%/}/$path"
+  fi
+  echo "${path%/}"
+}
+
+STAGE_SQL_PATH_RAW="$(read_env_value STAGE_PATH_MODULE_SQL "${STORAGE_PATH_LOCAL:-./local-storage}/module-sql-updates")"
+# Expand any env references (e.g., ${STORAGE_PATH_LOCAL})
+STAGE_SQL_PATH_RAW="$(eval "echo \"$STAGE_SQL_PATH_RAW\"")"
+LOCAL_STAGE_SQL_DIR="$(resolve_path_relative_to_project "$STAGE_SQL_PATH_RAW" "$PROJECT_ROOT")"
+REMOTE_STAGE_SQL_DIR="$(resolve_path_relative_to_project "$STAGE_SQL_PATH_RAW" "$PROJECT_DIR")"
+
 SCP_OPTS=(-P "$PORT")
 SSH_OPTS=(-p "$PORT")
 if [[ -n "$IDENTITY" ]]; then
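
With resolve_path_relative_to_project from the hunk above in scope, a small worked example of what it produces (the project root used here is illustrative):

# Relative paths are anchored under the given root; absolute paths only lose
# any trailing slash.
resolve_path_relative_to_project "./local-storage/module-sql-updates" "/opt/acore"
#   -> /opt/acore/local-storage/module-sql-updates
resolve_path_relative_to_project "/srv/acore-stage/" "/opt/acore"
#   -> /srv/acore-stage
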
@@ -439,6 +456,18 @@ if [[ $SKIP_STORAGE -eq 0 ]]; then
    rm -f "$modules_tar"
    run_ssh "tar -xf '$REMOTE_TEMP_DIR/acore-modules.tar' -C '$REMOTE_STORAGE/modules' && rm '$REMOTE_TEMP_DIR/acore-modules.tar'"
  fi
+
+  # Sync module SQL staging directory (STAGE_PATH_MODULE_SQL)
+  if [[ -d "$LOCAL_STAGE_SQL_DIR" ]]; then
+    echo "⋅ Syncing module SQL staging to remote"
+    run_ssh "rm -rf '$REMOTE_STAGE_SQL_DIR' && mkdir -p '$REMOTE_STAGE_SQL_DIR'"
+    sql_tar=$(mktemp)
+    tar -cf "$sql_tar" -C "$LOCAL_STAGE_SQL_DIR" .
+    ensure_remote_temp_dir
+    run_scp "$sql_tar" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-module-sql.tar"
+    rm -f "$sql_tar"
+    run_ssh "tar -xf '$REMOTE_TEMP_DIR/acore-module-sql.tar' -C '$REMOTE_STAGE_SQL_DIR' && rm '$REMOTE_TEMP_DIR/acore-module-sql.tar'"
+  fi
 fi

 reset_remote_post_install_marker(){

88  scripts/bash/seed-dbimport-conf.sh  (Normal file)

@@ -0,0 +1,88 @@
+#!/bin/bash
+# Ensure dbimport.conf exists with usable connection values.
+set -euo pipefail 2>/dev/null || set -eu
+
+# Usage: seed_dbimport_conf [conf_dir]
+# - conf_dir: target directory (defaults to DBIMPORT_CONF_DIR or /azerothcore/env/dist/etc)
+seed_dbimport_conf() {
+  local conf_dir="${1:-${DBIMPORT_CONF_DIR:-/azerothcore/env/dist/etc}}"
+  local conf="${conf_dir}/dbimport.conf"
+  local dist="${conf}.dist"
+  local source_root="${DBIMPORT_SOURCE_ROOT:-${AC_SOURCE_DIR:-/local-storage-root/source/azerothcore-playerbots}}"
+  if [ ! -d "$source_root" ]; then
+    local fallback="/local-storage-root/source/azerothcore-wotlk"
+    if [ -d "$fallback" ]; then
+      source_root="$fallback"
+    fi
+  fi
+  local source_dist="${DBIMPORT_DIST_PATH:-${source_root}/src/tools/dbimport/dbimport.conf.dist}"
+  # Put temp dir inside the writable config mount so non-root can create files.
+  local temp_dir="${DBIMPORT_TEMP_DIR:-/azerothcore/env/dist/etc/temp}"
+
+  mkdir -p "$conf_dir" "$temp_dir"
+
+  # Prefer a real .dist from the source tree if it exists.
+  if [ -f "$source_dist" ]; then
+    cp -n "$source_dist" "$dist" 2>/dev/null || true
+  fi
+
+  if [ ! -f "$conf" ]; then
+    if [ -f "$dist" ]; then
+      cp "$dist" "$conf"
+    else
+      echo "⚠️ dbimport.conf.dist not found; generating minimal dbimport.conf" >&2
+      cat > "$conf" <<EOF
+LoginDatabaseInfo = "localhost;3306;root;root;acore_auth"
+WorldDatabaseInfo = "localhost;3306;root;root;acore_world"
+CharacterDatabaseInfo = "localhost;3306;root;root;acore_characters"
+PlayerbotsDatabaseInfo = "localhost;3306;root;root;acore_playerbots"
+EnableDatabases = 15
+Updates.AutoSetup = 1
+MySQLExecutable = "/usr/bin/mysql"
+TempDir = "/azerothcore/env/dist/temp"
+EOF
+    fi
+  fi
+
+  set_conf() {
+    local key="$1" value="$2" file="$3" quoted="${4:-true}"
+    local formatted="$value"
+    if [ "$quoted" = "true" ]; then
+      formatted="\"${value}\""
+    fi
+    if grep -qE "^[[:space:]]*${key}[[:space:]]*=" "$file"; then
+      sed -i "s|^[[:space:]]*${key}[[:space:]]*=.*|${key} = ${formatted}|" "$file"
+    else
+      printf '%s = %s\n' "$key" "$formatted" >> "$file"
+    fi
+  }
+
+  local host="${CONTAINER_MYSQL:-${MYSQL_HOST:-localhost}}"
+  local port="${MYSQL_PORT:-3306}"
+  local user="${MYSQL_USER:-root}"
+  local pass="${MYSQL_ROOT_PASSWORD:-root}"
+  local db_auth="${DB_AUTH_NAME:-acore_auth}"
+  local db_world="${DB_WORLD_NAME:-acore_world}"
+  local db_chars="${DB_CHARACTERS_NAME:-acore_characters}"
+  local db_bots="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
+
+  set_conf "LoginDatabaseInfo" "${host};${port};${user};${pass};${db_auth}" "$conf"
+  set_conf "WorldDatabaseInfo" "${host};${port};${user};${pass};${db_world}" "$conf"
+  set_conf "CharacterDatabaseInfo" "${host};${port};${user};${pass};${db_chars}" "$conf"
+  set_conf "PlayerbotsDatabaseInfo" "${host};${port};${user};${pass};${db_bots}" "$conf"
+  set_conf "EnableDatabases" "${AC_UPDATES_ENABLE_DATABASES:-15}" "$conf" false
+  set_conf "Updates.AutoSetup" "${AC_UPDATES_AUTO_SETUP:-1}" "$conf" false
+  set_conf "Updates.ExceptionShutdownDelay" "${AC_UPDATES_EXCEPTION_SHUTDOWN_DELAY:-10000}" "$conf" false
+  set_conf "Updates.AllowedModules" "${DB_UPDATES_ALLOWED_MODULES:-all}" "$conf"
+  set_conf "Updates.Redundancy" "${DB_UPDATES_REDUNDANCY:-1}" "$conf" false
+  set_conf "Database.Reconnect.Seconds" "${DB_RECONNECT_SECONDS:-5}" "$conf" false
+  set_conf "Database.Reconnect.Attempts" "${DB_RECONNECT_ATTEMPTS:-5}" "$conf" false
+  set_conf "LoginDatabase.WorkerThreads" "${DB_LOGIN_WORKER_THREADS:-1}" "$conf" false
+  set_conf "WorldDatabase.WorkerThreads" "${DB_WORLD_WORKER_THREADS:-1}" "$conf" false
+  set_conf "CharacterDatabase.WorkerThreads" "${DB_CHARACTER_WORKER_THREADS:-1}" "$conf" false
+  set_conf "LoginDatabase.SynchThreads" "${DB_LOGIN_SYNCH_THREADS:-1}" "$conf" false
+  set_conf "WorldDatabase.SynchThreads" "${DB_WORLD_SYNCH_THREADS:-1}" "$conf" false
+  set_conf "CharacterDatabase.SynchThreads" "${DB_CHARACTER_SYNCH_THREADS:-1}" "$conf" false
+  set_conf "MySQLExecutable" "/usr/bin/mysql" "$conf"
+  set_conf "TempDir" "$temp_dir" "$conf"
+}
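
The helper is meant to be sourced rather than executed directly; a rough usage sketch (every path and credential below is a placeholder, not a value the commit sets):

# Seed a dbimport.conf under a scratch directory, pointing the helper at a
# local source checkout; values are illustrative only.
export DBIMPORT_CONF_DIR=/tmp/acore-etc
export DBIMPORT_SOURCE_ROOT="$HOME/src/azerothcore-playerbots"
export CONTAINER_MYSQL=ac-mysql MYSQL_ROOT_PASSWORD=changeme
. scripts/bash/seed-dbimport-conf.sh
seed_dbimport_conf
grep -E 'DatabaseInfo|TempDir' /tmp/acore-etc/dbimport.conf
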
@@ -259,14 +259,14 @@ SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
 MODULES_META_DIR="$STORAGE_PATH/modules/.modules-meta"
 RESTORE_PRESTAGED_FLAG="$MODULES_META_DIR/.restore-prestaged"
 MODULES_ENABLED_FILE="$MODULES_META_DIR/modules-enabled.txt"
-MODULE_SQL_STAGE_PATH="$(read_env MODULE_SQL_STAGE_PATH "$STORAGE_PATH/module-sql-updates")"
-MODULE_SQL_STAGE_PATH="$(eval "echo \"$MODULE_SQL_STAGE_PATH\"")"
-if [[ "$MODULE_SQL_STAGE_PATH" != /* ]]; then
-  MODULE_SQL_STAGE_PATH="$PROJECT_DIR/$MODULE_SQL_STAGE_PATH"
+STAGE_PATH_MODULE_SQL="$(read_env STAGE_PATH_MODULE_SQL "$STORAGE_PATH/module-sql-updates")"
+STAGE_PATH_MODULE_SQL="$(eval "echo \"$STAGE_PATH_MODULE_SQL\"")"
+if [[ "$STAGE_PATH_MODULE_SQL" != /* ]]; then
+  STAGE_PATH_MODULE_SQL="$PROJECT_DIR/$STAGE_PATH_MODULE_SQL"
 fi
-MODULE_SQL_STAGE_PATH="$(canonical_path "$MODULE_SQL_STAGE_PATH")"
-mkdir -p "$MODULE_SQL_STAGE_PATH"
-ensure_host_writable "$MODULE_SQL_STAGE_PATH"
+STAGE_PATH_MODULE_SQL="$(canonical_path "$STAGE_PATH_MODULE_SQL")"
+mkdir -p "$STAGE_PATH_MODULE_SQL"
+ensure_host_writable "$STAGE_PATH_MODULE_SQL"
 HOST_STAGE_HELPER_IMAGE="$(read_env ALPINE_IMAGE "alpine:latest")"

 declare -A ENABLED_MODULES=()
@@ -439,7 +439,7 @@ esac
 # Stage module SQL to core updates directory (after containers start)
 host_stage_clear(){
   docker run --rm \
-    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
+    -v "$STAGE_PATH_MODULE_SQL":/host-stage \
     "$HOST_STAGE_HELPER_IMAGE" \
     sh -c 'find /host-stage -type f -name "MODULE_*.sql" -delete' >/dev/null 2>&1 || true
 }
@@ -447,7 +447,7 @@ host_stage_clear(){
 host_stage_reset_dir(){
   local dir="$1"
   docker run --rm \
-    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
+    -v "$STAGE_PATH_MODULE_SQL":/host-stage \
     "$HOST_STAGE_HELPER_IMAGE" \
     sh -c "mkdir -p /host-stage/$dir && rm -f /host-stage/$dir/MODULE_*.sql" >/dev/null 2>&1 || true
 }
@@ -461,7 +461,7 @@ copy_to_host_stage(){
   local base_name
   base_name="$(basename "$file_path")"
   docker run --rm \
-    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
+    -v "$STAGE_PATH_MODULE_SQL":/host-stage \
     -v "$src_dir":/src \
     "$HOST_STAGE_HELPER_IMAGE" \
     sh -c "mkdir -p /host-stage/$core_dir && cp \"/src/$base_name\" \"/host-stage/$core_dir/$target_name\"" >/dev/null 2>&1

@@ -588,14 +588,16 @@ def handle_generate(args: argparse.Namespace) -> int:
     write_outputs(state, output_dir)

     if state.warnings:
-        warning_block = "\n".join(f"- {warning}" for warning in state.warnings)
+        module_keys_with_warnings = sorted(
+            {warning.split()[0].strip(":,") for warning in state.warnings if warning.startswith("MODULE_")}
+        )
+        warning_lines = []
+        if module_keys_with_warnings:
+            warning_lines.append(f"- Modules with warnings: {', '.join(module_keys_with_warnings)}")
+        warning_lines.extend(f"- {warning}" for warning in state.warnings)
+        warning_block = textwrap.indent("\n".join(warning_lines), " ")
         print(
-            textwrap.dedent(
-                f"""\
-                ⚠️ Module manifest warnings detected:
-                {warning_block}
-                """
-            ),
+            f"⚠️ Module manifest warnings detected:\n{warning_block}\n",
             file=sys.stderr,
         )
     if state.errors:

20  setup.sh

@@ -1529,8 +1529,24 @@ fi
    # Set build sentinel to indicate rebuild is needed
    local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild"
    mkdir -p "$(dirname "$sentinel")"
-   touch "$sentinel"
+   if touch "$sentinel" 2>/dev/null; then
      say INFO "Build sentinel created at $sentinel"
+   else
+     say WARNING "Could not create build sentinel at $sentinel (permissions/ownership); forcing with sudo..."
+     if command -v sudo >/dev/null 2>&1; then
+       if sudo mkdir -p "$(dirname "$sentinel")" \
+         && sudo chown -R "$(id -u):$(id -g)" "$(dirname "$sentinel")" \
+         && sudo touch "$sentinel"; then
+         say INFO "Build sentinel created at $sentinel (after fixing ownership)"
+       else
+         say ERROR "Failed to force build sentinel creation at $sentinel. Fix permissions and rerun setup."
+         exit 1
+       fi
+     else
+       say ERROR "Cannot force build sentinel creation (sudo unavailable). Fix permissions on $(dirname "$sentinel") and rerun setup."
+       exit 1
+     fi
+   fi
  fi

  local default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore"
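
If the sudo fallback also fails, the underlying fix is simply to make the modules directory writable by the invoking user before re-running setup. An illustrative manual repair, with the path standing in for your LOCAL_STORAGE_ROOT:

# Inspect and repair ownership of the sentinel's parent directory by hand.
ls -ld ./local-storage/modules
sudo chown -R "$(id -u):$(id -g)" ./local-storage/modules
touch ./local-storage/modules/.requires_rebuild
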

117  update-latest.sh  (Executable file)

@@ -0,0 +1,117 @@
+#!/bin/bash
+#
+# Safe wrapper to update to the latest commit on the current branch and run deploy.
+
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+cd "$ROOT_DIR"
+
+BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
+info(){ printf '%b\n' "${BLUE}ℹ️ $*${NC}"; }
+ok(){ printf '%b\n' "${GREEN}✅ $*${NC}"; }
+warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
+err(){ printf '%b\n' "${RED}❌ $*${NC}"; }
+
+FORCE_DIRTY=0
+DEPLOY_ARGS=()
+SKIP_BUILD=0
+AUTO_DEPLOY=0
+
+usage(){
+  cat <<'EOF'
+Usage: ./update-latest.sh [--force] [--help] [deploy args...]
+
+Updates the current git branch with a fast-forward pull, runs a fresh build,
+and optionally runs ./deploy.sh with any additional arguments you provide
+(e.g., --yes --no-watch).
+
+Options:
+  --force        Skip the dirty-tree check (not recommended; you may lose changes)
+  --skip-build   Do not run ./build.sh after updating
+  --deploy       Auto-run ./deploy.sh after build (non-interactive)
+  --help         Show this help
+
+Examples:
+  ./update-latest.sh --yes --no-watch
+  ./update-latest.sh --deploy --yes --no-watch
+  ./update-latest.sh --force --skip-build
+  ./update-latest.sh --force --deploy --remote --remote-host my.host --remote-user sam --yes
+EOF
+}
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --force) FORCE_DIRTY=1; shift;;
+    --skip-build) SKIP_BUILD=1; shift;;
+    --deploy) AUTO_DEPLOY=1; shift;;
+    --help|-h) usage; exit 0;;
+    *) DEPLOY_ARGS+=("$1"); shift;;
+  esac
+done
+
+command -v git >/dev/null 2>&1 || { err "git is required"; exit 1; }
+
+if [ "$FORCE_DIRTY" -ne 1 ]; then
+  if [ -n "$(git status --porcelain)" ]; then
+    err "Working tree is dirty. Commit/stash or re-run with --force."
+    exit 1
+  fi
+fi
+
+current_branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null || true)"
+if [ -z "$current_branch" ] || [ "$current_branch" = "HEAD" ]; then
+  err "Cannot update: detached HEAD or unknown branch."
+  exit 1
+fi
+
+if ! git ls-remote --exit-code --heads origin "$current_branch" >/dev/null 2>&1; then
+  err "Remote branch origin/$current_branch not found."
+  exit 1
+fi
+
+info "Fetching latest changes from origin/$current_branch"
+git fetch --prune origin
+
+info "Fast-forwarding to origin/$current_branch"
+if ! git merge --ff-only "origin/$current_branch"; then
+  err "Fast-forward failed. Resolve manually or rebase, then rerun."
+  exit 1
+fi
+
+ok "Repository updated to $(git rev-parse --short HEAD)"
+
+if [ "$SKIP_BUILD" -ne 1 ]; then
+  info "Running build.sh --yes"
+  if ! "$ROOT_DIR/build.sh" --yes; then
+    err "Build failed. Resolve issues and re-run."
+    exit 1
+  fi
+  ok "Build completed"
+else
+  warn "Skipping build (--skip-build set)"
+fi
+
+# Offer to run deploy
+if [ "$AUTO_DEPLOY" -eq 1 ]; then
+  info "Auto-deploy enabled; running deploy.sh ${DEPLOY_ARGS[*]:-(no extra args)}"
+  exec "$ROOT_DIR/deploy.sh" "${DEPLOY_ARGS[@]}"
+fi
+
+if [ -t 0 ]; then
+  read -r -p "Run deploy.sh now? [y/N]: " reply
+  reply="${reply:-n}"
+  case "$reply" in
+    [Yy]*)
+      info "Running deploy.sh ${DEPLOY_ARGS[*]:-(no extra args)}"
+      exec "$ROOT_DIR/deploy.sh" "${DEPLOY_ARGS[@]}"
+      ;;
+    *)
+      ok "Update (and build) complete. Run ./deploy.sh ${DEPLOY_ARGS[*]} when ready."
+      exit 0
+      ;;
+  esac
+else
+  warn "Non-interactive mode and --deploy not set; skipping deploy."
+  ok "Update (and build) complete. Run ./deploy.sh ${DEPLOY_ARGS[*]} when ready."
+fi