Mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git (synced 2026-01-13 09:07:20 +00:00)

Commit: refactor: reorganize scripts under bash/python
190  scripts/bash/auto-post-install.sh  (Executable file)
@@ -0,0 +1,190 @@
#!/bin/bash
# azerothcore-rm
set -e

GREEN='\033[0;32m'; BLUE='\033[0;34m'; NC='\033[0m'

show_post_install_header(){
    echo -e "\n${BLUE} ⚔️ REALM POST-INSTALL CONFIGURATION ⚔️${NC}"
    echo -e "${BLUE} ══════════════════════════════════════════${NC}"
    echo -e "${BLUE} 🏯 Blessing Your Realm with Final Touches 🏯${NC}\n"
}

show_post_install_header

# Install required packages
apk add --no-cache curl mysql-client bash docker-cli-compose jq || apk add --no-cache curl mysql-client bash jq

ensure_playerbots_db(){
    local db_name="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
    local charset="${MYSQL_CHARACTER_SET:-utf8mb4}"
    local collation="${MYSQL_COLLATION:-utf8mb4_unicode_ci}"
    echo "🔐 Ensuring playerbots database '${db_name}' exists..."
    if mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify -e "CREATE DATABASE IF NOT EXISTS \`${db_name}\` CHARACTER SET ${charset} COLLATE ${collation};" >/dev/null 2>&1; then
        echo "✅ Playerbots database ready"
    else
        echo "⚠️ Failed to ensure playerbots database exists"
    fi
}

update_playerbots_conf(){
    local target="$1"
    if [ ! -f "$target" ]; then
        return 0
    fi
    if sed -i "s|^PlayerbotsDatabaseInfo *=.*|PlayerbotsDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_PLAYERBOTS_NAME}\"|" "$target"; then
        echo " 🔁 Updated $(basename "$target")"
    else
        echo " ⚠️ Could not update $(basename "$target")"
    fi
    return 0
}

# Create install markers directory
mkdir -p /install-markers

# Check if this is a new installation
if [ -f "/install-markers/post-install-completed" ]; then
    echo "✅ Post-install configuration already completed"
    echo "ℹ️ Marker file found: /install-markers/post-install-completed"
    echo "🔄 To re-run post-install configuration, delete the marker file and restart this container"
    echo "🏁 Nothing else to do; exiting."
    exit 0
else
    echo "🆕 New installation detected - running post-install configuration..."
    echo ""

    # Wait for services to be ready
    echo "⏳ Waiting for required services to be ready..."

    # Wait for MySQL to be responsive
    echo "🔌 Waiting for MySQL to be ready..."
    for i in $(seq 1 120); do
        if mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify -e "SELECT 1;" >/dev/null 2>&1; then
            echo "✅ MySQL is ready"
            ensure_playerbots_db
            break
        fi
        echo " ⏳ Attempt $i/120..."
        sleep 5
    done

    # Wait for authserver and worldserver config files to exist
    echo "📁 Waiting for configuration files..."
    for i in $(seq 1 60); do
        if [ -f "/azerothcore/config/authserver.conf" ] && [ -f "/azerothcore/config/worldserver.conf" ]; then
            echo "✅ Configuration files found"
            break
        fi
        echo " ⏳ Waiting for config files... attempt $i/60"
        sleep 5
    done

    if [ ! -f "/azerothcore/config/authserver.conf" ] || [ ! -f "/azerothcore/config/worldserver.conf" ]; then
        echo "❌ Configuration files not found after waiting"
        exit 1
    fi

    # Step 1: Create module configuration files
    echo ""
    echo "🔧 Step 1: Creating module configuration files..."

    # Create .conf files from .conf.dist templates for all modules
    CONFIG_DIR="/azerothcore/config"
    created_count=0

    for file in "$CONFIG_DIR"/*.dist; do
        if [ -f "$file" ]; then
            conffile="${file%.dist}"
            filename=$(basename "$conffile")

            # Skip core config files (already handled)
            case "$filename" in
                authserver.conf|worldserver.conf|dbimport.conf)
                    continue
                    ;;
            esac

            # Create .conf file if it doesn't exist
            if [ ! -f "$conffile" ]; then
                echo " 📝 Creating $filename from $(basename "$file")"
                cp "$file" "$conffile"
                created_count=$((created_count + 1))
            fi
        fi
    done

    echo " ✅ Created $created_count module configuration files"

    # Ensure module configuration files exist under the modules directory
    MODULES_DIR="$CONFIG_DIR/modules"
    if [ -d "$MODULES_DIR" ]; then
        echo ""
        echo " 🔧 Creating module configs in modules/..."
        for file in "$MODULES_DIR"/*.conf.dist; do
            [ -f "$file" ] || continue
            target="${file%.dist}"
            if [ ! -f "$target" ]; then
                echo " 📝 Creating $(basename "$target") from $(basename "$file")"
                cp "$file" "$target"
            fi
        done
    fi

    # Step 2: Update configuration files
    echo ""
    echo "🔧 Step 2: Updating configuration files..."

    # Update DB connection lines and any necessary settings directly with sed
    sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/authserver.conf || true
    sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/worldserver.conf || true
    sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/worldserver.conf || true
    sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/worldserver.conf || true
    update_playerbots_conf /azerothcore/config/modules/playerbots.conf
    update_playerbots_conf /azerothcore/config/modules/playerbots.conf.dist

    ensure_config_key(){
        local file="$1"
        local key="$2"
        local value="$3"
        if [ ! -f "$file" ]; then
            return
        fi
        if grep -qE "^[[:space:]]*${key}[[:space:]]*=" "$file"; then
            return
        fi
        echo " ➕ Adding ${key} to $(basename "$file")"
        printf '\n%s = %s\n' "$key" "$value" >> "$file"
    }

    ensure_config_key /azerothcore/config/worldserver.conf "Account.Achievements.Excluded" "\"\""
    ensure_config_key /azerothcore/config/worldserver.conf "Playerbots.Updates.EnableDatabases" "1"
    ensure_config_key /azerothcore/config/worldserver.conf "PlayerbotsDatabaseInfo" "\"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_PLAYERBOTS_NAME}\""

    echo "✅ Configuration files updated"

    # Step 3: Update realmlist table
    echo ""
    echo "🌐 Step 3: Updating realmlist table..."
    mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify "${DB_AUTH_NAME}" -e "
    UPDATE realmlist SET address='${SERVER_ADDRESS}', port=${REALM_PORT} WHERE id=1;
    " || echo "⚠️ Could not update realmlist table"

    echo "✅ Realmlist updated"

    echo ""
    echo "ℹ️ Step 4: (Optional) Restart services to apply changes - handled externally"

    # Create completion marker
    echo "$(date)" > /install-markers/post-install-completed
    echo "NEW_INSTALL_DATE=$(date)" >> /install-markers/post-install-completed
    echo "CONFIG_FILES_UPDATED=true" >> /install-markers/post-install-completed
    echo "REALMLIST_UPDATED=true" >> /install-markers/post-install-completed

    echo ""
    echo -e "${GREEN}⚔️ Your realm has been blessed and configured! ⚔️${NC}"
    echo -e "${GREEN}🏰 All post-installation rituals completed${NC}"
    echo -e "${GREEN}🗡️ Your realm awaits brave adventurers!${NC}"
    echo "🏁 Post-install tasks finished; exiting."
    exit 0
fi
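Note: the *DatabaseInfo settings rewritten by the sed calls above all use AzerothCore's "host;port;user;password;database" connection-string format. With illustrative placeholder values (host ac-mysql, root password secret), the resulting worldserver.conf lines would look like:

    LoginDatabaseInfo = "ac-mysql;3306;root;secret;acore_auth"
    WorldDatabaseInfo = "ac-mysql;3306;root;secret;acore_world"
    CharacterDatabaseInfo = "ac-mysql;3306;root;secret;acore_characters"
    PlayerbotsDatabaseInfo = "ac-mysql;3306;root;secret;acore_playerbots"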
272  scripts/bash/backup-export.sh  (Executable file)
@@ -0,0 +1,272 @@
#!/bin/bash
# Export one or more ACore databases to ExportBackup_<timestamp>/
set -euo pipefail

INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

SUPPORTED_DBS=(auth characters world)
declare -A SUPPORTED_SET=()
for db in "${SUPPORTED_DBS[@]}"; do
    SUPPORTED_SET["$db"]=1
done

declare -A DB_NAMES=([auth]="" [characters]="" [world]="")
declare -a INCLUDE_DBS=()
declare -a SKIP_DBS=()

MYSQL_PW=""
DEST_PARENT=""
DEST_PROVIDED=false
EXPLICIT_SELECTION=false

usage(){
cat <<'EOF'
Usage: ./backup-export.sh [options]

Creates a timestamped backup of one or more ACore databases.

Options:
  -o, --output DIR       Destination directory (default: storage/backups)
  -p, --password PASS    MySQL root password
  --auth-db NAME         Auth database schema name
  --characters-db NAME   Characters database schema name
  --world-db NAME        World database schema name
  --db LIST              Comma-separated list of databases to export
  --skip LIST            Comma-separated list of databases to skip
  -h, --help             Show this help and exit

Supported database identifiers: auth, characters, world.
By default, any database whose schema name is provided is exported;
if no schema names are provided, auth and characters are exported.

Examples:
  # Export all databases to the default location
  ./backup-export.sh --password azerothcore123 --auth-db acore_auth --characters-db acore_characters --world-db acore_world --db auth,characters,world

  # Export specific databases to a custom directory
  ./backup-export.sh --output /path/to/backups --password azerothcore123 --db auth,characters --auth-db acore_auth --characters-db acore_characters

  # Export only the world database
  ./backup-export.sh --password azerothcore123 --db world --world-db acore_world
EOF
}

err(){ printf 'Error: %s\n' "$*" >&2; }
die(){ err "$1"; exit 1; }

normalize_token(){
    printf '%s' "$1" | tr '[:upper:]' '[:lower:]' | tr -d '[:space:]'
}

add_unique(){
    local -n arr="$1"
    local value="$2"
    for existing in "${arr[@]:-}"; do
        [[ "$existing" == "$value" ]] && return
    done
    arr+=("$value")
}

parse_db_list(){
    local -n target="$1"
    local value="$2"
    IFS=',' read -ra parts <<<"$value"
    for part in "${parts[@]}"; do
        local token
        token="$(normalize_token "$part")"
        [[ -z "$token" ]] && continue
        if [[ -z "${SUPPORTED_SET[$token]:-}" ]]; then
            die "Unknown database identifier: $token (supported: ${SUPPORTED_DBS[*]})"
        fi
        add_unique target "$token"
    done
}

remove_from_list(){
    local -n arr="$1"
    local value="$2"
    local -a filtered=()
    for item in "${arr[@]}"; do
        [[ "$item" == "$value" ]] || filtered+=("$item")
    done
    arr=("${filtered[@]}")
}

resolve_relative(){
    local base="$1" path="$2"
    if command -v python3 >/dev/null 2>&1; then
        python3 - "$base" "$path" <<'PY'
import os, sys
base, path = sys.argv[1:3]
if not path:
    print(os.path.abspath(base))
elif os.path.isabs(path):
    print(os.path.normpath(path))
else:
    print(os.path.normpath(os.path.join(base, path)))
PY
    else
        die "python3 is required but was not found on PATH"
    fi
}

json_string(){
    if ! command -v python3 >/dev/null 2>&1; then
        die "python3 is required but was not found on PATH"
    fi
    python3 - "$1" <<'PY'
import json, sys
print(json.dumps(sys.argv[1]))
PY
}

POSITIONAL=()
while [[ $# -gt 0 ]]; do
    case "$1" in
        -o|--output)
            [[ $# -ge 2 ]] || die "--output requires a directory argument"
            DEST_PARENT="$2"
            DEST_PROVIDED=true
            shift 2
            ;;
        -p|--password)
            [[ $# -ge 2 ]] || die "--password requires a value"
            MYSQL_PW="$2"
            shift 2
            ;;
        --auth-db)
            [[ $# -ge 2 ]] || die "--auth-db requires a value"
            DB_NAMES[auth]="$2"
            shift 2
            ;;
        --characters-db)
            [[ $# -ge 2 ]] || die "--characters-db requires a value"
            DB_NAMES[characters]="$2"
            shift 2
            ;;
        --world-db)
            [[ $# -ge 2 ]] || die "--world-db requires a value"
            DB_NAMES[world]="$2"
            shift 2
            ;;
        --db|--only)
            [[ $# -ge 2 ]] || die "--db requires a value"
            EXPLICIT_SELECTION=true
            parse_db_list INCLUDE_DBS "$2"
            shift 2
            ;;
        --skip)
            [[ $# -ge 2 ]] || die "--skip requires a value"
            parse_db_list SKIP_DBS "$2"
            shift 2
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        --)
            shift
            while [[ $# -gt 0 ]]; do
                POSITIONAL+=("$1")
                shift
            done
            break
            ;;
        -*)
            die "Unknown option: $1"
            ;;
        *)
            POSITIONAL+=("$1")
            shift
            ;;
    esac
done

if ((${#POSITIONAL[@]} > 0)); then
    die "Positional arguments are not supported. Use named options instead."
fi

declare -a ACTIVE_DBS=()
if $EXPLICIT_SELECTION; then
    ACTIVE_DBS=("${INCLUDE_DBS[@]}")
else
    for db in "${SUPPORTED_DBS[@]}"; do
        if [[ -n "${DB_NAMES[$db]}" ]]; then
            add_unique ACTIVE_DBS "$db"
        fi
    done
    if ((${#ACTIVE_DBS[@]} == 0)); then
        ACTIVE_DBS=(auth characters)
    fi
fi

for skip in "${SKIP_DBS[@]:-}"; do
    remove_from_list ACTIVE_DBS "$skip"
done

if ((${#ACTIVE_DBS[@]} == 0)); then
    die "No databases selected for export."
fi

[[ -n "$MYSQL_PW" ]] || die "MySQL password is required (use --password)."

for db in "${ACTIVE_DBS[@]}"; do
    case "$db" in
        auth|characters|world) ;;
        *) die "Unsupported database identifier requested: $db" ;;
    esac
    if [[ -z "${DB_NAMES[$db]}" ]]; then
        die "Missing schema name for '$db'. Provide --${db}-db."
    fi
done

if $DEST_PROVIDED; then
    DEST_PARENT="$(resolve_relative "$INVOCATION_DIR" "$DEST_PARENT")"
else
    # Use storage/backups as default to align with the existing backup structure
    if [ -d "$SCRIPT_DIR/storage" ]; then
        DEST_PARENT="$SCRIPT_DIR/storage/backups"
        mkdir -p "$DEST_PARENT"
    else
        DEST_PARENT="$SCRIPT_DIR"
    fi
fi

TIMESTAMP="$(date +%Y%m%d_%H%M%S)"
DEST_DIR="$(printf '%s/ExportBackup_%s' "$DEST_PARENT" "$TIMESTAMP")"
mkdir -p "$DEST_DIR"
generated_at="$(date --iso-8601=seconds)"

dump_db(){
    local schema="$1" outfile="$2"
    echo "Dumping ${schema} -> ${outfile}"
    docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" | gzip > "$outfile"
}

for db in "${ACTIVE_DBS[@]}"; do
    outfile="$DEST_DIR/acore_${db}.sql.gz"
    dump_db "${DB_NAMES[$db]}" "$outfile"
done

first=1
{
    printf '{\n'
    printf '  "generated_at": %s,\n' "$(json_string "$generated_at")"
    printf '  "databases": {\n'
    for db in "${ACTIVE_DBS[@]}"; do
        key_json="$(json_string "$db")"
        value_json="$(json_string "${DB_NAMES[$db]}")"
        if (( first )); then
            first=0
        else
            printf ',\n'
        fi
        printf '    %s: %s' "$key_json" "$value_json"
    done
    printf '\n  }\n'
    printf '}\n'
} > "$DEST_DIR/manifest.json"

echo "Exported databases: ${ACTIVE_DBS[*]}"
echo "Backups saved under $DEST_DIR"
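Note: a run such as the following (password and timestamp illustrative) dumps the selected schemas and writes a manifest.json describing them, shaped as the printf block above produces:

    ./backup-export.sh --password secret --db auth,characters \
        --auth-db acore_auth --characters-db acore_characters

    # Resulting storage/backups/ExportBackup_20241029_120000/manifest.json:
    {
      "generated_at": "2024-10-29T12:00:00+00:00",
      "databases": {
        "auth": "acore_auth",
        "characters": "acore_characters"
      }
    }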
473  scripts/bash/backup-import.sh  (Executable file)
@@ -0,0 +1,473 @@
#!/bin/bash
# Restore one or more ACore databases from a backup directory.
set -euo pipefail

INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

COLOR_RED='\033[0;31m'
COLOR_GREEN='\033[0;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_RESET='\033[0m'

log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }

SUPPORTED_DBS=(auth characters world)
declare -A SUPPORTED_SET=()
for db in "${SUPPORTED_DBS[@]}"; do
    SUPPORTED_SET["$db"]=1
done

declare -A DB_NAMES=([auth]="" [characters]="" [world]="")
declare -a INCLUDE_DBS=()
declare -a SKIP_DBS=()
declare -a ACTIVE_DBS=()

MYSQL_PW=""
BACKUP_DIR=""
BACKUP_PROVIDED=false
EXPLICIT_SELECTION=false

usage(){
cat <<'EOF'
Usage: ./backup-import.sh [options]

Restores selected ACore databases from a backup directory.

Options:
  -b, --backup-dir DIR   Backup directory (required)
  -p, --password PASS    MySQL root password
  --auth-db NAME         Auth database schema name
  --characters-db NAME   Characters database schema name
  --world-db NAME        World database schema name
  --db LIST              Comma-separated list of databases to import
  --skip LIST            Comma-separated list of databases to skip
  --all                  Import all supported databases
  -h, --help             Show this help and exit

Supported database identifiers: auth, characters, world.
By default the script restores the auth and characters databases.

Examples:
  # Restore from a specific backup directory
  ./backup-import.sh --backup-dir /path/to/backup --password azerothcore123 --auth-db acore_auth --characters-db acore_characters

  # Restore all databases
  ./backup-import.sh --backup-dir ./storage/backups/ExportBackup_20241029_120000 --password azerothcore123 --all --auth-db acore_auth --characters-db acore_characters --world-db acore_world

  # Restore only the world database
  ./backup-import.sh --backup-dir ./backups/daily/latest --password azerothcore123 --db world --world-db acore_world
EOF
}

normalize_token(){
    printf '%s' "$1" | tr '[:upper:]' '[:lower:]' | tr -d '[:space:]'
}

add_unique(){
    local -n arr="$1"
    local value="$2"
    for existing in "${arr[@]:-}"; do
        [[ "$existing" == "$value" ]] && return
    done
    arr+=("$value")
}

parse_db_list(){
    local -n target="$1"
    local value="$2"
    IFS=',' read -ra parts <<<"$value"
    for part in "${parts[@]}"; do
        local token
        token="$(normalize_token "$part")"
        [[ -z "$token" ]] && continue
        if [[ -z "${SUPPORTED_SET[$token]:-}" ]]; then
            fatal "Unknown database identifier: $token (supported: ${SUPPORTED_DBS[*]})"
        fi
        add_unique target "$token"
    done
}

remove_from_list(){
    local -n arr="$1"
    local value="$2"
    local -a filtered=()
    for item in "${arr[@]}"; do
        [[ "$item" == "$value" ]] || filtered+=("$item")
    done
    arr=("${filtered[@]}")
}

resolve_relative(){
    local base="$1" path="$2"
    if command -v python3 >/dev/null 2>&1; then
        python3 - "$base" "$path" <<'PY'
import os, sys
base, path = sys.argv[1:3]
if not path:
    print(os.path.abspath(base))
elif os.path.isabs(path):
    print(os.path.normpath(path))
else:
    print(os.path.normpath(os.path.join(base, path)))
PY
    else
        fatal "python3 is required but was not found on PATH"
    fi
}

load_manifest(){
    local path="$1"
    [[ -f "$path" ]] || return 0
    if ! command -v python3 >/dev/null 2>&1; then
        fatal "python3 is required to read $path"
    fi
    while IFS='=' read -r key value; do
        [[ -n "$key" && -n "$value" ]] || continue
        local token
        token="$(normalize_token "$key")"
        [[ -n "${SUPPORTED_SET[$token]:-}" ]] || continue
        if [[ -z "${DB_NAMES[$token]}" ]]; then
            DB_NAMES[$token]="$value"
        fi
    done < <(python3 - "$path" <<'PY'
import json, sys

SUPPORTED = {
    "auth": {"keys": {"auth"}, "schemas": {"acore_auth"}},
    "characters": {"keys": {"characters", "chars", "char"}, "schemas": {"acore_characters"}},
    "world": {"keys": {"world"}, "schemas": {"acore_world"}},
}

def map_entry(key, value, result):
    if key and key in SUPPORTED:
        result[key] = value
        return
    value_lower = value.lower()
    for ident, meta in SUPPORTED.items():
        if value_lower in meta["schemas"]:
            result.setdefault(ident, value)
            return
    if key:
        for ident, meta in SUPPORTED.items():
            if key in meta["keys"]:
                result.setdefault(ident, value)
                return

def main():
    path = sys.argv[1]
    with open(path, "r", encoding="utf-8") as fh:
        data = json.load(fh)
    result = {}
    databases = data.get("databases")
    if isinstance(databases, dict):
        for key, value in databases.items():
            map_entry(key.lower(), str(value), result)
    elif isinstance(databases, list):
        for value in databases:
            map_entry("", str(value), result)
    for key, value in result.items():
        print(f"{key}={value}")

if __name__ == "__main__":
    main()
PY
    )
}

find_dump(){
    local db="$1"
    local hint="${DB_NAMES[$db]}"
    if ! command -v python3 >/dev/null 2>&1; then
        fatal "python3 is required to locate backup dumps"
    fi
    python3 - "$BACKUP_DIR" "$db" "$hint" <<'PY'
import glob, os, sys
backup_dir, db, hint = sys.argv[1:4]

# Search patterns for database dumps
patterns = [
    f"acore_{db}.sql.gz",
    f"acore_{db}.sql",
    f"{db}.sql.gz",
    f"{db}.sql",
]
if hint:
    patterns = [f"{hint}.sql.gz", f"{hint}.sql"] + patterns

# Search locations (in order of preference)
search_dirs = []

# Check for daily backups first (most recent)
daily_dir = os.path.join(backup_dir, "daily")
if os.path.isdir(daily_dir):
    daily_subdirs = [d for d in os.listdir(daily_dir) if os.path.isdir(os.path.join(daily_dir, d))]
    if daily_subdirs:
        latest_daily = max(daily_subdirs, key=lambda x: os.path.getmtime(os.path.join(daily_dir, x)))
        search_dirs.append(os.path.join(daily_dir, latest_daily))

# Check for hourly backups
hourly_dir = os.path.join(backup_dir, "hourly")
if os.path.isdir(hourly_dir):
    hourly_subdirs = [d for d in os.listdir(hourly_dir) if os.path.isdir(os.path.join(hourly_dir, d))]
    if hourly_subdirs:
        latest_hourly = max(hourly_subdirs, key=lambda x: os.path.getmtime(os.path.join(hourly_dir, x)))
        search_dirs.append(os.path.join(hourly_dir, latest_hourly))

# Check for timestamped backup directories
timestamped_dirs = []
try:
    for item in os.listdir(backup_dir):
        item_path = os.path.join(backup_dir, item)
        if os.path.isdir(item_path):
            # Match ExportBackup_YYYYMMDD_HHMMSS or just YYYYMMDD_HHMMSS
            if item.startswith("ExportBackup_") or (len(item) == 15 and item[8] == '_'):
                timestamped_dirs.append(item_path)
except OSError:
    pass

if timestamped_dirs:
    latest_timestamped = max(timestamped_dirs, key=os.path.getmtime)
    search_dirs.append(latest_timestamped)

# Add the main backup directory itself
search_dirs.append(backup_dir)

# Search for matching dumps
seen = {}
matches = []

for search_dir in search_dirs:
    for pattern in patterns:
        for path in glob.glob(os.path.join(search_dir, pattern)):
            if path not in seen and os.path.isfile(path):
                seen[path] = True
                matches.append(path)

if not matches:
    sys.exit(1)

# Return the most recent match
latest = max(matches, key=os.path.getmtime)
print(latest)
PY
}

guess_schema_from_dump(){
    local dump="$1"
    local base
    base="$(basename "$dump")"
    case "$base" in
        acore_auth.sql|acore_auth.sql.gz) echo "acore_auth" ;;
        acore_characters.sql|acore_characters.sql.gz) echo "acore_characters" ;;
        acore_world.sql|acore_world.sql.gz) echo "acore_world" ;;
        *)
            if [[ "$base" =~ ^([A-Za-z0-9_-]+)\.sql(\.gz)?$ ]]; then
                echo "${BASH_REMATCH[1]}"
            fi
            ;;
    esac
}

timestamp(){ date +%Y%m%d_%H%M%S; }

backup_db(){
    local schema="$1" label="$2"
    local out="manual-backups/${label}-pre-import-$(timestamp).sql"
    mkdir -p manual-backups
    log "Backing up current ${schema} to ${out}"
    docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" > "$out"
}

restore(){
    local schema="$1" dump="$2"
    log "Importing ${dump##*/} into ${schema}"
    case "$dump" in
        *.gz) gzip -dc "$dump" ;;
        *.sql) cat "$dump" ;;
        *) fatal "Unsupported dump format: $dump" ;;
    esac | docker exec -i ac-mysql mysql -uroot -p"$MYSQL_PW" "$schema"
}

db_selected(){
    local needle="$1"
    for item in "${ACTIVE_DBS[@]}"; do
        [[ "$item" == "$needle" ]] && return 0
    done
    return 1
}

count_rows(){
    docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$1"
}

case "${1:-}" in
    -h|--help) usage; exit 0;;
esac

POSITIONAL=()
while [[ $# -gt 0 ]]; do
    case "$1" in
        -b|--backup-dir)
            [[ $# -ge 2 ]] || fatal "--backup-dir requires a directory argument"
            BACKUP_DIR="$2"
            BACKUP_PROVIDED=true
            shift 2
            ;;
        -p|--password)
            [[ $# -ge 2 ]] || fatal "--password requires a value"
            MYSQL_PW="$2"
            shift 2
            ;;
        --auth-db)
            [[ $# -ge 2 ]] || fatal "--auth-db requires a value"
            DB_NAMES[auth]="$2"
            shift 2
            ;;
        --characters-db)
            [[ $# -ge 2 ]] || fatal "--characters-db requires a value"
            DB_NAMES[characters]="$2"
            shift 2
            ;;
        --world-db)
            [[ $# -ge 2 ]] || fatal "--world-db requires a value"
            DB_NAMES[world]="$2"
            shift 2
            ;;
        --db|--only)
            [[ $# -ge 2 ]] || fatal "--db requires a value"
            EXPLICIT_SELECTION=true
            parse_db_list INCLUDE_DBS "$2"
            shift 2
            ;;
        --skip)
            [[ $# -ge 2 ]] || fatal "--skip requires a value"
            parse_db_list SKIP_DBS "$2"
            shift 2
            ;;
        --all)
            EXPLICIT_SELECTION=true
            for db in "${SUPPORTED_DBS[@]}"; do
                add_unique INCLUDE_DBS "$db"
            done
            shift
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        --)
            shift
            while [[ $# -gt 0 ]]; do
                POSITIONAL+=("$1")
                shift
            done
            break
            ;;
        -*)
            fatal "Unknown option: $1"
            ;;
        *)
            POSITIONAL+=("$1")
            shift
            ;;
    esac
done

if ((${#POSITIONAL[@]} > 0)); then
    fatal "Positional arguments are not supported. Use named options instead."
fi

if $EXPLICIT_SELECTION; then
    ACTIVE_DBS=("${INCLUDE_DBS[@]}")
else
    ACTIVE_DBS=(auth characters)
fi

for skip in "${SKIP_DBS[@]:-}"; do
    remove_from_list ACTIVE_DBS "$skip"
done

if ((${#ACTIVE_DBS[@]} == 0)); then
    fatal "No databases selected for import."
fi

if $BACKUP_PROVIDED; then
    BACKUP_DIR="$(resolve_relative "$INVOCATION_DIR" "$BACKUP_DIR")"
else
    fatal "Backup directory is required. Use --backup-dir DIR to specify."
fi

[[ -d "$BACKUP_DIR" ]] || fatal "Backup directory not found: $BACKUP_DIR"
log "Using backup directory: $BACKUP_DIR"

MANIFEST_PATH="$BACKUP_DIR/manifest.json"
if [[ -f "$MANIFEST_PATH" ]]; then
    load_manifest "$MANIFEST_PATH"
fi

[[ -n "$MYSQL_PW" ]] || fatal "MySQL password is required (use --password)."

declare -A DUMP_PATHS=()
log "Databases selected: ${ACTIVE_DBS[*]}"
for db in "${ACTIVE_DBS[@]}"; do
    if ! dump_path="$(find_dump "$db")"; then
        fatal "No dump found for '$db' in $BACKUP_DIR (expected files like acore_${db}.sql or .sql.gz)."
    fi
    if [[ -z "${DB_NAMES[$db]}" ]]; then
        DB_NAMES[$db]="$(guess_schema_from_dump "$dump_path")"
    fi
    [[ -n "${DB_NAMES[$db]}" ]] || fatal "Missing schema name for '$db'. Provide --${db}-db, include it in manifest.json, or name the dump appropriately."
    DUMP_PATHS["$db"]="$dump_path"
    log "  $db -> ${DB_NAMES[$db]} (using ${dump_path##*/})"
done

log "Stopping world/auth services"
docker stop ac-worldserver ac-authserver >/dev/null || warn "Services already stopped"

for db in "${ACTIVE_DBS[@]}"; do
    backup_db "${DB_NAMES[$db]}" "$db"
    restore "${DB_NAMES[$db]}" "${DUMP_PATHS[$db]}"
done

log "Module SQL patches will be applied when services restart"

log "Restarting services to reinitialize GUID generators"
docker restart ac-authserver ac-worldserver >/dev/null

log "Waiting for services to fully initialize..."
sleep 10

for i in {1..30}; do
    if docker exec ac-worldserver pgrep worldserver >/dev/null 2>&1 && docker exec ac-authserver pgrep authserver >/dev/null 2>&1; then
        log "Services are running"
        break
    fi
    if [ $i -eq 30 ]; then
        warn "Services took longer than expected to start"
    fi
    sleep 2
done

if db_selected auth; then
    ACCOUNTS=$(count_rows "SELECT COUNT(*) FROM ${DB_NAMES[auth]}.account;")
    log "Accounts: $ACCOUNTS"
fi

if db_selected characters; then
    CHARS=$(count_rows "SELECT COUNT(*) FROM ${DB_NAMES[characters]}.characters;")
    log "Characters: $CHARS"
    if [ "$CHARS" -gt 0 ]; then
        MAX_GUID=$(count_rows "SELECT COALESCE(MAX(guid), 0) FROM ${DB_NAMES[characters]}.characters;")
        log "Highest character GUID: $MAX_GUID"
        log "Next new character will receive GUID: $((MAX_GUID + 1))"
    fi
fi

./status.sh --once || warn "status.sh reported issues; inspect manually."

log "Import completed for: ${ACTIVE_DBS[*]}"
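Note: both backup scripts rely on bash namerefs (local -n) so that helpers such as add_unique and parse_db_list can mutate a caller's array by name. A minimal standalone sketch of that pattern (not part of the repo; names hypothetical):

    append_once() {
        local -n _list="$1"   # nameref: _list aliases the caller's array
        local item="$2"
        for existing in "${_list[@]:-}"; do
            [[ "$existing" == "$item" ]] && return 0
        done
        _list+=("$item")
    }

    selected=()
    append_once selected auth
    append_once selected auth    # duplicate, ignored
    append_once selected world
    printf '%s\n' "${selected[@]}"   # prints: auth, then world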
1041  scripts/bash/backup-merge.sh  (Executable file)
(File diff suppressed because it is too large)
104  scripts/bash/backup-scheduler.sh  (Executable file)
@@ -0,0 +1,104 @@
#!/bin/bash
# azerothcore-rm
set -e

BACKUP_DIR_BASE="/backups"
HOURLY_DIR="$BACKUP_DIR_BASE/hourly"
DAILY_DIR="$BACKUP_DIR_BASE/daily"
RETENTION_HOURS=${BACKUP_RETENTION_HOURS:-6}
RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-3}
DAILY_TIME=${BACKUP_DAILY_TIME:-09}
MYSQL_PORT=${MYSQL_PORT:-3306}

mkdir -p "$HOURLY_DIR" "$DAILY_DIR"

log() { echo "[$(date '+%F %T')] $*"; }

# Build database list from env (include optional acore_playerbots if present)
database_list() {
    local dbs=("${DB_AUTH_NAME}" "${DB_WORLD_NAME}" "${DB_CHARACTERS_NAME}")
    if mysql -h"${MYSQL_HOST}" -P"${MYSQL_PORT}" -u"${MYSQL_USER}" -p"${MYSQL_PASSWORD}" -e "USE acore_playerbots;" >/dev/null 2>&1; then
        dbs+=("acore_playerbots")
        log "Detected optional database: acore_playerbots (will be backed up)" >&2
    fi
    printf '%s\n' "${dbs[@]}"
}

run_backup() {
    local tier_dir="$1"   # hourly or daily dir
    local tier_type="$2"  # "hourly" or "daily"
    local ts
    ts=$(date '+%Y%m%d_%H%M%S')
    local target_dir="$tier_dir/$ts"
    mkdir -p "$target_dir"
    log "Starting ${tier_type} backup to $target_dir"

    local -a dbs
    mapfile -t dbs < <(database_list)

    for db in "${dbs[@]}"; do
        log "Backing up database: $db"
        if mysqldump \
            -h"${MYSQL_HOST}" -P"${MYSQL_PORT}" -u"${MYSQL_USER}" -p"${MYSQL_PASSWORD}" \
            --single-transaction --routines --triggers --events \
            --hex-blob --quick --lock-tables=false \
            --add-drop-database --databases "$db" \
            | gzip -c > "$target_dir/${db}.sql.gz"; then
            log "✅ Successfully backed up $db"
        else
            log "❌ Failed to back up $db"
        fi
    done

    # Create backup manifest (parity with scripts/backup.sh and backup-hourly.sh)
    local size; size=$(du -sh "$target_dir" | cut -f1)
    local mysql_ver; mysql_ver=$(mysql -h"${MYSQL_HOST}" -P"${MYSQL_PORT}" -u"${MYSQL_USER}" -p"${MYSQL_PASSWORD}" -e 'SELECT VERSION();' -s -N 2>/dev/null || echo "unknown")

    if [ "$tier_type" = "hourly" ]; then
        cat > "$target_dir/manifest.json" <<EOF
{
  "timestamp": "${ts}",
  "type": "hourly",
  "databases": [$(printf '"%s",' "${dbs[@]}" | sed 's/,$//')],
  "backup_size": "${size}",
  "retention_hours": ${RETENTION_HOURS},
  "mysql_version": "${mysql_ver}"
}
EOF
    else
        cat > "$target_dir/manifest.json" <<EOF
{
  "timestamp": "${ts}",
  "type": "daily",
  "databases": [$(printf '"%s",' "${dbs[@]}" | sed 's/,$//')],
  "backup_size": "${size}",
  "retention_days": ${RETENTION_DAYS},
  "mysql_version": "${mysql_ver}"
}
EOF
    fi

    log "Backup complete: $target_dir (size ${size})"
}

cleanup_old() {
    find "$HOURLY_DIR" -mindepth 1 -maxdepth 1 -type d -mmin +$((RETENTION_HOURS*60)) -print -exec rm -rf {} + 2>/dev/null || true
    find "$DAILY_DIR" -mindepth 1 -maxdepth 1 -type d -mtime +"$RETENTION_DAYS" -print -exec rm -rf {} + 2>/dev/null || true
}

log "Backup scheduler starting: hourly($RETENTION_HOURS h), daily($RETENTION_DAYS d at ${DAILY_TIME}:00)"

while true; do
    minute=$(date '+%M')
    hour=$(date '+%H')

    if [ "$minute" = "00" ]; then
        run_backup "$HOURLY_DIR" "hourly"
    fi

    if [ "$hour" = "$DAILY_TIME" ] && [ "$minute" = "00" ]; then
        run_backup "$DAILY_DIR" "daily"
    fi

    cleanup_old
    sleep 60
done
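Note: the scheduler is configured entirely through environment variables. A hypothetical foreground invocation for local testing (all values illustrative) would be:

    BACKUP_RETENTION_HOURS=6 BACKUP_RETENTION_DAYS=3 BACKUP_DAILY_TIME=09 \
    MYSQL_HOST=ac-mysql MYSQL_PORT=3306 MYSQL_USER=root MYSQL_PASSWORD=secret \
    DB_AUTH_NAME=acore_auth DB_WORLD_NAME=acore_world DB_CHARACTERS_NAME=acore_characters \
    ./scripts/bash/backup-scheduler.sh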
101  scripts/bash/compose_overrides.sh  (Normal file)
@@ -0,0 +1,101 @@
#!/usr/bin/env bash

# Helper utilities for dynamically including docker compose override files
# based on FEATURE_NAME_ENABLED style environment flags.

compose_overrides::trim() {
    local value="$1"
    # shellcheck disable=SC2001
    value="$(echo "$value" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
    printf '%s' "$value"
}

compose_overrides::derive_flag_from_name() {
    local file="$1"
    local base
    base="$(basename "$file")"
    base="${base%.*}"
    base="${base//[^[:alnum:]]/_}"
    base="${base^^}"
    printf 'COMPOSE_OVERRIDE_%s_ENABLED' "$base"
}

compose_overrides::extract_tag() {
    local file="$1" tag="$2"
    local line
    line="$(grep -m1 "^# *${tag}:" "$file" 2>/dev/null || true)"
    if [ -z "$line" ]; then
        return 1
    fi
    line="${line#*:}"
    compose_overrides::trim "$line"
}

compose_overrides::extract_all_tags() {
    local file="$1" tag="$2"
    grep "^# *${tag}:" "$file" 2>/dev/null | cut -d':' -f2- | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'
}

compose_overrides::read_env_value() {
    local env_path="$1" key="$2" default="${3:-}"
    local value=""
    if [ -f "$env_path" ]; then
        value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
    fi
    if [ -z "$value" ]; then
        value="$default"
    fi
    printf '%s' "$value"
}

compose_overrides::list_enabled_files() {
    local root_dir="$1" env_path="$2" result_var="$3"
    local overrides_dir="${root_dir}/compose-overrides"
    local -n __result="$result_var"
    __result=()

    [ -d "$overrides_dir" ] || return 0

    local -a override_files=()
    while IFS= read -r -d '' file; do
        override_files+=("$file")
    done < <(find "$overrides_dir" -maxdepth 1 -type f \( -name '*.yml' -o -name '*.yaml' \) -print0 | sort -z)

    local file flag flag_value legacy_default legacy_flags legacy_flag
    for file in "${override_files[@]}"; do
        flag="$(compose_overrides::extract_tag "$file" "override-flag" || true)"
        if [ -z "$flag" ]; then
            flag="$(compose_overrides::derive_flag_from_name "$file")"
        fi

        legacy_default="0"
        legacy_flags="$(compose_overrides::extract_all_tags "$file" "legacy-flag" || true)"
        if [ -n "$legacy_flags" ]; then
            while IFS= read -r legacy_flag; do
                [ -z "$legacy_flag" ] && continue
                legacy_default="$(compose_overrides::read_env_value "$env_path" "$legacy_flag" "$legacy_default")"
                # Stop at the first legacy flag that yields a value
                if [ -n "$legacy_default" ]; then
                    break
                fi
            done <<< "$legacy_flags"
        fi

        flag_value="$(compose_overrides::read_env_value "$env_path" "$flag" "$legacy_default")"
        if [ "$flag_value" = "1" ]; then
            __result+=("$file")
        fi
    done
}

compose_overrides::build_compose_args() {
    local root_dir="$1" env_path="$2" default_compose="$3" result_var="$4"
    local -n __result="$result_var"
    __result=(-f "$default_compose")

    local -a enabled_files=()
    compose_overrides::list_enabled_files "$root_dir" "$env_path" enabled_files
    for file in "${enabled_files[@]}"; do
        __result+=(-f "$file")
    done
}
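Note: a consumer script would source these helpers and let build_compose_args assemble the -f flag list; override files can declare their flag with a leading '# override-flag: NAME' comment, otherwise the flag is derived from the file name. A hypothetical sketch (tools.yml and the paths are illustrative):

    #!/bin/bash
    source ./scripts/bash/compose_overrides.sh

    declare -a compose_args=()
    compose_overrides::build_compose_args "." "./.env" "./docker-compose.yml" compose_args

    # e.g. compose_args = (-f ./docker-compose.yml -f ./compose-overrides/tools.yml)
    docker compose "${compose_args[@]}" up -d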
162  scripts/bash/configure-server.sh  (Executable file)
@@ -0,0 +1,162 @@
#!/bin/bash
# Simple wrapper script for server configuration management

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m' # No Color

print_header() {
    echo -e "\n${BLUE}🔧 AzerothCore Configuration Manager${NC}\n"
}

show_usage() {
cat << EOF
Usage: $(basename "$0") [COMMAND] [OPTIONS]

Commands:
  apply           Apply configuration overrides from config/server-overrides.conf
  preset <name>   Apply a preset configuration
  list            List available presets
  edit            Open server-overrides.conf in an editor
  status          Show current configuration status

Examples:
  $(basename "$0") apply                  # Apply custom overrides
  $(basename "$0") preset fast-leveling   # Apply fast-leveling preset
  $(basename "$0") list                   # Show available presets
  $(basename "$0") edit                   # Edit configuration file

EOF
}

edit_config() {
    local config_file="$PROJECT_DIR/config/server-overrides.conf"
    local editor="${EDITOR:-nano}"

    echo -e "${YELLOW}📝 Opening configuration file in $editor...${NC}"

    if [[ ! -f "$config_file" ]]; then
        echo -e "${YELLOW}⚠️ Configuration file doesn't exist. Creating template...${NC}"
        mkdir -p "$(dirname "$config_file")"
        # Create a minimal template if it doesn't exist
        cat > "$config_file" << 'EOF'
# AzerothCore Server Configuration Overrides
# Edit this file and run './scripts/bash/configure-server.sh apply' to update settings

[worldserver.conf]
# Example settings - uncomment and modify as needed
# Rate.XP.Kill = 2.0
# Rate.XP.Quest = 2.0
# MaxPlayerLevel = 80

[playerbots.conf]
# Example playerbot settings
# AiPlayerbot.MinRandomBots = 100
# AiPlayerbot.MaxRandomBots = 300
EOF
        echo -e "${GREEN}✅ Created template configuration file${NC}"
    fi

    "$editor" "$config_file"

    echo -e "\n${YELLOW}Would you like to apply these changes now? (y/N)${NC}"
    read -r response
    if [[ "$response" =~ ^[Yy]$ ]]; then
        python3 "$SCRIPT_DIR/apply-config.py"
    else
        echo -e "${BLUE}ℹ️ Run '$(basename "$0") apply' when ready to apply changes${NC}"
    fi
}

show_status() {
    echo -e "${BLUE}📊 Configuration Status${NC}\n"

    # Check if config files exist
    local storage_path="${STORAGE_PATH:-./storage}"
    local config_dir="$storage_path/config"

    if [[ -d "$config_dir" ]]; then
        echo -e "${GREEN}✅ Config directory found: $config_dir${NC}"

        local conf_count
        conf_count=$(find "$config_dir" -name "*.conf" -type f | wc -l)
        echo -e "${GREEN}📄 Configuration files: $conf_count${NC}"

        # Show some key files
        for conf in worldserver.conf authserver.conf playerbots.conf; do
            if [[ -f "$config_dir/$conf" ]]; then
                echo -e "${GREEN}  ✅ $conf${NC}"
            else
                echo -e "${YELLOW}  ⚠️ $conf (missing)${NC}"
            fi
        done
    else
        echo -e "${RED}❌ Config directory not found: $config_dir${NC}"
        echo -e "${YELLOW}ℹ️ Run './deploy.sh' first to initialize storage${NC}"
    fi

    # Check override file
    local override_file="$PROJECT_DIR/config/server-overrides.conf"
    if [[ -f "$override_file" ]]; then
        echo -e "${GREEN}✅ Override file: $override_file${NC}"
    else
        echo -e "${YELLOW}⚠️ Override file not found${NC}"
        echo -e "${BLUE}ℹ️ Run '$(basename "$0") edit' to create one${NC}"
    fi

    # Show available presets
    echo -e "\n${BLUE}📋 Available Presets:${NC}"
    python3 "$SCRIPT_DIR/apply-config.py" --list-presets
}

main() {
    print_header

    case "${1:-}" in
        "apply")
            echo -e "${YELLOW}🔄 Applying configuration overrides...${NC}"
            python3 "$SCRIPT_DIR/apply-config.py" "${@:2}"
            echo -e "\n${GREEN}✅ Configuration applied!${NC}"
            echo -e "${YELLOW}ℹ️ Restart your server to apply changes:${NC} docker compose restart"
            ;;
        "preset")
            if [[ -z "${2:-}" ]]; then
                echo -e "${RED}❌ Please specify a preset name${NC}"
                echo -e "Available presets:"
                python3 "$SCRIPT_DIR/apply-config.py" --list-presets
                exit 1
            fi
            echo -e "${YELLOW}🎯 Applying preset: $2${NC}"
            python3 "$SCRIPT_DIR/apply-config.py" --preset "$2" "${@:3}"
            echo -e "\n${GREEN}✅ Preset '$2' applied!${NC}"
            echo -e "${YELLOW}ℹ️ Restart your server to apply changes:${NC} docker compose restart"
            ;;
        "list")
            python3 "$SCRIPT_DIR/apply-config.py" --list-presets
            ;;
        "edit")
            edit_config
            ;;
        "status")
            show_status
            ;;
        "help"|"--help"|"-h"|"")
            show_usage
            ;;
        *)
            echo -e "${RED}❌ Unknown command: $1${NC}"
            show_usage
            exit 1
            ;;
    esac
}

main "$@"
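Note: a typical session with this wrapper (the preset name is the one shown in the usage text above) might be:

    ./scripts/bash/configure-server.sh edit                   # create or edit config/server-overrides.conf
    ./scripts/bash/configure-server.sh apply                  # apply overrides via apply-config.py
    ./scripts/bash/configure-server.sh preset fast-leveling   # or apply a named preset
    docker compose restart                                    # restart to pick up the new settings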
44  scripts/bash/copy-module-configs.sh  (Executable file)
@@ -0,0 +1,44 @@
#!/bin/bash
# Copy module .conf.dist files to .conf files for proper configuration loading
# This ensures all module configurations are available and can be customized

CONFIG_DIR="${STORAGE_PATH:-/nfs/azerothcore}/config"

echo "Creating module .conf files from .conf.dist templates..."

cd "$CONFIG_DIR" || {
    echo "Error: Cannot access config directory: $CONFIG_DIR"
    exit 1
}

# Counter for created files
created_count=0

# Process all .dist files except authserver, worldserver, dbimport (already handled)
for file in *.dist; do
    # Guard against a literal '*.dist' when no templates are present
    [ -f "$file" ] || continue
    conffile="${file%.dist}"

    # Skip if it's a core config file (already handled)
    case "$conffile" in
        authserver.conf|worldserver.conf|dbimport.conf)
            continue
            ;;
    esac

    # Create .conf file if it doesn't exist
    if [ ! -f "$conffile" ]; then
        echo "Creating $conffile from $file"
        cp "$file" "$conffile"
        created_count=$((created_count + 1))
    else
        echo "Skipping $conffile (already exists)"
    fi
done

echo "Created $created_count module configuration files"
echo "Module configuration files are now ready for customization"

# List all .conf files for verification
echo ""
echo "Available configuration files:"
ls -1 *.conf | sort
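Note: to spot-check the result from a shell, an illustrative one-liner (assuming the same STORAGE_PATH default as above) lists any templates still lacking a live .conf:

    for f in "${STORAGE_PATH:-/nfs/azerothcore}"/config/*.conf.dist; do
        [ -f "${f%.dist}" ] || echo "missing: $(basename "${f%.dist}")"
    done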
324  scripts/bash/db-import-conditional.sh  (Executable file)
@@ -0,0 +1,324 @@
|
||||
#!/bin/bash
|
||||
# azerothcore-rm
|
||||
set -e
|
||||
|
||||
print_help() {
|
||||
cat <<'EOF'
|
||||
Usage: db-import-conditional.sh [options]
|
||||
|
||||
Description:
|
||||
Conditionally restores AzerothCore databases from backups if available;
|
||||
otherwise creates fresh databases and runs the dbimport tool to populate
|
||||
schemas. Uses status markers to prevent overwriting restored data.
|
||||
|
||||
Options:
|
||||
-h, --help Show this help message and exit
|
||||
|
||||
Environment variables:
|
||||
CONTAINER_MYSQL Hostname of the MySQL container (default: ac-mysql)
|
||||
MYSQL_PORT MySQL port (default: 3306)
|
||||
MYSQL_USER MySQL user (default: root)
|
||||
MYSQL_ROOT_PASSWORD MySQL password for the user above
|
||||
DB_AUTH_NAME Auth DB name (default: acore_auth)
|
||||
DB_WORLD_NAME World DB name (default: acore_world)
|
||||
DB_CHARACTERS_NAME Characters DB name (default: acore_characters)
|
||||
BACKUP DIRS Uses /backups/{daily,timestamped} if present
|
||||
STATUS MARKERS Uses /var/lib/mysql-persistent/.restore-*
|
||||
|
||||
Notes:
|
||||
- If a valid backup is detected and successfully restored, schema import is skipped.
|
||||
- On fresh setups, the script creates databases and runs dbimport.
|
||||
EOF
|
||||
}
|
||||
|
||||
case "${1:-}" in
|
||||
-h|--help)
|
||||
print_help
|
||||
exit 0
|
||||
;;
|
||||
"") ;;
|
||||
*)
|
||||
echo "Unknown option: $1" >&2
|
||||
print_help
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "🔧 Conditional AzerothCore Database Import"
|
||||
echo "========================================"
|
||||
|
||||
# Restoration status markers - use writable location
|
||||
RESTORE_STATUS_DIR="/var/lib/mysql-persistent"
|
||||
MARKER_STATUS_DIR="/tmp"
|
||||
RESTORE_SUCCESS_MARKER="$RESTORE_STATUS_DIR/.restore-completed"
|
||||
RESTORE_FAILED_MARKER="$RESTORE_STATUS_DIR/.restore-failed"
|
||||
RESTORE_SUCCESS_MARKER_TMP="$MARKER_STATUS_DIR/.restore-completed"
|
||||
RESTORE_FAILED_MARKER_TMP="$MARKER_STATUS_DIR/.restore-failed"
|
||||
|
||||
mkdir -p "$RESTORE_STATUS_DIR" 2>/dev/null || true
|
||||
if ! touch "$RESTORE_STATUS_DIR/.test-write" 2>/dev/null; then
|
||||
echo "⚠️ Cannot write to $RESTORE_STATUS_DIR, using $MARKER_STATUS_DIR for markers"
|
||||
RESTORE_SUCCESS_MARKER="$RESTORE_SUCCESS_MARKER_TMP"
|
||||
RESTORE_FAILED_MARKER="$RESTORE_FAILED_MARKER_TMP"
|
||||
else
|
||||
rm -f "$RESTORE_STATUS_DIR/.test-write" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
echo "🔍 Checking restoration status..."
|
||||
|
||||
if [ -f "$RESTORE_SUCCESS_MARKER" ]; then
|
||||
echo "✅ Backup restoration completed successfully"
|
||||
cat "$RESTORE_SUCCESS_MARKER" || true
|
||||
echo "🚫 Skipping database import - data already restored from backup"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ -f "$RESTORE_FAILED_MARKER" ]; then
|
||||
echo "ℹ️ No backup was restored - fresh databases detected"
|
||||
cat "$RESTORE_FAILED_MARKER" || true
|
||||
echo "▶️ Proceeding with database import to populate fresh databases"
|
||||
else
|
||||
echo "⚠️ No restoration status found - assuming fresh installation"
|
||||
echo "▶️ Proceeding with database import"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "🔧 Starting database import process..."
|
||||
|
||||
echo "🔍 Checking for backups to restore..."
|
||||
|
||||
# Define backup search paths in priority order
|
||||
BACKUP_SEARCH_PATHS=(
|
||||
"/backups"
|
||||
"/var/lib/mysql-persistent"
|
||||
"$SCRIPT_DIR/../storage/backups"
|
||||
"$SCRIPT_DIR/../manual-backups"
|
||||
"$SCRIPT_DIR/../ImportBackup"
|
||||
)
|
||||
|
||||
backup_path=""
|
||||
|
||||
echo "🔍 Checking for legacy backup file..."
|
||||
if [ -f "/var/lib/mysql-persistent/backup.sql" ]; then
|
||||
echo "📄 Found legacy backup file, validating content..."
|
||||
if timeout 10 head -10 "/var/lib/mysql-persistent/backup.sql" 2>/dev/null | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
|
||||
echo "✅ Legacy backup file validated"
|
||||
backup_path="/var/lib/mysql-persistent/backup.sql"
|
||||
else
|
||||
echo "⚠️ Legacy backup file exists but appears invalid or empty"
|
||||
fi
|
||||
else
|
||||
echo "🔍 No legacy backup found"
|
||||
fi
|
||||
|
||||
# Search through backup directories
|
||||
if [ -z "$backup_path" ]; then
|
||||
for BACKUP_DIRS in "${BACKUP_SEARCH_PATHS[@]}"; do
|
||||
if [ ! -d "$BACKUP_DIRS" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "📁 Checking backup directory: $BACKUP_DIRS"
|
||||
if [ -n "$(ls -A "$BACKUP_DIRS" 2>/dev/null)" ]; then
|
||||
# Check for daily backups first
|
||||
if [ -d "$BACKUP_DIRS/daily" ]; then
|
||||
echo "🔍 Checking for daily backups..."
|
||||
latest_daily=$(ls -1t "$BACKUP_DIRS/daily" 2>/dev/null | head -n 1)
|
||||
if [ -n "$latest_daily" ] && [ -d "$BACKUP_DIRS/daily/$latest_daily" ]; then
|
||||
echo "📦 Latest daily backup found: $latest_daily"
|
||||
for backup_file in "$BACKUP_DIRS/daily/$latest_daily"/*.sql.gz; do
|
||||
if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
|
||||
if timeout 10 zcat "$backup_file" 2>/dev/null | head -20 | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
|
||||
echo "✅ Valid daily backup file: $(basename "$backup_file")"
|
||||
backup_path="$BACKUP_DIRS/daily/$latest_daily"
|
||||
break 2
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check for hourly backups
|
||||
if [ -z "$backup_path" ] && [ -d "$BACKUP_DIRS/hourly" ]; then
|
||||
echo "🔍 Checking for hourly backups..."
|
||||
latest_hourly=$(ls -1t "$BACKUP_DIRS/hourly" 2>/dev/null | head -n 1)
|
||||
if [ -n "$latest_hourly" ] && [ -d "$BACKUP_DIRS/hourly/$latest_hourly" ]; then
|
||||
echo "📦 Latest hourly backup found: $latest_hourly"
|
||||
for backup_file in "$BACKUP_DIRS/hourly/$latest_hourly"/*.sql.gz; do
|
||||
if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
|
||||
if timeout 10 zcat "$backup_file" 2>/dev/null | head -20 | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
|
||||
echo "✅ Valid hourly backup file: $(basename "$backup_file")"
|
||||
backup_path="$BACKUP_DIRS/hourly/$latest_hourly"
|
||||
break 2
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check for timestamped backup directories (like ExportBackup_YYYYMMDD_HHMMSS)
|
||||
if [ -z "$backup_path" ]; then
|
||||
echo "🔍 Checking for timestamped backup directories..."
|
||||
timestamped_backups=$(ls -1t "$BACKUP_DIRS" 2>/dev/null | grep -E '^(ExportBackup_)?[0-9]{8}_[0-9]{6}$' | head -n 1)
|
||||
if [ -n "$timestamped_backups" ]; then
|
||||
latest_timestamped="$timestamped_backups"
|
||||
echo "📦 Found timestamped backup: $latest_timestamped"
|
||||
if [ -d "$BACKUP_DIRS/$latest_timestamped" ]; then
|
||||
if ls "$BACKUP_DIRS/$latest_timestamped"/*.sql.gz >/dev/null 2>&1; then
|
||||
echo "🔍 Validating timestamped backup content..."
|
||||
for backup_file in "$BACKUP_DIRS/$latest_timestamped"/*.sql.gz; do
|
||||
if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
|
||||
if timeout 10 zcat "$backup_file" 2>/dev/null | head -20 | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
|
||||
echo "✅ Valid timestamped backup found: $(basename "$backup_file")"
|
||||
backup_path="$BACKUP_DIRS/$latest_timestamped"
|
||||
break 2
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check for manual backups (*.sql files)
|
||||
if [ -z "$backup_path" ]; then
|
||||
echo "🔍 Checking for manual backup files..."
|
||||
latest_manual=$(ls -1t "$BACKUP_DIRS"/*.sql 2>/dev/null | head -n 1)
|
||||
if [ -n "$latest_manual" ] && [ -f "$latest_manual" ]; then
|
||||
echo "📦 Found manual backup: $(basename "$latest_manual")"
|
||||
if timeout 10 head -20 "$latest_manual" 2>/dev/null | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
|
||||
echo "✅ Valid manual backup file: $(basename "$latest_manual")"
|
||||
backup_path="$latest_manual"
|
||||
break
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# If we found a backup in this directory, stop searching
|
||||
if [ -n "$backup_path" ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "🔄 Final backup path result: '$backup_path'"
|
||||
if [ -n "$backup_path" ]; then
|
||||
echo "📦 Found backup: $(basename "$backup_path")"
|
||||
|
||||
  restore_backup() {
    local backup_path="$1"
    local restore_success=true

    if [ -d "$backup_path" ]; then
      echo "🔄 Restoring from backup directory: $backup_path"

      # Check for manifest file to understand backup structure
      if [ -f "$backup_path/manifest.json" ]; then
        echo "📋 Found manifest file, checking backup contents..."
        cat "$backup_path/manifest.json"
      fi

      # Restore compressed SQL files
      if ls "$backup_path"/*.sql.gz >/dev/null 2>&1; then
        for backup_file in "$backup_path"/*.sql.gz; do
          if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
            echo "🔄 Restoring $(basename "$backup_file")..."
            if timeout 300 zcat "$backup_file" | mysql -h "${CONTAINER_MYSQL}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}"; then
              echo "✅ Restored $(basename "$backup_file")"
            else
              echo "❌ Failed to restore $(basename "$backup_file")"
              restore_success=false
            fi
          fi
        done
      fi

      # Also check for uncompressed SQL files
      if ls "$backup_path"/*.sql >/dev/null 2>&1; then
        for backup_file in "$backup_path"/*.sql; do
          if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
            echo "🔄 Restoring $(basename "$backup_file")..."
            if timeout 300 mysql -h "${CONTAINER_MYSQL}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" < "$backup_file"; then
              echo "✅ Restored $(basename "$backup_file")"
            else
              echo "❌ Failed to restore $(basename "$backup_file")"
              restore_success=false
            fi
          fi
        done
      fi

    elif [ -f "$backup_path" ]; then
      echo "🔄 Restoring from backup file: $backup_path"
      case "$backup_path" in
        *.gz)
          if timeout 300 zcat "$backup_path" | mysql -h "${CONTAINER_MYSQL}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}"; then
            echo "✅ Restored compressed backup"
          else
            echo "❌ Failed to restore compressed backup"
            restore_success=false
          fi
          ;;
        *.sql)
          if timeout 300 mysql -h "${CONTAINER_MYSQL}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" < "$backup_path"; then
            echo "✅ Restored SQL backup"
          else
            echo "❌ Failed to restore SQL backup"
            restore_success=false
          fi
          ;;
        *)
          echo "⚠️ Unknown backup file format: $backup_path"
          restore_success=false
          ;;
      esac
    fi

    # Propagate success/failure as the function's exit status
    [ "$restore_success" = true ]
  }

  if restore_backup "$backup_path"; then
    echo "$(date): Backup successfully restored from $backup_path" > "$RESTORE_SUCCESS_MARKER"
    echo "🎉 Backup restoration completed successfully!"
    exit 0
  else
    echo "$(date): Backup restoration failed - proceeding with fresh setup" > "$RESTORE_FAILED_MARKER"
    echo "⚠️ Backup restoration failed, will proceed with fresh database setup"
  fi
else
  echo "ℹ️ No valid backups found - proceeding with fresh setup"
  echo "$(date): No backup found - fresh setup needed" > "$RESTORE_FAILED_MARKER"
fi

echo "🗄️ Creating fresh AzerothCore databases..."
|
||||
mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} -e "
|
||||
CREATE DATABASE IF NOT EXISTS ${DB_AUTH_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
CREATE DATABASE IF NOT EXISTS ${DB_WORLD_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
CREATE DATABASE IF NOT EXISTS ${DB_CHARACTERS_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
|
||||
echo "✅ Fresh databases created - proceeding with schema import"
|
||||
|
||||
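# dbimport reads AzerothCore-style "DatabaseInfo" strings of the form
# "host;port;user;password;database". Updates.EnableDatabases is a bitmask
# (1 = auth, 2 = characters, 4 = world), so 7 auto-applies pending updates
# to all three schemas.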
echo "📝 Creating dbimport configuration..."
|
||||
mkdir -p /azerothcore/env/dist/etc
|
||||
cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
|
||||
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
|
||||
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
|
||||
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
|
||||
Updates.EnableDatabases = 7
|
||||
Updates.AutoSetup = 1
|
||||
EOF
|
||||
|
||||
echo "🚀 Running database import..."
|
||||
cd /azerothcore/env/dist/bin
|
||||
if ./dbimport; then
|
||||
echo "✅ Database import completed successfully!"
|
||||
echo "$(date): Database import completed successfully" > "$RESTORE_STATUS_DIR/.import-completed" || echo "$(date): Database import completed successfully" > "$MARKER_STATUS_DIR/.import-completed"
|
||||
else
|
||||
echo "❌ Database import failed!"
|
||||
echo "$(date): Database import failed" > "$RESTORE_STATUS_DIR/.import-failed" || echo "$(date): Database import failed" > "$MARKER_STATUS_DIR/.import-failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🎉 Database import process complete!"
|
||||
121
scripts/bash/deploy-tools.sh
Executable file
@@ -0,0 +1,121 @@
#!/bin/bash

# azerothcore-rm helper to deploy phpMyAdmin and Keira3 tooling.

set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.."
DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
ENV_FILE="$ROOT_DIR/.env"
TEMPLATE_FILE="$ROOT_DIR/.env.template"
source "$ROOT_DIR/scripts/bash/project_name.sh"

# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
source "$ROOT_DIR/scripts/bash/compose_overrides.sh"
declare -a COMPOSE_FILE_ARGS=()

BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

info(){ echo -e "${BLUE}ℹ️ $*${NC}"; }
ok(){ echo -e "${GREEN}✅ $*${NC}"; }
warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
err(){ echo -e "${RED}❌ $*${NC}"; }

read_env(){
  local key="$1" default="${2:-}" value=""
  if [ -f "$ENV_FILE" ]; then
    value="$(grep -E "^${key}=" "$ENV_FILE" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ]; then
    value="$default"
  fi
  echo "$value"
}

resolve_project_name(){
  local raw_name
  raw_name="$(read_env COMPOSE_PROJECT_NAME "$DEFAULT_PROJECT_NAME")"
  project_name::sanitize "$raw_name"
}

init_compose_files(){
  compose_overrides::build_compose_args "$ROOT_DIR" "$ENV_FILE" "$DEFAULT_COMPOSE_FILE" COMPOSE_FILE_ARGS
}

init_compose_files

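# Thin wrapper so every invocation shares the resolved project name and the
# compose file arguments assembled by compose_overrides::build_compose_args.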
compose(){
  docker compose --project-name "$PROJECT_NAME" "${COMPOSE_FILE_ARGS[@]}" "$@"
}

show_header(){
  echo -e "\n${BLUE} 🛠️ TOOLING DEPLOYMENT 🛠️${NC}"
  echo -e "${BLUE} ═══════════════════════════${NC}"
  echo -e "${BLUE} 📊 Enabling Management UIs 📊${NC}\n"
}

ensure_command(){
  if ! command -v "$1" >/dev/null 2>&1; then
    err "Required command '$1' not found in PATH."
    exit 1
  fi
}

ensure_mysql_running(){
  local mysql_service="ac-mysql"
  local mysql_container
  mysql_container="$(read_env CONTAINER_MYSQL "ac-mysql")"
  if docker ps --format '{{.Names}}' | grep -qx "$mysql_container"; then
    info "MySQL container '$mysql_container' already running."
    return
  fi
  info "Starting database service '$mysql_service'..."
  compose --profile db up -d "$mysql_service" >/dev/null
  ok "Database service ready."
}

start_tools(){
  info "Starting phpMyAdmin and Keira3..."
  compose --profile tools up --detach --quiet-pull >/dev/null
  ok "Tooling services are online."
}

show_endpoints(){
  local pma_port keira_port
  pma_port="$(read_env PMA_EXTERNAL_PORT 8081)"
  keira_port="$(read_env KEIRA3_EXTERNAL_PORT 4201)"
  echo ""
  echo -e "${GREEN}Accessible endpoints:${NC}"
  echo " • phpMyAdmin : http://localhost:${pma_port}"
  echo " • Keira3 : http://localhost:${keira_port}"
  echo ""
}

main(){
  if [[ "${1:-}" == "--help" ]]; then
    cat <<EOF
Usage: $(basename "$0")

Ensures the database service is running and launches the tooling profile
containing phpMyAdmin and Keira3 dashboards.
EOF
    exit 0
  fi

  ensure_command docker
  docker info >/dev/null 2>&1 || { err "Docker daemon unavailable."; exit 1; }

  PROJECT_NAME="$(resolve_project_name)"

  show_header
  ensure_mysql_running
  start_tools
  show_endpoints
}

main "$@"
85
scripts/bash/detect-client-data-version.sh
Executable file
@@ -0,0 +1,85 @@
#!/usr/bin/env bash
#
# Detect which wowgaming/client-data release an AzerothCore checkout expects.
# Currently inspects apps/installer/includes/functions.sh for the
# inst_download_client_data version marker, but can be extended with new
# heuristics if needed.

set -euo pipefail

print_usage() {
  cat <<'EOF'
Usage: scripts/bash/detect-client-data-version.sh [--no-header] <repo-path> [...]

Outputs a tab-separated list of repository path, raw version token found in the
source tree, and a normalized CLIENT_DATA_VERSION (e.g., v18.0).
EOF
}

if [[ "${1:-}" == "--help" ]]; then
  print_usage
  exit 0
fi

show_header=1
if [[ "${1:-}" == "--no-header" ]]; then
  show_header=0
  shift
fi

if [[ $# -lt 1 ]]; then
  print_usage >&2
  exit 1
fi

normalize_version() {
  local token="$1"
  token="${token//$'\r'/}"
  token="${token//\"/}"
  token="${token//\'/}"
  token="${token// /}"
  token="${token%%#*}"
  token="${token%%;*}"
  token="${token%%\)*}"
  token="${token%%\}*}"
  echo "$token"
}

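# For example, a raw installer token like `"v18.0" # data tag` (the comment is
# illustrative) normalizes to plain `v18.0`.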
detect_from_installer() {
  local repo_path="$1"
  local installer_file="$repo_path/apps/installer/includes/functions.sh"
  [[ -f "$installer_file" ]] || return 1
  local raw
  raw="$(grep -E 'local[[:space:]]+VERSION=' "$installer_file" | head -n1 | cut -d'=' -f2-)"
  [[ -n "$raw" ]] || return 1
  echo "$raw"
}

detect_version() {
  local repo_path="$1"
  if [[ ! -d "$repo_path" ]]; then
    printf '%s\t%s\t%s\n' "$repo_path" "<missing>" "<unknown>"
    return
  fi

  local raw=""
  if raw="$(detect_from_installer "$repo_path")"; then
    :
  elif [[ -f "$repo_path/.env" ]]; then
    raw="$(grep -E '^CLIENT_DATA_VERSION=' "$repo_path/.env" | head -n1 | cut -d'=' -f2-)"
  fi

  if [[ -z "$raw" ]]; then
    printf '%s\t%s\t%s\n' "$repo_path" "<unknown>" "<unknown>"
    return
  fi

  local normalized
  normalized="$(normalize_version "$raw")"
  printf '%s\t%s\t%s\n' "$repo_path" "$raw" "$normalized"
}

[[ "$show_header" -eq 0 ]] || printf 'repo\traw\tclient_data_version\n'
for repo in "$@"; do
  detect_version "$repo"
done
202
scripts/bash/download-client-data.sh
Executable file
@@ -0,0 +1,202 @@
#!/bin/bash
# azerothcore-rm
set -e

echo '🚀 Starting AzerothCore game data setup...'

# Get the latest release info from wowgaming/client-data
REQUESTED_TAG="${CLIENT_DATA_VERSION:-}"
if [ -n "$REQUESTED_TAG" ]; then
  echo "📌 Using requested client data version: $REQUESTED_TAG"
  LATEST_TAG="$REQUESTED_TAG"
  LATEST_URL="https://github.com/wowgaming/client-data/releases/download/${REQUESTED_TAG}/data.zip"
else
  echo '📡 Fetching latest client data release info...'
  if command -v curl >/dev/null 2>&1; then
    RELEASE_INFO=$(curl -sL https://api.github.com/repos/wowgaming/client-data/releases/latest 2>/dev/null)
  elif command -v wget >/dev/null 2>&1; then
    RELEASE_INFO=$(wget -qO- https://api.github.com/repos/wowgaming/client-data/releases/latest 2>/dev/null)
  else
    echo '❌ No download tool available to fetch release info (need curl or wget)'
    exit 1
  fi

  if [ -n "$RELEASE_INFO" ]; then
    LATEST_URL=$(echo "$RELEASE_INFO" | grep '"browser_download_url":' | grep '\.zip' | cut -d'"' -f4 | head -1)
    LATEST_TAG=$(echo "$RELEASE_INFO" | grep '"tag_name":' | cut -d'"' -f4)
    LATEST_SIZE=$(echo "$RELEASE_INFO" | grep '"size":' | head -1 | grep -o '[0-9]*')
  fi

  if [ -z "$LATEST_URL" ]; then
    echo '❌ Could not fetch client-data release information. Aborting.'
    exit 1
  fi
fi

echo "📍 Latest release: $LATEST_TAG"
echo "📥 Download URL: $LATEST_URL"

# Cache file paths
CACHE_DIR="/cache"
mkdir -p "$CACHE_DIR"
CACHE_FILE="${CACHE_DIR}/client-data-${LATEST_TAG}.zip"
TMP_FILE="${CACHE_FILE}.tmp"
VERSION_FILE="${CACHE_DIR}/client-data-version.txt"

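# Cache layout under /cache, e.g. with LATEST_TAG=v18.0:
#   client-data-v18.0.zip      verified archive reused across runs
#   client-data-v18.0.zip.tmp  in-flight download, promoted after verification
#   client-data-version.txt    tag of the currently cached archive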
# Check if we have a cached version
if [ -f "$CACHE_FILE" ] && [ -f "$VERSION_FILE" ]; then
  CACHED_VERSION=$(cat "$VERSION_FILE" 2>/dev/null)
  if [ "$CACHED_VERSION" = "$LATEST_TAG" ]; then
    echo "✅ Found cached client data version $LATEST_TAG"
    echo "📊 Cached file size: $(ls -lh "$CACHE_FILE" | awk '{print $5}')"

    # Verify cache file integrity
    echo "🔍 Verifying cached file integrity..."
    CACHE_INTEGRITY_OK=false

    if command -v 7z >/dev/null 2>&1; then
      if 7z t "$CACHE_FILE" >/dev/null 2>&1; then
        CACHE_INTEGRITY_OK=true
      fi
    fi

    if [ "$CACHE_INTEGRITY_OK" = "false" ]; then
      if unzip -t "$CACHE_FILE" > /dev/null 2>&1; then
        CACHE_INTEGRITY_OK=true
      fi
    fi

    if [ "$CACHE_INTEGRITY_OK" = "true" ]; then
      echo "✅ Cache file integrity verified"
      echo "⚡ Using cached download - skipping download phase"
      cp "$CACHE_FILE" data.zip
    else
      echo "⚠️ Cache file corrupted, will re-download"
      rm -f "$CACHE_FILE" "$VERSION_FILE"
    fi
  else
    echo "📦 Cache version ($CACHED_VERSION) differs from latest ($LATEST_TAG)"
    echo "🗑️ Removing old cache"
    rm -f "${CACHE_DIR}"/client-data-*.zip "$VERSION_FILE"
  fi
fi

# Download if we don't have a valid cached file
if [ ! -f "data.zip" ]; then
  echo "📥 Downloading client data (~15GB)..."
  echo "📍 Source: $LATEST_URL"

  if command -v aria2c >/dev/null 2>&1; then
    aria2c --max-connection-per-server=8 --split=8 --min-split-size=10M \
      --summary-interval=5 --download-result=hide \
      --console-log-level=warn --show-console-readout=false \
      --dir "$CACHE_DIR" -o "$(basename "$TMP_FILE")" "$LATEST_URL" || {
      echo '⚠️ aria2c failed, falling back to curl...'
      if command -v curl >/dev/null 2>&1; then
        curl -L --progress-bar -o "$TMP_FILE" "$LATEST_URL" || {
          echo '❌ curl failed, trying wget...'
          if command -v wget >/dev/null 2>&1; then
            wget --progress=dot:giga -O "$TMP_FILE" "$LATEST_URL" || {
              echo '❌ All download methods failed'
              rm -f "$TMP_FILE"
              exit 1
            }
          else
            echo '❌ wget not available, all download methods failed'
            rm -f "$TMP_FILE"
            exit 1
          fi
        }
      elif command -v wget >/dev/null 2>&1; then
        wget --progress=dot:giga -O "$TMP_FILE" "$LATEST_URL" || {
          echo '❌ All download methods failed'
          rm -f "$TMP_FILE"
          exit 1
        }
      else
        echo '❌ No fallback download method available'
        rm -f "$TMP_FILE"
        exit 1
      fi
    }
  else
    # Try curl first since it's more commonly available in minimal containers
    if command -v curl >/dev/null 2>&1; then
      echo "📥 Using curl (aria2c not available)..."
      curl -L --progress-bar -o "$TMP_FILE" "$LATEST_URL" || {
        echo '❌ curl failed, trying wget...'
        if command -v wget >/dev/null 2>&1; then
          wget --progress=dot:giga -O "$TMP_FILE" "$LATEST_URL" 2>&1 | sed 's/^/📊 /' || {
            echo '❌ All download methods failed'
            rm -f "$TMP_FILE"
            exit 1
          }
        else
          echo '❌ wget not available, all download methods failed'
          exit 1
        fi
      }
    elif command -v wget >/dev/null 2>&1; then
      echo "📥 Using wget (aria2c and curl not available)..."
      wget --progress=dot:giga -O "$TMP_FILE" "$LATEST_URL" 2>&1 | sed 's/^/📊 /' || {
        echo '❌ wget failed, no other download methods available'
        rm -f "$TMP_FILE"
        exit 1
      }
    else
      echo '❌ No download tool available (tried aria2c, curl, wget)'
      exit 1
    fi
  fi

  echo "🔍 Verifying download integrity..."
  INTEGRITY_OK=false

  if command -v 7z >/dev/null 2>&1; then
    if 7z t "$TMP_FILE" >/dev/null 2>&1; then
      INTEGRITY_OK=true
    fi
  fi

  if [ "$INTEGRITY_OK" = "false" ]; then
    if unzip -t "$TMP_FILE" > /dev/null 2>&1; then
      INTEGRITY_OK=true
    fi
  fi

  if [ "$INTEGRITY_OK" = "true" ]; then
    mv "$TMP_FILE" "$CACHE_FILE"
    echo "$LATEST_TAG" > "$VERSION_FILE"
    echo '✅ Download completed and verified'
    echo "📊 File size: $(ls -lh "$CACHE_FILE" | awk '{print $5}')"
    cp "$CACHE_FILE" data.zip
  else
    echo '❌ Downloaded file is corrupted'
    rm -f "$TMP_FILE"
    exit 1
  fi
fi

echo '📂 Extracting client data (this may take some minutes)...'
rm -rf /azerothcore/data/maps /azerothcore/data/vmaps /azerothcore/data/mmaps /azerothcore/data/dbc

if command -v 7z >/dev/null 2>&1; then
  7z x -aoa -o/azerothcore/data/ data.zip >/dev/null 2>&1
else
  unzip -o -q data.zip -d /azerothcore/data/
fi

rm -f data.zip

echo '✅ Client data extraction complete!'
for dir in maps vmaps mmaps dbc; do
  if [ -d "/azerothcore/data/$dir" ] && [ -n "$(ls -A /azerothcore/data/$dir 2>/dev/null)" ]; then
    DIR_SIZE=$(du -sh /azerothcore/data/$dir 2>/dev/null | cut -f1)
    echo "✅ $dir directory: OK ($DIR_SIZE)"
  else
    echo "❌ $dir directory: MISSING or EMPTY"
    exit 1
  fi
done

echo '🎉 Game data setup complete! AzerothCore worldserver can now start.'
68
scripts/bash/import-database-files.sh
Executable file
@@ -0,0 +1,68 @@
#!/bin/bash
# Copy user database files from database-import/ to backup system
set -e

# Source environment variables
if [ -f ".env" ]; then
  set -a
  source .env
  set +a
fi

IMPORT_DIR="./database-import"
STORAGE_PATH="${STORAGE_PATH:-./storage}"
STORAGE_PATH_LOCAL="${STORAGE_PATH_LOCAL:-./local-storage}"
BACKUP_DIR="${STORAGE_PATH}/backups/daily"
TIMESTAMP=$(date +%Y-%m-%d)

# Exit if no import directory or empty
if [ ! -d "$IMPORT_DIR" ] || [ -z "$(ls -A "$IMPORT_DIR" 2>/dev/null | grep -E '\.(sql|sql\.gz)$')" ]; then
  echo "📁 No database files found in $IMPORT_DIR - skipping import"
  exit 0
fi

# Exit if backup system already has databases restored
if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
  echo "✅ Database already restored - skipping import"
  exit 0
fi

echo "📥 Found database files in $IMPORT_DIR"
echo "📂 Copying to backup system for import..."

# Ensure backup directory exists
mkdir -p "$BACKUP_DIR"

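# Naming examples (with TIMESTAMP=2024-01-31; dates illustrative):
#   my-auth-dump.sql    → acore_auth_2024-01-31.sql
#   world_backup.sql.gz → acore_world_2024-01-31.sql.gz
#   misc_tables.sql     → misc_tables_2024-01-31.sql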
# Copy files with smart naming
for file in "$IMPORT_DIR"/*.sql "$IMPORT_DIR"/*.sql.gz; do
  [ -f "$file" ] || continue

  filename=$(basename "$file")

  # Try to detect database type by filename
  if echo "$filename" | grep -qi "auth"; then
    target_name="acore_auth_${TIMESTAMP}.sql"
  elif echo "$filename" | grep -qi "world"; then
    target_name="acore_world_${TIMESTAMP}.sql"
  elif echo "$filename" | grep -qi "char"; then
    target_name="acore_characters_${TIMESTAMP}.sql"
  else
    # Fallback - strip .sql/.sql.gz before timestamping so the compressed
    # suffix is not duplicated below
    base_name="${filename%.sql.gz}"
    base_name="${base_name%.sql}"
    target_name="${base_name}_${TIMESTAMP}.sql"
  fi

  # Add .gz extension if source is compressed
  if [[ "$filename" == *.sql.gz ]]; then
    target_name="${target_name}.gz"
  fi

  target_path="$BACKUP_DIR/$target_name"

  echo "📋 Copying $filename → $target_name"
  cp "$file" "$target_path"
done

echo "✅ Database files copied to backup system"
echo "💡 Files will be automatically imported during deployment"
379
scripts/bash/manage-modules-sql.sh
Executable file
@@ -0,0 +1,379 @@
#!/bin/bash
# azerothcore-rm
set -e
trap 'echo "  ❌ SQL helper error (line ${LINENO}): ${BASH_COMMAND}" >&2' ERR

CUSTOM_SQL_ROOT="/tmp/scripts/sql/custom"
ALT_CUSTOM_SQL_ROOT="/scripts/sql/custom"
HELPER_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

SQL_SUCCESS_LOG=()
SQL_FAILURE_LOG=()
TEMP_SQL_FILES=()

render_sql_file_for_execution(){
  local src="$1"
  local pb_db="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
  local rendered="$src"

  if command -v python3 >/dev/null 2>&1; then
    local temp
    temp="$(mktemp)"
    local result
    result="$(python3 - "$src" "$temp" "$pb_db" <<'PY'
import sys, pathlib, re
src, dest, pb_db = sys.argv[1:]
text = pathlib.Path(src).read_text()
original = text
text = text.replace("{{PLAYERBOTS_DB}}", pb_db)
pattern = re.compile(r'(?<![.`])\bplayerbots\b')
text = pattern.sub(f'`{pb_db}`.playerbots', text)
pathlib.Path(dest).write_text(text)
print("changed" if text != original else "unchanged", end="")
PY
)"
    if [ "$result" = "changed" ]; then
      rendered="$temp"
      TEMP_SQL_FILES+=("$temp")
    else
      rm -f "$temp"
    fi
  fi

  echo "$rendered"
}

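# Rendering example: "INSERT INTO playerbots ..." becomes
# "INSERT INTO `acore_playerbots`.playerbots ..." (already-qualified
# references like `db`.playerbots are left alone by the lookbehind, and
# {{PLAYERBOTS_DB}} placeholders are expanded first), so module SQL can run
# against any configured playerbots database name.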
log_sql_success(){
  local target_db="$1"
  local sql_file="$2"
  SQL_SUCCESS_LOG+=("${target_db}::${sql_file}")
}

log_sql_failure(){
  local target_db="$1"
  local sql_file="$2"
  SQL_FAILURE_LOG+=("${target_db}::${sql_file}")
}

mysql_exec(){
  local mysql_port="${MYSQL_PORT:-3306}"
  if command -v mariadb >/dev/null 2>&1; then
    mariadb --ssl=false -h "${CONTAINER_MYSQL}" -P "$mysql_port" -u root -p"${MYSQL_ROOT_PASSWORD}" "$@"
    return
  fi
  if command -v mysql >/dev/null 2>&1; then
    mysql --ssl-mode=DISABLED -h "${CONTAINER_MYSQL}" -P "$mysql_port" -u root -p"${MYSQL_ROOT_PASSWORD}" "$@"
    return
  fi
  echo "  ❌ Neither mariadb nor mysql client is available for SQL execution" >&2
  return 127
}

playerbots_table_exists(){
  local pb_db="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
  local count
  count="$(mysql_exec -N -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='${pb_db}' AND table_name='playerbots';" 2>/dev/null || echo 0)"
  [ "${count}" != "0" ]
}

run_custom_sql_group(){
  local subdir="$1" target_db="$2" label="$3"
  local dir="${CUSTOM_SQL_ROOT}/${subdir}"
  if [ ! -d "$dir" ] && [ -d "${ALT_CUSTOM_SQL_ROOT}/${subdir}" ]; then
    dir="${ALT_CUSTOM_SQL_ROOT}/${subdir}"
  fi
  [ -d "$dir" ] || return 0
  while IFS= read -r sql_file; do
    local base_name
    base_name="$(basename "$sql_file")"
    local rendered
    rendered="$(render_sql_file_for_execution "$sql_file")"
    if grep -q '\bplayerbots\b' "$rendered"; then
      if ! playerbots_table_exists; then
        echo "  Skipping ${label}: ${base_name} (playerbots table missing)"
        continue
      fi
    fi
    echo "  Executing ${label}: ${base_name}"
    local sql_output
    sql_output="$(mktemp)"
    if mysql_exec "${target_db}" < "$rendered" >"$sql_output" 2>&1; then
      echo "  ✅ Successfully executed ${base_name}"
      log_sql_success "$target_db" "$sql_file"
    else
      echo "  ❌ Failed to execute $sql_file"
      sed 's/^/    /' "$sql_output"
      log_sql_failure "$target_db" "$sql_file"
    fi
    rm -f "$sql_output"
  done < <(LC_ALL=C find "$dir" -type f -name "*.sql" | sort) || true
}

ensure_module_metadata(){
  if declare -p MODULE_NAME >/dev/null 2>&1; then
    return 0
  fi

  local -a module_py_candidates=(
    "${MODULE_HELPER:-}"
    "${HELPER_DIR%/*}/modules.py"
    "/tmp/scripts/python/modules.py"
    "/scripts/python/modules.py"
  )

  local module_py=""
  for candidate in "${module_py_candidates[@]}"; do
    [ -n "$candidate" ] || continue
    if [ -f "$candidate" ]; then
      module_py="$candidate"
      break
    fi
  done

  local manifest_path="${MANIFEST_PATH:-${MODULES_MANIFEST_PATH:-/tmp/config/module-manifest.json}}"
  local env_path="${ENV_PATH:-${MODULES_ENV_PATH:-/tmp/.env}}"
  local state_env_candidate="${STATE_DIR:-${MODULES_ROOT:-/modules}}/modules.env"
  if [ -f "$state_env_candidate" ]; then
    env_path="$state_env_candidate"
  fi

  if [ -z "$module_py" ]; then
    echo "  ⚠️ Module metadata helper missing; skipping module SQL execution."
    return 1
  fi
  if [ ! -f "$manifest_path" ] || [ ! -f "$env_path" ]; then
    echo "  ⚠️ Module manifest (${manifest_path}) or env (${env_path}) not found; skipping module SQL execution."
    return 1
  fi

  local shell_dump
  echo "  ℹ️ Reloading module metadata using ${module_py} (env=${env_path}, manifest=${manifest_path})"
  if ! shell_dump="$(python3 "$module_py" --env-path "$env_path" --manifest "$manifest_path" dump --format shell 2>/dev/null)"; then
    echo "  ⚠️ Unable to regenerate module metadata from ${module_py}; skipping module SQL execution."
    return 1
  fi
  shell_dump="$(echo "$shell_dump" | sed 's/^declare -A /declare -gA /')"
  eval "$shell_dump"
  return 0
}

# Function to execute SQL files for a module
module_sql_run_module(){
  local module_key="$1"
  local module_dir_path="$2"
  local module_dir_name="${3:-}"
  local module_name="${MODULE_NAME[$module_key]:-}"
  if [ -z "$module_name" ]; then
    if [ -n "$module_dir_name" ]; then
      module_name="$module_dir_name"
    else
      module_name="$(basename "$module_dir_path")"
    fi
  fi
  local world_db="${DB_WORLD_NAME:-acore_world}"
  local auth_db="${DB_AUTH_NAME:-acore_auth}"
  local characters_db="${DB_CHARACTERS_NAME:-acore_characters}"
  local playerbots_db="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
  local character_set="${MYSQL_CHARACTER_SET:-utf8mb4}"
  local collation="${MYSQL_COLLATION:-utf8mb4_unicode_ci}"
  execute_sql_file_in_db(){
    local target_db="$1"
    local sql_file="$2"
    local label="$3"
    local base_name
    base_name="$(basename "$sql_file")"
    local rendered
    rendered="$(render_sql_file_for_execution "$sql_file")"

    if grep -q '\bplayerbots\b' "$rendered"; then
      if ! playerbots_table_exists; then
        echo "  Skipping ${label}: ${base_name} (playerbots table missing)"
        return 0
      fi
    fi

    echo "  Executing ${label}: ${base_name}"
    local sql_output
    sql_output="$(mktemp)"
    if mysql_exec "${target_db}" < "$rendered" >"$sql_output" 2>&1; then
      echo "  ✅ Successfully executed ${base_name}"
      log_sql_success "$target_db" "$sql_file"
    else
      echo "  ❌ Failed to execute $sql_file"
      sed 's/^/    /' "$sql_output"
      log_sql_failure "$target_db" "$sql_file"
    fi
    rm -f "$sql_output"
  }

  local run_sorted_sql

  run_sorted_sql() {
    local dir="$1"
    local target_db="$2"
    local label="$3"
    local skip_regex="${4:-}"
    [ -d "$dir" ] || return
    while IFS= read -r sql_file; do
      local base_name
      base_name="$(basename "$sql_file")"
      if [ -n "$skip_regex" ] && [[ "$base_name" =~ $skip_regex ]]; then
        echo "  Skipping ${label}: ${base_name}"
        continue
      fi
      execute_sql_file_in_db "$target_db" "$sql_file" "$label"
    done < <(LC_ALL=C find "$dir" -type f -name "*.sql" | sort) || true
  }

  echo "Processing SQL scripts for $module_name..."

  if [ "$module_key" = "MODULE_PLAYERBOTS" ]; then
    echo "  Ensuring database ${playerbots_db} exists..."
    if mysql_exec -e "CREATE DATABASE IF NOT EXISTS \`${playerbots_db}\` CHARACTER SET ${character_set} COLLATE ${collation};" >/dev/null 2>&1; then
      echo "  ✅ Playerbots database ready"
    else
      echo "  ❌ Failed to ensure playerbots database"
    fi
  fi

  # Find and execute SQL files in the module
  if [ -d "$module_dir_path/data/sql" ]; then
    # Execute world database scripts
    if [ -d "$module_dir_path/data/sql/world" ]; then
      while IFS= read -r sql_file; do
        execute_sql_file_in_db "$world_db" "$sql_file" "world SQL"
      done < <(find "$module_dir_path/data/sql/world" -type f -name "*.sql") || true
    fi
    run_sorted_sql "$module_dir_path/data/sql/db-world" "${world_db}" "world SQL"

    # Execute auth database scripts
    if [ -d "$module_dir_path/data/sql/auth" ]; then
      while IFS= read -r sql_file; do
        execute_sql_file_in_db "$auth_db" "$sql_file" "auth SQL"
      done < <(find "$module_dir_path/data/sql/auth" -type f -name "*.sql") || true
    fi
    run_sorted_sql "$module_dir_path/data/sql/db-auth" "${auth_db}" "auth SQL"

    # Execute character database scripts
    if [ -d "$module_dir_path/data/sql/characters" ]; then
      while IFS= read -r sql_file; do
        execute_sql_file_in_db "$characters_db" "$sql_file" "characters SQL"
      done < <(find "$module_dir_path/data/sql/characters" -type f -name "*.sql") || true
    fi
    run_sorted_sql "$module_dir_path/data/sql/db-characters" "${characters_db}" "characters SQL"

    # Execute playerbots database scripts
    if [ "$module_key" = "MODULE_PLAYERBOTS" ] && [ -d "$module_dir_path/data/sql/playerbots" ]; then
      local pb_root="$module_dir_path/data/sql/playerbots"
      run_sorted_sql "$pb_root/base" "$playerbots_db" "playerbots SQL"
      run_sorted_sql "$pb_root/custom" "$playerbots_db" "playerbots SQL"
      run_sorted_sql "$pb_root/updates" "$playerbots_db" "playerbots SQL"
      run_sorted_sql "$pb_root/archive" "$playerbots_db" "playerbots SQL"
      echo "  Skipping playerbots create scripts (handled by automation)"
    fi

    # Execute base SQL files (common pattern)
    while IFS= read -r sql_file; do
      execute_sql_file_in_db "$world_db" "$sql_file" "base SQL"
    done < <(find "$module_dir_path/data/sql" -maxdepth 1 -type f -name "*.sql") || true
  fi

  # Look for SQL files in other common locations
  if [ -d "$module_dir_path/sql" ]; then
    while IFS= read -r sql_file; do
      execute_sql_file_in_db "$world_db" "$sql_file" "module SQL"
    done < <(find "$module_dir_path/sql" -type f -name "*.sql") || true
  fi

  return 0
}

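# Module SQL layout handled above, relative to each module checkout:
#   data/sql/world|auth|characters            legacy unsorted layout
#   data/sql/db-world|db-auth|db-characters   sorted (LC_ALL=C) layout
#   data/sql/playerbots/{base,custom,updates,archive}  playerbots module only
#   data/sql/*.sql and sql/                   fallbacks, applied to the world DB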
# Main function to execute SQL for all enabled modules
execute_module_sql_scripts() {
  # Install MariaDB client if not available
  which mariadb >/dev/null 2>&1 || {
    echo "Installing MariaDB client..."
    apk add --no-cache mariadb-client >/dev/null 2>&1 || echo "Warning: Could not install MariaDB client"
  }

  SQL_SUCCESS_LOG=()
  SQL_FAILURE_LOG=()

  local metadata_available=1
  if ! ensure_module_metadata; then
    metadata_available=0
    echo "  ⚠️ Module metadata unavailable; module repository SQL will be skipped."
  fi

  # Iterate modules from manifest metadata
  local key module_dir enabled
  local world_db="${DB_WORLD_NAME:-acore_world}"
  local auth_db="${DB_AUTH_NAME:-acore_auth}"
  local characters_db="${DB_CHARACTERS_NAME:-acore_characters}"
  local modules_root="${MODULES_ROOT:-/modules}"
  modules_root="${modules_root%/}"
  if [ "$metadata_available" = "1" ]; then
    echo "Discovered ${#MODULE_KEYS[@]} module definitions (MODULES_ROOT=${modules_root})"
    for key in "${MODULE_KEYS[@]}"; do
      module_dir="${MODULE_NAME[$key]:-}"
      [ -n "$module_dir" ] || continue

      local module_dir_path="$module_dir"
      case "$module_dir_path" in
        /*) ;;
        *)
          module_dir_path="${modules_root}/${module_dir_path#/}"
          ;;
      esac
      enabled="${MODULE_ENABLED[$key]:-0}"
      if [ "$enabled" != "1" ]; then
        continue
      fi

      if [ ! -d "$module_dir_path" ]; then
        echo "  ⚠️ Skipping ${module_dir} (enabled) because directory is missing at ${module_dir_path}"
        continue
      fi

      if [ "$module_dir" = "mod-pocket-portal" ] || [ "$(basename "$module_dir_path")" = "mod-pocket-portal" ]; then
        echo '⚠️ Skipping mod-pocket-portal SQL: module disabled until C++20 patch is applied.'
        continue
      fi

      module_sql_run_module "$key" "$module_dir_path" "$module_dir"
    done
  else
    echo "Discovered 0 module definitions (MODULES_ROOT=${modules_root})"
  fi

  run_custom_sql_group world "${world_db}" "custom world SQL"
  run_custom_sql_group auth "${auth_db}" "custom auth SQL"
  run_custom_sql_group characters "${characters_db}" "custom characters SQL"

  echo "SQL execution summary:"
  if [ ${#SQL_SUCCESS_LOG[@]} -gt 0 ]; then
    echo "  ✅ Applied:"
    for entry in "${SQL_SUCCESS_LOG[@]}"; do
      # Split on the literal '::' separator (IFS would treat it as two ':' chars)
      db="${entry%%::*}"
      file="${entry#*::}"
      echo "    • [$db] $file"
    done
  else
    echo "  ✅ Applied: none"
  fi
  if [ ${#SQL_FAILURE_LOG[@]} -gt 0 ]; then
    echo "  ❌ Failed:"
    for entry in "${SQL_FAILURE_LOG[@]}"; do
      db="${entry%%::*}"
      file="${entry#*::}"
      echo "    • [$db] $file"
    done
  else
    echo "  ❌ Failed: none"
  fi

  if [ ${#TEMP_SQL_FILES[@]} -gt 0 ]; then
    rm -f "${TEMP_SQL_FILES[@]}" 2>/dev/null || true
    TEMP_SQL_FILES=()
  fi

  return 0
}
621
scripts/bash/manage-modules.sh
Executable file
@@ -0,0 +1,621 @@
#!/bin/bash

# Manifest-driven module management. Stages repositories, applies module
# metadata hooks, manages configuration files, and flags rebuild requirements.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
MODULE_HELPER="$SCRIPT_DIR/modules.py"
DEFAULT_ENV_PATH="$PROJECT_ROOT/.env"
ENV_PATH="${MODULES_ENV_PATH:-$DEFAULT_ENV_PATH}"
TEMPLATE_FILE="$PROJECT_ROOT/.env.template"
source "$PROJECT_ROOT/scripts/bash/project_name.sh"

# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_PATH" "$TEMPLATE_FILE")"

BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
PLAYERBOTS_DB_UPDATE_LOGGED=0
info(){ printf '%b\n' "${BLUE}ℹ️ $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}✅ $*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}❌ $*${NC}"; exit 1; }

# Declare module metadata arrays globally at script level
declare -A MODULE_NAME MODULE_REPO MODULE_REF MODULE_TYPE MODULE_ENABLED MODULE_NEEDS_BUILD MODULE_BLOCKED MODULE_POST_INSTALL MODULE_REQUIRES MODULE_CONFIG_CLEANUP MODULE_NOTES MODULE_STATUS MODULE_BLOCK_REASON
declare -a MODULE_KEYS

read_env_value(){
  local key="$1" default="${2:-}" value="${!key:-}"
  if [ -n "$value" ]; then
    echo "$value"
    return
  fi
  if [ -f "$ENV_PATH" ]; then
    value="$(grep -E "^${key}=" "$ENV_PATH" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
    value="$(echo "$value" | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
    if [[ "$value" == \"*\" && "$value" == *\" ]]; then
      value="${value:1:-1}"
    elif [[ "$value" == \'*\' && "$value" == *\' ]]; then
      value="${value:1:-1}"
    fi
  fi
  if [ -z "${value:-}" ]; then
    value="$default"
  fi
  printf '%s\n' "${value}"
}

ensure_python(){
  if ! command -v python3 >/dev/null 2>&1; then
    err "python3 is required but not installed in PATH"
  fi
}

resolve_manifest_path(){
  if [ -n "${MODULES_MANIFEST_PATH:-}" ] && [ -f "${MODULES_MANIFEST_PATH}" ]; then
    echo "${MODULES_MANIFEST_PATH}"
    return
  fi
  local candidate
  candidate="$PROJECT_ROOT/config/module-manifest.json"
  if [ -f "$candidate" ]; then
    echo "$candidate"
    return
  fi
  candidate="$SCRIPT_DIR/../config/module-manifest.json"
  if [ -f "$candidate" ]; then
    echo "$candidate"
    return
  fi
  candidate="/tmp/config/module-manifest.json"
  if [ -f "$candidate" ]; then
    echo "$candidate"
    return
  fi
  err "Unable to locate module manifest (set MODULES_MANIFEST_PATH or ensure config/module-manifest.json exists)"
}

setup_git_config(){
  info "Configuring git identity"
  git config --global user.name "${GIT_USERNAME:-$DEFAULT_PROJECT_NAME}" >/dev/null 2>&1 || true
  git config --global user.email "${GIT_EMAIL:-noreply@azerothcore.org}" >/dev/null 2>&1 || true
}

generate_module_state(){
  mkdir -p "$STATE_DIR"
  if ! python3 "$MODULE_HELPER" --env-path "$ENV_PATH" --manifest "$MANIFEST_PATH" generate --output-dir "$STATE_DIR"; then
    err "Module manifest validation failed"
  fi
  local env_file="$STATE_DIR/modules.env"
  if [ ! -f "$env_file" ]; then
    err "modules.env not produced at $env_file"
  fi
  # shellcheck disable=SC1090
  source "$env_file"

  # Module arrays are already declared at script level
  if ! MODULE_SHELL_STATE="$(python3 "$MODULE_HELPER" --env-path "$ENV_PATH" --manifest "$MANIFEST_PATH" dump --format shell)"; then
    err "Unable to load manifest metadata"
  fi
  local eval_script
  # Remove the declare line since we already declared the arrays
  eval_script="$(echo "$MODULE_SHELL_STATE" | sed '/^declare -A /d')"
  eval "$eval_script"
  IFS=' ' read -r -a MODULES_COMPILE_LIST <<< "${MODULES_COMPILE:-}"
  if [ "${#MODULES_COMPILE_LIST[@]}" -eq 1 ] && [ -z "${MODULES_COMPILE_LIST[0]}" ]; then
    MODULES_COMPILE_LIST=()
  fi
}

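# Illustrative shape of the helper's `dump --format shell` output (actual keys
# and directory names come from config/module-manifest.json):
#   declare -A MODULE_ENABLED=([MODULE_PLAYERBOTS]="1" [MODULE_AUTOBALANCE]="0" ...)
#   declare -A MODULE_NAME=([MODULE_PLAYERBOTS]="mod-playerbots" ...)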
remove_disabled_modules(){
  for key in "${MODULE_KEYS[@]}"; do
    local dir
    dir="${MODULE_NAME[$key]:-}"
    [ -n "$dir" ] || continue
    if [ "${MODULE_ENABLED[$key]:-0}" != "1" ] && [ -d "$dir" ]; then
      info "Removing ${dir} (disabled)"
      rm -rf "$dir"
    fi
  done
}

run_post_install_hooks(){
  local key="$1"
  local dir="$2"
  local hooks_csv="${MODULE_POST_INSTALL[$key]:-}"

  # Skip if no hooks defined
  [ -n "$hooks_csv" ] || return 0

  IFS=',' read -r -a hooks <<< "$hooks_csv"
  local -a hook_search_paths=(
    "$SCRIPT_DIR/hooks"
    "/tmp/scripts/hooks"
    "/scripts/hooks"
  )

  for hook in "${hooks[@]}"; do
    [ -n "$hook" ] || continue

    # Trim whitespace
    hook="$(echo "$hook" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')"

    local hook_script=""
    local candidate
    for candidate in "${hook_search_paths[@]}"; do
      if [ -x "$candidate/$hook" ]; then
        hook_script="$candidate/$hook"
        break
      fi
    done

    if [ -n "$hook_script" ]; then
      info "Running post-install hook: $hook"

      # Set hook environment variables
      export MODULE_KEY="$key"
      export MODULE_DIR="$dir"
      export MODULE_NAME="${MODULE_NAME[$key]:-$(basename "$dir")}"
      export MODULES_ROOT="${MODULES_ROOT:-/modules}"
      export LUA_SCRIPTS_TARGET="/azerothcore/lua_scripts"

      # Execute the hook script
      if "$hook_script"; then
        ok "Hook '$hook' completed successfully"
      else
        local exit_code=$?
        case $exit_code in
          1) warn "Hook '$hook' completed with warnings" ;;
          *) err "Hook '$hook' failed with exit code $exit_code" ;;
        esac
      fi

      # Clean up hook-specific environment (preserve MODULE_NAME array and script-level MODULES_ROOT)
      unset MODULE_KEY MODULE_DIR LUA_SCRIPTS_TARGET
    else
      err "Hook script not found for ${hook} (searched: ${hook_search_paths[*]})"
    fi
  done
}

install_enabled_modules(){
  for key in "${MODULE_KEYS[@]}"; do
    if [ "${MODULE_ENABLED[$key]:-0}" != "1" ]; then
      continue
    fi
    local dir repo ref
    dir="${MODULE_NAME[$key]:-}"
    repo="${MODULE_REPO[$key]:-}"
    ref="${MODULE_REF[$key]:-}"
    if [ -z "$dir" ] || [ -z "$repo" ]; then
      warn "Missing repository metadata for $key"
      continue
    fi
    if [ -d "$dir/.git" ]; then
      info "$dir already present; skipping clone"
    elif [ -d "$dir" ]; then
      warn "$dir exists but is not a git repository; leaving in place"
    else
      info "Cloning ${dir} from ${repo}"
      if ! git clone "$repo" "$dir"; then
        err "Failed to clone $repo"
      fi
      if [ -n "$ref" ]; then
        (cd "$dir" && git checkout "$ref") || warn "Unable to checkout ref $ref for $dir"
      fi
    fi
    run_post_install_hooks "$key" "$dir"
  done
}


update_playerbots_db_info(){
  local target="$1"
  if [ ! -f "$target" ] && [ ! -L "$target" ]; then
    return 0
  fi

  local env_file="${ENV_PATH:-}"
  local resolved

  resolved="$(
    python3 - "$target" "${env_file}" <<'PY'
import os
import pathlib
import sys
import re

def load_env_file(path):
    data = {}
    if not path:
        return data
    candidate = pathlib.Path(path)
    if not candidate.is_file():
        return data
    for raw in candidate.read_text(encoding="utf-8", errors="ignore").splitlines():
        if not raw or raw.lstrip().startswith("#"):
            continue
        if "=" not in raw:
            continue
        key, val = raw.split("=", 1)
        key = key.strip()
        val = val.strip()
        if not key:
            continue
        if val and val[0] == val[-1] and val[0] in {"'", '"'}:
            val = val[1:-1]
        if "#" in val:
            # Strip inline comments
            val = val.split("#", 1)[0].rstrip()
        data[key] = val
    return data

def resolve_key(env_map, key, default=""):
    value = os.environ.get(key)
    if value:
        return value
    return env_map.get(key, default)

def parse_bool(value):
    if value is None:
        return None
    value = value.strip().lower()
    if value == "":
        return None
    if value in {"1", "true", "yes", "on"}:
        return True
    if value in {"0", "false", "no", "off"}:
        return False
    return None

def parse_int(value):
    if value is None:
        return None
    value = value.strip()
    if not value:
        return None
    if re.fullmatch(r"[+-]?\d+", value):
        return str(int(value))
    return None

def update_config(path_in, settings):
    if not (os.path.exists(path_in) or os.path.islink(path_in)):
        return False
    path = os.path.realpath(path_in)
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as fh:
            lines = fh.read().splitlines()
    except FileNotFoundError:
        lines = []

    changed = False
    pending = dict(settings)

    for idx, raw in enumerate(lines):
        stripped = raw.strip()
        for key, value in list(pending.items()):
            if re.match(rf"^\s*{re.escape(key)}\s*=", stripped):
                desired = f"{key} = {value}"
                if stripped != desired:
                    leading = raw[: len(raw) - len(raw.lstrip())]
                    trailing = ""
                    if "#" in raw:
                        before, comment = raw.split("#", 1)
                        if before.strip():
                            trailing = f" # {comment.strip()}"
                    lines[idx] = f"{leading}{desired}{trailing}"
                    changed = True
                pending.pop(key, None)
                break

    if pending:
        if lines and lines[-1] and not lines[-1].endswith("\n"):
            lines[-1] = lines[-1] + "\n"
        if lines and lines[-1].strip():
            lines.append("\n")
        for key, value in pending.items():
            lines.append(f"{key} = {value}\n")
        changed = True

    if changed:
        output = "\n".join(lines)
        if output and not output.endswith("\n"):
            output += "\n"
        with open(path, "w", encoding="utf-8") as fh:
            fh.write(output)

    return True

target_path, env_path = sys.argv[1:3]
env_map = load_env_file(env_path)

host = resolve_key(env_map, "CONTAINER_MYSQL") or resolve_key(env_map, "MYSQL_HOST", "ac-mysql") or "ac-mysql"
port = resolve_key(env_map, "MYSQL_PORT", "3306") or "3306"
user = resolve_key(env_map, "MYSQL_USER", "root") or "root"
password = resolve_key(env_map, "MYSQL_ROOT_PASSWORD", "")
database = resolve_key(env_map, "DB_PLAYERBOTS_NAME", "acore_playerbots") or "acore_playerbots"

value = ";".join([host, port, user, password, database])
settings = {"PlayerbotsDatabaseInfo": f'"{value}"'}

enabled_setting = parse_bool(resolve_key(env_map, "PLAYERBOT_ENABLED"))
if enabled_setting is not None:
    settings["AiPlayerbot.Enabled"] = "1" if enabled_setting else "0"

max_bots = parse_int(resolve_key(env_map, "PLAYERBOT_MAX_BOTS"))
min_bots = parse_int(resolve_key(env_map, "PLAYERBOT_MIN_BOTS"))

if max_bots and not min_bots:
    min_bots = max_bots

if min_bots:
    settings["AiPlayerbot.MinRandomBots"] = min_bots
if max_bots:
    settings["AiPlayerbot.MaxRandomBots"] = max_bots

update_config(target_path, settings)

print(value)
PY
  )" || return 0

  local host port
  host="${resolved%%;*}"
  port="${resolved#*;}"
  port="${port%%;*}"

  if [ "$PLAYERBOTS_DB_UPDATE_LOGGED" = "0" ]; then
    info "Updated PlayerbotsDatabaseInfo to use host ${host}:${port}"
    PLAYERBOTS_DB_UPDATE_LOGGED=1
  fi

  return 0
}

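# The rendered connection string follows the AzerothCore convention
# "host;port;user;password;database", e.g. (values illustrative):
#   PlayerbotsDatabaseInfo = "ac-mysql;3306;root;secret;acore_playerbots"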
manage_configuration_files(){
  echo 'Managing configuration files...'

  local env_target="${MODULES_ENV_TARGET_DIR:-}"
  if [ -z "$env_target" ]; then
    if [ "${MODULES_LOCAL_RUN:-0}" = "1" ]; then
      env_target="${MODULES_ROOT}/env/dist/etc"
    else
      env_target="/azerothcore/env/dist/etc"
    fi
  fi

  mkdir -p "$env_target"

  local key patterns_csv enabled pattern
  for key in "${MODULE_KEYS[@]}"; do
    enabled="${MODULE_ENABLED[$key]:-0}"
    patterns_csv="${MODULE_CONFIG_CLEANUP[$key]:-}"
    IFS=',' read -r -a patterns <<< "$patterns_csv"
    if [ "${#patterns[@]}" -eq 1 ] && [ -z "${patterns[0]}" ]; then
      unset patterns
      continue
    fi
    for pattern in "${patterns[@]}"; do
      [ -n "$pattern" ] || continue
      if [ "$enabled" != "1" ]; then
        rm -f "$env_target"/$pattern 2>/dev/null || true
      fi
    done
    unset patterns
  done

  local modules_conf_dir="${env_target%/}/modules"
  mkdir -p "$modules_conf_dir"
  rm -rf "${modules_conf_dir}.backup"
  rm -f "$modules_conf_dir"/*.conf "$modules_conf_dir"/*.conf.dist 2>/dev/null || true

  local module_dir
  for key in "${MODULE_KEYS[@]}"; do
    module_dir="${MODULE_NAME[$key]:-}"
    [ -n "$module_dir" ] || continue
    [ -d "$module_dir" ] || continue
    while IFS= read -r conf_file; do
      [ -n "$conf_file" ] || continue
      base_name="$(basename "$conf_file")"
      # Ensure previous copies in root config are removed to keep modules/ canonical
      main_conf_path="${env_target}/${base_name}"
      if [ -f "$main_conf_path" ]; then
        rm -f "$main_conf_path"
      fi
      if [[ "$base_name" == *.conf.dist ]]; then
        root_conf="${env_target}/${base_name%.dist}"
        if [ -f "$root_conf" ]; then
          rm -f "$root_conf"
        fi
      fi

      dest_path="${modules_conf_dir}/${base_name}"
      cp "$conf_file" "$dest_path"
      if [[ "$base_name" == *.conf.dist ]]; then
        dest_conf="${modules_conf_dir}/${base_name%.dist}"
        if [ ! -f "$dest_conf" ]; then
          cp "$conf_file" "$dest_conf"
        fi
      fi
    done < <(find "$module_dir" -path "*/conf/*" -type f \( -name "*.conf" -o -name "*.conf.dist" \) 2>/dev/null)
  done

  local playerbots_enabled="${MODULE_PLAYERBOTS:-0}"
  if [ "${MODULE_ENABLED[MODULE_PLAYERBOTS]:-0}" = "1" ]; then
    playerbots_enabled=1
  fi

  if [ "$playerbots_enabled" = "1" ]; then
    update_playerbots_db_info "$modules_conf_dir/playerbots.conf"
    update_playerbots_db_info "$modules_conf_dir/playerbots.conf.dist"
  fi

  if [ "${MODULE_AUTOBALANCE:-0}" = "1" ] && [ -f "$env_target/AutoBalance.conf.dist" ]; then
    sed -i 's/^AutoBalance\.LevelScaling\.EndGameBoost.*/AutoBalance.LevelScaling.EndGameBoost = false # disabled pending proper implementation/' \
      "$env_target/AutoBalance.conf.dist" || true
  fi
}

load_sql_helper(){
  local helper_paths=(
    "/scripts/bash/manage-modules-sql.sh"
    "/tmp/scripts/bash/manage-modules-sql.sh"
  )

  if [ "${MODULES_LOCAL_RUN:-0}" = "1" ]; then
    helper_paths+=("$SCRIPT_DIR/manage-modules-sql.sh")
  fi

  local helper_path=""
  for helper_path in "${helper_paths[@]}"; do
    if [ -f "$helper_path" ]; then
      # shellcheck disable=SC1090
      . "$helper_path"
      SQL_HELPER_PATH="$helper_path"
      return 0
    fi
  done

  err "SQL helper not found; expected manage-modules-sql.sh to be available"
}

execute_module_sql(){
  SQL_EXECUTION_FAILED=0
  if declare -f execute_module_sql_scripts >/dev/null 2>&1; then
    echo 'Executing module SQL scripts...'
    if execute_module_sql_scripts; then
      echo 'SQL execution complete.'
    else
      echo '⚠️ Module SQL scripts reported errors'
      SQL_EXECUTION_FAILED=1
    fi
  else
    info "SQL helper did not expose execute_module_sql_scripts; skipping module SQL execution"
  fi
}

track_module_state(){
  echo 'Checking for module changes that require rebuild...'

  local modules_state_file
  if [ "${MODULES_LOCAL_RUN:-0}" = "1" ]; then
    modules_state_file="./.modules_state"
  else
    modules_state_file="/modules/.modules_state"
  fi

  local current_state=""
  for key in "${MODULE_KEYS[@]}"; do
    current_state+="${key}=${MODULE_ENABLED[$key]:-0}|"
  done

  local previous_state=""
  if [ -f "$modules_state_file" ]; then
    previous_state="$(cat "$modules_state_file")"
  fi

  local rebuild_required=0
  if [ "$current_state" != "$previous_state" ]; then
    if [ -n "$previous_state" ]; then
      echo "🔄 Module configuration has changed - rebuild required"
    else
      echo "📝 First run - establishing module state baseline"
    fi
    rebuild_required=1
  else
    echo "✅ No module changes detected"
  fi

  echo "$current_state" > "$modules_state_file"

  if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
    echo "🔧 Detected ${#MODULES_COMPILE_LIST[@]} enabled C++ modules requiring compilation:"
    for mod in "${MODULES_COMPILE_LIST[@]}"; do
      echo "  • $mod"
    done
  else
    echo "✅ No C++ modules enabled - pre-built containers can be used"
  fi

  local rebuild_sentinel
  if [ "${MODULES_LOCAL_RUN:-0}" = "1" ]; then
    if [ -n "${LOCAL_STORAGE_SENTINEL_PATH:-}" ]; then
      rebuild_sentinel="${LOCAL_STORAGE_SENTINEL_PATH}"
    else
      rebuild_sentinel="./.requires_rebuild"
    fi
  else
    rebuild_sentinel="/modules/.requires_rebuild"
  fi

  local host_rebuild_sentinel=""
  if [ -n "${MODULES_HOST_DIR:-}" ]; then
    host_rebuild_sentinel="${MODULES_HOST_DIR%/}/.requires_rebuild"
  fi

  if [ "$rebuild_required" = "1" ] && [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
    printf '%s\n' "${MODULES_COMPILE_LIST[@]}" > "$rebuild_sentinel"
    if [ -n "$host_rebuild_sentinel" ]; then
      printf '%s\n' "${MODULES_COMPILE_LIST[@]}" > "$host_rebuild_sentinel" 2>/dev/null || true
    fi
    echo "🚨 Module changes detected; run ./scripts/bash/rebuild-with-modules.sh to rebuild source images."
  else
    rm -f "$rebuild_sentinel" 2>/dev/null || true
    if [ -n "$host_rebuild_sentinel" ]; then
      rm -f "$host_rebuild_sentinel" 2>/dev/null || true
    fi
  fi

  if [ "${MODULES_LOCAL_RUN:-0}" = "1" ]; then
    local target_dir="${MODULES_HOST_DIR:-$(pwd)}"
    local desired_user
    desired_user="$(id -u):$(id -g)"
    if [ -d "$target_dir" ]; then
      chown -R "$desired_user" "$target_dir" >/dev/null 2>&1 || true
      chmod -R ug+rwX "$target_dir" >/dev/null 2>&1 || true
    fi
  fi
}

main(){
  ensure_python

  if [ "${MODULES_LOCAL_RUN:-0}" != "1" ]; then
    cd /modules || err "Modules directory /modules not found"
  fi
  MODULES_ROOT="$(pwd)"

  MANIFEST_PATH="$(resolve_manifest_path)"
  STATE_DIR="${MODULES_HOST_DIR:-$MODULES_ROOT}"

  setup_git_config
  generate_module_state
  remove_disabled_modules
  install_enabled_modules
  manage_configuration_files
  info "SQL execution gate: MODULES_SKIP_SQL=${MODULES_SKIP_SQL:-0}"
  if [ "${MODULES_SKIP_SQL:-0}" = "1" ]; then
    info "Skipping module SQL execution (MODULES_SKIP_SQL=1)"
  else
    info "Initiating module SQL helper"
    load_sql_helper
    info "SQL helper loaded from ${SQL_HELPER_PATH:-unknown}"
    execute_module_sql
  fi
  track_module_state

  if [ "${SQL_EXECUTION_FAILED:-0}" = "1" ]; then
    warn "Module SQL execution reported issues; review logs above."
  fi

  echo 'Module management complete.'

  if [ "${MODULES_DEBUG_KEEPALIVE:-0}" = "1" ]; then
    tail -f /dev/null
  fi
}

main "$@"
366
scripts/bash/migrate-stack.sh
Executable file
@@ -0,0 +1,366 @@
#!/bin/bash

# Utility to migrate module images (and optionally storage) to a remote host.
# Assumes module images have already been rebuilt locally.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
ENV_FILE="$PROJECT_ROOT/.env"
TEMPLATE_FILE="$PROJECT_ROOT/.env.template"
source "$PROJECT_ROOT/scripts/bash/project_name.sh"

# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"

read_env_value(){
  local key="$1" default="$2" value=""
  if [ -f "$ENV_FILE" ]; then
    value="$(grep -E "^${key}=" "$ENV_FILE" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ]; then
    value="${!key:-}"
  fi
  if [ -z "$value" ]; then
    value="$default"
  fi
  echo "$value"
}

resolve_project_name(){
  local raw_name
  raw_name="$(read_env_value COMPOSE_PROJECT_NAME "$DEFAULT_PROJECT_NAME")"
  project_name::sanitize "$raw_name"
}

resolve_project_image(){
  local tag="$1"
  local project_name
  project_name="$(resolve_project_name)"
  echo "${project_name}:${tag}"
}

ensure_host_writable(){
  local path="$1"
  [ -n "$path" ] || return 0
  if [ ! -d "$path" ]; then
    mkdir -p "$path" 2>/dev/null || true
  fi
  if [ -d "$path" ]; then
    local uid gid
    uid="$(id -u)"
    gid="$(id -g)"
    if ! chown -R "$uid":"$gid" "$path" 2>/dev/null; then
      if command -v docker >/dev/null 2>&1; then
        local helper_image
        helper_image="$(read_env_value ALPINE_IMAGE "alpine:latest")"
        docker run --rm \
          -u 0:0 \
          -v "$path":/workspace \
          "$helper_image" \
          sh -c "chown -R ${uid}:${gid} /workspace" >/dev/null 2>&1 || true
      fi
    fi
    chmod -R u+rwX "$path" 2>/dev/null || true
  fi
}

usage(){
|
||||
cat <<'EOF_HELP'
|
||||
Usage: $(basename "$0") --host HOST --user USER [options]
|
||||
|
||||
Options:
|
||||
--host HOST Remote hostname or IP address (required)
|
||||
--user USER SSH username on remote host (required)
|
||||
--port PORT SSH port (default: 22)
|
||||
--identity PATH SSH private key (passed to scp/ssh)
|
||||
--project-dir DIR Remote project directory (default: ~/<project-name>)
|
||||
--tarball PATH Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
|
||||
--storage PATH Remote storage directory (default: <project-dir>/storage)
|
||||
--skip-storage Do not sync the storage directory
|
||||
--yes, -y Auto-confirm prompts (for existing deployments)
|
||||
--help Show this help
|
||||
EOF_HELP
|
||||
}
|
||||
|
||||
HOST=""
|
||||
USER=""
|
||||
PORT=22
|
||||
IDENTITY=""
|
||||
PROJECT_DIR=""
|
||||
TARBALL=""
|
||||
REMOTE_STORAGE=""
|
||||
SKIP_STORAGE=0
|
||||
ASSUME_YES=0
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--host) HOST="$2"; shift 2;;
|
||||
--user) USER="$2"; shift 2;;
|
||||
--port) PORT="$2"; shift 2;;
|
||||
--identity) IDENTITY="$2"; shift 2;;
|
||||
--project-dir) PROJECT_DIR="$2"; shift 2;;
|
||||
--tarball) TARBALL="$2"; shift 2;;
|
||||
--storage) REMOTE_STORAGE="$2"; shift 2;;
|
||||
--skip-storage) SKIP_STORAGE=1; shift;;
|
||||
--yes|-y) ASSUME_YES=1; shift;;
|
||||
--help|-h) usage; exit 0;;
|
||||
*) echo "Unknown option: $1" >&2; usage; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z "$HOST" || -z "$USER" ]]; then
|
||||
echo "--host and --user are required" >&2
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
expand_remote_path(){
|
||||
local path="$1"
|
||||
case "$path" in
|
||||
"~") echo "/home/${USER}";;
|
||||
"~/"*) echo "/home/${USER}/${path#\~/}";;
|
||||
*) echo "$path";;
|
||||
esac
|
||||
}
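# Example: with --user deploy, "~" expands to "/home/deploy" and "~/realm" to
# "/home/deploy/realm"; absolute paths pass through unchanged. This assumes the
# remote home directory lives under /home.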
PROJECT_DIR="${PROJECT_DIR:-/home/${USER}/$(resolve_project_name)}"
PROJECT_DIR="$(expand_remote_path "$PROJECT_DIR")"
REMOTE_STORAGE="${REMOTE_STORAGE:-${PROJECT_DIR}/storage}"
REMOTE_STORAGE="$(expand_remote_path "$REMOTE_STORAGE")"
LOCAL_STORAGE_ROOT="${STORAGE_PATH_LOCAL:-}"
if [ -z "$LOCAL_STORAGE_ROOT" ]; then
  LOCAL_STORAGE_ROOT="$(read_env_value STORAGE_PATH_LOCAL "./local-storage")"
fi
LOCAL_STORAGE_ROOT="${LOCAL_STORAGE_ROOT%/}"
[ -z "$LOCAL_STORAGE_ROOT" ] && LOCAL_STORAGE_ROOT="."
ensure_host_writable "$LOCAL_STORAGE_ROOT"
TARBALL="${TARBALL:-${LOCAL_STORAGE_ROOT}/images/acore-modules-images.tar}"
ensure_host_writable "$(dirname "$TARBALL")"

SCP_OPTS=(-P "$PORT")
SSH_OPTS=(-p "$PORT")
if [[ -n "$IDENTITY" ]]; then
  SCP_OPTS+=(-i "$IDENTITY")
  SSH_OPTS+=(-i "$IDENTITY")
fi

run_ssh(){
  ssh "${SSH_OPTS[@]}" "$USER@$HOST" "$@"
}

run_scp(){
  scp "${SCP_OPTS[@]}" "$@"
}
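# Usage sketch: both wrappers inherit the shared port/identity options, e.g.
#   run_ssh "docker info >/dev/null 2>&1"     # ssh -p "$PORT" [-i KEY] user@host ...
#   run_scp "$TARBALL" "$USER@$HOST:/tmp/"    # scp -P "$PORT" [-i KEY] ...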
validate_remote_environment(){
  echo "⋅ Validating remote environment..."

  # 1. Check Docker daemon is running
  echo " • Checking Docker daemon..."
  if ! run_ssh "docker info >/dev/null 2>&1"; then
    echo "❌ Docker daemon not running or not accessible on remote host"
    echo " Please ensure Docker is installed and running on $HOST"
    exit 1
  fi

  # 2. Check disk space (need at least 5GB for images + storage)
  echo " • Checking disk space..."
  local available_gb
  available_gb=$(run_ssh "df /tmp | tail -1 | awk '{print int(\$4/1024/1024)}'")
  if [ "$available_gb" -lt 5 ]; then
    echo "❌ Insufficient disk space on remote host"
    echo " Available: ${available_gb}GB, Required: 5GB minimum"
    echo " Please free up disk space on $HOST"
    exit 1
  fi
  echo " Available: ${available_gb}GB ✓"

  # 3. Check/create project directory with proper permissions
  echo " • Validating project directory permissions..."
  if ! run_ssh "mkdir -p '$PROJECT_DIR' && test -w '$PROJECT_DIR'"; then
    echo "❌ Cannot create or write to project directory: $PROJECT_DIR"
    echo " Please ensure $USER has write permissions to $PROJECT_DIR"
    exit 1
  fi

  # 4. Check for existing deployment and warn if running
  echo " • Checking for existing deployment..."
  local running_containers
  running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l")
  if [ "$running_containers" -gt 0 ]; then
    echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
    echo " Migration will overwrite the existing deployment"
    if [ "$ASSUME_YES" != "1" ]; then
      read -r -p " Continue with migration? [y/N]: " reply
      case "$reply" in
        [Yy]*) echo " Proceeding with migration..." ;;
        *) echo " Migration cancelled."; exit 1 ;;
      esac
    fi
  fi

  # 5. Ensure remote repository is up to date
  echo " • Ensuring remote repository is current..."
  setup_remote_repository

  echo "✅ Remote environment validation complete"
}

setup_remote_repository(){
  # Check if git is available
  if ! run_ssh "command -v git >/dev/null 2>&1"; then
    echo "❌ Git not found on remote host. Please install git."
    exit 1
  fi

  # Check if project directory has a git repository
  if run_ssh "test -d '$PROJECT_DIR/.git'"; then
    echo " • Updating existing repository..."
    # Fetch latest changes and reset to match origin
    run_ssh "cd '$PROJECT_DIR' && git fetch origin && git reset --hard origin/\$(git rev-parse --abbrev-ref HEAD) && git clean -fd"
  else
    echo " • Cloning repository..."
    # Determine the git repository URL from the local repo
    local repo_url
    repo_url=$(git config --get remote.origin.url 2>/dev/null || echo "")
    if [ -z "$repo_url" ]; then
      echo "❌ Cannot determine repository URL. Please ensure the local directory is a git repository."
      exit 1
    fi

    # Clone the repository to the remote host
    run_ssh "rm -rf '$PROJECT_DIR' && git clone '$repo_url' '$PROJECT_DIR'"
  fi

  # Verify essential scripts exist
  if ! run_ssh "test -f '$PROJECT_DIR/deploy.sh' && test -x '$PROJECT_DIR/deploy.sh'"; then
    echo "❌ deploy.sh not found or not executable in remote repository"
    exit 1
  fi

  # Create local-storage directory structure with proper ownership
  run_ssh "mkdir -p '$PROJECT_DIR/local-storage/modules' && chown -R $USER: '$PROJECT_DIR/local-storage'"

  echo " • Repository synchronized ✓"
}

cleanup_stale_docker_resources(){
  echo "⋅ Cleaning up stale Docker resources on remote..."

  # Get project name to target our containers/images specifically
  local project_name
  project_name="$(resolve_project_name)"

  # Stop and remove old containers
  echo " • Removing old containers..."
  run_ssh "docker ps -a --filter 'name=ac-' --format '{{.Names}}' | xargs -r docker rm -f 2>/dev/null || true"

  # Remove old project images to force a fresh load
  echo " • Removing old project images..."
  local images_to_remove=(
    "${project_name}:authserver-modules-latest"
    "${project_name}:worldserver-modules-latest"
    "${project_name}:authserver-playerbots"
    "${project_name}:worldserver-playerbots"
    "${project_name}:db-import-playerbots"
    "${project_name}:client-data-playerbots"
  )
  for img in "${images_to_remove[@]}"; do
    run_ssh "docker rmi '$img' 2>/dev/null || true"
  done

  # Prune dangling images and build cache
  echo " • Pruning dangling images and build cache..."
  run_ssh "docker image prune -f >/dev/null 2>&1 || true"
  run_ssh "docker builder prune -f >/dev/null 2>&1 || true"

  echo "✅ Docker cleanup complete"
}

validate_remote_environment

echo "⋅ Exporting module images to $TARBALL"
# Check which images are available and collect them
IMAGES_TO_SAVE=()

project_auth_modules="$(resolve_project_image "authserver-modules-latest")"
project_world_modules="$(resolve_project_image "worldserver-modules-latest")"
project_auth_playerbots="$(resolve_project_image "authserver-playerbots")"
project_world_playerbots="$(resolve_project_image "worldserver-playerbots")"
project_db_import="$(resolve_project_image "db-import-playerbots")"
project_client_data="$(resolve_project_image "client-data-playerbots")"

for image in \
  "$project_auth_modules" \
  "$project_world_modules" \
  "$project_auth_playerbots" \
  "$project_world_playerbots" \
  "$project_db_import" \
  "$project_client_data"; do
  if docker image inspect "$image" >/dev/null 2>&1; then
    IMAGES_TO_SAVE+=("$image")
  fi
done

if [ ${#IMAGES_TO_SAVE[@]} -eq 0 ]; then
  echo "❌ No AzerothCore images found to migrate. Run './build.sh' first or pull standard images."
  exit 1
fi

echo "⋅ Found ${#IMAGES_TO_SAVE[@]} images to migrate:"
printf ' • %s\n' "${IMAGES_TO_SAVE[@]}"
docker image save "${IMAGES_TO_SAVE[@]}" > "$TARBALL"

if [[ $SKIP_STORAGE -eq 0 ]]; then
  if [[ -d storage ]]; then
    echo "⋅ Syncing storage to remote"
    run_ssh "mkdir -p '$REMOTE_STORAGE'"
    while IFS= read -r -d '' entry; do
      base_name="$(basename "$entry")"
      if [[ "$base_name" = modules ]]; then
        continue
      fi
      if [ -L "$entry" ]; then
        target_path="$(readlink -f "$entry")"
        run_scp "$target_path" "$USER@$HOST:$REMOTE_STORAGE/$base_name"
      else
        run_scp -r "$entry" "$USER@$HOST:$REMOTE_STORAGE/"
      fi
    done < <(find storage -mindepth 1 -maxdepth 1 -print0)
  else
    echo "⋅ Skipping storage sync (storage/ missing)"
  fi
else
  echo "⋅ Skipping storage sync"
fi

if [[ $SKIP_STORAGE -eq 0 ]]; then
  LOCAL_MODULES_DIR="${LOCAL_STORAGE_ROOT}/modules"
  if [[ -d "$LOCAL_MODULES_DIR" ]]; then
    echo "⋅ Syncing module staging to remote"
    run_ssh "rm -rf '$REMOTE_STORAGE/modules' && mkdir -p '$REMOTE_STORAGE/modules'"
    modules_tar=$(mktemp)
    tar -cf "$modules_tar" -C "$LOCAL_MODULES_DIR" .
    run_scp "$modules_tar" "$USER@$HOST:/tmp/acore-modules.tar"
    rm -f "$modules_tar"
    run_ssh "tar -xf /tmp/acore-modules.tar -C '$REMOTE_STORAGE/modules' && rm /tmp/acore-modules.tar"
  fi
fi

# Clean up stale Docker resources before loading new images
cleanup_stale_docker_resources

echo "⋅ Loading images on remote"
run_scp "$TARBALL" "$USER@$HOST:/tmp/acore-modules-images.tar"
run_ssh "docker load < /tmp/acore-modules-images.tar && rm /tmp/acore-modules-images.tar"

if [[ -f .env ]]; then
  echo "⋅ Uploading .env"
  run_scp .env "$USER@$HOST:$PROJECT_DIR/.env"
fi

echo "⋅ Remote preparation complete"
echo "Run on the remote host to deploy:"
echo "  cd '$PROJECT_DIR' && ./deploy.sh --no-watch"
96
scripts/bash/mysql-entrypoint.sh
Executable file
@@ -0,0 +1,96 @@
#!/bin/bash
# Wrapper entrypoint to adapt MySQL container UID/GID to match host user expectations.
set -euo pipefail

ORIGINAL_ENTRYPOINT="${MYSQL_ORIGINAL_ENTRYPOINT:-docker-entrypoint.sh}"
if ! command -v "$ORIGINAL_ENTRYPOINT" >/dev/null 2>&1; then
  # Fallback to common install path
  if [ -x /usr/local/bin/docker-entrypoint.sh ]; then
    ORIGINAL_ENTRYPOINT=/usr/local/bin/docker-entrypoint.sh
  fi
fi

TARGET_SPEC="${MYSQL_RUNTIME_USER:-${CONTAINER_USER:-}}"
if [ -z "${TARGET_SPEC:-}" ] || [ "${TARGET_SPEC}" = "0:0" ]; then
  exec "$ORIGINAL_ENTRYPOINT" "$@"
fi

if [[ "$TARGET_SPEC" != *:* ]]; then
  echo "mysql-entrypoint: Expected MYSQL_RUNTIME_USER/CONTAINER_USER in uid:gid form, got '${TARGET_SPEC}'" >&2
  exit 1
fi

IFS=':' read -r TARGET_UID TARGET_GID <<< "$TARGET_SPEC"
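# e.g. MYSQL_RUNTIME_USER=1000:1000 splits into TARGET_UID=1000 and TARGET_GID=1000.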
if ! [[ "$TARGET_UID" =~ ^[0-9]+$ ]] || ! [[ "$TARGET_GID" =~ ^[0-9]+$ ]]; then
  echo "mysql-entrypoint: UID/GID must be numeric (received uid='${TARGET_UID}' gid='${TARGET_GID}')" >&2
  exit 1
fi

if ! id mysql >/dev/null 2>&1; then
  echo "mysql-entrypoint: mysql user not found in container" >&2
  exit 1
fi

current_uid="$(id -u mysql)"
current_gid="$(id -g mysql)"

# Adjust group if needed
target_group_name=""
if [ "$current_gid" != "$TARGET_GID" ]; then
  if groupmod -g "$TARGET_GID" mysql 2>/dev/null; then
    target_group_name="mysql"
  else
    existing_group="$(getent group "$TARGET_GID" | cut -d: -f1 || true)"
    if [ -z "$existing_group" ]; then
      existing_group="mysql-host"
      if ! getent group "$existing_group" >/dev/null 2>&1; then
        groupadd -g "$TARGET_GID" "$existing_group"
      fi
    fi
    usermod -g "$existing_group" mysql
    target_group_name="$existing_group"
  fi
else
  target_group_name="$(getent group mysql | cut -d: -f1)"
fi

if [ -z "$target_group_name" ]; then
  target_group_name="$(getent group "$TARGET_GID" | cut -d: -f1 || true)"
fi

# Adjust user UID if needed
if [ "$current_uid" != "$TARGET_UID" ]; then
  if getent passwd "$TARGET_UID" >/dev/null 2>&1 && [ "$(getent passwd "$TARGET_UID" | cut -d: -f1)" != "mysql" ]; then
    echo "mysql-entrypoint: UID ${TARGET_UID} already in use by $(getent passwd "$TARGET_UID" | cut -d: -f1)." >&2
    echo "mysql-entrypoint: Please choose a different CONTAINER_USER or adjust the image." >&2
    exit 1
  fi
  usermod -u "$TARGET_UID" mysql
fi

# Ensure group lookup after potential changes
target_group_name="$(getent group "$TARGET_GID" | cut -d: -f1 || echo "$target_group_name")"

# Update ownership on relevant directories if they exist
for path in /var/lib/mysql-runtime /var/lib/mysql /var/lib/mysql-persistent /backups; do
  if [ -e "$path" ]; then
    chown -R mysql:"$target_group_name" "$path"
  fi
done

disable_binlog="${MYSQL_DISABLE_BINLOG:-}"
if [ "${disable_binlog}" = "1" ]; then
  add_skip_flag=1
  for arg in "$@"; do
    if [ "$arg" = "--skip-log-bin" ] || [[ "$arg" == --log-bin* ]]; then
      add_skip_flag=0
      break
    fi
  done
  if [ "$add_skip_flag" -eq 1 ]; then
    set -- "$@" --skip-log-bin
  fi
fi

exec "$ORIGINAL_ENTRYPOINT" "$@"
49
scripts/bash/project_name.sh
Normal file
@@ -0,0 +1,49 @@
#!/bin/bash

project_name::extract(){
  local file="$1"
  if [ -n "$file" ] && [ -f "$file" ]; then
    local line value
    line="$(grep -E '^COMPOSE_PROJECT_NAME=' "$file" 2>/dev/null | tail -n1)"
    if [ -n "$line" ]; then
      value="${line#*=}"
      value="${value%$'\r'}"
      value="$(printf '%s\n' "$value" | awk -F'#' '{print $1}' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')"
      value="${value%\"}"; value="${value#\"}"
      value="${value%\'}"; value="${value#\'}"
      printf '%s\n' "$value"
    fi
  fi
}

project_name::resolve(){
  local env_file="$1" template_file="$2" value=""
  value="$(project_name::extract "$env_file")"
  if [ -z "$value" ] && [ -n "$template_file" ]; then
    value="$(project_name::extract "$template_file")"
  fi
  if [ -z "$value" ] && [ -n "${COMPOSE_PROJECT_NAME:-}" ]; then
    value="${COMPOSE_PROJECT_NAME}"
  fi
  if [ -z "$value" ]; then
    echo "Error: COMPOSE_PROJECT_NAME not defined in $env_file, $template_file, or environment." >&2
    exit 1
  fi
  printf '%s\n' "$value"
}

project_name::sanitize(){
  local raw="$1"
  local sanitized
  sanitized="$(echo "$raw" | tr '[:upper:]' '[:lower:]')"
  sanitized="${sanitized// /-}"
  sanitized="$(echo "$sanitized" | tr -cd 'a-z0-9_-')"
  if [[ -z "$sanitized" ]]; then
    echo "Error: COMPOSE_PROJECT_NAME '$raw' is invalid after sanitization." >&2
    exit 1
  fi
  if [[ ! "$sanitized" =~ ^[a-z0-9] ]]; then
    sanitized="ac${sanitized}"
  fi
  printf '%s\n' "$sanitized"
}
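# Illustrative sanitization (hypothetical inputs): "My Realm!" -> "my-realm";
# a value starting with a non-alphanumeric character gains an "ac" prefix,
# e.g. "-dev" -> "ac-dev".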
484
scripts/bash/rebuild-with-modules.sh
Executable file
@@ -0,0 +1,484 @@
#!/bin/bash

# azerothcore-rm helper to rebuild AzerothCore from source with enabled modules.

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# The script lives in scripts/bash/, so the project root is two levels up.
PROJECT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="$PROJECT_DIR/.env"
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
source "$PROJECT_DIR/scripts/bash/project_name.sh"

# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"

BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

show_rebuild_step(){
  local step="$1" total="$2" message="$3"
  echo -e "${YELLOW}🔧 Step ${step}/${total}: ${message}...${NC}"
}

usage(){
  cat <<EOF
Usage: $(basename "$0") [options]

Options:
  --yes, -y      Skip interactive confirmation prompts
  --source PATH  Override MODULES_REBUILD_SOURCE_PATH from .env
  --skip-stop    Do not run 'docker compose down' in the source tree before rebuilding
  -h, --help     Show this help
EOF
}

read_env(){
  local key="$1" default="$2" env_path="$ENV_FILE" value=""
  if [ -f "$env_path" ]; then
    value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ]; then
    value="${!key:-}"
  fi
  if [ -z "$value" ]; then
    value="$default"
  fi
  echo "$value"
}

update_env_value(){
  local key="$1" value="$2" env_file="$ENV_FILE"
  [ -n "$env_file" ] || return 0
  if [ ! -f "$env_file" ]; then
    printf '%s=%s\n' "$key" "$value" >> "$env_file"
    return 0
  fi
  if grep -q "^${key}=" "$env_file"; then
    sed -i "s|^${key}=.*|${key}=${value}|" "$env_file"
  else
    printf '\n%s=%s\n' "$key" "$value" >> "$env_file"
  fi
}

find_image_with_suffix(){
  local suffix="$1"
  docker images --format '{{.Repository}}:{{.Tag}}' | grep -E ":${suffix}$" | head -n1
}
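# e.g. find_image_with_suffix "worldserver-modules-latest" matches a local image
# such as "myrealm:worldserver-modules-latest" (hypothetical project name).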
cleanup_legacy_tags(){
  local suffix="$1" keep_tag="$2"
  docker images --format '{{.Repository}}:{{.Tag}}' | grep -E ":${suffix}$" | while read -r tag; do
    [ "$tag" = "$keep_tag" ] && continue
    docker rmi "$tag" >/dev/null 2>&1 || true
  done
}

ensure_project_image_tag(){
  local suffix="$1" target="$2"
  if [ -n "$target" ] && docker image inspect "$target" >/dev/null 2>&1; then
    cleanup_legacy_tags "$suffix" "$target"
    echo "$target"
    return 0
  fi
  local source
  source="$(find_image_with_suffix "$suffix")"
  if [ -z "$source" ]; then
    echo ""
    return 1
  fi
  if docker tag "$source" "$target" >/dev/null 2>&1; then
    if [ "$source" != "$target" ]; then
      docker rmi "$source" >/dev/null 2>&1 || true
    fi
    cleanup_legacy_tags "$suffix" "$target"
    echo "$target"
    return 0
  fi
  echo ""
  return 1
}

resolve_project_name(){
  local raw_name
  raw_name="$(read_env COMPOSE_PROJECT_NAME "$DEFAULT_PROJECT_NAME")"
  project_name::sanitize "$raw_name"
}

resolve_project_image(){
  local tag="$1"
  local project_name
  project_name="$(resolve_project_name)"
  echo "${project_name}:${tag}"
}

default_source_path(){
  local require_playerbot
  require_playerbot="$(modules_require_playerbot_source)"
  local local_root
  local_root="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  local_root="${local_root%/}"
  if [[ -z "$local_root" ]]; then
    local_root="."
  fi
  if [ "$require_playerbot" = "1" ]; then
    echo "${local_root}/source/azerothcore-playerbots"
  else
    echo "${local_root}/source/azerothcore"
  fi
}
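# e.g. with STORAGE_PATH_LOCAL=./local-storage this resolves to
# ./local-storage/source/azerothcore-playerbots when a playerbot-capable source
# tree is required, and ./local-storage/source/azerothcore otherwise.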
confirm(){
  local prompt="$1" default="$2" reply
  if [ "$ASSUME_YES" = "1" ]; then
    return 0
  fi
  while true; do
    if [ "$default" = "y" ]; then
      read -r -p "$prompt [Y/n]: " reply
      reply="${reply:-y}"
    else
      read -r -p "$prompt [y/N]: " reply
      reply="${reply:-n}"
    fi
    case "$reply" in
      [Yy]*) return 0 ;;
      [Nn]*) return 1 ;;
    esac
  done
}

ASSUME_YES=0
SOURCE_OVERRIDE=""
SKIP_STOP=0

MODULE_HELPER="$PROJECT_DIR/scripts/python/modules.py"
MODULE_STATE_DIR=""
declare -a MODULES_COMPILE_LIST=()

resolve_local_storage_path(){
  local path
  path="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
  if [[ "$path" != /* ]]; then
    path="${path#./}"
    path="$PROJECT_DIR/$path"
  fi
  echo "${path%/}"
}

ensure_module_state(){
  if [ -n "$MODULE_STATE_DIR" ]; then
    return 0
  fi
  local storage_root
  storage_root="$(resolve_local_storage_path)"
  MODULE_STATE_DIR="${storage_root}/modules"
  if ! python3 "$MODULE_HELPER" --env-path "$ENV_FILE" --manifest "$PROJECT_DIR/config/module-manifest.json" generate --output-dir "$MODULE_STATE_DIR"; then
    echo "❌ Module manifest validation failed. See details above."
    exit 1
  fi
  if [ ! -f "$MODULE_STATE_DIR/modules.env" ]; then
    echo "❌ modules.env not produced at $MODULE_STATE_DIR/modules.env"
    exit 1
  fi
  # shellcheck disable=SC1090
  source "$MODULE_STATE_DIR/modules.env"
  IFS=' ' read -r -a MODULES_COMPILE_LIST <<< "${MODULES_COMPILE:-}"
  if [ "${#MODULES_COMPILE_LIST[@]}" -eq 1 ] && [ -z "${MODULES_COMPILE_LIST[0]}" ]; then
    MODULES_COMPILE_LIST=()
  fi
}
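# modules.env is produced by scripts/python/modules.py and is expected to define
# MODULES_COMPILE as a space-separated list, e.g. (illustrative values only):
#   MODULES_COMPILE="mod-aoe-loot mod-transmog"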
modules_require_playerbot_source(){
  ensure_module_state
  if [ "${MODULES_REQUIRES_PLAYERBOT_SOURCE:-0}" = "1" ]; then
    echo 1
  else
    echo 0
  fi
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --yes|-y) ASSUME_YES=1; shift;;
    --source) SOURCE_OVERRIDE="$2"; shift 2;;
    --skip-stop) SKIP_STOP=1; shift;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown option: $1" >&2; usage; exit 1;;
  esac
done

if ! command -v docker >/dev/null 2>&1; then
  echo "❌ Docker CLI not found in PATH."
  exit 1
fi

if ! command -v python3 >/dev/null 2>&1; then
  echo "❌ python3 not found in PATH."
  exit 1
fi

STORAGE_PATH="$(read_env STORAGE_PATH "./storage")"
if [[ "$STORAGE_PATH" != /* ]]; then
  STORAGE_PATH="$PROJECT_DIR/${STORAGE_PATH#./}"
fi
# Build sentinel is tracked in local storage
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
  # Remove leading ./ if present
  LOCAL_STORAGE_PATH="${LOCAL_STORAGE_PATH#./}"
  LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi
MODULES_DIR="$STORAGE_PATH/modules"
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"

STORAGE_PATH_ABS="$STORAGE_PATH"

REBUILD_SOURCE_PATH="$SOURCE_OVERRIDE"
default_path="$(default_source_path)"
if [ -z "$REBUILD_SOURCE_PATH" ]; then
  REBUILD_SOURCE_PATH="$(read_env MODULES_REBUILD_SOURCE_PATH "$default_path")"
fi

if [ -z "$REBUILD_SOURCE_PATH" ]; then
  REBUILD_SOURCE_PATH="$default_path"
fi

if [[ "$REBUILD_SOURCE_PATH" != /* ]]; then
  REBUILD_SOURCE_PATH="$PROJECT_DIR/${REBUILD_SOURCE_PATH#./}"
fi

if [[ "$default_path" != /* ]]; then
  default_path_abs="$PROJECT_DIR/${default_path#./}"
else
  default_path_abs="$default_path"
fi
if [[ "$REBUILD_SOURCE_PATH" == "$STORAGE_PATH_ABS"* ]]; then
  echo "⚠️ Source path $REBUILD_SOURCE_PATH is inside shared storage ($STORAGE_PATH_ABS). Using local workspace $default_path_abs instead."
  REBUILD_SOURCE_PATH="$default_path_abs"
fi

REBUILD_SOURCE_PATH="$(realpath "$REBUILD_SOURCE_PATH" 2>/dev/null || echo "$REBUILD_SOURCE_PATH")"

# Check for modules in source directory first, then fall back to shared storage
LOCAL_MODULES_DIR="$REBUILD_SOURCE_PATH/modules"
LOCAL_STAGING_MODULES_DIR="$LOCAL_STORAGE_PATH/modules"

if [ -d "$LOCAL_STAGING_MODULES_DIR" ] && [ "$(ls -A "$LOCAL_STAGING_MODULES_DIR" 2>/dev/null)" ]; then
  echo "🔧 Using modules from local staging: $LOCAL_STAGING_MODULES_DIR"
  MODULES_DIR="$LOCAL_STAGING_MODULES_DIR"
elif [ -d "$LOCAL_MODULES_DIR" ]; then
  echo "🔧 Using modules from source directory: $LOCAL_MODULES_DIR"
  MODULES_DIR="$LOCAL_MODULES_DIR"
else
  echo "⚠️ No local module staging detected; falling back to source directory $LOCAL_MODULES_DIR"
  MODULES_DIR="$LOCAL_MODULES_DIR"
fi

SOURCE_COMPOSE="$REBUILD_SOURCE_PATH/docker-compose.yml"
if [ ! -f "$SOURCE_COMPOSE" ]; then
  if [ -f "$REBUILD_SOURCE_PATH/apps/docker/docker-compose.yml" ]; then
    SOURCE_COMPOSE="$REBUILD_SOURCE_PATH/apps/docker/docker-compose.yml"
  else
    echo "❌ Source docker-compose.yml not found at $REBUILD_SOURCE_PATH (checked $SOURCE_COMPOSE and apps/docker/docker-compose.yml)"
    exit 1
  fi
fi

ensure_module_state

if [ ${#MODULES_COMPILE_LIST[@]} -eq 0 ]; then
  echo "✅ No C++ modules enabled that require a source rebuild."
  rm -f "$SENTINEL_FILE" 2>/dev/null || true
  exit 0
fi

echo "🔧 Modules requiring compilation:"
for mod in "${MODULES_COMPILE_LIST[@]}"; do
  echo " • $mod"
done

if [ ! -d "$MODULES_DIR" ]; then
  echo "⚠️ Modules directory not found at $MODULES_DIR"
fi

if ! confirm "Proceed with source rebuild in $REBUILD_SOURCE_PATH? (15-45 minutes)" n; then
  echo "❌ Rebuild cancelled"
  exit 1
fi

pushd "$REBUILD_SOURCE_PATH" >/dev/null

if [ "$SKIP_STOP" != "1" ]; then
  echo "🛑 Stopping existing source services (if any)..."
  docker compose down || true
fi

if [ -d "$MODULES_DIR" ]; then
  echo "🔄 Syncing enabled modules into source tree..."
  mkdir -p modules
  find modules -mindepth 1 -maxdepth 1 -type d -name 'mod-*' -exec rm -rf {} + 2>/dev/null || true
  if command -v rsync >/dev/null 2>&1; then
    rsync -a "$MODULES_DIR"/ modules/
  else
    cp -R "$MODULES_DIR"/. modules/
  fi
else
  echo "⚠️ No modules directory found at $MODULES_DIR; continuing without sync."
fi

echo "🚀 Building AzerothCore with modules..."
docker compose build --no-cache

echo "🔖 Tagging modules-latest images"

# Get image names and tags from .env.template
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
get_template_value() {
  local key="$1"
  local fallback="$2"
  if [ -f "$TEMPLATE_FILE" ]; then
    local value
    value=$(grep "^${key}=" "$TEMPLATE_FILE" | head -1 | cut -d'=' -f2- | sed 's/^"\(.*\)"$/\1/')
    if [[ "$value" =~ ^\$\{[^}]*:-([^}]*)\}$ ]]; then
      value="${BASH_REMATCH[1]}"
    fi
    [ -n "$value" ] && echo "$value" || echo "$fallback"
  else
    echo "$fallback"
  fi
}
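# e.g. a template line DOCKER_IMAGE_TAG=${DOCKER_IMAGE_TAG:-master} resolves to
# "master" via the ${VAR:-default} capture above (hypothetical template content).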
strip_tag(){
  local image="$1"
  if [[ "$image" == *:* ]]; then
    echo "${image%:*}"
  else
    echo "$image"
  fi
}

tag_if_exists(){
  local source_image="$1"
  local target_image="$2"
  local description="$3"
  if [ -z "$source_image" ] || [ -z "$target_image" ]; then
    return 1
  fi
  if docker image inspect "$source_image" >/dev/null 2>&1; then
    if docker tag "$source_image" "$target_image"; then
      echo "✅ Tagged ${description}: $target_image (from $source_image)"
      return 0
    fi
  fi
  echo "⚠️ Warning: unable to tag ${description} from $source_image"
  return 1
}

SOURCE_IMAGE_TAG="$(read_env DOCKER_IMAGE_TAG "$(get_template_value "DOCKER_IMAGE_TAG" "master")")"
[ -z "$SOURCE_IMAGE_TAG" ] && SOURCE_IMAGE_TAG="master"

AUTHSERVER_BASE_REPO="$(strip_tag "$(read_env AC_AUTHSERVER_IMAGE_BASE "$(get_template_value "AC_AUTHSERVER_IMAGE_BASE" "acore/ac-wotlk-authserver")")")"
WORLDSERVER_BASE_REPO="$(strip_tag "$(read_env AC_WORLDSERVER_IMAGE_BASE "$(get_template_value "AC_WORLDSERVER_IMAGE_BASE" "acore/ac-wotlk-worldserver")")")"
DB_IMPORT_BASE_REPO="$(strip_tag "$(read_env AC_DB_IMPORT_IMAGE_BASE "$(get_template_value "AC_DB_IMPORT_IMAGE_BASE" "acore/ac-wotlk-db-import")")")"
CLIENT_DATA_BASE_REPO="$(strip_tag "$(read_env AC_CLIENT_DATA_IMAGE_BASE "$(get_template_value "AC_CLIENT_DATA_IMAGE_BASE" "acore/ac-wotlk-client-data")")")"

BUILT_AUTHSERVER_IMAGE="$AUTHSERVER_BASE_REPO:$SOURCE_IMAGE_TAG"
BUILT_WORLDSERVER_IMAGE="$WORLDSERVER_BASE_REPO:$SOURCE_IMAGE_TAG"
BUILT_DB_IMPORT_IMAGE="$DB_IMPORT_BASE_REPO:$SOURCE_IMAGE_TAG"
BUILT_CLIENT_DATA_IMAGE="$CLIENT_DATA_BASE_REPO:$SOURCE_IMAGE_TAG"

TARGET_AUTHSERVER_IMAGE="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(get_template_value "AC_AUTHSERVER_IMAGE_MODULES")")"
TARGET_WORLDSERVER_IMAGE="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(get_template_value "AC_WORLDSERVER_IMAGE_MODULES")")"
PLAYERBOTS_AUTHSERVER_IMAGE="$(read_env AC_AUTHSERVER_IMAGE_PLAYERBOTS "$(get_template_value "AC_AUTHSERVER_IMAGE_PLAYERBOTS")")"
PLAYERBOTS_WORLDSERVER_IMAGE="$(read_env AC_WORLDSERVER_IMAGE_PLAYERBOTS "$(get_template_value "AC_WORLDSERVER_IMAGE_PLAYERBOTS")")"

if [ -z "$TARGET_AUTHSERVER_IMAGE" ]; then
  echo "❌ AC_AUTHSERVER_IMAGE_MODULES is not defined in .env"
  exit 1
fi
if [ -z "$TARGET_WORLDSERVER_IMAGE" ]; then
  echo "❌ AC_WORLDSERVER_IMAGE_MODULES is not defined in .env"
  exit 1
fi
if [ -z "$PLAYERBOTS_AUTHSERVER_IMAGE" ]; then
  echo "❌ AC_AUTHSERVER_IMAGE_PLAYERBOTS is not defined in .env"
  exit 1
fi
if [ -z "$PLAYERBOTS_WORLDSERVER_IMAGE" ]; then
  echo "❌ AC_WORLDSERVER_IMAGE_PLAYERBOTS is not defined in .env"
  exit 1
fi

echo "🔁 Tagging modules images from playerbot build artifacts"
if tag_if_exists "$BUILT_AUTHSERVER_IMAGE" "$PLAYERBOTS_AUTHSERVER_IMAGE" "playerbots authserver"; then
  update_env_value "AC_AUTHSERVER_IMAGE_PLAYERBOTS" "$PLAYERBOTS_AUTHSERVER_IMAGE"
fi
if tag_if_exists "$BUILT_WORLDSERVER_IMAGE" "$PLAYERBOTS_WORLDSERVER_IMAGE" "playerbots worldserver"; then
  update_env_value "AC_WORLDSERVER_IMAGE_PLAYERBOTS" "$PLAYERBOTS_WORLDSERVER_IMAGE"
fi
if tag_if_exists "$BUILT_AUTHSERVER_IMAGE" "$TARGET_AUTHSERVER_IMAGE" "modules authserver"; then
  update_env_value "AC_AUTHSERVER_IMAGE_MODULES" "$TARGET_AUTHSERVER_IMAGE"
fi
if tag_if_exists "$BUILT_WORLDSERVER_IMAGE" "$TARGET_WORLDSERVER_IMAGE" "modules worldserver"; then
  update_env_value "AC_WORLDSERVER_IMAGE_MODULES" "$TARGET_WORLDSERVER_IMAGE"
fi

TARGET_DB_IMPORT_IMAGE="$(read_env AC_DB_IMPORT_IMAGE "$(get_template_value "AC_DB_IMPORT_IMAGE")")"
if [ -z "$TARGET_DB_IMPORT_IMAGE" ]; then
  echo "❌ AC_DB_IMPORT_IMAGE is not defined in .env"
  exit 1
fi
if tag_if_exists "$BUILT_DB_IMPORT_IMAGE" "$TARGET_DB_IMPORT_IMAGE" "playerbots db-import"; then
  update_env_value "AC_DB_IMPORT_IMAGE" "$TARGET_DB_IMPORT_IMAGE"
fi

TARGET_CLIENT_DATA_IMAGE="$(read_env AC_CLIENT_DATA_IMAGE_PLAYERBOTS "$(get_template_value "AC_CLIENT_DATA_IMAGE_PLAYERBOTS")")"
if [ -z "$TARGET_CLIENT_DATA_IMAGE" ]; then
  echo "❌ AC_CLIENT_DATA_IMAGE_PLAYERBOTS is not defined in .env"
  exit 1
fi
if tag_if_exists "$BUILT_CLIENT_DATA_IMAGE" "$TARGET_CLIENT_DATA_IMAGE" "playerbots client-data"; then
  update_env_value "AC_CLIENT_DATA_IMAGE_PLAYERBOTS" "$TARGET_CLIENT_DATA_IMAGE"
fi

show_rebuild_step 5 5 "Cleaning up build containers"
echo "🧹 Cleaning up source build containers..."
docker compose down --remove-orphans >/dev/null 2>&1 || true

popd >/dev/null

remove_sentinel(){
  local sentinel_path="$1"
  [ -n "$sentinel_path" ] || return 0
  [ -f "$sentinel_path" ] || return 0
  if rm -f "$sentinel_path" 2>/dev/null; then
    return 0
  fi
  if command -v docker >/dev/null 2>&1; then
    local db_image
    db_image="$(read_env AC_DB_IMPORT_IMAGE "acore/ac-wotlk-db-import:master")"
    if docker image inspect "$db_image" >/dev/null 2>&1; then
      local mount_dir
      mount_dir="$(dirname "$sentinel_path")"
      docker run --rm \
        --entrypoint /bin/sh \
        --user 0:0 \
        -v "$mount_dir":/modules \
        "$db_image" \
        -c 'rm -f /modules/.requires_rebuild' >/dev/null 2>&1 || true
    fi
  fi
  if [ -f "$sentinel_path" ]; then
    echo "⚠️ Unable to remove rebuild sentinel at $sentinel_path. Remove manually if rebuild detection persists."
  fi
}

remove_sentinel "$SENTINEL_FILE"

echo ""
echo -e "${GREEN}⚔️ Module build forged successfully! ⚔️${NC}"
echo -e "${GREEN}🏰 Your custom AzerothCore images are ready${NC}"
echo -e "${GREEN}🗡️ Time to stage your enhanced realm!${NC}"
154
scripts/bash/setup-source.sh
Executable file
@@ -0,0 +1,154 @@
#!/bin/bash
# azerothcore-rm source repository setup
set -euo pipefail

echo '🔧 Setting up AzerothCore source repository...'

# Load environment variables if .env exists
if [ -f .env ]; then
  source .env
fi

# Remember project root for path normalization
PROJECT_ROOT="$(pwd)"

# Default values
MODULE_PLAYERBOTS="${MODULE_PLAYERBOTS:-0}"
PLAYERBOT_ENABLED="${PLAYERBOT_ENABLED:-0}"
STACK_SOURCE_VARIANT="${STACK_SOURCE_VARIANT:-}"
if [ -z "$STACK_SOURCE_VARIANT" ]; then
  if [ "$MODULE_PLAYERBOTS" = "1" ] || [ "$PLAYERBOT_ENABLED" = "1" ]; then
    STACK_SOURCE_VARIANT="playerbots"
  else
    STACK_SOURCE_VARIANT="core"
  fi
fi
LOCAL_STORAGE_ROOT="${STORAGE_PATH_LOCAL:-./local-storage}"
DEFAULT_STANDARD_PATH="${LOCAL_STORAGE_ROOT%/}/source/azerothcore"
DEFAULT_PLAYERBOTS_PATH="${LOCAL_STORAGE_ROOT%/}/source/azerothcore-playerbots"

SOURCE_PATH_DEFAULT="$DEFAULT_STANDARD_PATH"
if [ "$STACK_SOURCE_VARIANT" = "playerbots" ]; then
  SOURCE_PATH_DEFAULT="$DEFAULT_PLAYERBOTS_PATH"
fi
SOURCE_PATH="${MODULES_REBUILD_SOURCE_PATH:-$SOURCE_PATH_DEFAULT}"

show_client_data_requirement(){
  local repo_path="$1"
  local detector="$PROJECT_ROOT/scripts/bash/detect-client-data-version.sh"
  if [ ! -x "$detector" ]; then
    return
  fi

  local detection
  if ! detection="$("$detector" --no-header "$repo_path" 2>/dev/null | head -n1)"; then
    echo "⚠️ Could not detect client data version for $repo_path"
    return
  fi

  local detected_repo raw_version normalized_version
  IFS=$'\t' read -r detected_repo raw_version normalized_version <<< "$detection"
  if [ -z "$normalized_version" ] || [ "$normalized_version" = "<unknown>" ]; then
    echo "⚠️ Could not detect client data version for $repo_path"
    return
  fi

  local env_value="${CLIENT_DATA_VERSION:-}"
  if [ -n "$env_value" ] && [ "$env_value" != "$normalized_version" ]; then
    echo "⚠️ Source requires client data ${normalized_version} (raw ${raw_version}) but .env specifies ${env_value}. Update CLIENT_DATA_VERSION to avoid mismatched maps."
  elif [ -n "$env_value" ]; then
    echo "📦 Client data requirement satisfied: ${normalized_version} (raw ${raw_version})"
  else
    echo "ℹ️ Detected client data requirement: ${normalized_version} (raw ${raw_version}). Set CLIENT_DATA_VERSION in .env to avoid mismatches."
  fi
}

STORAGE_PATH_VALUE="${STORAGE_PATH:-./storage}"
if [[ "$STORAGE_PATH_VALUE" != /* ]]; then
  STORAGE_PATH_ABS="$PROJECT_ROOT/${STORAGE_PATH_VALUE#./}"
else
  STORAGE_PATH_ABS="$STORAGE_PATH_VALUE"
fi

if [[ "$SOURCE_PATH_DEFAULT" != /* ]]; then
  DEFAULT_SOURCE_ABS="$PROJECT_ROOT/${SOURCE_PATH_DEFAULT#./}"
else
  DEFAULT_SOURCE_ABS="$SOURCE_PATH_DEFAULT"
fi

# Convert to absolute path if relative and ensure we stay local
if [[ "$SOURCE_PATH" != /* ]]; then
  SOURCE_PATH="$PROJECT_ROOT/${SOURCE_PATH#./}"
fi
if [[ "$SOURCE_PATH" == "$STORAGE_PATH_ABS"* ]]; then
  echo "⚠️ Source path $SOURCE_PATH is inside shared storage ($STORAGE_PATH_ABS). Using local workspace $DEFAULT_SOURCE_ABS instead."
  SOURCE_PATH="$DEFAULT_SOURCE_ABS"
  MODULES_REBUILD_SOURCE_PATH="$SOURCE_PATH_DEFAULT"
fi

ACORE_REPO_STANDARD="${ACORE_REPO_STANDARD:-https://github.com/azerothcore/azerothcore-wotlk.git}"
ACORE_BRANCH_STANDARD="${ACORE_BRANCH_STANDARD:-master}"
ACORE_REPO_PLAYERBOTS="${ACORE_REPO_PLAYERBOTS:-https://github.com/mod-playerbots/azerothcore-wotlk.git}"
ACORE_BRANCH_PLAYERBOTS="${ACORE_BRANCH_PLAYERBOTS:-Playerbot}"

# Repository and branch selection based on source variant
if [ "$STACK_SOURCE_VARIANT" = "playerbots" ]; then
  REPO_URL="$ACORE_REPO_PLAYERBOTS"
  BRANCH="$ACORE_BRANCH_PLAYERBOTS"
  echo "📌 Playerbots mode: Using $REPO_URL, branch $BRANCH"
else
  REPO_URL="$ACORE_REPO_STANDARD"
  BRANCH="$ACORE_BRANCH_STANDARD"
  echo "📌 Standard mode: Using $REPO_URL, branch $BRANCH"
fi

echo "📍 Repository: $REPO_URL"
echo "🌿 Branch: $BRANCH"
echo "📂 Source path: $SOURCE_PATH"

# Ensure destination directories exist
echo "📂 Preparing local workspace at $(dirname "$SOURCE_PATH")"
mkdir -p "$(dirname "$SOURCE_PATH")"

# Clone or update repository
if [ -d "$SOURCE_PATH/.git" ]; then
  echo "📂 Existing repository found, updating..."
  cd "$SOURCE_PATH"

  # Check if we're on the correct repository
  CURRENT_REMOTE=$(git remote get-url origin 2>/dev/null || echo "")
  if [ "$CURRENT_REMOTE" != "$REPO_URL" ]; then
    echo "🔄 Repository URL changed, re-cloning..."
    cd ..
    rm -rf "$(basename "$SOURCE_PATH")"
    echo "⏳ Cloning $REPO_URL (branch $BRANCH) into $(basename "$SOURCE_PATH")"
    git clone -b "$BRANCH" "$REPO_URL" "$(basename "$SOURCE_PATH")"
    cd "$(basename "$SOURCE_PATH")"
  else
    echo "🔄 Fetching latest changes from origin..."
    git fetch origin --progress
    echo "🔀 Switching to branch $BRANCH..."
    git checkout "$BRANCH"
    echo "⬇️ Pulling latest commits..."
    git pull --ff-only origin "$BRANCH"
    echo "✅ Repository updated to latest $BRANCH"
  fi
else
  echo "📥 Cloning repository..."
  echo "⏳ Cloning $REPO_URL (branch $BRANCH) into $SOURCE_PATH"
  git clone -b "$BRANCH" "$REPO_URL" "$SOURCE_PATH"
  echo "✅ Repository cloned successfully"
fi

cd "$SOURCE_PATH"

# Display current status
CURRENT_COMMIT=$(git rev-parse --short HEAD)
CURRENT_BRANCH=$(git branch --show-current)
echo "📊 Current status:"
echo " Branch: $CURRENT_BRANCH"
echo " Commit: $CURRENT_COMMIT"
echo " Last commit: $(git log -1 --pretty=format:'%s (%an, %ar)')"
show_client_data_requirement "$SOURCE_PATH"

echo '🎉 Source repository setup complete!'
364
scripts/bash/stage-modules.sh
Executable file
@@ -0,0 +1,364 @@
#!/bin/bash

# azerothcore-rm helper to automatically stage modules and trigger source builds when needed.

set -e

BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'

show_staging_header(){
  printf '\n%b\n' "${BLUE}⚔️ REALM STAGING SYSTEM ⚔️${NC}"
  printf '%b\n' "${BLUE}══════════════════════════════${NC}"
  printf '%b\n\n' "${BLUE}🎯 Configuring Your Realm 🎯${NC}"
}

show_staging_step(){
  local step="$1" message="$2"
  printf '%b\n' "${YELLOW}🔧 ${step}: ${message}...${NC}"
}

sync_local_staging(){
  local src_root="$LOCAL_STORAGE_PATH"
  local dest_root="$STORAGE_PATH"

  if [ -z "$src_root" ] || [ -z "$dest_root" ]; then
    return
  fi

  if [ "$src_root" = "$dest_root" ]; then
    return
  fi

  local src_modules="${src_root}/modules"
  local dest_modules="${dest_root}/modules"

  if [ ! -d "$src_modules" ]; then
    echo "ℹ️ No local module staging found at $src_modules (skipping sync)."
    # Check if modules exist in destination storage
    if [ -d "$dest_modules" ] && [ -n "$(ls -A "$dest_modules" 2>/dev/null)" ]; then
      local module_count
      module_count=$(find "$dest_modules" -maxdepth 1 -type d | wc -l)
      module_count=$((module_count - 1)) # Subtract 1 for the parent directory
      if [ "$module_count" -gt 0 ]; then
        echo "✅ Found $module_count modules in shared storage at $dest_modules"
      fi
    fi
    return
  fi

  echo "📦 Syncing local module staging from $src_modules to $dest_modules"
  if ! mkdir -p "$dest_modules" 2>/dev/null; then
    echo "ℹ️ Destination storage path $dest_root not accessible (likely remote storage - skipping sync)."
    echo "ℹ️ Module sync will be handled by the remote deployment."
    return
  fi

  if command -v rsync >/dev/null 2>&1; then
    rsync -a --delete "$src_modules"/ "$dest_modules"/
  else
    find "$dest_modules" -mindepth 1 -maxdepth 1 -exec rm -rf {} + 2>/dev/null || true
    (cd "$src_modules" && tar cf - .) | (cd "$dest_modules" && tar xf -)
  fi
}

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# The script lives in scripts/bash/, so the project root is two levels up.
PROJECT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="$PROJECT_DIR/.env"
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
source "$PROJECT_DIR/scripts/bash/project_name.sh"

# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
DEFAULT_COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml"
source "$PROJECT_DIR/scripts/bash/compose_overrides.sh"

usage(){
  cat <<EOF
Usage: $(basename "$0") [options] [PROFILE]

Automatically detect and stage modules for AzerothCore.

Arguments:
  PROFILE          Target profile (standard, playerbots, or auto-detect)

Options:
  --force-rebuild  Force a source rebuild even if not needed
  --yes, -y        Skip interactive confirmation prompts
  -h, --help       Show this help

Examples:
  $(basename "$0")                  # Auto-detect profile based on enabled modules
  $(basename "$0") playerbots       # Force playerbots profile
  $(basename "$0") --force-rebuild  # Force rebuild and auto-detect
EOF
}

read_env(){
  local key="$1" default="$2" env_path="$ENV_FILE" value=""
  if [ -f "$env_path" ]; then
    value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ]; then
    value="$default"
  fi
  echo "$value"
}

resolve_project_name(){
  local raw_name
  raw_name="$(read_env COMPOSE_PROJECT_NAME "$DEFAULT_PROJECT_NAME")"
  project_name::sanitize "$raw_name"
}

if [ -z "${COMPOSE_FILE:-}" ]; then
  compose_files=("$DEFAULT_COMPOSE_FILE")
  declare -a enabled_overrides=()
  compose_overrides::list_enabled_files "$PROJECT_DIR" "$ENV_FILE" enabled_overrides
  if [ "${#enabled_overrides[@]}" -gt 0 ]; then
    compose_files+=("${enabled_overrides[@]}")
  fi
  COMPOSE_FILE="$(IFS=:; echo "${compose_files[*]}")"
  export COMPOSE_FILE
fi

resolve_project_image(){
  local tag="$1"
  local project_name
  project_name="$(resolve_project_name)"
  echo "${project_name}:${tag}"
}

canonical_path(){
  local path="$1"
  if command -v realpath >/dev/null 2>&1; then
    realpath -m "$path"
  elif command -v python3 >/dev/null 2>&1; then
    python3 - "$path" <<'PY'
import os, sys
print(os.path.normpath(sys.argv[1]))
PY
  else
    local normalized="$path"
    # Strip leading "./" portions so relative paths are clean
    while [[ "$normalized" == ./* ]]; do
      normalized="${normalized:2}"
    done
    # Collapse any embedded "/./" segments that appear in absolute paths
    while [[ "$normalized" == *"/./"* ]]; do
      normalized="${normalized//\/\.\//\/}"
    done
    # Replace duplicate slashes with a single slash for readability
    while [[ "$normalized" == *"//"* ]]; do
      normalized="${normalized//\/\//\/}"
    done
    # Preserve absolute path prefix if original started with '/'
    if [[ "$path" == /* && "$normalized" != /* ]]; then
      normalized="/${normalized}"
    fi
    echo "$normalized"
  fi
}
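# Example: canonical_path "/srv//storage/./modules" -> "/srv/storage/modules"
# (via realpath -m or python3 when available; the pure-bash fallback only
# collapses "./" segments and duplicate slashes).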
confirm(){
  local prompt="$1" default="$2" reply
  if [ "$ASSUME_YES" = "1" ]; then
    return 0
  fi
  while true; do
    if [ "$default" = "y" ]; then
      read -r -p "$prompt [Y/n]: " reply
      reply="${reply:-y}"
    else
      read -r -p "$prompt [y/N]: " reply
      reply="${reply:-n}"
    fi
    case "$reply" in
      [Yy]*) return 0 ;;
      [Nn]*) return 1 ;;
    esac
  done
}

# Parse arguments
ASSUME_YES=0
FORCE_REBUILD=0
TARGET_PROFILE=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    --yes|-y) ASSUME_YES=1; shift;;
    --force-rebuild) FORCE_REBUILD=1; shift;;
    -h|--help) usage; exit 0;;
    standard|playerbots) TARGET_PROFILE="$1"; shift;;
    *) echo "Unknown option: $1" >&2; usage; exit 1;;
  esac
done

if ! command -v docker >/dev/null 2>&1; then
  echo "❌ Docker CLI not found in PATH."
  exit 1
fi

STORAGE_PATH="$(read_env STORAGE_PATH "./storage")"
if [[ "$STORAGE_PATH" != /* ]]; then
  STORAGE_PATH="$PROJECT_DIR/$STORAGE_PATH"
fi
STORAGE_PATH="$(canonical_path "$STORAGE_PATH")"
MODULES_DIR="$STORAGE_PATH/modules"

# Build sentinel is in local storage, deployment modules are in shared storage
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"
if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
  LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi
LOCAL_STORAGE_PATH="$(canonical_path "$LOCAL_STORAGE_PATH")"
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"

# Define module mappings (from rebuild-with-modules.sh)
declare -A MODULE_REPO_MAP=(
  [MODULE_AOE_LOOT]=mod-aoe-loot
  [MODULE_LEARN_SPELLS]=mod-learn-spells
  [MODULE_FIREWORKS]=mod-fireworks-on-level
  [MODULE_INDIVIDUAL_PROGRESSION]=mod-individual-progression
  [MODULE_AHBOT]=mod-ahbot
  [MODULE_AUTOBALANCE]=mod-autobalance
  [MODULE_TRANSMOG]=mod-transmog
  [MODULE_NPC_BUFFER]=mod-npc-buffer
  [MODULE_DYNAMIC_XP]=mod-dynamic-xp
  [MODULE_SOLO_LFG]=mod-solo-lfg
  [MODULE_1V1_ARENA]=mod-1v1-arena
  [MODULE_PHASED_DUELS]=mod-phased-duels
  [MODULE_BREAKING_NEWS]=mod-breaking-news-override
  [MODULE_BOSS_ANNOUNCER]=mod-boss-announcer
  [MODULE_ACCOUNT_ACHIEVEMENTS]=mod-account-achievements
  [MODULE_AUTO_REVIVE]=mod-auto-revive
  [MODULE_GAIN_HONOR_GUARD]=mod-gain-honor-guard
  [MODULE_TIME_IS_TIME]=mod-TimeIsTime
  [MODULE_POCKET_PORTAL]=mod-pocket-portal
  [MODULE_RANDOM_ENCHANTS]=mod-random-enchants
  [MODULE_SOLOCRAFT]=mod-solocraft
  [MODULE_PVP_TITLES]=mod-pvp-titles
  [MODULE_NPC_BEASTMASTER]=mod-npc-beastmaster
  [MODULE_NPC_ENCHANTER]=mod-npc-enchanter
  [MODULE_INSTANCE_RESET]=mod-instance-reset
  [MODULE_LEVEL_GRANT]=mod-quest-count-level
  [MODULE_ARAC]=mod-arac
  [MODULE_ASSISTANT]=mod-assistant
  [MODULE_REAGENT_BANK]=mod-reagent-bank
  [MODULE_CHALLENGE_MODES]=mod-challenge-modes
  [MODULE_OLLAMA_CHAT]=mod-ollama-chat
  [MODULE_PLAYER_BOT_LEVEL_BRACKETS]=mod-player-bot-level-brackets
  [MODULE_STATBOOSTER]=StatBooster
  [MODULE_DUNGEON_RESPAWN]=DungeonRespawn
  [MODULE_SKELETON_MODULE]=skeleton-module
  [MODULE_BG_SLAVERYVALLEY]=mod-bg-slaveryvalley
  [MODULE_AZEROTHSHARD]=mod-azerothshard
  [MODULE_WORGOBLIN]=mod-worgoblin
)
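# Lookup sketch: setting MODULE_TRANSMOG=1 in .env maps to the mod-transmog
# repository above and flags it below as a C++ module requiring compilation.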
show_staging_header

# Check for enabled C++ modules that require compilation
compile_modules=()
for key in "${!MODULE_REPO_MAP[@]}"; do
  if [ "$(read_env "$key" "0")" = "1" ]; then
    compile_modules+=("${MODULE_REPO_MAP[$key]}")
  fi
done

# Check for playerbots mode
PLAYERBOT_ENABLED="$(read_env PLAYERBOT_ENABLED "0")"
MODULE_PLAYERBOTS="$(read_env MODULE_PLAYERBOTS "0")"

# Determine target profile if not specified
if [ -z "$TARGET_PROFILE" ]; then
  show_staging_step "Profile Detection" "Analyzing enabled modules"
  if [ "$MODULE_PLAYERBOTS" = "1" ] || [ "$PLAYERBOT_ENABLED" = "1" ]; then
    TARGET_PROFILE="playerbots"
    echo "🤖 Playerbot profile enabled"
    if [ ${#compile_modules[@]} -gt 0 ]; then
      echo " ⚠️ Detected ${#compile_modules[@]} C++ modules. Ensure your playerbot images include these features."
    fi
  elif [ ${#compile_modules[@]} -gt 0 ]; then
    echo "🔧 Detected ${#compile_modules[@]} C++ modules requiring compilation:"
    for mod in "${compile_modules[@]}"; do
      echo " • $mod"
    done
    TARGET_PROFILE="modules"
    echo "🧩 Using modules profile for custom source build"
  else
    TARGET_PROFILE="standard"
    echo "✅ No special modules detected - using standard profile"
  fi
fi

echo "🎯 Target profile: services-$TARGET_PROFILE"

# Check if source rebuild is needed for modules profile
REBUILD_NEEDED=0
TARGET_WORLDSERVER_IMAGE_MODULES="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")"
if [ "$TARGET_PROFILE" = "modules" ]; then
  # Check if source image exists
  if ! docker image inspect "$TARGET_WORLDSERVER_IMAGE_MODULES" >/dev/null 2>&1; then
    echo "📦 Modules image $TARGET_WORLDSERVER_IMAGE_MODULES not found - rebuild needed"
    REBUILD_NEEDED=1
  elif [ -f "$SENTINEL_FILE" ]; then
    echo "🔄 Modules changed since last build - rebuild needed"
    REBUILD_NEEDED=1
  elif [ "$FORCE_REBUILD" = "1" ]; then
    echo "🔧 Force rebuild requested"
    REBUILD_NEEDED=1
  fi

  if [ "$REBUILD_NEEDED" = "1" ]; then
    show_staging_step "Source Rebuild" "Preparing custom build with modules"
    echo "🚀 Triggering source rebuild with modules..."
    if confirm "Proceed with source rebuild? (15-45 minutes)" n; then
      # Forward --yes only when explicitly requested; ASSUME_YES=0 is a
      # non-empty string, so a ${ASSUME_YES:+--yes} expansion is unsafe here.
      if [ "$ASSUME_YES" = "1" ]; then
        "$SCRIPT_DIR/rebuild-with-modules.sh" --yes
      else
        "$SCRIPT_DIR/rebuild-with-modules.sh"
      fi
    else
      echo "❌ Rebuild cancelled"
      exit 1
    fi
  else
    echo "✅ Custom worldserver image up to date"
  fi
fi

# Stage the services
show_staging_step "Service Orchestration" "Preparing realm services"
sync_local_staging
echo "🎬 Staging services with profile: services-$TARGET_PROFILE"
echo "⏳ Pulling images and starting containers; this can take several minutes on first run."

# Stop any currently running services
echo "🛑 Stopping current services..."
docker compose \
  --profile services-standard \
  --profile services-playerbots \
  --profile services-modules \
  --profile tools \
  --profile client-data \
  --profile client-data-bots \
  down 2>/dev/null || true

# Build list of profiles to start
PROFILE_ARGS=(--profile "services-$TARGET_PROFILE" --profile db --profile modules --profile tools)
case "$TARGET_PROFILE" in
  standard) PROFILE_ARGS+=(--profile client-data) ;;
  playerbots) PROFILE_ARGS+=(--profile client-data-bots) ;;
  modules) PROFILE_ARGS+=(--profile client-data) ;;
esac

# Start the target profile
show_staging_step "Realm Activation" "Bringing services online"
echo "🟢 Starting services-$TARGET_PROFILE profile..."
docker compose "${PROFILE_ARGS[@]}" up -d

printf '\n%b\n' "${GREEN}⚔️ Realm staging completed successfully! ⚔️${NC}"
printf '%b\n' "${GREEN}🏰 Profile: services-$TARGET_PROFILE${NC}"
printf '%b\n' "${GREEN}🗡️ Your realm is ready for adventure!${NC}"

# Show status
printf '\n'
echo "📊 Service Status:"
docker compose ps --format "table {{.Name}}\t{{.Status}}\t{{.Ports}}" | grep -E "(ac-worldserver|ac-authserver|ac-phpmyadmin|ac-keira3|NAME)" || true
39
scripts/bash/start-containers.sh
Executable file
@@ -0,0 +1,39 @@
#!/bin/bash
|
||||
|
||||
# Thin wrapper to bring the AzerothCore stack online without triggering rebuilds.
|
||||
# Picks the right profile automatically (standard/playerbots/modules) and delegates
|
||||
# to deploy.sh so all staging/health logic stays consistent.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
PROFILE="$(python3 - <<'PY' "$ROOT_DIR"
|
||||
import json, subprocess, sys
|
||||
from pathlib import Path
|
||||
|
||||
root = Path(sys.argv[1])
|
||||
modules_py = root / "scripts" / "modules.py"
|
||||
env_path = root / ".env"
|
||||
manifest_path = root / "config" / "module-manifest.json"
|
||||
|
||||
state = json.loads(subprocess.check_output([
|
||||
sys.executable,
|
||||
str(modules_py),
|
||||
"--env-path", str(env_path),
|
||||
"--manifest", str(manifest_path),
|
||||
"dump", "--format", "json",
|
||||
]))
|
||||
|
||||
enabled = [m for m in state["modules"] if m["enabled"]]
|
||||
profile = "standard"
|
||||
if any(m["key"] == "MODULE_PLAYERBOTS" and m["enabled"] for m in enabled):
|
||||
profile = "playerbots"
|
||||
elif any(m["needs_build"] and m["enabled"] for m in enabled):
|
||||
profile = "modules"
|
||||
|
||||
print(profile)
|
||||
PY
|
||||
)"
|
||||
|
||||
exec "${ROOT_DIR}/deploy.sh" --profile "$PROFILE" --yes --no-watch
|
||||
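The heredoc above assumes modules.py's dump command emits a top-level "modules" list whose entries carry key, enabled, and needs_build. A minimal illustrative payload (module keys hypothetical apart from MODULE_PLAYERBOTS):

{"modules": [
  {"key": "MODULE_PLAYERBOTS", "enabled": true, "needs_build": true},
  {"key": "MODULE_EXAMPLE", "enabled": false, "needs_build": true}
]}

Given this payload the wrapper selects the playerbots profile.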
9
scripts/bash/stop-containers.sh
Executable file
@@ -0,0 +1,9 @@
#!/bin/bash

# Thin wrapper to stop all AzerothCore project containers while preserving data.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

exec "${SCRIPT_DIR}/cleanup.sh" --soft --force
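Typical invocation from the repository root (the exact semantics belong to cleanup.sh; judging by the flag names and the comment above, --soft is assumed to stop containers without deleting volumes, and --force to skip the confirmation prompt):

./scripts/bash/stop-containers.sh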
196
scripts/bash/verify-deployment.sh
Executable file
@@ -0,0 +1,196 @@
#!/bin/bash
# Project: azerothcore-rm
set -e

# Simple profile-aware deploy + health check for the project docker-compose.yml.

BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
info(){ echo -e "${BLUE}ℹ️ $*${NC}"; }
ok(){ echo -e "${GREEN}✅ $*${NC}"; }
warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
err(){ echo -e "${RED}❌ $*${NC}"; }

# Resolve the repository root; this script lives in scripts/bash/.
PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml"
ENV_FILE=""
TEMPLATE_FILE="$PROJECT_DIR/.env.template"
source "$PROJECT_DIR/scripts/bash/project_name.sh"
source "$PROJECT_DIR/scripts/bash/compose_overrides.sh"
PROFILES=(db services-standard client-data modules tools)
SKIP_DEPLOY=false
QUICK=false

usage(){
cat <<EOF
Usage: $0 [--profiles p1,p2,...] [--env-file path] [--skip-deploy] [--quick]
Default profiles: db,services-standard,client-data,modules,tools
Examples:
  $0 --profiles db,services-standard,client-data --env-file ./services.env
  $0 --profiles db,services-playerbots,client-data-bots,modules,tools
EOF
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --profiles) IFS=',' read -r -a PROFILES <<< "$2"; shift 2;;
    --env-file) ENV_FILE="$2"; shift 2;;
    --skip-deploy) SKIP_DEPLOY=true; shift;;
    --quick) QUICK=true; shift;;
    -h|--help) usage; exit 0;;
    *) err "Unknown arg: $1"; usage; exit 1;;
  esac
done
resolve_project_name(){
  local env_path
  if [ -n "$ENV_FILE" ]; then
    env_path="$ENV_FILE"
  else
    env_path="$(dirname "$COMPOSE_FILE")/.env"
  fi
  local raw_name
  raw_name="$(project_name::resolve "$env_path" "$TEMPLATE_FILE")"
  local sanitized
  sanitized="$(echo "$raw_name" | tr '[:upper:]' '[:lower:]')"
  sanitized="${sanitized// /-}"
  sanitized="$(echo "$sanitized" | tr -cd 'a-z0-9_-')"
  if [[ -z "$sanitized" ]]; then
    echo "Error: COMPOSE_PROJECT_NAME is invalid" >&2
    exit 1
  fi
  if [[ ! "$sanitized" =~ ^[a-z0-9] ]]; then
    sanitized="ac${sanitized}"
  fi
  echo "$sanitized"
}
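For example (hypothetical value), COMPOSE_PROJECT_NAME="My Realm #1" sanitizes step by step:

  "My Realm #1" -> "my realm #1"   (lowercase)
                -> "my-realm-#1"   (spaces to dashes)
                -> "my-realm-1"    (strip characters outside a-z0-9_-)

An empty result aborts, and a name starting with a non-alphanumeric gains an "ac" prefix so Docker Compose accepts it.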

run_compose(){
  local compose_args=()
  local project_name
  project_name="$(resolve_project_name)"
  compose_args+=(--project-name "$project_name")
  if [ -n "$ENV_FILE" ]; then
    compose_args+=(--env-file "$ENV_FILE")
  fi
  compose_args+=(-f "$COMPOSE_FILE")
  local env_path
  env_path="$(env_file_path)"
  declare -a enabled_overrides=()
  compose_overrides::list_enabled_files "$PROJECT_DIR" "$env_path" enabled_overrides
  for file in "${enabled_overrides[@]}"; do
    compose_args+=(-f "$file")
  done
  docker compose "${compose_args[@]}" "$@"
}

env_file_path(){
  if [ -n "$ENV_FILE" ]; then
    echo "$ENV_FILE"
  else
    echo "$(dirname "$COMPOSE_FILE")/.env"
  fi
}
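As a sketch of the invocation run_compose assembles (project name and override file hypothetical): with --env-file ./services.env and one enabled override, run_compose ps expands to roughly

docker compose --project-name my-realm --env-file ./services.env -f docker-compose.yml -f <override>.yml ps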

read_env_value(){
  local key="$1" default="${2:-}"
  local env_path value
  env_path="$(env_file_path)"
  if [ -f "$env_path" ]; then
    value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ]; then
    value="$default"
  fi
  echo "$value"
}
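Example: given an .env containing AUTO_REBUILD_ON_DEPLOY=1 (value hypothetical), read_env_value AUTO_REBUILD_ON_DEPLOY "0" prints 1; a key missing from the file yields the supplied default, and tail -n1 means the last occurrence of a duplicated key wins.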

handle_auto_rebuild(){
  local storage_path
  storage_path="$(read_env_value STORAGE_PATH_LOCAL "./local-storage")"
  if [[ "$storage_path" != /* ]]; then
    # Normalize a relative path (strip leading ./) against the compose file directory
    storage_path="${storage_path#./}"
    storage_path="$(dirname "$COMPOSE_FILE")/$storage_path"
  fi
  local sentinel="$storage_path/modules/.requires_rebuild"
  [ -f "$sentinel" ] || return 0

  info "Module rebuild required (detected $(realpath "$sentinel" 2>/dev/null || echo "$sentinel"))."
  local auto_rebuild
  auto_rebuild="$(read_env_value AUTO_REBUILD_ON_DEPLOY "0")"
  if [ "$auto_rebuild" != "1" ]; then
    warn "Run ./scripts/bash/rebuild-with-modules.sh after preparing your source tree."
    return 0
  fi

  local rebuild_source
  rebuild_source="$(read_env_value MODULES_REBUILD_SOURCE_PATH "")"
  info "AUTO_REBUILD_ON_DEPLOY=1; invoking ./scripts/bash/rebuild-with-modules.sh."
  local cmd=(./scripts/bash/rebuild-with-modules.sh --yes)
  if [ -n "$rebuild_source" ]; then
    cmd+=(--source "$rebuild_source")
  fi
  if "${cmd[@]}"; then
    info "Module rebuild completed."
  else
    warn "Automatic rebuild failed; run ./scripts/bash/rebuild-with-modules.sh manually."
  fi
}
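.env toggles consumed above (values hypothetical; MODULES_REBUILD_SOURCE_PATH is optional):

AUTO_REBUILD_ON_DEPLOY=1
MODULES_REBUILD_SOURCE_PATH=/opt/azerothcore-src

With these set and a .requires_rebuild sentinel present, the deploy invokes ./scripts/bash/rebuild-with-modules.sh --yes --source /opt/azerothcore-src automatically.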

check_health(){
  local name="$1"
  local status
  status="$(docker inspect --format='{{.State.Health.Status}}' "$name" 2>/dev/null || echo "no-health-check")"
  if [ "$status" = "healthy" ]; then ok "$name: healthy"; return 0; fi
  if docker ps --format '{{.Names}}' | grep -q "^${name}$"; then ok "$name: running"; return 0; fi
  err "$name: not running"; return 1
}

wait_log(){
  local name="$1"; local needle="$2"; local attempts="${3:-360}"; local interval=5
  info "Waiting for $name log: '$needle' ... (timeout: $((attempts*interval))s)"
  for i in $(seq 1 "$attempts"); do
    if docker logs "$name" 2>/dev/null | grep -q "$needle"; then ok "$name ready"; return 0; fi
    sleep "$interval"
  done
  warn "$name did not report '$needle'"
  return 1
}
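Example call (log needle hypothetical; main below waits on "Game data setup complete"): poll for up to 10 minutes at 5-second intervals:

wait_log ac-worldserver "World initialized" 120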

deploy(){
  info "Deploying profiles: ${PROFILES[*]}"
  local args=()
  for p in "${PROFILES[@]}"; do args+=(--profile "$p"); done
  run_compose "${args[@]}" up -d
}

health_checks(){
  info "Checking container health"
  local failures=0
  # Count failures with an assignment: a bare ((failures++)) evaluates to 0 on
  # the first failure and would abort the script under `set -e`.
  check_health ac-mysql || failures=$((failures + 1))
  check_health ac-authserver || failures=$((failures + 1))
  check_health ac-worldserver || failures=$((failures + 1))
  if [ "$QUICK" = false ]; then
    info "Port checks"
    for port in 64306 3784 8215 7778 8081 4201; do
      if timeout 3 bash -c "</dev/tcp/127.0.0.1/$port" 2>/dev/null; then ok "port $port: open"; else warn "port $port: closed"; fi
    done
  fi
  if [ "$failures" -eq 0 ]; then ok "All core services healthy"; else err "$failures service checks failed"; return 1; fi
}

main(){
  if [ "$SKIP_DEPLOY" = false ]; then
    deploy
    # Wait for client-data completion if either client-data profile is active
    if printf '%s\n' "${PROFILES[@]}" | grep -q '^client-data$\|^client-data-bots$'; then
      wait_log ac-client-data "Game data setup complete" || true
    fi
    # Give worldserver time to boot
    sleep 10
  fi
  health_checks
  handle_auto_rebuild
  info "Endpoints: MySQL:64306, Auth:3784, World:8215, SOAP:7778, phpMyAdmin:8081, Keira3:4201"
}

main "$@"
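Typical invocations, assuming the script is run from the repository root:

./scripts/bash/verify-deployment.sh
./scripts/bash/verify-deployment.sh --profiles db,services-playerbots,client-data-bots,modules,tools --quick
./scripts/bash/verify-deployment.sh --skip-deploy   # health checks only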