mirror of
https://github.com/uprightbass360/AzerothCore-RealmMaster.git
synced 2026-01-13 17:09:09 +00:00
This commit introduces major enhancements to the module installation system, database management, and configuration handling for AzerothCore deployments. ## Module System Improvements ### Module SQL Staging & Installation - Refactor module SQL staging to properly handle AzerothCore's sql/ directory structure - Fix SQL staging path to use correct AzerothCore format (sql/custom/db_*/*) - Implement conditional module database importing based on enabled modules - Add support for both cpp-modules and lua-scripts module types - Handle rsync exit code 23 (permission warnings) gracefully during deployment ### Module Manifest & Automation - Add automated module manifest generation via GitHub Actions workflow - Implement Python-based module manifest updater with comprehensive validation - Add module dependency tracking and SQL file discovery - Support for blocked modules and module metadata management ## Database Management Enhancements ### Database Import System - Add db-guard container for continuous database health monitoring and verification - Implement conditional database import that skips when databases are current - Add backup restoration and SQL staging coordination - Support for Playerbots database (4th database) in all import operations - Add comprehensive database health checking and status reporting ### Database Configuration - Implement 10 new dbimport.conf settings from environment variables: - Database.Reconnect.Seconds/Attempts for connection reliability - Updates.AllowedModules for module auto-update control - Updates.Redundancy for data integrity checks - Worker/Synch thread settings for all three core databases - Auto-apply dbimport.conf settings via auto-post-install.sh - Add environment variable injection for db-import and db-guard containers ### Backup & Recovery - Fix backup scheduler to prevent immediate execution on container startup - Add backup status monitoring script with detailed reporting - Implement backup import/export utilities - Add 
database verification scripts for SQL update tracking ## User Import Directory - Add new import/ directory for user-provided database files and configurations - Support for custom SQL files, configuration overrides, and example templates - Automatic import of user-provided databases and configs during initialization - Documentation and examples for custom database imports ## Configuration & Environment - Eliminate CLIENT_DATA_VERSION warning by adding default value syntax - Improve CLIENT_DATA_VERSION documentation in .env.template - Add comprehensive database import settings to .env and .env.template - Update setup.sh to handle new configuration variables with proper defaults ## Monitoring & Debugging - Add status dashboard with Go-based terminal UI (statusdash.go) - Implement JSON status output (statusjson.sh) for programmatic access - Add comprehensive database health check script - Add repair-storage-permissions.sh utility for permission issues ## Testing & Documentation - Add Phase 1 integration test suite for module installation verification - Add comprehensive documentation for: - Database management (DATABASE_MANAGEMENT.md) - Module SQL analysis (AZEROTHCORE_MODULE_SQL_ANALYSIS.md) - Implementation mapping (IMPLEMENTATION_MAP.md) - SQL staging comparison and path coverage - Module assets and DBC file requirements - Update SCRIPTS.md, ADVANCED.md, and troubleshooting documentation - Update references from database-import/ to import/ directory ## Breaking Changes - Renamed database-import/ directory to import/ for clarity - Module SQL files now staged to AzerothCore-compatible paths - db-guard container now required for proper database lifecycle management ## Bug Fixes - Fix module SQL staging directory structure for AzerothCore compatibility - Handle rsync exit code 23 gracefully during deployments - Prevent backup from running immediately on container startup - Correct SQL staging paths for proper module installation
448 lines
16 KiB
Bash
Executable File
448 lines
16 KiB
Bash
Executable File
#!/bin/bash
# azerothcore-rm
#
# db-import-conditional.sh — conditionally restore AzerothCore databases
# from backups, or create fresh databases and populate them via dbimport.
set -e

# Resolve the directory containing this script and the repository root
# (two levels up), independent of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
print_help() {
  # Print usage information to stdout.
  # Fixed: the env-var table previously listed "BACKUP DIRS" and
  # "STATUS MARKERS" as if they were environment variables; they are
  # fixed paths and are now documented under "Paths" instead.
  cat <<'EOF'
Usage: db-import-conditional.sh [options]

Description:
  Conditionally restores AzerothCore databases from backups if available;
  otherwise creates fresh databases (including the playerbots database)
  and runs the dbimport tool to populate schemas. Uses status markers to
  prevent overwriting restored data.

Options:
  -h, --help            Show this help message and exit

Environment variables:
  CONTAINER_MYSQL       Hostname of the MySQL container (default: ac-mysql)
  MYSQL_PORT            MySQL port (default: 3306)
  MYSQL_USER            MySQL user (default: root)
  MYSQL_ROOT_PASSWORD   MySQL password for the user above
  DB_AUTH_NAME          Auth DB name (default: acore_auth)
  DB_WORLD_NAME         World DB name (default: acore_world)
  DB_CHARACTERS_NAME    Characters DB name (default: acore_characters)

Paths:
  Backups are searched under /backups and other known locations, in
  daily/, hourly/ and timestamped subdirectories. Restore status markers
  live in /var/lib/mysql-persistent/.restore-* (falling back to /tmp
  when that directory is not writable).

Notes:
  - If a valid backup is detected and successfully restored, schema import is skipped.
  - On fresh setups, the script creates databases and runs dbimport.
EOF
}
verify_databases_populated() {
  # Return 0 when at least one table exists in any of the three core
  # AzerothCore schemas; return 1 when the mysql client is missing,
  # MySQL is unreachable, or all three schemas are empty.
  local host="${CONTAINER_MYSQL:-ac-mysql}"
  local port="${MYSQL_PORT:-3306}"
  local user="${MYSQL_USER:-root}"
  local pass="${MYSQL_ROOT_PASSWORD:-root}"
  local auth_db="${DB_AUTH_NAME:-acore_auth}"
  local world_db="${DB_WORLD_NAME:-acore_world}"
  local chars_db="${DB_CHARACTERS_NAME:-acore_characters}"

  # Without a client we cannot verify anything; report and fail.
  if ! command -v mysql >/dev/null 2>&1; then
    echo "⚠️ mysql client is not available to verify restoration status"
    return 1
  fi

  # Count tables across the three schemas via information_schema.
  local sql="SELECT COUNT(*) FROM information_schema.tables WHERE table_schema IN ('$auth_db','$world_db','$chars_db');"
  local tables
  tables=$(MYSQL_PWD="$pass" mysql -h "$host" -P "$port" -u "$user" -N -B -e "$sql" 2>/dev/null) || {
    echo "⚠️ Unable to query MySQL at ${host}:${port} to verify restoration status"
    return 1
  }

  if [ "${tables:-0}" -gt 0 ]; then
    return 0
  fi

  echo "⚠️ MySQL is reachable but no AzerothCore tables were found"
  return 1
}
wait_for_mysql() {
  # Poll MySQL until it accepts a trivial query.
  #
  # Environment:
  #   CONTAINER_MYSQL / MYSQL_PORT / MYSQL_USER / MYSQL_ROOT_PASSWORD
  #     connection parameters (defaults: ac-mysql / 3306 / root / root)
  #   MYSQL_WAIT_ATTEMPTS  max connection attempts (default: 30)
  #   MYSQL_WAIT_DELAY     seconds to sleep between attempts (default: 2)
  #
  # Returns 0 once "SELECT 1" succeeds, 1 after exhausting all attempts.
  local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
  local mysql_port="${MYSQL_PORT:-3306}"
  local mysql_user="${MYSQL_USER:-root}"
  local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
  # Previously hard-coded; now overridable so callers (and tests) can
  # tune the wait window. Defaults preserve the original 30 x 2s.
  local max_attempts="${MYSQL_WAIT_ATTEMPTS:-30}"
  local delay="${MYSQL_WAIT_DELAY:-2}"

  while [ "$max_attempts" -gt 0 ]; do
    if MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -e "SELECT 1" >/dev/null 2>&1; then
      return 0
    fi
    max_attempts=$((max_attempts - 1))
    sleep "$delay"
  done
  echo "❌ Unable to connect to MySQL at ${mysql_host}:${mysql_port} after multiple attempts"
  return 1
}
ensure_dbimport_conf() {
  # Make sure the dbimport tool has a config file and a temp directory.
  #
  # Seeds dbimport.conf from the packaged dbimport.conf.dist when no
  # config exists yet (never overwrites an existing one), and creates
  # the temp directory dbimport writes into.
  #
  # Environment:
  #   AC_DIST_DIR  AzerothCore dist root (default: /azerothcore/env/dist)
  #
  # The dist root was previously hard-coded; the env override keeps the
  # default behavior while making the helper usable outside the container.
  local dist_dir="${AC_DIST_DIR:-/azerothcore/env/dist}"
  local conf="$dist_dir/etc/dbimport.conf"
  local dist="${conf}.dist"
  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
    cp "$dist" "$conf"
  fi
  mkdir -p "$dist_dir/temp"
}
case "${1:-}" in
|
||
-h|--help)
|
||
print_help
|
||
exit 0
|
||
;;
|
||
"") ;;
|
||
*)
|
||
echo "Unknown option: $1" >&2
|
||
print_help
|
||
exit 1
|
||
;;
|
||
esac
|
||
|
||
echo "🔧 Conditional AzerothCore Database Import"
|
||
echo "========================================"
|
||
|
||
if ! wait_for_mysql; then
|
||
echo "❌ MySQL service is unavailable; aborting database import"
|
||
exit 1
|
||
fi
|
||
|
||
# Restoration status markers - use writable location
RESTORE_STATUS_DIR="/var/lib/mysql-persistent"
MARKER_STATUS_DIR="/tmp"
RESTORE_SUCCESS_MARKER="$RESTORE_STATUS_DIR/.restore-completed"
RESTORE_FAILED_MARKER="$RESTORE_STATUS_DIR/.restore-failed"
RESTORE_SUCCESS_MARKER_TMP="$MARKER_STATUS_DIR/.restore-completed"
RESTORE_FAILED_MARKER_TMP="$MARKER_STATUS_DIR/.restore-failed"

# Probe the preferred marker directory; if it is not writable (read-only
# volume, missing permissions), fall back to /tmp for this run.
mkdir -p "$RESTORE_STATUS_DIR" 2>/dev/null || true
if touch "$RESTORE_STATUS_DIR/.test-write" 2>/dev/null; then
  rm -f "$RESTORE_STATUS_DIR/.test-write" 2>/dev/null || true
else
  echo "⚠️ Cannot write to $RESTORE_STATUS_DIR, using $MARKER_STATUS_DIR for markers"
  RESTORE_SUCCESS_MARKER="$RESTORE_SUCCESS_MARKER_TMP"
  RESTORE_FAILED_MARKER="$RESTORE_FAILED_MARKER_TMP"
fi
echo "🔍 Checking restoration status..."
|
||
|
||
if [ -f "$RESTORE_SUCCESS_MARKER" ]; then
|
||
if verify_databases_populated; then
|
||
echo "✅ Backup restoration completed successfully"
|
||
cat "$RESTORE_SUCCESS_MARKER" || true
|
||
echo "🚫 Skipping database import - data already restored from backup"
|
||
exit 0
|
||
fi
|
||
|
||
echo "⚠️ Restoration marker found, but databases are empty - forcing re-import"
|
||
rm -f "$RESTORE_SUCCESS_MARKER" 2>/dev/null || true
|
||
rm -f "$RESTORE_SUCCESS_MARKER_TMP" 2>/dev/null || true
|
||
rm -f "$RESTORE_FAILED_MARKER" 2>/dev/null || true
|
||
fi
|
||
|
||
if [ -f "$RESTORE_FAILED_MARKER" ]; then
|
||
echo "ℹ️ No backup was restored - fresh databases detected"
|
||
cat "$RESTORE_FAILED_MARKER" || true
|
||
echo "▶️ Proceeding with database import to populate fresh databases"
|
||
else
|
||
echo "⚠️ No restoration status found - assuming fresh installation"
|
||
echo "▶️ Proceeding with database import"
|
||
fi
|
||
|
||
echo ""
|
||
echo "🔧 Starting database import process..."
|
||
|
||
echo "🔍 Checking for backups to restore..."
|
||
|
||
# Define backup search paths in priority order
|
||
BACKUP_SEARCH_PATHS=(
|
||
"/backups"
|
||
"/var/lib/mysql-persistent"
|
||
"$PROJECT_ROOT/storage/backups"
|
||
"$PROJECT_ROOT/manual-backups"
|
||
)
|
||
|
||
backup_path=""
|
||
|
||
echo "🔍 Checking for legacy backup file..."
|
||
if [ -f "/var/lib/mysql-persistent/backup.sql" ]; then
|
||
echo "📄 Found legacy backup file, validating content..."
|
||
if timeout 10 head -10 "/var/lib/mysql-persistent/backup.sql" 2>/dev/null | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
|
||
echo "✅ Legacy backup file validated"
|
||
backup_path="/var/lib/mysql-persistent/backup.sql"
|
||
else
|
||
echo "⚠️ Legacy backup file exists but appears invalid or empty"
|
||
fi
|
||
else
|
||
echo "🔍 No legacy backup found"
|
||
fi
|
||
|
||
# Search through backup directories
# Each candidate directory is scanned in priority order: daily/, then
# hourly/, then timestamped directories, then loose manual *.sql dumps.
# The first validated backup wins and stops the whole search.
if [ -z "$backup_path" ]; then
  for search_dir in "${BACKUP_SEARCH_PATHS[@]}"; do
    [ -d "$search_dir" ] || continue

    echo "📁 Checking backup directory: $search_dir"
    if [ -n "$(ls -A "$search_dir" 2>/dev/null)" ]; then
      # 1) Daily backups: newest subdirectory under daily/, with a
      #    content check for real DDL/DML in the compressed dump.
      if [ -d "$search_dir/daily" ]; then
        echo "🔍 Checking for daily backups..."
        newest_daily=$(ls -1t "$search_dir/daily" 2>/dev/null | head -n 1)
        if [ -n "$newest_daily" ] && [ -d "$search_dir/daily/$newest_daily" ]; then
          echo "📦 Latest daily backup found: $newest_daily"
          for dump in "$search_dir/daily/$newest_daily"/*.sql.gz; do
            if [ -f "$dump" ] && [ -s "$dump" ]; then
              if timeout 10 zcat "$dump" 2>/dev/null | head -20 | grep -q "CREATE DATABASE\|INSERT INTO\|CREATE TABLE"; then
                echo "✅ Valid daily backup file: $(basename "$dump")"
                backup_path="$search_dir/daily/$newest_daily"
                break 2
              fi
            fi
          done
        fi
      fi

      # 2) Hourly backups: validated only for gzip integrity.
      if [ -z "$backup_path" ] && [ -d "$search_dir/hourly" ]; then
        echo "🔍 Checking for hourly backups..."
        newest_hourly=$(ls -1t "$search_dir/hourly" 2>/dev/null | head -n 1)
        if [ -n "$newest_hourly" ] && [ -d "$search_dir/hourly/$newest_hourly" ]; then
          echo "📦 Latest hourly backup found: $newest_hourly"
          for dump in "$search_dir/hourly/$newest_hourly"/*.sql.gz; do
            if [ -f "$dump" ] && [ -s "$dump" ]; then
              if timeout 10 zcat "$dump" >/dev/null 2>&1; then
                echo "✅ Valid hourly backup file: $(basename "$dump")"
                backup_path="$search_dir/hourly/$newest_hourly"
                break 2
              fi
            fi
          done
        fi
      fi

      # 3) Timestamped directories (like ExportBackup_YYYYMMDD_HHMMSS).
      if [ -z "$backup_path" ]; then
        echo "🔍 Checking for timestamped backup directories..."
        newest_stamped=$(ls -1t "$search_dir" 2>/dev/null | grep -E '^(ExportBackup_)?[0-9]{8}_[0-9]{6}$' | head -n 1)
        if [ -n "$newest_stamped" ]; then
          echo "📦 Found timestamped backup: $newest_stamped"
          if [ -d "$search_dir/$newest_stamped" ] && ls "$search_dir/$newest_stamped"/*.sql.gz >/dev/null 2>&1; then
            echo "🔍 Validating timestamped backup content..."
            for dump in "$search_dir/$newest_stamped"/*.sql.gz; do
              if [ -f "$dump" ] && [ -s "$dump" ]; then
                if timeout 10 zcat "$dump" >/dev/null 2>&1; then
                  echo "✅ Valid timestamped backup found: $(basename "$dump")"
                  backup_path="$search_dir/$newest_stamped"
                  break 2
                fi
              fi
            done
          fi
        fi
      fi

      # 4) Loose manual *.sql dumps directly in the directory.
      if [ -z "$backup_path" ]; then
        echo "🔍 Checking for manual backup files..."
        newest_manual=$(ls -1t "$search_dir"/*.sql 2>/dev/null | head -n 1)
        if [ -n "$newest_manual" ] && [ -f "$newest_manual" ]; then
          echo "📦 Found manual backup: $(basename "$newest_manual")"
          if timeout 10 head -20 "$newest_manual" >/dev/null 2>&1; then
            echo "✅ Valid manual backup file: $(basename "$newest_manual")"
            backup_path="$newest_manual"
            break
          fi
        fi
      fi
    fi

    # Stop scanning further directories once a backup was selected.
    if [ -n "$backup_path" ]; then
      break
    fi
  done
fi
echo "🔄 Final backup path result: '$backup_path'"
|
||
if [ -n "$backup_path" ]; then
|
||
echo "📦 Found backup: $(basename "$backup_path")"
|
||
|
||
restore_backup() {
  # Restore databases from a backup directory (containing *.sql.gz and/or
  # *.sql dumps) or from a single .sql / .sql.gz file.
  #
  # Arguments:
  #   $1 - path to a backup directory or file
  #
  # Returns 0 when every discovered dump restored cleanly (or when no
  # dumps were found), 1 when any restore step failed or the file format
  # is unknown.
  local backup_path="$1"
  local restore_success=true
  # Use the same connection defaults as the other helpers, and pass the
  # password via MYSQL_PWD so it does not appear in `ps` output (the
  # previous -p<password> form leaked it to the process list).
  local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
  local mysql_user="${MYSQL_USER:-root}"
  local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
  local backup_file

  if [ -d "$backup_path" ]; then
    echo "🔄 Restoring from backup directory: $backup_path"

    # Check for manifest file to understand backup structure
    if [ -f "$backup_path/manifest.json" ]; then
      echo "📋 Found manifest file, checking backup contents..."
      cat "$backup_path/manifest.json"
    fi

    # Restore compressed SQL files
    if ls "$backup_path"/*.sql.gz >/dev/null 2>&1; then
      for backup_file in "$backup_path"/*.sql.gz; do
        if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
          echo "🔄 Restoring $(basename "$backup_file")..."
          if timeout 300 zcat "$backup_file" | MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -u "$mysql_user"; then
            echo "✅ Restored $(basename "$backup_file")"
          else
            echo "❌ Failed to restore $(basename "$backup_file")"
            restore_success=false
          fi
        fi
      done
    fi

    # Also check for uncompressed SQL files
    if ls "$backup_path"/*.sql >/dev/null 2>&1; then
      for backup_file in "$backup_path"/*.sql; do
        if [ -f "$backup_file" ] && [ -s "$backup_file" ]; then
          echo "🔄 Restoring $(basename "$backup_file")..."
          if MYSQL_PWD="$mysql_pass" timeout 300 mysql -h "$mysql_host" -u "$mysql_user" < "$backup_file"; then
            echo "✅ Restored $(basename "$backup_file")"
          else
            echo "❌ Failed to restore $(basename "$backup_file")"
            restore_success=false
          fi
        fi
      done
    fi

  elif [ -f "$backup_path" ]; then
    echo "🔄 Restoring from backup file: $backup_path"
    case "$backup_path" in
      *.gz)
        if timeout 300 zcat "$backup_path" | MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -u "$mysql_user"; then
          echo "✅ Restored compressed backup"
        else
          echo "❌ Failed to restore compressed backup"
          restore_success=false
        fi
        ;;
      *.sql)
        if MYSQL_PWD="$mysql_pass" timeout 300 mysql -h "$mysql_host" -u "$mysql_user" < "$backup_path"; then
          echo "✅ Restored SQL backup"
        else
          echo "❌ Failed to restore SQL backup"
          restore_success=false
        fi
        ;;
      *)
        echo "⚠️ Unknown backup file format: $backup_path"
        restore_success=false
        ;;
    esac
  fi

  if [ "$restore_success" = true ]; then
    return 0
  fi
  return 1
}
verify_and_update_restored_databases() {
  # After a successful restore, run dbimport to apply any pending schema
  # updates, then sanity-check that critical tables exist in each database.
  #
  # Returns 0 on success (or when the dbimport binary is unavailable),
  # 1 when dbimport fails or any critical table is missing.
  echo "🔍 Verifying restored database integrity..."

  local dist_bin="${AC_DIST_DIR:-/azerothcore/env/dist}/bin"

  # Check if dbimport is available
  if [ ! -f "$dist_bin/dbimport" ]; then
    echo "⚠️ dbimport not available, skipping verification"
    return 0
  fi

  ensure_dbimport_conf

  # cd was previously unchecked; a failed cd would have run ./dbimport
  # from the wrong directory.
  cd "$dist_bin" || return 1
  echo "🔄 Running dbimport to apply any missing updates..."
  if ./dbimport; then
    echo "✅ Database verification complete - all updates current"
  else
    echo "⚠️ dbimport reported issues - check logs"
    return 1
  fi

  # Verify critical tables exist
  echo "🔍 Checking critical tables..."
  # Same connection defaults as the other helpers; password via MYSQL_PWD
  # so it is not exposed in the process list. DB names default so unset
  # env vars no longer produce broken ".table" queries.
  local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
  local mysql_user="${MYSQL_USER:-root}"
  local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
  local critical_tables=("account" "characters" "creature" "quest_template")
  local missing_tables=0
  local table db_name

  for table in "${critical_tables[@]}"; do
    # Map each table to the database it lives in (default: world DB).
    db_name="${DB_WORLD_NAME:-acore_world}"
    case "$table" in
      account) db_name="${DB_AUTH_NAME:-acore_auth}" ;;
      characters) db_name="${DB_CHARACTERS_NAME:-acore_characters}" ;;
    esac

    if ! MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -u "$mysql_user" \
        -e "SELECT 1 FROM ${db_name}.${table} LIMIT 1" >/dev/null 2>&1; then
      echo "⚠️ Critical table missing: ${db_name}.${table}"
      missing_tables=$((missing_tables + 1))
    fi
  done

  if [ "$missing_tables" -gt 0 ]; then
    echo "⚠️ ${missing_tables} critical tables missing after restore"
    return 1
  fi

  echo "✅ All critical tables verified"
  return 0
}
if restore_backup "$backup_path"; then
|
||
echo "$(date): Backup successfully restored from $backup_path" > "$RESTORE_SUCCESS_MARKER"
|
||
echo "🎉 Backup restoration completed successfully!"
|
||
|
||
# Verify and apply missing updates
|
||
verify_and_update_restored_databases
|
||
|
||
if [ -x "/tmp/restore-and-stage.sh" ]; then
|
||
echo "🔧 Running restore-time module SQL staging..."
|
||
MODULES_DIR="/modules" \
|
||
RESTORE_SOURCE_DIR="$backup_path" \
|
||
/tmp/restore-and-stage.sh
|
||
else
|
||
echo "ℹ️ restore-and-stage helper not available; skipping automatic module SQL staging"
|
||
fi
|
||
|
||
exit 0
|
||
else
|
||
echo "$(date): Backup restoration failed - proceeding with fresh setup" > "$RESTORE_FAILED_MARKER"
|
||
echo "⚠️ Backup restoration failed, will proceed with fresh database setup"
|
||
fi
|
||
else
|
||
echo "ℹ️ No valid backups found - proceeding with fresh setup"
|
||
echo "$(date): No backup found - fresh setup needed" > "$RESTORE_FAILED_MARKER"
|
||
fi
|
||
|
||
echo "🗄️ Creating fresh AzerothCore databases..."
|
||
mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} -e "
|
||
CREATE DATABASE IF NOT EXISTS ${DB_AUTH_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||
CREATE DATABASE IF NOT EXISTS ${DB_WORLD_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||
CREATE DATABASE IF NOT EXISTS ${DB_CHARACTERS_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||
CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||
SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
|
||
echo "✅ Fresh databases created - proceeding with schema import"
|
||
|
||
ensure_dbimport_conf
|
||
|
||
echo "🚀 Running database import..."
|
||
cd /azerothcore/env/dist/bin
|
||
if ./dbimport; then
|
||
echo "✅ Database import completed successfully!"
|
||
echo "$(date): Database import completed successfully" > "$RESTORE_STATUS_DIR/.import-completed" || echo "$(date): Database import completed successfully" > "$MARKER_STATUS_DIR/.import-completed"
|
||
else
|
||
echo "❌ Database import failed!"
|
||
echo "$(date): Database import failed" > "$RESTORE_STATUS_DIR/.import-failed" || echo "$(date): Database import failed" > "$MARKER_STATUS_DIR/.import-failed"
|
||
exit 1
|
||
fi
|
||
|
||
echo "🎉 Database import process complete!"
|