mirror of
https://github.com/uprightbass360/AzerothCore-RealmMaster.git
synced 2026-02-03 19:03:50 +00:00
refactor module db importing
This commit is contained in:
421
scripts/bash/backup-status.sh
Executable file
421
scripts/bash/backup-status.sh
Executable file
@@ -0,0 +1,421 @@
|
||||
#!/bin/bash
# Backup Status Dashboard
# Displays comprehensive backup system status and statistics
set -euo pipefail

# Resolve this script's directory and the repository root so the dashboard
# works regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors (ANSI escape sequences expanded inside printf format strings)
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m' # reset to default attributes

# Icons used to label the report sections
ICON_BACKUP="📦"
ICON_TIME="🕐"
ICON_SIZE="💾"
ICON_CHART="📊"
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_SCHEDULE="📅"

# Default values (toggled by the CLI flag parsing below)
SHOW_DETAILS=0
SHOW_TRENDS=0
|
||||
|
||||
# Print CLI usage to stdout. The quoted heredoc delimiter ('EOF') keeps
# the text literal (no variable/command expansion).
usage() {
  cat <<'EOF'
Usage: ./backup-status.sh [options]

Display backup system status and statistics.

Options:
  -d, --details   Show detailed backup listing
  -t, --trends    Show size trends over time
  -h, --help      Show this help

Examples:
  ./backup-status.sh
  ./backup-status.sh --details
  ./backup-status.sh --details --trends

EOF
}
|
||||
|
||||
# Parse arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    -d|--details) SHOW_DETAILS=1; shift;;
    -t|--trends) SHOW_TRENDS=1; shift;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown option: $1"; usage; exit 1;;
  esac
done

# Load environment: `set -a` auto-exports every variable assigned while
# sourcing .env so child processes inherit the configuration.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set -a
  # shellcheck disable=SC1091
  source "$PROJECT_ROOT/.env"
  set +a
fi

# Defaults applied when .env does not define these settings.
BACKUP_PATH="${BACKUP_PATH:-$PROJECT_ROOT/storage/backups}"
BACKUP_INTERVAL_MINUTES="${BACKUP_INTERVAL_MINUTES:-60}"
BACKUP_RETENTION_HOURS="${BACKUP_RETENTION_HOURS:-6}"
BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-3}"
BACKUP_DAILY_TIME="${BACKUP_DAILY_TIME:-09}"
|
||||
|
||||
# Convert a raw byte count into a human-readable size string.
# <1 KiB prints whole bytes; KB/MB use one decimal, GB uses two
# (awk performs the floating-point division).
format_bytes() {
  local byte_count=$1
  if [ "$byte_count" -lt 1024 ]; then
    printf '%sB\n' "$byte_count"
    return
  fi
  if [ "$byte_count" -lt 1048576 ]; then
    printf '%sKB\n' "$(awk "BEGIN {printf \"%.1f\", $byte_count/1024}")"
    return
  fi
  if [ "$byte_count" -lt 1073741824 ]; then
    printf '%sMB\n' "$(awk "BEGIN {printf \"%.1f\", $byte_count/1048576}")"
    return
  fi
  printf '%sGB\n' "$(awk "BEGIN {printf \"%.2f\", $byte_count/1073741824}")"
}
|
||||
|
||||
# Print the total size of a directory in bytes (GNU `du -sb`),
# or "0" when the directory does not exist.
get_dir_size() {
  local path="$1"
  if [ ! -d "$path" ]; then
    echo "0"
    return
  fi
  du -sb "$path" 2>/dev/null | cut -f1
}
|
||||
|
||||
# Count the immediate subdirectories of a directory (each backup is one
# subdirectory). Prints "0" when the directory does not exist.
count_backups() {
  local target="$1"
  if [ ! -d "$target" ]; then
    echo "0"
    return
  fi
  find "$target" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l
}
|
||||
|
||||
# Print the newest entry in a backup directory (mtime order via `ls -1t`),
# or an empty line when the directory is missing or empty.
get_latest_backup() {
  local target="$1"
  if [ ! -d "$target" ]; then
    echo ""
    return
  fi
  ls -1t "$target" 2>/dev/null | head -n1 || echo ""
}
|
||||
|
||||
# Extract a YYYYMMDD_HHMMSS timestamp embedded in a backup directory name
# (e.g. "20240101_120000" or "ExportBackup_20240101_120000").
# Prints an empty line when no timestamp is present.
parse_timestamp() {
  local dir_name="$1"
  if [[ "$dir_name" =~ ([0-9]{8})_([0-9]{6}) ]]; then
    printf '%s_%s\n' "${BASH_REMATCH[1]}" "${BASH_REMATCH[2]}"
  else
    printf '\n'
  fi
}
|
||||
|
||||
# Render a YYYYMMDD_HHMMSS timestamp as a relative "N unit(s) ago" string.
# Prints "Unknown" for an empty or unparseable timestamp.
# NOTE(review): relies on GNU `date -d`; BSD/macOS date would fall through
# to "Unknown" — confirm target platforms are Linux.
time_ago() {
  local stamp="$1"
  if [ -z "$stamp" ]; then
    echo "Unknown"
    return
  fi

  # Slice the fixed-width fields out of YYYYMMDD_HHMMSS
  # (the underscore sits at index 8).
  local iso="${stamp:0:4}-${stamp:4:2}-${stamp:6:2} ${stamp:9:2}:${stamp:11:2}:${stamp:13:2}"

  local then_epoch
  then_epoch=$(date -d "$iso" +%s 2>/dev/null || echo "0")
  if [ "$then_epoch" = "0" ]; then
    echo "Unknown"
    return
  fi

  local elapsed=$(( $(date +%s) - then_epoch ))

  if [ "$elapsed" -lt 60 ]; then
    echo "${elapsed} seconds ago"
  elif [ "$elapsed" -lt 3600 ]; then
    echo "$((elapsed / 60)) minute(s) ago"
  elif [ "$elapsed" -lt 86400 ]; then
    echo "$((elapsed / 3600)) hour(s) ago"
  else
    echo "$((elapsed / 86400)) day(s) ago"
  fi
}
|
||||
|
||||
# Calculate next scheduled backup.
#
# $1 - interval between hourly-tier backups, in minutes.
# Prints "in N minute(s)" or "in H hour(s) M minute(s)".
#
# Fix: the original computed next_epoch = now + interval*60 and then
# (next_epoch - now)/60, which is always exactly interval_minutes — the
# date arithmetic was dead code. The simplified form produces identical
# output without consulting the clock.
next_backup_time() {
  local interval_minutes="$1"

  if [ "$interval_minutes" -lt 60 ]; then
    echo "in ${interval_minutes} minute(s)"
  else
    local in_hours=$((interval_minutes / 60))
    local remaining_minutes=$((interval_minutes % 60))
    echo "in ${in_hours} hour(s) ${remaining_minutes} minute(s)"
  fi
}
|
||||
|
||||
# Report the time remaining until the next daily backup slot.
# $1 - hour of day (e.g. "09") at which the daily backup runs.
# Uses GNU `date -d "today/tomorrow HH:00:00"` to find the slot epoch.
next_daily_backup() {
  local target_hour="$1"
  local current_epoch scheduled_epoch
  current_epoch=$(date +%s)
  scheduled_epoch=$(date -d "today ${target_hour}:00:00" +%s)

  # If today's slot has already passed, roll over to tomorrow's.
  if [ "$current_epoch" -ge "$scheduled_epoch" ]; then
    scheduled_epoch=$(date -d "tomorrow ${target_hour}:00:00" +%s)
  fi

  local remaining=$((scheduled_epoch - current_epoch))
  echo "in $((remaining / 3600)) hour(s) $(((remaining % 3600) / 60)) minute(s)"
}
|
||||
|
||||
# Show backup tier status.
#
# $1 - display name of the tier (e.g. "Hourly Backups")
# $2 - directory holding this tier's backup subdirectories
# $3 - human-readable retention description (printed verbatim)
#
# Prints a warning line when the tier directory is missing or empty;
# otherwise prints count/size/latest/retention, plus a per-backup listing
# when the global SHOW_DETAILS flag is set. Depends on sibling helpers
# count_backups, get_dir_size, get_latest_backup, parse_timestamp,
# time_ago and format_bytes.
show_backup_tier() {
  local tier_name="$1"
  local tier_dir="$2"
  local retention="$3"

  if [ ! -d "$tier_dir" ]; then
    printf " ${ICON_WARNING} ${YELLOW}%s:${NC} No backups found\n" "$tier_name"
    return
  fi

  local count size latest
  count=$(count_backups "$tier_dir")
  size=$(get_dir_size "$tier_dir")
  latest=$(get_latest_backup "$tier_dir")

  if [ "$count" = "0" ]; then
    printf " ${ICON_WARNING} ${YELLOW}%s:${NC} No backups found\n" "$tier_name"
    return
  fi

  local latest_timestamp
  latest_timestamp=$(parse_timestamp "$latest")
  local ago
  ago=$(time_ago "$latest_timestamp")

  printf " ${GREEN}${ICON_SUCCESS} %s:${NC} %s backup(s), %s total\n" "$tier_name" "$count" "$(format_bytes "$size")"
  printf " ${ICON_TIME} Latest: %s (%s)\n" "$latest" "$ago"
  printf " ${ICON_SCHEDULE} Retention: %s\n" "$retention"

  # Optional detailed listing: one line per backup, newest first.
  if [ "$SHOW_DETAILS" = "1" ]; then
    printf " ${ICON_BACKUP} Available backups:\n"
    local backup_list
    backup_list=$(ls -1t "$tier_dir" 2>/dev/null || true)
    while IFS= read -r backup; do
      if [ -n "$backup" ]; then
        local backup_size
        backup_size=$(get_dir_size "$tier_dir/$backup")
        local backup_timestamp
        backup_timestamp=$(parse_timestamp "$backup")
        local backup_ago
        backup_ago=$(time_ago "$backup_timestamp")
        printf " - %s: %s (%s)\n" "$backup" "$(format_bytes "$backup_size")" "$backup_ago"
      fi
    done <<< "$backup_list"
  fi
}
|
||||
|
||||
# Show size trends.
#
# Renders an ASCII bar chart of the sizes of the last 7 daily backups
# (oldest first), scaled so the largest backup fills a 30-character bar.
# Reads $BACKUP_PATH/daily; prints a warning and returns when no daily
# backups exist. Depends on get_dir_size, parse_timestamp, format_bytes.
show_trends() {
  printf "${BOLD}${ICON_CHART} Backup Size Trends${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  local daily_dir="$BACKUP_PATH/daily"
  if [ ! -d "$daily_dir" ]; then
    printf " ${ICON_WARNING} No daily backups found for trend analysis\n\n"
    return
  fi

  # Get last 7 daily backups; `tac` reverses so oldest renders first.
  local backup_list
  backup_list=$(ls -1t "$daily_dir" 2>/dev/null | head -7 | tac)

  if [ -z "$backup_list" ]; then
    printf " ${ICON_WARNING} Not enough backups for trend analysis\n\n"
    return
  fi

  # First pass: find the max size so bars can be scaled relative to it.
  local max_size=0
  while IFS= read -r backup; do
    if [ -n "$backup" ]; then
      local size
      size=$(get_dir_size "$daily_dir/$backup")
      if [ "$size" -gt "$max_size" ]; then
        max_size=$size
      fi
    fi
  done <<< "$backup_list"

  # Second pass: draw one chart row per backup.
  while IFS= read -r backup; do
    if [ -n "$backup" ]; then
      local size
      size=$(get_dir_size "$daily_dir/$backup")
      local timestamp
      timestamp=$(parse_timestamp "$backup")
      local date_str="${timestamp:0:4}-${timestamp:4:2}-${timestamp:6:2}"

      # Calculate bar length (max 30 chars); guard against divide-by-zero.
      local bar_length=0
      if [ "$max_size" -gt 0 ]; then
        bar_length=$((size * 30 / max_size))
      fi

      # Filled portion then empty portion, always 30 chars total.
      local bar=""
      for ((i=0; i<bar_length; i++)); do
        bar+="█"
      done
      for ((i=bar_length; i<30; i++)); do
        bar+="░"
      done

      # awk pads the size string to 8 chars so bars align vertically.
      printf " %s: %s %s\n" "$date_str" "$(format_bytes "$size" | awk '{printf "%-8s", $0}')" "$bar"
    fi
  done <<< "$backup_list"
  echo
}
|
||||
|
||||
# Main status display.
#
# Renders the full dashboard: tier status, manual/export backup totals,
# the upcoming schedule, total storage, and (behind flags) trends and
# configuration. Exits 1 when $BACKUP_PATH does not exist.
main() {
  echo
  printf "${BOLD}${BLUE}${ICON_BACKUP} AZEROTHCORE BACKUP STATUS${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Check if backup directory exists
  if [ ! -d "$BACKUP_PATH" ]; then
    printf "${RED}${ICON_WARNING} Backup directory not found: %s${NC}\n\n" "$BACKUP_PATH"
    printf "Backup system may not be initialized yet.\n\n"
    exit 1
  fi

  # Show current backup tiers
  printf "${BOLD}${ICON_BACKUP} Backup Tiers${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  show_backup_tier "Hourly Backups" "$BACKUP_PATH/hourly" "${BACKUP_RETENTION_HOURS} hours"
  show_backup_tier "Daily Backups" "$BACKUP_PATH/daily" "${BACKUP_RETENTION_DAYS} days"

  # Check for manual backups (kept outside the tiered directories)
  local manual_count=0
  local manual_size=0
  if [ -d "$PROJECT_ROOT/manual-backups" ]; then
    manual_count=$(count_backups "$PROJECT_ROOT/manual-backups")
    manual_size=$(get_dir_size "$PROJECT_ROOT/manual-backups")
  fi

  # Also check for export backups (ExportBackup_*) in the main backup dir;
  # their counts/sizes are folded into the manual totals.
  local export_count=0
  if [ -d "$BACKUP_PATH" ]; then
    export_count=$(find "$BACKUP_PATH" -maxdepth 1 -type d -name "ExportBackup_*" 2>/dev/null | wc -l)
    if [ "$export_count" -gt 0 ]; then
      local export_size=0
      # Process substitution keeps the export_size updates in this shell
      # (a pipe into `while` would lose them in a subshell).
      while IFS= read -r export_dir; do
        if [ -n "$export_dir" ]; then
          local size
          size=$(get_dir_size "$export_dir")
          export_size=$((export_size + size))
        fi
      done < <(find "$BACKUP_PATH" -maxdepth 1 -type d -name "ExportBackup_*" 2>/dev/null)
      manual_size=$((manual_size + export_size))
      manual_count=$((manual_count + export_count))
    fi
  fi

  if [ "$manual_count" -gt 0 ]; then
    printf " ${GREEN}${ICON_SUCCESS} Manual/Export Backups:${NC} %s backup(s), %s total\n" "$manual_count" "$(format_bytes "$manual_size")"
  fi

  echo

  # Show next scheduled backups
  printf "${BOLD}${ICON_SCHEDULE} Backup Schedule${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  printf " ${ICON_TIME} Hourly interval: every %s minutes\n" "$BACKUP_INTERVAL_MINUTES"
  printf " ${ICON_TIME} Next hourly backup: %s\n" "$(next_backup_time "$BACKUP_INTERVAL_MINUTES")"
  printf " ${ICON_TIME} Daily backup time: %s:00\n" "$BACKUP_DAILY_TIME"
  printf " ${ICON_TIME} Next daily backup: %s\n" "$(next_daily_backup "$BACKUP_DAILY_TIME")"
  echo

  # Calculate total storage across tiers plus manual/export backups
  local total_size=0
  for tier_dir in "$BACKUP_PATH/hourly" "$BACKUP_PATH/daily"; do
    if [ -d "$tier_dir" ]; then
      local size
      size=$(get_dir_size "$tier_dir")
      total_size=$((total_size + size))
    fi
  done
  total_size=$((total_size + manual_size))

  printf "${BOLD}${ICON_SIZE} Total Backup Storage: %s${NC}\n" "$(format_bytes "$total_size")"
  echo

  # Show trends if requested (-t/--trends)
  if [ "$SHOW_TRENDS" = "1" ]; then
    show_trends
  fi

  # Show backup configuration (-d/--details)
  if [ "$SHOW_DETAILS" = "1" ]; then
    printf "${BOLD}⚙️ Backup Configuration${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    printf " Backup directory: %s\n" "$BACKUP_PATH"
    printf " Hourly retention: %s hours\n" "$BACKUP_RETENTION_HOURS"
    printf " Daily retention: %s days\n" "$BACKUP_RETENTION_DAYS"
    printf " Interval: every %s minutes\n" "$BACKUP_INTERVAL_MINUTES"
    printf " Daily backup time: %s:00\n" "$BACKUP_DAILY_TIME"
    echo
  fi

  printf "${GREEN}${ICON_SUCCESS} Backup status check complete!${NC}\n"
  echo
}

main "$@"
|
||||
389
scripts/bash/db-health-check.sh
Executable file
389
scripts/bash/db-health-check.sh
Executable file
@@ -0,0 +1,389 @@
|
||||
#!/bin/bash
# Database Health Check Script
# Provides comprehensive health status of AzerothCore databases
set -euo pipefail

# Resolve this script's directory and the repository root so relative
# paths work regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors (ANSI escape sequences expanded inside printf format strings)
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m' # reset to default attributes

# Icons used to label the report sections
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_ERROR="❌"
ICON_INFO="ℹ️"
ICON_DB="🗄️"
ICON_SIZE="💾"
ICON_TIME="🕐"
ICON_MODULE="📦"
ICON_UPDATE="🔄"

# Default values (overridden by CLI flag parsing below)
VERBOSE=0
SHOW_PENDING=0
SHOW_MODULES=1
CONTAINER_NAME="ac-mysql"
|
||||
|
||||
# Print CLI usage to stdout. The quoted heredoc delimiter ('EOF') keeps
# the text literal (no variable/command expansion).
usage() {
  cat <<'EOF'
Usage: ./db-health-check.sh [options]

Check the health status of AzerothCore databases.

Options:
  -v, --verbose         Show detailed information
  -p, --pending         Show pending updates
  -m, --no-modules      Hide module update information
  -c, --container NAME  MySQL container name (default: ac-mysql)
  -h, --help            Show this help

Examples:
  ./db-health-check.sh
  ./db-health-check.sh --verbose --pending
  ./db-health-check.sh --container ac-mysql-custom

EOF
}
|
||||
|
||||
# Parse arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    -v|--verbose) VERBOSE=1; shift;;
    -p|--pending) SHOW_PENDING=1; shift;;
    -m|--no-modules) SHOW_MODULES=0; shift;;
    -c|--container) CONTAINER_NAME="$2"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown option: $1"; usage; exit 1;;
  esac
done

# Load environment: `set -a` auto-exports every variable assigned while
# sourcing .env so child processes inherit the configuration.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set -a
  # shellcheck disable=SC1091
  source "$PROJECT_ROOT/.env"
  set +a
fi

# Connection and schema-name defaults, applied when .env omits them.
MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
DB_AUTH_NAME="${DB_AUTH_NAME:-acore_auth}"
DB_WORLD_NAME="${DB_WORLD_NAME:-acore_world}"
DB_CHARACTERS_NAME="${DB_CHARACTERS_NAME:-acore_characters}"
DB_PLAYERBOTS_NAME="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
|
||||
|
||||
# MySQL query helper.
#
# $1 - database name; may be empty to run a server-level query
# $2 - SQL text to execute
#
# Runs the query via `docker exec` into $CONTAINER_NAME when docker is
# available, otherwise via a local mysql client. Output is headerless and
# tab-separated (-N -B); client stderr is suppressed. Returns non-zero if
# MYSQL_ROOT_PASSWORD is unset or the query fails.
#
# Refactor: the original duplicated the full client invocation across
# four branches (docker/local × with/without database); the argument
# vector is now built once.
mysql_query() {
  local database="${1:-}"
  local query="$2"

  if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
    echo "Error: MYSQL_ROOT_PASSWORD not set" >&2
    return 1
  fi

  # Build the client argv once. NOTE(review): the password travels on the
  # command line (visible in `ps`); consider MYSQL_PWD or
  # --defaults-extra-file if that matters in this deployment.
  local -a client_args=(mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD")
  if [ -n "$database" ]; then
    client_args+=("$database")
  fi
  client_args+=(-N -B -e "$query")

  if command -v docker >/dev/null 2>&1; then
    docker exec "$CONTAINER_NAME" "${client_args[@]}" 2>/dev/null
  else
    "${client_args[@]}" 2>/dev/null
  fi
}
|
||||
|
||||
# Render a byte count as a human-readable size (B, KB, MB or GB).
# KB/MB use one decimal place, GB uses two; awk does the float division.
format_bytes() {
  local total=$1
  local kib=1024
  local mib=1048576
  local gib=1073741824

  if [ "$total" -lt "$kib" ]; then
    echo "${total}B"
  elif [ "$total" -lt "$mib" ]; then
    echo "$(awk -v b="$total" 'BEGIN {printf "%.1f", b/1024}')KB"
  elif [ "$total" -lt "$gib" ]; then
    echo "$(awk -v b="$total" 'BEGIN {printf "%.1f", b/1048576}')MB"
  else
    echo "$(awk -v b="$total" 'BEGIN {printf "%.2f", b/1073741824}')GB"
  fi
}
|
||||
|
||||
# Check if database exists.
# Returns 0 (true) only when information_schema reports exactly one
# schema with the given name; query failures are coerced to "0".
db_exists() {
  local db_name="$1"
  local count
  count=$(mysql_query "" "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME='$db_name'" 2>/dev/null || echo "0")
  [ "$count" = "1" ]
}

# Get database size.
# Prints the summed data+index bytes across all tables in the schema;
# "0" when the query fails or the schema has no tables (IFNULL).
get_db_size() {
  local db_name="$1"
  mysql_query "" "SELECT IFNULL(SUM(data_length + index_length), 0) FROM information_schema.TABLES WHERE table_schema='$db_name'" 2>/dev/null || echo "0"
}
|
||||
|
||||
# Get update count.
# $1 - database name; $2 - optional `updates.state` filter
# (e.g. RELEASED, MODULE, CUSTOM, ARCHIVED). Prints "0" on failure.
get_update_count() {
  local db_name="$1"
  local state="${2:-}"

  if [ -n "$state" ]; then
    mysql_query "$db_name" "SELECT COUNT(*) FROM updates WHERE state='$state'" 2>/dev/null || echo "0"
  else
    mysql_query "$db_name" "SELECT COUNT(*) FROM updates" 2>/dev/null || echo "0"
  fi
}

# Get last update timestamp from the `updates` table ("Never" if none).
get_last_update() {
  local db_name="$1"
  mysql_query "$db_name" "SELECT IFNULL(MAX(timestamp), 'Never') FROM updates" 2>/dev/null || echo "Never"
}

# Get table count for a schema via information_schema.
get_table_count() {
  local db_name="$1"
  mysql_query "" "SELECT COUNT(*) FROM information_schema.TABLES WHERE table_schema='$db_name'" 2>/dev/null || echo "0"
}

# Get total character count from the characters database.
get_character_count() {
  mysql_query "$DB_CHARACTERS_NAME" "SELECT COUNT(*) FROM characters" 2>/dev/null || echo "0"
}

# Get active players (characters that logged out within the last 24 hours).
get_active_players() {
  mysql_query "$DB_CHARACTERS_NAME" "SELECT COUNT(*) FROM characters WHERE logout_time > UNIX_TIMESTAMP(NOW() - INTERVAL 1 DAY)" 2>/dev/null || echo "0"
}

# Get account count from the auth database.
get_account_count() {
  mysql_query "$DB_AUTH_NAME" "SELECT COUNT(*) FROM account" 2>/dev/null || echo "0"
}

# Get pending updates (one name per line); empty output on failure.
get_pending_updates() {
  local db_name="$1"
  mysql_query "$db_name" "SELECT name FROM updates WHERE state='PENDING' ORDER BY name" 2>/dev/null || true
}
|
||||
|
||||
# Check database health.
#
# $1 - schema name, $2 - human-readable label for the report.
#
# Prints existence, applied-update counts, last update timestamp, size and
# table count; VERBOSE adds custom/archived counts, SHOW_PENDING lists
# pending updates. Returns 1 when the database does not exist.
# NOTE(review): under `set -e` a bare call to this function aborts the
# script on a missing database — callers should guard with `|| true` if
# the report is meant to continue.
check_database() {
  local db_name="$1"
  local display_name="$2"

  if ! db_exists "$db_name"; then
    printf " ${RED}${ICON_ERROR} %s (%s)${NC}\n" "$display_name" "$db_name"
    printf " ${RED}Database does not exist${NC}\n"
    return 1
  fi

  printf " ${GREEN}${ICON_SUCCESS} %s (%s)${NC}\n" "$display_name" "$db_name"

  local update_count module_count last_update db_size table_count
  update_count=$(get_update_count "$db_name" "RELEASED")
  module_count=$(get_update_count "$db_name" "MODULE")
  last_update=$(get_last_update "$db_name")
  db_size=$(get_db_size "$db_name")
  table_count=$(get_table_count "$db_name")

  # Updates line, with an optional "(N module)" suffix.
  printf " ${ICON_UPDATE} Updates: %s applied" "$update_count"
  if [ "$module_count" != "0" ] && [ "$SHOW_MODULES" = "1" ]; then
    printf " (%s module)" "$module_count"
  fi
  printf "\n"

  printf " ${ICON_TIME} Last update: %s\n" "$last_update"
  printf " ${ICON_SIZE} Size: %s (%s tables)\n" "$(format_bytes "$db_size")" "$table_count"

  # Extra update-state breakdown behind the verbose flag.
  if [ "$VERBOSE" = "1" ]; then
    local custom_count archived_count
    custom_count=$(get_update_count "$db_name" "CUSTOM")
    archived_count=$(get_update_count "$db_name" "ARCHIVED")

    if [ "$custom_count" != "0" ]; then
      printf " ${ICON_INFO} Custom updates: %s\n" "$custom_count"
    fi
    if [ "$archived_count" != "0" ]; then
      printf " ${ICON_INFO} Archived updates: %s\n" "$archived_count"
    fi
  fi

  # Show pending updates if requested
  if [ "$SHOW_PENDING" = "1" ]; then
    local pending_updates
    pending_updates=$(get_pending_updates "$db_name")
    if [ -n "$pending_updates" ]; then
      printf " ${YELLOW}${ICON_WARNING} Pending updates:${NC}\n"
      while IFS= read -r update; do
        printf " - %s\n" "$update"
      done <<< "$pending_updates"
    fi
  fi

  echo
}
|
||||
|
||||
# Show module updates summary.
#
# Groups `updates` rows with state='MODULE' in the world database by the
# prefix before the first underscore in the update name, and prints one
# count line per module. Skipped entirely when SHOW_MODULES=0.
# NOTE(review): only the world DB is queried — module updates applied to
# auth/characters schemas would not appear here.
show_module_updates() {
  if [ "$SHOW_MODULES" = "0" ]; then
    return
  fi

  printf "${BOLD}${ICON_MODULE} Module Updates${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  # Get module updates from world database (most modules update world DB)
  local module_updates
  module_updates=$(mysql_query "$DB_WORLD_NAME" "SELECT SUBSTRING_INDEX(name, '_', 1) as module, COUNT(*) as count FROM updates WHERE state='MODULE' GROUP BY module ORDER BY module" 2>/dev/null || echo "")

  if [ -z "$module_updates" ]; then
    printf " ${ICON_INFO} No module updates detected\n\n"
    return
  fi

  # mysql -B output is tab-separated: "module<TAB>count" per line.
  while IFS=$'\t' read -r module count; do
    printf " ${GREEN}${ICON_SUCCESS}${NC} %s: %s update(s)\n" "$module" "$count"
  done <<< "$module_updates"
  echo
}
|
||||
|
||||
# Get backup information.
#
# Summarizes backup recency under $PROJECT_ROOT/storage/backups: prints
# the age of the newest hourly and daily backups (directory names embed a
# YYYYMMDD_HHMMSS timestamp). Prints an info line when no backups
# directory exists; prints nothing for empty tiers.
#
# Fix: latest_hourly/latest_daily are now initialized to "". They were
# previously declared with a bare `local`, leaving them unset, so the
# later `[ -n "$latest_hourly" ]` tests triggered an "unbound variable"
# error under `set -u` whenever a tier directory was missing.
get_backup_info() {
  local backup_dir="$PROJECT_ROOT/storage/backups"

  if [ ! -d "$backup_dir" ]; then
    printf " ${ICON_INFO} No backups directory found\n"
    return
  fi

  # Newest entry per tier (ls -1t sorts by mtime, newest first).
  local latest_hourly="" latest_daily=""
  if [ -d "$backup_dir/hourly" ]; then
    latest_hourly=$(ls -1t "$backup_dir/hourly" 2>/dev/null | head -n1 || echo "")
  fi
  if [ -d "$backup_dir/daily" ]; then
    latest_daily=$(ls -1t "$backup_dir/daily" 2>/dev/null | head -n1 || echo "")
  fi

  if [ -n "$latest_hourly" ]; then
    # Convert the embedded YYYYMMDD_HHMMSS name to an epoch to age it
    # (GNU date; parse failure falls back to epoch 0).
    local backup_timestamp="${latest_hourly:0:8}_${latest_hourly:9:6}"
    local backup_epoch
    backup_epoch=$(date -d "${backup_timestamp:0:4}-${backup_timestamp:4:2}-${backup_timestamp:6:2} ${backup_timestamp:9:2}:${backup_timestamp:11:2}:${backup_timestamp:13:2}" +%s 2>/dev/null || echo "0")
    local now_epoch
    now_epoch=$(date +%s)
    local diff=$((now_epoch - backup_epoch))
    local hours=$((diff / 3600))
    local minutes=$(((diff % 3600) / 60))

    if [ "$hours" -gt 0 ]; then
      printf " ${ICON_TIME} Last hourly backup: %s hours ago\n" "$hours"
    else
      printf " ${ICON_TIME} Last hourly backup: %s minutes ago\n" "$minutes"
    fi
  fi

  # Daily line is skipped when it duplicates the hourly entry's name.
  if [ -n "$latest_daily" ] && [ "$latest_daily" != "$latest_hourly" ]; then
    local backup_timestamp="${latest_daily:0:8}_${latest_daily:9:6}"
    local backup_epoch
    backup_epoch=$(date -d "${backup_timestamp:0:4}-${backup_timestamp:4:2}-${backup_timestamp:6:2} ${backup_timestamp:9:2}:${backup_timestamp:11:2}:${backup_timestamp:13:2}" +%s 2>/dev/null || echo "0")
    local now_epoch
    now_epoch=$(date +%s)
    local diff=$((now_epoch - backup_epoch))
    local days=$((diff / 86400))

    printf " ${ICON_TIME} Last daily backup: %s days ago\n" "$days"
  fi
}
|
||||
|
||||
# Main health check.
#
# Verifies connectivity, reports per-database status, server statistics,
# module updates, backup recency and total storage. Exits 1 when the
# MySQL server is unreachable.
#
# Fix: check_database returns 1 when a schema is missing; under
# `set -euo pipefail` the previous bare calls aborted the whole report on
# the first missing database. Each call is now guarded with `|| true` so
# the remaining databases are still reported.
main() {
  echo
  printf "${BOLD}${BLUE}${ICON_DB} AZEROTHCORE DATABASE HEALTH CHECK${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Test MySQL connection before anything else.
  if ! mysql_query "" "SELECT 1" >/dev/null 2>&1; then
    printf "${RED}${ICON_ERROR} Cannot connect to MySQL server${NC}\n"
    printf " Host: %s:%s\n" "$MYSQL_HOST" "$MYSQL_PORT"
    printf " User: %s\n" "$MYSQL_USER"
    printf " Container: %s\n\n" "$CONTAINER_NAME"
    exit 1
  fi

  printf "${BOLD}${ICON_DB} Database Status${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Check each database; a missing schema is reported but must not abort
  # the health check under `set -e`.
  check_database "$DB_AUTH_NAME" "Auth DB" || true
  check_database "$DB_WORLD_NAME" "World DB" || true
  check_database "$DB_CHARACTERS_NAME" "Characters DB" || true

  # Optional: playerbots database is only reported when present.
  if db_exists "$DB_PLAYERBOTS_NAME"; then
    check_database "$DB_PLAYERBOTS_NAME" "Playerbots DB" || true
  fi

  # Show character/account statistics
  printf "${BOLD}${CYAN}📊 Server Statistics${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  local account_count character_count active_count
  account_count=$(get_account_count)
  character_count=$(get_character_count)
  active_count=$(get_active_players)

  printf " ${ICON_INFO} Accounts: %s\n" "$account_count"
  printf " ${ICON_INFO} Characters: %s\n" "$character_count"
  printf " ${ICON_INFO} Active (24h): %s\n" "$active_count"
  echo

  # Show module updates
  show_module_updates

  # Show backup information
  printf "${BOLD}${ICON_SIZE} Backup Information${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  get_backup_info
  echo

  # Calculate total database size across core schemas (plus playerbots).
  local total_size=0
  for db in "$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME"; do
    if db_exists "$db"; then
      local size
      size=$(get_db_size "$db")
      total_size=$((total_size + size))
    fi
  done

  if db_exists "$DB_PLAYERBOTS_NAME"; then
    local size
    size=$(get_db_size "$DB_PLAYERBOTS_NAME")
    total_size=$((total_size + size))
  fi

  printf "${BOLD}💾 Total Database Storage: %s${NC}\n" "$(format_bytes "$total_size")"
  echo

  printf "${GREEN}${ICON_SUCCESS} Health check complete!${NC}\n"
  echo
}

main "$@"
|
||||
@@ -280,9 +280,78 @@ if [ -n "$backup_path" ]; then
|
||||
return $([ "$restore_success" = true ] && echo 0 || echo 1)
|
||||
}
|
||||
|
||||
# Verify restored databases and bring them up to date.
#
# Writes a dbimport.conf pointing at the restored schemas, runs
# AzerothCore's dbimport to apply any missing updates, then spot-checks
# that critical tables exist. Returns non-zero when dbimport fails or any
# critical table is missing; returns 0 (skipping) when the dbimport
# binary is absent.
#
# Fixes: the mysql verification call previously expanded
# ${CONTAINER_MYSQL}/${MYSQL_USER}/${MYSQL_ROOT_PASSWORD} unquoted
# (word-splitting/glob hazard, and `-h ${VAR}` breaks if the value is
# empty); the `cd` before running dbimport is now error-checked.
verify_and_update_restored_databases() {
  echo "🔍 Verifying restored database integrity..."

  # Check if dbimport is available
  if [ ! -f "/azerothcore/env/dist/bin/dbimport" ]; then
    echo "⚠️ dbimport not available, skipping verification"
    return 0
  fi

  # Create dbimport config for verification
  echo "📝 Creating dbimport configuration for verification..."
  mkdir -p /azerothcore/env/dist/etc
  TEMP_DIR="/azerothcore/env/dist/temp"
  mkdir -p "$TEMP_DIR"
  MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
  # Updates.EnableDatabases = 15 enables all four schemas (auth, world,
  # characters, playerbots) for the updater.
  cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
Updates.EnableDatabases = 15
Updates.AutoSetup = 1
TempDir = "${TEMP_DIR}"
MySQLExecutable = "${MYSQL_EXECUTABLE}"
Updates.AllowedModules = "all"
SourceDirectory = "/azerothcore"
EOF

  cd /azerothcore/env/dist/bin || return 1
  echo "🔄 Running dbimport to apply any missing updates..."
  if ./dbimport; then
    echo "✅ Database verification complete - all updates current"
  else
    echo "⚠️ dbimport reported issues - check logs"
    return 1
  fi

  # Verify critical tables exist
  echo "🔍 Checking critical tables..."
  local critical_tables=("account" "characters" "creature" "quest_template")
  local missing_tables=0

  for table in "${critical_tables[@]}"; do
    # Map each table to the schema that owns it (world is the default).
    local db_name="$DB_WORLD_NAME"
    case "$table" in
      account) db_name="$DB_AUTH_NAME" ;;
      characters) db_name="$DB_CHARACTERS_NAME" ;;
    esac

    if ! mysql -h"${CONTAINER_MYSQL}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" \
      -e "SELECT 1 FROM ${db_name}.${table} LIMIT 1" >/dev/null 2>&1; then
      echo "⚠️ Critical table missing: ${db_name}.${table}"
      missing_tables=$((missing_tables + 1))
    fi
  done

  if [ "$missing_tables" -gt 0 ]; then
    echo "⚠️ ${missing_tables} critical tables missing after restore"
    return 1
  fi

  echo "✅ All critical tables verified"
  return 0
}
|
||||
|
||||
if restore_backup "$backup_path"; then
|
||||
echo "$(date): Backup successfully restored from $backup_path" > "$RESTORE_SUCCESS_MARKER"
|
||||
echo "🎉 Backup restoration completed successfully!"
|
||||
|
||||
# Verify and apply missing updates
|
||||
verify_and_update_restored_databases
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo "$(date): Backup restoration failed - proceeding with fresh setup" > "$RESTORE_FAILED_MARKER"
|
||||
@@ -311,7 +380,8 @@ cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
|
||||
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
|
||||
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
|
||||
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
|
||||
Updates.EnableDatabases = 7
|
||||
PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
|
||||
Updates.EnableDatabases = 15
|
||||
Updates.AutoSetup = 1
|
||||
TempDir = "${TEMP_DIR}"
|
||||
MySQLExecutable = "${MYSQL_EXECUTABLE}"
|
||||
|
||||
@@ -477,19 +477,83 @@ load_sql_helper(){
|
||||
err "SQL helper not found; expected manage-modules-sql.sh to be available"
|
||||
}
|
||||
|
||||
execute_module_sql(){
|
||||
SQL_EXECUTION_FAILED=0
|
||||
if declare -f execute_module_sql_scripts >/dev/null 2>&1; then
|
||||
echo 'Executing module SQL scripts...'
|
||||
if execute_module_sql_scripts; then
|
||||
echo 'SQL execution complete.'
|
||||
else
|
||||
echo '⚠️ Module SQL scripts reported errors'
|
||||
SQL_EXECUTION_FAILED=1
|
||||
fi
|
||||
else
|
||||
info "SQL helper did not expose execute_module_sql_scripts; skipping module SQL execution"
|
||||
stage_module_sql_files(){
  # Stage SQL files to AzerothCore's native update directory structure
  # This replaces manual SQL execution with AzerothCore's built-in updater
  #
  # Globals (read): MODULE_STAGING_DIR, MODULES_ROOT, STATE_DIR, PROJECT_ROOT
  # Outputs: progress via info/warn/ok helpers
  # Returns: 0 on success or nothing-to-do; 1 if the manifest cannot be parsed

  local staging_dir="${MODULE_STAGING_DIR:-$MODULES_ROOT}"
  local sql_manifest="$STATE_DIR/.sql-manifest.json"

  # A missing manifest is a normal no-op, not an error.
  if [ ! -f "$sql_manifest" ]; then
    info "No SQL manifest found - no SQL files to stage"
    return 0
  fi

  # Check if manifest has any modules with SQL
  # NOTE(review): the manifest path is interpolated into the python source;
  # safe for the fixed path used here, but would break on quotes in the path.
  local module_count
  module_count=$(python3 -c "import json; data=json.load(open('$sql_manifest')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0")

  if [ "$module_count" = "0" ]; then
    info "No modules with SQL files to stage"
    return 0
  fi

  info "Staging SQL for $module_count module(s)"

  # Read each module from manifest and stage its SQL
  # (one module name per line; '\n' is passed literally to python, which is intended)
  local modules_json
  modules_json=$(python3 -c "import json; data=json.load(open('$sql_manifest')); print('\n'.join(m['name'] for m in data['modules']))" 2>/dev/null || echo "")

  if [ -z "$modules_json" ]; then
    warn "Failed to parse SQL manifest"
    return 1
  fi

  local staged_count=0
  while IFS= read -r module_name; do
    if [ -z "$module_name" ]; then
      continue
    fi

    local module_path="$staging_dir/$module_name"
    local acore_modules="/azerothcore/modules/$module_name"

    if [ ! -d "$module_path" ]; then
      warn "Module path not found: $module_path"
      continue
    fi

    # Call stage-module-sql.sh for this module
    local stage_script="${PROJECT_ROOT}/scripts/bash/stage-module-sql.sh"
    if [ ! -f "$stage_script" ]; then
      # Try container location
      stage_script="/scripts/bash/stage-module-sql.sh"
    fi

    if [ -f "$stage_script" ]; then
      if "$stage_script" \
        --module-name "$module_name" \
        --module-path "$module_path" \
        --acore-path "$acore_modules"; then
        # NOTE(review): '((staged_count++))' returns status 1 when the
        # pre-increment value is 0; if this file runs under 'set -e' that
        # aborts the script on the first staged module — confirm and prefer
        # staged_count=$((staged_count + 1)) if so.
        ((staged_count++))
      fi
    else
      warn "SQL staging script not found: $stage_script"
    fi
  done <<< "$modules_json"

  if [ "$staged_count" -gt 0 ]; then
    ok "Staged SQL for $staged_count module(s)"
    info "SQL will be applied by AzerothCore's updater on next server startup"
  fi

  return 0
}
|
||||
|
||||
execute_module_sql(){
  # Backward-compatible entry point kept for older callers: module SQL is no
  # longer executed directly; it is staged for AzerothCore's updater instead.
  # Records the outcome in the global SQL_EXECUTION_FAILED flag (0 ok, 1 failed).
  SQL_EXECUTION_FAILED=0
  if ! stage_module_sql_files; then
    SQL_EXECUTION_FAILED=1
  fi
}
|
||||
|
||||
track_module_state(){
|
||||
@@ -591,13 +655,11 @@ main(){
|
||||
remove_disabled_modules
|
||||
install_enabled_modules
|
||||
manage_configuration_files
|
||||
info "SQL execution gate: MODULES_SKIP_SQL=${MODULES_SKIP_SQL:-0}"
|
||||
info "SQL staging gate: MODULES_SKIP_SQL=${MODULES_SKIP_SQL:-0}"
|
||||
if [ "${MODULES_SKIP_SQL:-0}" = "1" ]; then
|
||||
info "Skipping module SQL execution (MODULES_SKIP_SQL=1)"
|
||||
info "Skipping module SQL staging (MODULES_SKIP_SQL=1)"
|
||||
else
|
||||
info "Initiating module SQL helper"
|
||||
load_sql_helper
|
||||
info "SQL helper loaded from ${SQL_HELPER_PATH:-unknown}"
|
||||
info "Staging module SQL files for AzerothCore updater"
|
||||
execute_module_sql
|
||||
fi
|
||||
track_module_state
|
||||
|
||||
@@ -369,10 +369,85 @@ case "$TARGET_PROFILE" in
|
||||
modules) PROFILE_ARGS+=(--profile client-data) ;;
|
||||
esac
|
||||
|
||||
# Start the target profile
|
||||
show_staging_step "Realm Activation" "Bringing services online"
|
||||
echo "🟢 Starting services-$TARGET_PROFILE profile..."
|
||||
docker compose "${PROFILE_ARGS[@]}" up -d
|
||||
# Stage module SQL to core updates directory (after containers start)
|
||||
stage_module_sql_to_core() {
  # Start the selected compose profile, wait for the worldserver container,
  # then copy every module's SQL into AzerothCore's core update directories
  # (renamed with a timestamp so the core updater applies them in order).
  #
  # Globals (read): TARGET_PROFILE, PROFILE_ARGS
  # Side effects: starts containers; writes files inside ac-worldserver;
  #               writes /tmp/stage-sql-output.txt on the host.
  show_staging_step "Module SQL Staging" "Preparing module database updates"

  # Start containers first to get access to worldserver container
  show_staging_step "Realm Activation" "Bringing services online"
  echo "🟢 Starting services-$TARGET_PROFILE profile..."
  docker compose "${PROFILE_ARGS[@]}" up -d

  # Wait for worldserver container to be running (polls every 2s, 60s cap)
  echo "⏳ Waiting for worldserver container..."
  local max_wait=60
  local waited=0
  while ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver" && [ $waited -lt $max_wait ]; do
    sleep 2
    waited=$((waited + 2))
  done

  # Missing worldserver is treated as non-fatal: deployment continues without staging.
  if ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver"; then
    echo "⚠️ Worldserver container not found, skipping module SQL staging"
    return 0
  fi

  echo "📦 Staging module SQL files to core updates directory..."

  # Create core updates directories inside container
  docker exec ac-worldserver bash -c "
    mkdir -p /azerothcore/data/sql/updates/db_world \
      /azerothcore/data/sql/updates/db_characters \
      /azerothcore/data/sql/updates/db_auth
  " 2>/dev/null || true

  # Stage SQL from all modules
  local staged_count=0
  # NOTE(review): 'local var=$(cmd)' masks the command's exit status; split
  # declaration and assignment if the date call's failure should be detected.
  local timestamp=$(date +"%Y_%m_%d_%H%M%S")

  # Find all modules with SQL files
  for db_type in db-world db-characters db-auth; do
    local core_dir=""
    case "$db_type" in
      db-world) core_dir="db_world" ;;
      db-characters) core_dir="db_characters" ;;
      db-auth) core_dir="db_auth" ;;
    esac

    # Copy SQL files from each module.
    # The inner script runs inside the container; escaped \$ vars expand there,
    # while $timestamp/$db_type/$core_dir expand on the host before exec.
    # The final 'echo \$counter' makes the per-db file count the last output line.
    docker exec ac-worldserver bash -c "
      counter=0
      for module_dir in /azerothcore/modules/*/data/sql/$db_type; do
        if [ -d \"\$module_dir\" ]; then
          module_name=\$(basename \$(dirname \$(dirname \$module_dir)))
          for sql_file in \"\$module_dir\"/*.sql; do
            if [ -f \"\$sql_file\" ]; then
              base_name=\$(basename \"\$sql_file\" .sql)
              target_name=\"${timestamp}_\${counter}_MODULE_\${module_name}_\${base_name}.sql\"
              cp \"\$sql_file\" \"/azerothcore/data/sql/updates/$core_dir/\$target_name\"
              echo \" ✓ Staged \$module_name/$db_type/\$(basename \$sql_file)\"
              counter=\$((counter + 1))
            fi
          done
        fi
      done
      echo \$counter
    " 2>/dev/null | tee /tmp/stage-sql-output.txt || true

    # NOTE(review): if docker exec fails, 'tail -1' may yield a non-numeric
    # line (or nothing), and the arithmetic below would then error — consider
    # sanitizing 'count' to digits only.
    local count=$(tail -1 /tmp/stage-sql-output.txt 2>/dev/null || echo "0")
    staged_count=$((staged_count + count))
  done

  if [ "$staged_count" -gt 0 ]; then
    echo "✅ Staged $staged_count module SQL files to core updates directory"
    echo "🔄 Restart worldserver to apply: docker restart ac-worldserver"
  else
    echo "ℹ️ No module SQL files found to stage"
  fi
}
|
||||
|
||||
# Stage module SQL (this will also start the containers)
|
||||
stage_module_sql_to_core
|
||||
|
||||
printf '\n%b\n' "${GREEN}⚔️ Realm staging completed successfully! ⚔️${NC}"
|
||||
printf '%b\n' "${GREEN}🏰 Profile: services-$TARGET_PROFILE${NC}"
|
||||
|
||||
315
scripts/bash/test-phase1-integration.sh
Executable file
315
scripts/bash/test-phase1-integration.sh
Executable file
@@ -0,0 +1,315 @@
|
||||
#!/bin/bash
|
||||
# Phase 1 Integration Test Script
|
||||
# Tests the complete Phase 1 implementation using build and deploy workflows
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
# Colors
|
||||
BLUE='\033[0;34m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
RED='\033[0;31m'
|
||||
BOLD='\033[1m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Icons
|
||||
ICON_SUCCESS="✅"
|
||||
ICON_WARNING="⚠️"
|
||||
ICON_ERROR="❌"
|
||||
ICON_INFO="ℹ️"
|
||||
ICON_TEST="🧪"
|
||||
|
||||
# Counters
|
||||
TESTS_TOTAL=0
|
||||
TESTS_PASSED=0
|
||||
TESTS_FAILED=0
|
||||
|
||||
info() {
  # Informational message: info icon in blue, then the caller's text.
  printf '%b\n' "${BLUE}${ICON_INFO}${NC} $*"
}
|
||||
|
||||
ok() {
  # Print a success line and count it toward the pass total.
  echo -e "${GREEN}${ICON_SUCCESS}${NC} $*"
  # BUG FIX: '(( TESTS_PASSED++ ))' returns exit status 1 when the
  # pre-increment value is 0, which kills the script under 'set -euo pipefail'
  # on the very first passing test. Arithmetic expansion always succeeds.
  TESTS_PASSED=$((TESTS_PASSED + 1))
}
|
||||
|
||||
warn() {
  # Warning message: warning icon in yellow, then the caller's text.
  printf '%b\n' "${YELLOW}${ICON_WARNING}${NC} $*"
}
|
||||
|
||||
err() {
  # Print an error line and count it toward the failure total.
  echo -e "${RED}${ICON_ERROR}${NC} $*"
  # BUG FIX: '(( TESTS_FAILED++ ))' exits non-zero when the counter is 0,
  # aborting the script under 'set -euo pipefail' on the first failure.
  TESTS_FAILED=$((TESTS_FAILED + 1))
}
|
||||
|
||||
test_header() {
  # Announce a numbered test section and bump the total-test counter.
  # BUG FIX: '(( TESTS_TOTAL++ ))' returns status 1 when the counter is 0,
  # so the very first test_header call aborted the script under
  # 'set -euo pipefail'. Arithmetic expansion always returns status 0.
  TESTS_TOTAL=$((TESTS_TOTAL + 1))
  echo ""
  echo -e "${BOLD}${ICON_TEST} Test $TESTS_TOTAL: $*${NC}"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
}
|
||||
|
||||
section_header() {
  # Render a colored banner: two blank lines, a boxed title, one blank line.
  local rule="${BOLD}${BLUE}═══════════════════════════════════════════════════════${NC}"
  printf '\n\n'
  printf '%b\n' "$rule"
  printf '%b\n' "${BOLD}${BLUE}  $*${NC}"
  printf '%b\n' "$rule"
  printf '\n'
}
|
||||
|
||||
# Change to project root
|
||||
cd "$PROJECT_ROOT"
|
||||
|
||||
section_header "Phase 1 Integration Test Suite"
|
||||
|
||||
info "Project root: $PROJECT_ROOT"
|
||||
info "Test started: $(date)"
|
||||
|
||||
# Test 1: Verify .env exists
|
||||
test_header "Environment Configuration Check"
|
||||
if [ -f .env ]; then
|
||||
ok ".env file exists"
|
||||
|
||||
# Count enabled modules
|
||||
enabled_count=$(grep -c "^MODULE_.*=1" .env || echo "0")
|
||||
info "Enabled modules: $enabled_count"
|
||||
|
||||
# Check for playerbots
|
||||
if grep -q "^MODULE_PLAYERBOTS=1" .env; then
|
||||
info "Playerbots module enabled"
|
||||
fi
|
||||
else
|
||||
err ".env file not found"
|
||||
echo "Please run ./setup.sh first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Test 2: Module manifest validation
|
||||
test_header "Module Manifest Validation"
|
||||
if [ -f config/module-manifest.json ]; then
|
||||
ok "Module manifest exists"
|
||||
|
||||
# Validate JSON
|
||||
if python3 -m json.tool config/module-manifest.json >/dev/null 2>&1; then
|
||||
ok "Module manifest is valid JSON"
|
||||
else
|
||||
err "Module manifest has invalid JSON"
|
||||
fi
|
||||
else
|
||||
err "Module manifest not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Test 3: Generate module state with SQL discovery
|
||||
test_header "Module State Generation (SQL Discovery)"
|
||||
info "Running: python3 scripts/python/modules.py generate"
|
||||
|
||||
if python3 scripts/python/modules.py \
|
||||
--env-path .env \
|
||||
--manifest config/module-manifest.json \
|
||||
generate --output-dir local-storage/modules > /tmp/phase1-modules-generate.log 2>&1; then
|
||||
ok "Module state generation successful"
|
||||
else
|
||||
# Check if it's just warnings
|
||||
if grep -q "warnings detected" /tmp/phase1-modules-generate.log 2>/dev/null; then
|
||||
ok "Module state generation completed with warnings"
|
||||
else
|
||||
err "Module state generation failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Test 4: Verify SQL manifest created
|
||||
test_header "SQL Manifest Verification"
|
||||
if [ -f local-storage/modules/.sql-manifest.json ]; then
|
||||
ok "SQL manifest created: local-storage/modules/.sql-manifest.json"
|
||||
|
||||
# Check manifest structure
|
||||
module_count=$(python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0")
|
||||
info "Modules with SQL: $module_count"
|
||||
|
||||
if [ "$module_count" -gt 0 ]; then
|
||||
ok "SQL manifest contains $module_count module(s)"
|
||||
|
||||
# Show first module
|
||||
info "Sample module SQL info:"
|
||||
python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); m=data['modules'][0] if data['modules'] else {}; print(f\" Name: {m.get('name', 'N/A')}\n SQL files: {len(m.get('sql_files', {}))}\") " 2>/dev/null || true
|
||||
else
|
||||
warn "No modules with SQL files (expected if modules not yet staged)"
|
||||
fi
|
||||
else
|
||||
err "SQL manifest not created"
|
||||
fi
|
||||
|
||||
# Test 5: Verify modules.env created
|
||||
test_header "Module Environment File Check"
|
||||
if [ -f local-storage/modules/modules.env ]; then
|
||||
ok "modules.env created"
|
||||
|
||||
# Check for key exports
|
||||
if grep -q "MODULES_ENABLED=" local-storage/modules/modules.env; then
|
||||
ok "MODULES_ENABLED variable present"
|
||||
fi
|
||||
|
||||
if grep -q "MODULES_REQUIRES_CUSTOM_BUILD=" local-storage/modules/modules.env; then
|
||||
ok "Build requirement flags present"
|
||||
|
||||
# Check if build required
|
||||
source local-storage/modules/modules.env
|
||||
if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
|
||||
info "Custom build required (C++ modules enabled)"
|
||||
else
|
||||
info "Standard build sufficient (no C++ modules)"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
err "modules.env not created"
|
||||
fi
|
||||
|
||||
# Test 6: Check build requirement
|
||||
test_header "Build Requirement Check"
|
||||
if [ -f local-storage/modules/modules.env ]; then
|
||||
source local-storage/modules/modules.env
|
||||
|
||||
info "MODULES_REQUIRES_CUSTOM_BUILD=${MODULES_REQUIRES_CUSTOM_BUILD:-0}"
|
||||
info "MODULES_REQUIRES_PLAYERBOT_SOURCE=${MODULES_REQUIRES_PLAYERBOT_SOURCE:-0}"
|
||||
|
||||
if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
|
||||
ok "Build system correctly detected C++ modules"
|
||||
BUILD_REQUIRED=1
|
||||
else
|
||||
ok "Build system correctly detected no C++ modules"
|
||||
BUILD_REQUIRED=0
|
||||
fi
|
||||
else
|
||||
warn "Cannot determine build requirements"
|
||||
BUILD_REQUIRED=0
|
||||
fi
|
||||
|
||||
# Test 7: Verify new scripts exist and are executable
|
||||
test_header "New Script Verification"
|
||||
scripts=(
|
||||
"scripts/bash/stage-module-sql.sh"
|
||||
"scripts/bash/verify-sql-updates.sh"
|
||||
"scripts/bash/backup-status.sh"
|
||||
"scripts/bash/db-health-check.sh"
|
||||
)
|
||||
|
||||
for script in "${scripts[@]}"; do
|
||||
if [ -f "$script" ]; then
|
||||
if [ -x "$script" ]; then
|
||||
ok "$(basename "$script") - exists and executable"
|
||||
else
|
||||
warn "$(basename "$script") - exists but not executable"
|
||||
chmod +x "$script"
|
||||
ok "Fixed permissions for $(basename "$script")"
|
||||
fi
|
||||
else
|
||||
err "$(basename "$script") - not found"
|
||||
fi
|
||||
done
|
||||
|
||||
# Test 8: Test backup-status.sh (without running containers)
|
||||
test_header "Backup Status Script Test"
|
||||
if ./scripts/bash/backup-status.sh 2>&1 | head -10 | grep -q "BACKUP STATUS"; then
|
||||
ok "backup-status.sh executes successfully"
|
||||
else
|
||||
err "backup-status.sh failed to execute"
|
||||
fi
|
||||
|
||||
# Test 9: Test db-health-check.sh help
|
||||
test_header "Database Health Check Script Test"
|
||||
if ./scripts/bash/db-health-check.sh --help | grep -q "Check the health status"; then
|
||||
ok "db-health-check.sh help working"
|
||||
else
|
||||
err "db-health-check.sh help failed"
|
||||
fi
|
||||
|
||||
# Test 10: Check modified scripts for new functionality
|
||||
test_header "Modified Script Verification"
|
||||
|
||||
# Check manage-modules.sh has staging function
|
||||
if grep -q "stage_module_sql_files()" scripts/bash/manage-modules.sh; then
|
||||
ok "manage-modules.sh contains SQL staging function"
|
||||
else
|
||||
err "manage-modules.sh missing SQL staging function"
|
||||
fi
|
||||
|
||||
# Check db-import-conditional.sh has playerbots support
|
||||
if grep -q "PlayerbotsDatabaseInfo" scripts/bash/db-import-conditional.sh; then
|
||||
ok "db-import-conditional.sh has playerbots database support"
|
||||
else
|
||||
err "db-import-conditional.sh missing playerbots support"
|
||||
fi
|
||||
|
||||
if grep -q "Updates.EnableDatabases = 15" scripts/bash/db-import-conditional.sh; then
|
||||
ok "db-import-conditional.sh has correct EnableDatabases value (15)"
|
||||
else
|
||||
warn "db-import-conditional.sh may have incorrect EnableDatabases value"
|
||||
fi
|
||||
|
||||
# Check for post-restore verification
|
||||
if grep -q "verify_and_update_restored_databases" scripts/bash/db-import-conditional.sh; then
|
||||
ok "db-import-conditional.sh has post-restore verification"
|
||||
else
|
||||
err "db-import-conditional.sh missing post-restore verification"
|
||||
fi
|
||||
|
||||
# Test 11: Docker Compose configuration check
|
||||
test_header "Docker Compose Configuration Check"
|
||||
if [ -f docker-compose.yml ]; then
|
||||
ok "docker-compose.yml exists"
|
||||
|
||||
# Check for required services
|
||||
if grep -q "ac-mysql:" docker-compose.yml; then
|
||||
ok "MySQL service configured"
|
||||
fi
|
||||
|
||||
if grep -q "ac-worldserver:" docker-compose.yml; then
|
||||
ok "Worldserver service configured"
|
||||
fi
|
||||
else
|
||||
err "docker-compose.yml not found"
|
||||
fi
|
||||
|
||||
# Test Summary
|
||||
section_header "Test Summary"
|
||||
|
||||
echo ""
|
||||
echo -e "${BOLD}Tests Executed: $TESTS_TOTAL${NC}"
|
||||
echo -e "${GREEN}${BOLD}Passed: $TESTS_PASSED${NC}"
|
||||
if [ $TESTS_FAILED -gt 0 ]; then
|
||||
echo -e "${RED}${BOLD}Failed: $TESTS_FAILED${NC}"
|
||||
else
|
||||
echo -e "${GREEN}${BOLD}Failed: $TESTS_FAILED${NC}"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Calculate success rate
|
||||
if [ $TESTS_TOTAL -gt 0 ]; then
|
||||
success_rate=$((TESTS_PASSED * 100 / TESTS_TOTAL))
|
||||
echo -e "${BOLD}Success Rate: ${success_rate}%${NC}"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
|
||||
if [ $TESTS_FAILED -eq 0 ]; then
|
||||
echo -e "${GREEN}${BOLD}${ICON_SUCCESS} ALL TESTS PASSED${NC}"
|
||||
echo ""
|
||||
echo "Phase 1 implementation is working correctly!"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo " 1. Run './build.sh' if C++ modules are enabled"
|
||||
echo " 2. Run './deploy.sh' to start containers"
|
||||
echo " 3. Verify SQL staging with running containers"
|
||||
echo " 4. Check database health with db-health-check.sh"
|
||||
exit 0
|
||||
else
|
||||
echo -e "${RED}${BOLD}${ICON_ERROR} SOME TESTS FAILED${NC}"
|
||||
echo ""
|
||||
echo "Please review the failures above before proceeding."
|
||||
exit 1
|
||||
fi
|
||||
348
scripts/bash/verify-sql-updates.sh
Executable file
348
scripts/bash/verify-sql-updates.sh
Executable file
@@ -0,0 +1,348 @@
|
||||
#!/bin/bash
|
||||
# Verify SQL Updates
|
||||
# Checks that SQL updates have been applied via the updates table
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
RED='\033[0;31m'
|
||||
BLUE='\033[0;34m'
|
||||
CYAN='\033[0;36m'
|
||||
BOLD='\033[1m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Icons
|
||||
ICON_SUCCESS="✅"
|
||||
ICON_WARNING="⚠️"
|
||||
ICON_ERROR="❌"
|
||||
ICON_INFO="ℹ️"
|
||||
|
||||
# Default values
|
||||
MODULE_NAME=""
|
||||
DATABASE_NAME=""
|
||||
SHOW_ALL=0
|
||||
CHECK_HASH=0
|
||||
CONTAINER_NAME="ac-mysql"
|
||||
|
||||
usage() {
  # Print CLI help to stdout (quoted heredoc: no expansion performed).
  cat <<'EOF'
Usage: ./verify-sql-updates.sh [options]

Verify that SQL updates have been applied via AzerothCore's updates table.

Options:
  --module NAME       Check specific module
  --database NAME     Check specific database (auth/world/characters)
  --all               Show all module updates
  --check-hash        Verify file hashes match database
  --container NAME    MySQL container name (default: ac-mysql)
  -h, --help          Show this help

Examples:
  ./verify-sql-updates.sh --all
  ./verify-sql-updates.sh --module mod-aoe-loot
  ./verify-sql-updates.sh --database acore_world --all

EOF
}
|
||||
|
||||
# Parse arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--module) MODULE_NAME="$2"; shift 2;;
|
||||
--database) DATABASE_NAME="$2"; shift 2;;
|
||||
--all) SHOW_ALL=1; shift;;
|
||||
--check-hash) CHECK_HASH=1; shift;;
|
||||
--container) CONTAINER_NAME="$2"; shift 2;;
|
||||
-h|--help) usage; exit 0;;
|
||||
*) echo "Unknown option: $1"; usage; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Load environment
|
||||
if [ -f "$PROJECT_ROOT/.env" ]; then
|
||||
set -a
|
||||
# shellcheck disable=SC1091
|
||||
source "$PROJECT_ROOT/.env"
|
||||
set +a
|
||||
fi
|
||||
|
||||
MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
|
||||
MYSQL_PORT="${MYSQL_PORT:-3306}"
|
||||
MYSQL_USER="${MYSQL_USER:-root}"
|
||||
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
|
||||
DB_AUTH_NAME="${DB_AUTH_NAME:-acore_auth}"
|
||||
DB_WORLD_NAME="${DB_WORLD_NAME:-acore_world}"
|
||||
DB_CHARACTERS_NAME="${DB_CHARACTERS_NAME:-acore_characters}"
|
||||
DB_PLAYERBOTS_NAME="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
|
||||
|
||||
# Logging functions
|
||||
info() {
  # Informational line: blue info icon followed by the message.
  printf '%b\n' "${BLUE}${ICON_INFO}${NC} $*"
}
|
||||
|
||||
ok() {
  # Success line: green check icon followed by the message.
  printf '%b\n' "${GREEN}${ICON_SUCCESS}${NC} $*"
}
|
||||
|
||||
warn() {
  # Warning line: yellow warning icon followed by the message.
  printf '%b\n' "${YELLOW}${ICON_WARNING}${NC} $*"
}
|
||||
|
||||
err() {
  # Error line: red cross icon followed by the message.
  printf '%b\n' "${RED}${ICON_ERROR}${NC} $*"
}
|
||||
|
||||
# MySQL query helper
|
||||
mysql_query() {
  # Run a SQL statement and print raw tab-separated rows (-N -B: no headers).
  #
  # Arguments:
  #   $1 - database name (empty string for server-level queries)
  #   $2 - SQL text
  # Globals: MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_ROOT_PASSWORD;
  #          CONTAINER_NAME when docker is available.
  # Returns: 1 when no password is configured, otherwise mysql's exit status.
  local database="${1:-}"
  local query="$2"

  if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
    err "MYSQL_ROOT_PASSWORD not set"
    return 1
  fi

  # Build the client invocation once instead of duplicating it across four
  # docker/no-docker x database/no-database branches (same argument order as
  # before: connection flags, optional database, then -N -B -e "query").
  local -a client=(mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD")
  if [ -n "$database" ]; then
    client+=("$database")
  fi
  client+=(-N -B -e "$query")

  # Prefer running inside the MySQL container when docker is present;
  # fall back to a local mysql client otherwise. stderr is suppressed so
  # callers can treat "no rows / no connection" uniformly.
  if command -v docker >/dev/null 2>&1; then
    docker exec "$CONTAINER_NAME" "${client[@]}" 2>/dev/null
  else
    "${client[@]}" 2>/dev/null
  fi
}
|
||||
|
||||
# Check if database exists
|
||||
db_exists() {
  # Succeed (exit 0) iff a schema with the given name exists on the server.
  local schema="$1"
  local found
  found=$(mysql_query "" "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME='$schema'" 2>/dev/null || echo "0")
  if [ "$found" = "1" ]; then
    return 0
  fi
  return 1
}
|
||||
|
||||
# Verify module SQL in database
|
||||
verify_module_sql() {
  # Show every applied update row for one module in one database.
  #
  # Arguments:
  #   $1 - module name (matched with SQL LIKE '%name%')
  #   $2 - database to inspect
  # Returns: 1 if the database is missing; 0 otherwise (including "no rows").
  local module_name="$1"
  local database_name="$2"

  if ! db_exists "$database_name"; then
    err "Database does not exist: $database_name"
    return 1
  fi

  info "Checking module updates in $database_name"

  # Query updates table for module
  # NOTE(review): $module_name is interpolated into the LIKE pattern
  # unescaped — fine for trusted CLI input, but '%'/'_' or quotes in the
  # name would change the match; confirm callers only pass module slugs.
  local query="SELECT name, hash, state, timestamp, speed FROM updates WHERE name LIKE '%${module_name}%' AND state='MODULE' ORDER BY timestamp DESC"
  local results
  results=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

  # No matching rows is reported as a warning, not a failure.
  if [ -z "$results" ]; then
    warn "No updates found for module: $module_name in $database_name"
    return 0
  fi

  # Display results
  echo
  printf "${BOLD}${CYAN}Module Updates for %s in %s:${NC}\n" "$module_name" "$database_name"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  # Rows come back tab-separated from mysql -N -B; hash is shown truncated
  # to its first 12 characters for readability.
  while IFS=$'\t' read -r name hash state timestamp speed; do
    printf "${GREEN}${ICON_SUCCESS}${NC} %s\n" "$name"
    printf "  Hash: %s\n" "${hash:0:12}..."
    printf "  Applied: %s\n" "$timestamp"
    printf "  Speed: %sms\n" "$speed"
    echo
  done <<< "$results"

  return 0
}
|
||||
|
||||
# List all module updates
|
||||
list_module_updates() {
  # Print every update row with state='MODULE' in the given database,
  # newest first, followed by a total count.
  #
  # Arguments: $1 - database name
  # Returns: 1 if the database does not exist; 0 otherwise.
  local database_name="$1"

  if ! db_exists "$database_name"; then
    err "Database does not exist: $database_name"
    return 1
  fi

  info "Listing all module updates in $database_name"

  # Query all module updates
  local query="SELECT name, state, timestamp FROM updates WHERE state='MODULE' ORDER BY timestamp DESC"
  local results
  results=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

  if [ -z "$results" ]; then
    warn "No module updates found in $database_name"
    return 0
  fi

  # Display results
  echo
  printf "${BOLD}${CYAN}All Module Updates in %s:${NC}\n" "$database_name"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  local count=0
  while IFS=$'\t' read -r name state timestamp; do
    printf "${GREEN}${ICON_SUCCESS}${NC} %s\n" "$name"
    printf "  Applied: %s\n" "$timestamp"
    # BUG FIX: '((count++))' returns status 1 on the first pass (value 0),
    # which aborts the whole script under this file's 'set -euo pipefail'
    # before the total is ever printed. Arithmetic expansion is always safe.
    count=$((count + 1))
  done <<< "$results"

  echo
  ok "Total module updates: $count"
  echo

  return 0
}
|
||||
|
||||
# Check update applied
|
||||
check_update_applied() {
  # Check whether a single SQL update file is recorded in the updates table,
  # optionally verifying its stored hash.
  #
  # Arguments:
  #   $1 - update filename as stored in updates.name
  #   $2 - database to inspect
  #   $3 - (optional) expected hash to compare against the stored one
  # Returns: 0 applied (and hash matches when given), 1 not found,
  #          2 missing database or hash mismatch.
  local filename="$1"
  local database_name="$2"
  local expected_hash="${3:-}"

  if ! db_exists "$database_name"; then
    err "Database does not exist: $database_name"
    return 2
  fi

  # Query for specific file
  # NOTE(review): $filename is interpolated into the SQL unescaped; confirm
  # callers never pass untrusted input.
  local query="SELECT hash, state, timestamp FROM updates WHERE name='$filename' LIMIT 1"
  local result
  result=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

  if [ -z "$result" ]; then
    warn "Update not found: $filename"
    return 1
  fi

  # Parse result (mysql -N -B emits one tab-separated row)
  IFS=$'\t' read -r hash state timestamp <<< "$result"

  ok "Update applied: $filename"
  printf "  Hash: %s\n" "$hash"
  printf "  State: %s\n" "$state"
  printf "  Applied: %s\n" "$timestamp"

  # Check hash if provided
  if [ -n "$expected_hash" ] && [ "$expected_hash" != "$hash" ]; then
    err "Hash mismatch!"
    printf "  Expected: %s\n" "$expected_hash"
    printf "  Actual: %s\n" "$hash"
    return 2
  fi

  return 0
}
|
||||
|
||||
# Generate verification report
|
||||
generate_verification_report() {
  # Summarize module-update counts across the auth/world/characters databases
  # (plus playerbots when present); with SHOW_ALL=1 lists the 5 most recent
  # updates per database.
  #
  # Globals (read): DB_AUTH_NAME, DB_WORLD_NAME, DB_CHARACTERS_NAME,
  #                 DB_PLAYERBOTS_NAME, SHOW_ALL, color/icon variables.
  echo
  printf "${BOLD}${BLUE}🔍 Module SQL Verification Report${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  local total_updates=0
  local databases=("$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME")

  # Add playerbots if it exists
  if db_exists "$DB_PLAYERBOTS_NAME"; then
    databases+=("$DB_PLAYERBOTS_NAME")
  fi

  for db in "${databases[@]}"; do
    if ! db_exists "$db"; then
      continue
    fi

    # Get count of module updates
    local count
    count=$(mysql_query "$db" "SELECT COUNT(*) FROM updates WHERE state='MODULE'" 2>/dev/null || echo "0")

    if [ "$count" != "0" ]; then
      printf "${GREEN}${ICON_SUCCESS}${NC} ${BOLD}%s:${NC} %s module update(s)\n" "$db" "$count"
      # NOTE(review): if the query succeeds but prints an empty line, $count
      # would be "" and this arithmetic errors under set -e — confirm mysql
      # always emits a numeric COUNT(*) row here.
      total_updates=$((total_updates + count))

      if [ "$SHOW_ALL" = "1" ]; then
        # Show recent updates
        local query="SELECT name, timestamp FROM updates WHERE state='MODULE' ORDER BY timestamp DESC LIMIT 5"
        local results
        results=$(mysql_query "$db" "$query" 2>/dev/null || echo "")

        if [ -n "$results" ]; then
          while IFS=$'\t' read -r name timestamp; do
            printf "  - %s (%s)\n" "$name" "$timestamp"
          done <<< "$results"
          echo
        fi
      fi
    else
      printf "${YELLOW}${ICON_WARNING}${NC} ${BOLD}%s:${NC} No module updates\n" "$db"
    fi
  done

  echo
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  printf "${BOLD}Total: %s module update(s) applied${NC}\n" "$total_updates"
  echo
}
|
||||
|
||||
# Main execution
|
||||
main() {
  # Entry point: verify connectivity, then dispatch on the parsed CLI flags —
  # --module (optionally with --database) checks one module, --database alone
  # lists that database's module updates, no filter prints the full report.
  echo
  info "SQL Update Verification"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Test MySQL connection up front so every later query can assume it works.
  if ! mysql_query "" "SELECT 1" >/dev/null 2>&1; then
    err "Cannot connect to MySQL server"
    printf "  Host: %s:%s\n" "$MYSQL_HOST" "$MYSQL_PORT"
    printf "  User: %s\n" "$MYSQL_USER"
    printf "  Container: %s\n\n" "$CONTAINER_NAME"
    exit 1
  fi

  # Execute based on options
  if [ -n "$MODULE_NAME" ]; then
    # Check specific module
    if [ -n "$DATABASE_NAME" ]; then
      verify_module_sql "$MODULE_NAME" "$DATABASE_NAME"
    else
      # Check all databases for this module
      for db in "$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME"; do
        if db_exists "$db"; then
          verify_module_sql "$MODULE_NAME" "$db"
        fi
      done
      if db_exists "$DB_PLAYERBOTS_NAME"; then
        verify_module_sql "$MODULE_NAME" "$DB_PLAYERBOTS_NAME"
      fi
    fi
  elif [ -n "$DATABASE_NAME" ]; then
    # List all updates in specific database
    list_module_updates "$DATABASE_NAME"
  else
    # Generate full report
    generate_verification_report
  fi

  echo
  ok "Verification complete"
  echo
}
|
||||
|
||||
main "$@"
|
||||
Reference in New Issue
Block a user