Mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git (synced 2026-01-13 09:07:20 +00:00)
feat: comprehensive module system and database management improvements
This commit introduces major enhancements to the module installation system, database management, and configuration handling for AzerothCore deployments.

## Module System Improvements

### Module SQL Staging & Installation
- Refactor module SQL staging to properly handle AzerothCore's sql/ directory structure
- Fix SQL staging path to use correct AzerothCore format (sql/custom/db_*/*)
- Implement conditional module database importing based on enabled modules
- Add support for both cpp-modules and lua-scripts module types
- Handle rsync exit code 23 (permission warnings) gracefully during deployment

### Module Manifest & Automation
- Add automated module manifest generation via GitHub Actions workflow
- Implement Python-based module manifest updater with comprehensive validation
- Add module dependency tracking and SQL file discovery
- Support for blocked modules and module metadata management

## Database Management Enhancements

### Database Import System
- Add db-guard container for continuous database health monitoring and verification
- Implement conditional database import that skips when databases are current
- Add backup restoration and SQL staging coordination
- Support for Playerbots database (4th database) in all import operations
- Add comprehensive database health checking and status reporting

### Database Configuration
- Implement 10 new dbimport.conf settings from environment variables:
  - Database.Reconnect.Seconds/Attempts for connection reliability
  - Updates.AllowedModules for module auto-update control
  - Updates.Redundancy for data integrity checks
  - Worker/Synch thread settings for all three core databases
- Auto-apply dbimport.conf settings via auto-post-install.sh
- Add environment variable injection for db-import and db-guard containers

### Backup & Recovery
- Fix backup scheduler to prevent immediate execution on container startup
- Add backup status monitoring script with detailed reporting
- Implement backup import/export utilities
- Add database verification scripts for SQL update tracking

## User Import Directory
- Add new import/ directory for user-provided database files and configurations
- Support for custom SQL files, configuration overrides, and example templates
- Automatic import of user-provided databases and configs during initialization
- Documentation and examples for custom database imports

## Configuration & Environment
- Eliminate CLIENT_DATA_VERSION warning by adding default value syntax
- Improve CLIENT_DATA_VERSION documentation in .env.template
- Add comprehensive database import settings to .env and .env.template
- Update setup.sh to handle new configuration variables with proper defaults

## Monitoring & Debugging
- Add status dashboard with Go-based terminal UI (statusdash.go)
- Implement JSON status output (statusjson.sh) for programmatic access
- Add comprehensive database health check script
- Add repair-storage-permissions.sh utility for permission issues

## Testing & Documentation
- Add Phase 1 integration test suite for module installation verification
- Add comprehensive documentation for:
  - Database management (DATABASE_MANAGEMENT.md)
  - Module SQL analysis (AZEROTHCORE_MODULE_SQL_ANALYSIS.md)
  - Implementation mapping (IMPLEMENTATION_MAP.md)
  - SQL staging comparison and path coverage
  - Module assets and DBC file requirements
- Update SCRIPTS.md, ADVANCED.md, and troubleshooting documentation
- Update references from database-import/ to import/ directory

## Breaking Changes
- Renamed database-import/ directory to import/ for clarity
- Module SQL files now staged to AzerothCore-compatible paths
- db-guard container now required for proper database lifecycle management

## Bug Fixes
- Fix module SQL staging directory structure for AzerothCore compatibility
- Handle rsync exit code 23 gracefully during deployments
- Prevent backup from running immediately on container startup
- Correct SQL staging paths for proper module installation
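The commit message mentions tolerating rsync exit code 23 during deployment, but the hunks below do not include that change. As a minimal sketch of what such handling typically looks like (SRC/DEST and the surrounding deploy logic are assumptions, not code from this commit):

```bash
#!/bin/bash
# Sketch: treat rsync exit code 23 ("partial transfer due to error",
# typically per-file permission warnings) as non-fatal.
rsync -a "$SRC/" "$DEST/"
rc=$?
if [ "$rc" -ne 0 ] && [ "$rc" -ne 23 ]; then
    echo "rsync failed with exit code $rc" >&2
    exit "$rc"
elif [ "$rc" -eq 23 ]; then
    echo "rsync reported partial transfer (exit 23); continuing" >&2
fi
```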
@@ -100,7 +100,14 @@ else
       # Skip core config files (already handled)
       case "$filename" in
-        authserver.conf|worldserver.conf|dbimport.conf)
+        authserver.conf|worldserver.conf)
           continue
           ;;
+        dbimport.conf)
+          if [ ! -f "$conffile" ] || grep -q "Updates.ExceptionShutdownDelay" "$conffile"; then
+            echo "  📝 Creating/refreshing $filename from $(basename "$file")"
+            cp "$file" "$conffile"
+          fi
+          continue
+          ;;
       esac
@@ -140,6 +147,28 @@ else
   sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/worldserver.conf || true
   sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/worldserver.conf || true
   sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/worldserver.conf || true
+  if [ -f "/azerothcore/config/dbimport.conf" ]; then
+    sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^PlayerbotsDatabaseInfo *=.*|PlayerbotsDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_PLAYERBOTS_NAME}\"|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^MySQLExecutable *=.*|MySQLExecutable = \"/usr/bin/mysql\"|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^TempDir *=.*|TempDir = \"/azerothcore/env/dist/temp\"|" /azerothcore/config/dbimport.conf || true
+    # Database reconnection settings
+    sed -i "s|^Database\.Reconnect\.Seconds *=.*|Database.Reconnect.Seconds = ${DB_RECONNECT_SECONDS:-5}|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^Database\.Reconnect\.Attempts *=.*|Database.Reconnect.Attempts = ${DB_RECONNECT_ATTEMPTS:-5}|" /azerothcore/config/dbimport.conf || true
+    # Update settings
+    sed -i "s|^Updates\.AllowedModules *=.*|Updates.AllowedModules = \"${DB_UPDATES_ALLOWED_MODULES:-all}\"|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^Updates\.Redundancy *=.*|Updates.Redundancy = ${DB_UPDATES_REDUNDANCY:-1}|" /azerothcore/config/dbimport.conf || true
+    # Worker thread settings
+    sed -i "s|^LoginDatabase\.WorkerThreads *=.*|LoginDatabase.WorkerThreads = ${DB_LOGIN_WORKER_THREADS:-1}|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^WorldDatabase\.WorkerThreads *=.*|WorldDatabase.WorkerThreads = ${DB_WORLD_WORKER_THREADS:-1}|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^CharacterDatabase\.WorkerThreads *=.*|CharacterDatabase.WorkerThreads = ${DB_CHARACTER_WORKER_THREADS:-1}|" /azerothcore/config/dbimport.conf || true
+    # Synch thread settings
+    sed -i "s|^LoginDatabase\.SynchThreads *=.*|LoginDatabase.SynchThreads = ${DB_LOGIN_SYNCH_THREADS:-1}|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^WorldDatabase\.SynchThreads *=.*|WorldDatabase.SynchThreads = ${DB_WORLD_SYNCH_THREADS:-1}|" /azerothcore/config/dbimport.conf || true
+    sed -i "s|^CharacterDatabase\.SynchThreads *=.*|CharacterDatabase.SynchThreads = ${DB_CHARACTER_SYNCH_THREADS:-1}|" /azerothcore/config/dbimport.conf || true
+  fi
   update_playerbots_conf /azerothcore/config/modules/playerbots.conf
   update_playerbots_conf /azerothcore/config/modules/playerbots.conf.dist
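For orientation, with no environment overrides set, the substituted block of dbimport.conf ends up reading like this (the values shown are the defaults baked into the sed fallbacks above, covering all 10 new settings):

```
Database.Reconnect.Seconds = 5
Database.Reconnect.Attempts = 5
Updates.AllowedModules = "all"
Updates.Redundancy = 1
LoginDatabase.WorkerThreads = 1
WorldDatabase.WorkerThreads = 1
CharacterDatabase.WorkerThreads = 1
LoginDatabase.SynchThreads = 1
WorldDatabase.SynchThreads = 1
CharacterDatabase.SynchThreads = 1
```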
@@ -200,8 +200,9 @@ cleanup_old() {
 
 log "Backup scheduler starting: interval(${BACKUP_INTERVAL_MINUTES}m), daily(${RETENTION_DAYS}d at ${DAILY_TIME}:00)"
 
-# Initialize last backup time
-last_backup=0
+# Initialize last backup time to current time to prevent immediate backup on startup
+last_backup=$(date +%s)
+log "ℹ️ First backup will run in ${BACKUP_INTERVAL_MINUTES} minutes"
 
 while true; do
   current_time=$(date +%s)
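The rest of the scheduler loop is truncated in this diff; presumably it compares elapsed time against the configured interval before firing, along the lines of this sketch (run_backup_cycle is a hypothetical stand-in for the real backup function):

```bash
while true; do
    current_time=$(date +%s)
    # Fire an interval backup only once enough time has elapsed since the last one.
    if [ $(( current_time - last_backup )) -ge $(( BACKUP_INTERVAL_MINUTES * 60 )) ]; then
        run_backup_cycle            # hypothetical helper
        last_backup=$current_time
    fi
    sleep 60
done
```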
scripts/bash/backup-status.sh (new executable file, 421 lines)
@@ -0,0 +1,421 @@
#!/bin/bash
# Backup Status Dashboard
# Displays comprehensive backup system status and statistics
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_BACKUP="📦"
ICON_TIME="🕐"
ICON_SIZE="💾"
ICON_CHART="📊"
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_SCHEDULE="📅"

# Default values
SHOW_DETAILS=0
SHOW_TRENDS=0

usage() {
  cat <<'EOF'
Usage: ./backup-status.sh [options]

Display backup system status and statistics.

Options:
  -d, --details    Show detailed backup listing
  -t, --trends     Show size trends over time
  -h, --help       Show this help

Examples:
  ./backup-status.sh
  ./backup-status.sh --details
  ./backup-status.sh --details --trends

EOF
}

# Parse arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    -d|--details) SHOW_DETAILS=1; shift;;
    -t|--trends) SHOW_TRENDS=1; shift;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown option: $1"; usage; exit 1;;
  esac
done

# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
  set -a
  # shellcheck disable=SC1091
  source "$PROJECT_ROOT/.env"
  set +a
fi

BACKUP_PATH="${BACKUP_PATH:-$PROJECT_ROOT/storage/backups}"
BACKUP_INTERVAL_MINUTES="${BACKUP_INTERVAL_MINUTES:-60}"
BACKUP_RETENTION_HOURS="${BACKUP_RETENTION_HOURS:-6}"
BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-3}"
BACKUP_DAILY_TIME="${BACKUP_DAILY_TIME:-09}"

# Format bytes to human readable
format_bytes() {
  local bytes=$1
  if [ "$bytes" -lt 1024 ]; then
    echo "${bytes}B"
  elif [ "$bytes" -lt 1048576 ]; then
    echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1024}")KB"
  elif [ "$bytes" -lt 1073741824 ]; then
    echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1048576}")MB"
  else
    echo "$(awk "BEGIN {printf \"%.2f\", $bytes/1073741824}")GB"
  fi
}

# Get directory size
get_dir_size() {
  local dir="$1"
  if [ -d "$dir" ]; then
    du -sb "$dir" 2>/dev/null | cut -f1
  else
    echo "0"
  fi
}

# Count backups in directory
count_backups() {
  local dir="$1"
  if [ -d "$dir" ]; then
    find "$dir" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l
  else
    echo "0"
  fi
}

# Get latest backup timestamp
get_latest_backup() {
  local dir="$1"
  if [ -d "$dir" ]; then
    ls -1t "$dir" 2>/dev/null | head -n1 || echo ""
  else
    echo ""
  fi
}

# Parse timestamp from backup directory name
parse_timestamp() {
  local backup_name="$1"
  # Format: YYYYMMDD_HHMMSS or ExportBackup_YYYYMMDD_HHMMSS
  local timestamp
  if [[ "$backup_name" =~ ([0-9]{8})_([0-9]{6}) ]]; then
    timestamp="${BASH_REMATCH[1]}_${BASH_REMATCH[2]}"
    echo "$timestamp"
  else
    echo ""
  fi
}

# Calculate time ago from timestamp
time_ago() {
  local timestamp="$1"
  if [ -z "$timestamp" ]; then
    echo "Unknown"
    return
  fi

  # Parse timestamp: YYYYMMDD_HHMMSS
  local year="${timestamp:0:4}"
  local month="${timestamp:4:2}"
  local day="${timestamp:6:2}"
  local hour="${timestamp:9:2}"
  local minute="${timestamp:11:2}"
  local second="${timestamp:13:2}"

  local backup_epoch
  backup_epoch=$(date -d "$year-$month-$day $hour:$minute:$second" +%s 2>/dev/null || echo "0")

  if [ "$backup_epoch" = "0" ]; then
    echo "Unknown"
    return
  fi

  local now_epoch
  now_epoch=$(date +%s)
  local diff=$((now_epoch - backup_epoch))

  if [ "$diff" -lt 60 ]; then
    echo "${diff} seconds ago"
  elif [ "$diff" -lt 3600 ]; then
    local minutes=$((diff / 60))
    echo "${minutes} minute(s) ago"
  elif [ "$diff" -lt 86400 ]; then
    local hours=$((diff / 3600))
    echo "${hours} hour(s) ago"
  else
    local days=$((diff / 86400))
    echo "${days} day(s) ago"
  fi
}

# Calculate next scheduled backup
next_backup_time() {
  local interval_minutes="$1"
  local now_epoch
  now_epoch=$(date +%s)

  local next_epoch=$((now_epoch + (interval_minutes * 60)))
  local in_minutes=$(((next_epoch - now_epoch) / 60))

  if [ "$in_minutes" -lt 60 ]; then
    echo "in ${in_minutes} minute(s)"
  else
    local in_hours=$((in_minutes / 60))
    local remaining_minutes=$((in_minutes % 60))
    echo "in ${in_hours} hour(s) ${remaining_minutes} minute(s)"
  fi
}

# Calculate next daily backup
next_daily_backup() {
  local daily_hour="$1"
  local now_epoch
  now_epoch=$(date +%s)

  local today_backup_epoch
  today_backup_epoch=$(date -d "today ${daily_hour}:00:00" +%s)

  local next_epoch
  if [ "$now_epoch" -lt "$today_backup_epoch" ]; then
    next_epoch=$today_backup_epoch
  else
    next_epoch=$(date -d "tomorrow ${daily_hour}:00:00" +%s)
  fi

  local diff=$((next_epoch - now_epoch))
  local hours=$((diff / 3600))
  local minutes=$(((diff % 3600) / 60))

  echo "in ${hours} hour(s) ${minutes} minute(s)"
}

# Show backup tier status
show_backup_tier() {
  local tier_name="$1"
  local tier_dir="$2"
  local retention="$3"

  if [ ! -d "$tier_dir" ]; then
    printf "  ${ICON_WARNING} ${YELLOW}%s:${NC} No backups found\n" "$tier_name"
    return
  fi

  local count size latest
  count=$(count_backups "$tier_dir")
  size=$(get_dir_size "$tier_dir")
  latest=$(get_latest_backup "$tier_dir")

  if [ "$count" = "0" ]; then
    printf "  ${ICON_WARNING} ${YELLOW}%s:${NC} No backups found\n" "$tier_name"
    return
  fi

  local latest_timestamp
  latest_timestamp=$(parse_timestamp "$latest")
  local ago
  ago=$(time_ago "$latest_timestamp")

  printf "  ${GREEN}${ICON_SUCCESS} %s:${NC} %s backup(s), %s total\n" "$tier_name" "$count" "$(format_bytes "$size")"
  printf "    ${ICON_TIME} Latest: %s (%s)\n" "$latest" "$ago"
  printf "    ${ICON_SCHEDULE} Retention: %s\n" "$retention"

  if [ "$SHOW_DETAILS" = "1" ]; then
    printf "    ${ICON_BACKUP} Available backups:\n"
    local backup_list
    backup_list=$(ls -1t "$tier_dir" 2>/dev/null || true)
    while IFS= read -r backup; do
      if [ -n "$backup" ]; then
        local backup_size
        backup_size=$(get_dir_size "$tier_dir/$backup")
        local backup_timestamp
        backup_timestamp=$(parse_timestamp "$backup")
        local backup_ago
        backup_ago=$(time_ago "$backup_timestamp")
        printf "      - %s: %s (%s)\n" "$backup" "$(format_bytes "$backup_size")" "$backup_ago"
      fi
    done <<< "$backup_list"
  fi
}

# Show size trends
show_trends() {
  printf "${BOLD}${ICON_CHART} Backup Size Trends${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  local daily_dir="$BACKUP_PATH/daily"
  if [ ! -d "$daily_dir" ]; then
    printf "  ${ICON_WARNING} No daily backups found for trend analysis\n\n"
    return
  fi

  # Get last 7 daily backups
  local backup_list
  backup_list=$(ls -1t "$daily_dir" 2>/dev/null | head -7 | tac)

  if [ -z "$backup_list" ]; then
    printf "  ${ICON_WARNING} Not enough backups for trend analysis\n\n"
    return
  fi

  # Find max size for scaling
  local max_size=0
  while IFS= read -r backup; do
    if [ -n "$backup" ]; then
      local size
      size=$(get_dir_size "$daily_dir/$backup")
      if [ "$size" -gt "$max_size" ]; then
        max_size=$size
      fi
    fi
  done <<< "$backup_list"

  # Display trend chart
  while IFS= read -r backup; do
    if [ -n "$backup" ]; then
      local size
      size=$(get_dir_size "$daily_dir/$backup")
      local timestamp
      timestamp=$(parse_timestamp "$backup")
      local date_str="${timestamp:0:4}-${timestamp:4:2}-${timestamp:6:2}"

      # Calculate bar length (max 30 chars)
      local bar_length=0
      if [ "$max_size" -gt 0 ]; then
        bar_length=$((size * 30 / max_size))
      fi

      # Create bar
      local bar=""
      for ((i=0; i<bar_length; i++)); do
        bar+="█"
      done
      for ((i=bar_length; i<30; i++)); do
        bar+="░"
      done

      printf "  %s: %s %s\n" "$date_str" "$(format_bytes "$size" | awk '{printf "%-8s", $0}')" "$bar"
    fi
  done <<< "$backup_list"
  echo
}

# Main status display
main() {
  echo
  printf "${BOLD}${BLUE}${ICON_BACKUP} AZEROTHCORE BACKUP STATUS${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Check if backup directory exists
  if [ ! -d "$BACKUP_PATH" ]; then
    printf "${RED}${ICON_WARNING} Backup directory not found: %s${NC}\n\n" "$BACKUP_PATH"
    printf "Backup system may not be initialized yet.\n\n"
    exit 1
  fi

  # Show current backup tiers
  printf "${BOLD}${ICON_BACKUP} Backup Tiers${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  show_backup_tier "Hourly Backups" "$BACKUP_PATH/hourly" "${BACKUP_RETENTION_HOURS} hours"
  show_backup_tier "Daily Backups" "$BACKUP_PATH/daily" "${BACKUP_RETENTION_DAYS} days"

  # Check for manual backups
  local manual_count=0
  local manual_size=0
  if [ -d "$PROJECT_ROOT/manual-backups" ]; then
    manual_count=$(count_backups "$PROJECT_ROOT/manual-backups")
    manual_size=$(get_dir_size "$PROJECT_ROOT/manual-backups")
  fi

  # Also check for export backups in main backup dir
  local export_count=0
  if [ -d "$BACKUP_PATH" ]; then
    export_count=$(find "$BACKUP_PATH" -maxdepth 1 -type d -name "ExportBackup_*" 2>/dev/null | wc -l)
    if [ "$export_count" -gt 0 ]; then
      local export_size=0
      while IFS= read -r export_dir; do
        if [ -n "$export_dir" ]; then
          local size
          size=$(get_dir_size "$export_dir")
          export_size=$((export_size + size))
        fi
      done < <(find "$BACKUP_PATH" -maxdepth 1 -type d -name "ExportBackup_*" 2>/dev/null)
      manual_size=$((manual_size + export_size))
      manual_count=$((manual_count + export_count))
    fi
  fi

  if [ "$manual_count" -gt 0 ]; then
    printf "  ${GREEN}${ICON_SUCCESS} Manual/Export Backups:${NC} %s backup(s), %s total\n" "$manual_count" "$(format_bytes "$manual_size")"
  fi

  echo

  # Show next scheduled backups
  printf "${BOLD}${ICON_SCHEDULE} Backup Schedule${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  printf "  ${ICON_TIME} Hourly interval: every %s minutes\n" "$BACKUP_INTERVAL_MINUTES"
  printf "  ${ICON_TIME} Next hourly backup: %s\n" "$(next_backup_time "$BACKUP_INTERVAL_MINUTES")"
  printf "  ${ICON_TIME} Daily backup time: %s:00\n" "$BACKUP_DAILY_TIME"
  printf "  ${ICON_TIME} Next daily backup: %s\n" "$(next_daily_backup "$BACKUP_DAILY_TIME")"
  echo

  # Calculate total storage
  local total_size=0
  for tier_dir in "$BACKUP_PATH/hourly" "$BACKUP_PATH/daily"; do
    if [ -d "$tier_dir" ]; then
      local size
      size=$(get_dir_size "$tier_dir")
      total_size=$((total_size + size))
    fi
  done
  total_size=$((total_size + manual_size))

  printf "${BOLD}${ICON_SIZE} Total Backup Storage: %s${NC}\n" "$(format_bytes "$total_size")"
  echo

  # Show trends if requested
  if [ "$SHOW_TRENDS" = "1" ]; then
    show_trends
  fi

  # Show backup configuration
  if [ "$SHOW_DETAILS" = "1" ]; then
    printf "${BOLD}⚙️ Backup Configuration${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    printf "  Backup directory: %s\n" "$BACKUP_PATH"
    printf "  Hourly retention: %s hours\n" "$BACKUP_RETENTION_HOURS"
    printf "  Daily retention: %s days\n" "$BACKUP_RETENTION_DAYS"
    printf "  Interval: every %s minutes\n" "$BACKUP_INTERVAL_MINUTES"
    printf "  Daily backup time: %s:00\n" "$BACKUP_DAILY_TIME"
    echo
  fi

  printf "${GREEN}${ICON_SUCCESS} Backup status check complete!${NC}\n"
  echo
}

main "$@"
scripts/bash/db-guard.sh (new file, 178 lines)
@@ -0,0 +1,178 @@
#!/bin/bash
# Continuously ensure the MySQL runtime tmpfs contains the restored data.
# If the runtime tables are missing (for example after a host reboot),
# automatically rerun db-import-conditional to hydrate from backups.
set -euo pipefail

log(){ echo "🛡️ [db-guard] $*"; }
warn(){ echo "⚠️ [db-guard] $*" >&2; }
err(){ echo "❌ [db-guard] $*" >&2; }

MYSQL_HOST="${CONTAINER_MYSQL:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_PASS="${MYSQL_ROOT_PASSWORD:-root}"
IMPORT_SCRIPT="${DB_GUARD_IMPORT_SCRIPT:-/tmp/db-import-conditional.sh}"

RECHECK_SECONDS="${DB_GUARD_RECHECK_SECONDS:-120}"
RETRY_SECONDS="${DB_GUARD_RETRY_SECONDS:-10}"
WAIT_ATTEMPTS="${DB_GUARD_WAIT_ATTEMPTS:-60}"
VERIFY_INTERVAL="${DB_GUARD_VERIFY_INTERVAL_SECONDS:-0}"
VERIFY_FILE="${DB_GUARD_VERIFY_FILE:-/tmp/db-guard.last-verify}"
HEALTH_FILE="${DB_GUARD_HEALTH_FILE:-/tmp/db-guard.ready}"
STATUS_FILE="${DB_GUARD_STATUS_FILE:-/tmp/db-guard.status}"
ERROR_FILE="${DB_GUARD_ERROR_FILE:-/tmp/db-guard.error}"
MODULE_SQL_HOST_PATH="${MODULE_SQL_HOST_PATH:-/modules-sql}"

declare -a DB_SCHEMAS=()
for var in DB_AUTH_NAME DB_WORLD_NAME DB_CHARACTERS_NAME DB_PLAYERBOTS_NAME; do
  value="${!var:-}"
  if [ -n "$value" ]; then
    DB_SCHEMAS+=("$value")
  fi
done

if [ -n "${DB_GUARD_EXTRA_DATABASES:-}" ]; then
  IFS=',' read -ra extra <<< "${DB_GUARD_EXTRA_DATABASES}"
  for db in "${extra[@]}"; do
    if [ -n "${db// }" ]; then
      DB_SCHEMAS+=("${db// }")
    fi
  done
fi

if [ "${#DB_SCHEMAS[@]}" -eq 0 ]; then
  DB_SCHEMAS=(acore_auth acore_world acore_characters)
fi

SCHEMA_LIST_SQL="$(printf "'%s'," "${DB_SCHEMAS[@]}")"
SCHEMA_LIST_SQL="${SCHEMA_LIST_SQL%,}"

mark_ready(){
  mkdir -p "$(dirname "$HEALTH_FILE")" 2>/dev/null || true
  printf '%s\t%s\n' "$(date -Iseconds)" "$*" | tee "$STATUS_FILE" >/dev/null
  : > "$ERROR_FILE"
  printf '%s\n' "$*" > "$HEALTH_FILE"
}

mark_unhealthy(){
  printf '%s\t%s\n' "$(date -Iseconds)" "$*" | tee "$ERROR_FILE" >&2
  rm -f "$HEALTH_FILE" 2>/dev/null || true
}

wait_for_mysql(){
  local attempts="$WAIT_ATTEMPTS"
  while [ "$attempts" -gt 0 ]; do
    if MYSQL_PWD="$MYSQL_PASS" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -e "SELECT 1" >/dev/null 2>&1; then
      return 0
    fi
    attempts=$((attempts - 1))
    sleep "$RETRY_SECONDS"
  done
  return 1
}

table_count(){
  local query="SELECT COUNT(*) FROM information_schema.tables WHERE table_schema IN (${SCHEMA_LIST_SQL});"
  MYSQL_PWD="$MYSQL_PASS" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -N -B -e "$query"
}

rehydrate(){
  if [ ! -x "$IMPORT_SCRIPT" ]; then
    err "Import script not found at ${IMPORT_SCRIPT}"
    return 1
  fi
  "$IMPORT_SCRIPT"
}

ensure_dbimport_conf(){
  local conf="/azerothcore/env/dist/etc/dbimport.conf"
  local dist="${conf}.dist"
  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
    cp "$dist" "$conf"
  fi
  mkdir -p /azerothcore/env/dist/temp
}

sync_host_stage_files(){
  local host_root="${MODULE_SQL_HOST_PATH}"
  [ -d "$host_root" ] || return 0
  for dir in db_world db_characters db_auth db_playerbots; do
    local src="$host_root/$dir"
    local dest="/azerothcore/data/sql/updates/$dir"
    mkdir -p "$dest"
    rm -f "$dest"/MODULE_*.sql >/dev/null 2>&1 || true
    if [ -d "$src" ]; then
      cp -a "$src"/MODULE_*.sql "$dest"/ >/dev/null 2>&1 || true
    fi
  done
}

dbimport_verify(){
  local bin_dir="/azerothcore/env/dist/bin"
  ensure_dbimport_conf
  sync_host_stage_files
  if [ ! -x "${bin_dir}/dbimport" ]; then
    warn "dbimport binary not found at ${bin_dir}/dbimport"
    return 1
  fi
  log "Running dbimport verification sweep..."
  if (cd "$bin_dir" && ./dbimport); then
    log "dbimport verification finished successfully"
    return 0
  fi
  warn "dbimport verification reported issues - review dbimport logs"
  return 1
}

maybe_run_verification(){
  if [ "${VERIFY_INTERVAL}" -lt 0 ]; then
    return 0
  fi
  local now last_run=0
  now="$(date +%s)"
  if [ -f "$VERIFY_FILE" ]; then
    last_run="$(cat "$VERIFY_FILE" 2>/dev/null || echo 0)"
    if [ "$VERIFY_INTERVAL" -eq 0 ]; then
      return 0
    fi
    if [ $((now - last_run)) -lt "${VERIFY_INTERVAL}" ]; then
      return 0
    fi
  fi
  if dbimport_verify; then
    echo "$now" > "$VERIFY_FILE"
  else
    warn "dbimport verification failed; will retry in ${VERIFY_INTERVAL}s"
  fi
}

log "Watching MySQL (${MYSQL_HOST}:${MYSQL_PORT}) for ${#DB_SCHEMAS[@]} schemas: ${DB_SCHEMAS[*]}"

while true; do
  if ! wait_for_mysql; then
    mark_unhealthy "MySQL is unreachable after ${WAIT_ATTEMPTS} attempts"
    sleep "$RETRY_SECONDS"
    continue
  fi

  count="$(table_count 2>/dev/null || echo "")"
  if [ -n "$count" ]; then
    if [ "$count" -gt 0 ] 2>/dev/null; then
      mark_ready "Detected ${count} tables across tracked schemas"
      maybe_run_verification
      sleep "$RECHECK_SECONDS"
      continue
    fi
  fi

  warn "No tables detected across ${DB_SCHEMAS[*]}; running rehydrate workflow..."
  if rehydrate; then
    log "Rehydrate complete - rechecking tables"
    sleep 5
    continue
  fi

  mark_unhealthy "Rehydrate workflow failed - retrying in ${RETRY_SECONDS}s"
  sleep "$RETRY_SECONDS"
done
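db-guard maintains /tmp/db-guard.ready only while the tracked schemas have tables, so an external probe or container healthcheck can key off its marker files; a minimal sketch (paths are the script's own defaults):

```bash
#!/bin/bash
# Probe db-guard state via its marker files.
if test -f /tmp/db-guard.ready; then
    echo "ready: $(cat /tmp/db-guard.status 2>/dev/null)"
else
    echo "not ready: $(cat /tmp/db-guard.error 2>/dev/null)" >&2
    exit 1
fi
```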
scripts/bash/db-health-check.sh (new executable file, 389 lines)
@@ -0,0 +1,389 @@
#!/bin/bash
# Database Health Check Script
# Provides comprehensive health status of AzerothCore databases
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_ERROR="❌"
ICON_INFO="ℹ️"
ICON_DB="🗄️"
ICON_SIZE="💾"
ICON_TIME="🕐"
ICON_MODULE="📦"
ICON_UPDATE="🔄"

# Default values
VERBOSE=0
SHOW_PENDING=0
SHOW_MODULES=1
CONTAINER_NAME="ac-mysql"

usage() {
  cat <<'EOF'
Usage: ./db-health-check.sh [options]

Check the health status of AzerothCore databases.

Options:
  -v, --verbose         Show detailed information
  -p, --pending         Show pending updates
  -m, --no-modules      Hide module update information
  -c, --container NAME  MySQL container name (default: ac-mysql)
  -h, --help            Show this help

Examples:
  ./db-health-check.sh
  ./db-health-check.sh --verbose --pending
  ./db-health-check.sh --container ac-mysql-custom

EOF
}

# Parse arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    -v|--verbose) VERBOSE=1; shift;;
    -p|--pending) SHOW_PENDING=1; shift;;
    -m|--no-modules) SHOW_MODULES=0; shift;;
    -c|--container) CONTAINER_NAME="$2"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown option: $1"; usage; exit 1;;
  esac
done

# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
  set -a
  # shellcheck disable=SC1091
  source "$PROJECT_ROOT/.env"
  set +a
fi

MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
DB_AUTH_NAME="${DB_AUTH_NAME:-acore_auth}"
DB_WORLD_NAME="${DB_WORLD_NAME:-acore_world}"
DB_CHARACTERS_NAME="${DB_CHARACTERS_NAME:-acore_characters}"
DB_PLAYERBOTS_NAME="${DB_PLAYERBOTS_NAME:-acore_playerbots}"

# MySQL query helper
mysql_query() {
  local database="${1:-}"
  local query="$2"

  if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
    echo "Error: MYSQL_ROOT_PASSWORD not set" >&2
    return 1
  fi

  if command -v docker >/dev/null 2>&1; then
    if [ -n "$database" ]; then
      docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
    else
      docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
    fi
  else
    if [ -n "$database" ]; then
      mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
    else
      mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
    fi
  fi
}

# Format bytes to human readable
format_bytes() {
  local bytes=$1
  if [ "$bytes" -lt 1024 ]; then
    echo "${bytes}B"
  elif [ "$bytes" -lt 1048576 ]; then
    echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1024}")KB"
  elif [ "$bytes" -lt 1073741824 ]; then
    echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1048576}")MB"
  else
    echo "$(awk "BEGIN {printf \"%.2f\", $bytes/1073741824}")GB"
  fi
}

# Check if database exists
db_exists() {
  local db_name="$1"
  local count
  count=$(mysql_query "" "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME='$db_name'" 2>/dev/null || echo "0")
  [ "$count" = "1" ]
}

# Get database size
get_db_size() {
  local db_name="$1"
  mysql_query "" "SELECT IFNULL(SUM(data_length + index_length), 0) FROM information_schema.TABLES WHERE table_schema='$db_name'" 2>/dev/null || echo "0"
}

# Get update count
get_update_count() {
  local db_name="$1"
  local state="${2:-}"

  if [ -n "$state" ]; then
    mysql_query "$db_name" "SELECT COUNT(*) FROM updates WHERE state='$state'" 2>/dev/null || echo "0"
  else
    mysql_query "$db_name" "SELECT COUNT(*) FROM updates" 2>/dev/null || echo "0"
  fi
}

# Get last update timestamp
get_last_update() {
  local db_name="$1"
  mysql_query "$db_name" "SELECT IFNULL(MAX(timestamp), 'Never') FROM updates" 2>/dev/null || echo "Never"
}

# Get table count
get_table_count() {
  local db_name="$1"
  mysql_query "" "SELECT COUNT(*) FROM information_schema.TABLES WHERE table_schema='$db_name'" 2>/dev/null || echo "0"
}

# Get character count
get_character_count() {
  mysql_query "$DB_CHARACTERS_NAME" "SELECT COUNT(*) FROM characters" 2>/dev/null || echo "0"
}

# Get active players (logged in last 24 hours)
get_active_players() {
  mysql_query "$DB_CHARACTERS_NAME" "SELECT COUNT(*) FROM characters WHERE logout_time > UNIX_TIMESTAMP(NOW() - INTERVAL 1 DAY)" 2>/dev/null || echo "0"
}

# Get account count
get_account_count() {
  mysql_query "$DB_AUTH_NAME" "SELECT COUNT(*) FROM account" 2>/dev/null || echo "0"
}

# Get pending updates
get_pending_updates() {
  local db_name="$1"
  mysql_query "$db_name" "SELECT name FROM updates WHERE state='PENDING' ORDER BY name" 2>/dev/null || true
}

# Check database health
check_database() {
  local db_name="$1"
  local display_name="$2"

  if ! db_exists "$db_name"; then
    printf "  ${RED}${ICON_ERROR} %s (%s)${NC}\n" "$display_name" "$db_name"
    printf "    ${RED}Database does not exist${NC}\n"
    return 1
  fi

  printf "  ${GREEN}${ICON_SUCCESS} %s (%s)${NC}\n" "$display_name" "$db_name"

  local update_count module_count last_update db_size table_count
  update_count=$(get_update_count "$db_name" "RELEASED")
  module_count=$(get_update_count "$db_name" "MODULE")
  last_update=$(get_last_update "$db_name")
  db_size=$(get_db_size "$db_name")
  table_count=$(get_table_count "$db_name")

  printf "    ${ICON_UPDATE} Updates: %s applied" "$update_count"
  if [ "$module_count" != "0" ] && [ "$SHOW_MODULES" = "1" ]; then
    printf " (%s module)" "$module_count"
  fi
  printf "\n"

  printf "    ${ICON_TIME} Last update: %s\n" "$last_update"
  printf "    ${ICON_SIZE} Size: %s (%s tables)\n" "$(format_bytes "$db_size")" "$table_count"

  if [ "$VERBOSE" = "1" ]; then
    local custom_count archived_count
    custom_count=$(get_update_count "$db_name" "CUSTOM")
    archived_count=$(get_update_count "$db_name" "ARCHIVED")

    if [ "$custom_count" != "0" ]; then
      printf "    ${ICON_INFO} Custom updates: %s\n" "$custom_count"
    fi
    if [ "$archived_count" != "0" ]; then
      printf "    ${ICON_INFO} Archived updates: %s\n" "$archived_count"
    fi
  fi

  # Show pending updates if requested
  if [ "$SHOW_PENDING" = "1" ]; then
    local pending_updates
    pending_updates=$(get_pending_updates "$db_name")
    if [ -n "$pending_updates" ]; then
      printf "    ${YELLOW}${ICON_WARNING} Pending updates:${NC}\n"
      while IFS= read -r update; do
        printf "      - %s\n" "$update"
      done <<< "$pending_updates"
    fi
  fi

  echo
}

# Show module updates summary
show_module_updates() {
  if [ "$SHOW_MODULES" = "0" ]; then
    return
  fi

  printf "${BOLD}${ICON_MODULE} Module Updates${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  # Get module updates from world database (most modules update world DB)
  local module_updates
  module_updates=$(mysql_query "$DB_WORLD_NAME" "SELECT SUBSTRING_INDEX(name, '_', 1) as module, COUNT(*) as count FROM updates WHERE state='MODULE' GROUP BY module ORDER BY module" 2>/dev/null || echo "")

  if [ -z "$module_updates" ]; then
    printf "  ${ICON_INFO} No module updates detected\n\n"
    return
  fi

  while IFS=$'\t' read -r module count; do
    printf "  ${GREEN}${ICON_SUCCESS}${NC} %s: %s update(s)\n" "$module" "$count"
  done <<< "$module_updates"
  echo
}

# Get backup information
get_backup_info() {
  local backup_dir="$PROJECT_ROOT/storage/backups"

  if [ ! -d "$backup_dir" ]; then
    printf "  ${ICON_INFO} No backups directory found\n"
    return
  fi

  # Check for latest backup (initialized to empty to stay safe under `set -u`)
  local latest_hourly="" latest_daily=""
  if [ -d "$backup_dir/hourly" ]; then
    latest_hourly=$(ls -1t "$backup_dir/hourly" 2>/dev/null | head -n1 || echo "")
  fi
  if [ -d "$backup_dir/daily" ]; then
    latest_daily=$(ls -1t "$backup_dir/daily" 2>/dev/null | head -n1 || echo "")
  fi

  if [ -n "$latest_hourly" ]; then
    # Calculate time ago
    local backup_timestamp="${latest_hourly:0:8}_${latest_hourly:9:6}"
    local backup_epoch
    backup_epoch=$(date -d "${backup_timestamp:0:4}-${backup_timestamp:4:2}-${backup_timestamp:6:2} ${backup_timestamp:9:2}:${backup_timestamp:11:2}:${backup_timestamp:13:2}" +%s 2>/dev/null || echo "0")
    local now_epoch
    now_epoch=$(date +%s)
    local diff=$((now_epoch - backup_epoch))
    local hours=$((diff / 3600))
    local minutes=$(((diff % 3600) / 60))

    if [ "$hours" -gt 0 ]; then
      printf "  ${ICON_TIME} Last hourly backup: %s hours ago\n" "$hours"
    else
      printf "  ${ICON_TIME} Last hourly backup: %s minutes ago\n" "$minutes"
    fi
  fi

  if [ -n "$latest_daily" ] && [ "$latest_daily" != "$latest_hourly" ]; then
    local backup_timestamp="${latest_daily:0:8}_${latest_daily:9:6}"
    local backup_epoch
    backup_epoch=$(date -d "${backup_timestamp:0:4}-${backup_timestamp:4:2}-${backup_timestamp:6:2} ${backup_timestamp:9:2}:${backup_timestamp:11:2}:${backup_timestamp:13:2}" +%s 2>/dev/null || echo "0")
    local now_epoch
    now_epoch=$(date +%s)
    local diff=$((now_epoch - backup_epoch))
    local days=$((diff / 86400))

    printf "  ${ICON_TIME} Last daily backup: %s days ago\n" "$days"
  fi
}

# Main health check
main() {
  echo
  printf "${BOLD}${BLUE}${ICON_DB} AZEROTHCORE DATABASE HEALTH CHECK${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Test MySQL connection
  if ! mysql_query "" "SELECT 1" >/dev/null 2>&1; then
    printf "${RED}${ICON_ERROR} Cannot connect to MySQL server${NC}\n"
    printf "  Host: %s:%s\n" "$MYSQL_HOST" "$MYSQL_PORT"
    printf "  User: %s\n" "$MYSQL_USER"
    printf "  Container: %s\n\n" "$CONTAINER_NAME"
    exit 1
  fi

  printf "${BOLD}${ICON_DB} Database Status${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Check each database
  check_database "$DB_AUTH_NAME" "Auth DB"
  check_database "$DB_WORLD_NAME" "World DB"
  check_database "$DB_CHARACTERS_NAME" "Characters DB"

  # Optional: Check playerbots database
  if db_exists "$DB_PLAYERBOTS_NAME"; then
    check_database "$DB_PLAYERBOTS_NAME" "Playerbots DB"
  fi

  # Show character/account statistics
  printf "${BOLD}${CYAN}📊 Server Statistics${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  local account_count character_count active_count
  account_count=$(get_account_count)
  character_count=$(get_character_count)
  active_count=$(get_active_players)

  printf "  ${ICON_INFO} Accounts: %s\n" "$account_count"
  printf "  ${ICON_INFO} Characters: %s\n" "$character_count"
  printf "  ${ICON_INFO} Active (24h): %s\n" "$active_count"
  echo

  # Show module updates
  show_module_updates

  # Show backup information
  printf "${BOLD}${ICON_SIZE} Backup Information${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  get_backup_info
  echo

  # Calculate total database size
  local total_size=0
  for db in "$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME"; do
    if db_exists "$db"; then
      local size
      size=$(get_db_size "$db")
      total_size=$((total_size + size))
    fi
  done

  if db_exists "$DB_PLAYERBOTS_NAME"; then
    local size
    size=$(get_db_size "$DB_PLAYERBOTS_NAME")
    total_size=$((total_size + size))
  fi

  printf "${BOLD}💾 Total Database Storage: %s${NC}\n" "$(format_bytes "$total_size")"
  echo

  printf "${GREEN}${ICON_SUCCESS} Health check complete!${NC}\n"
  echo
}

main "$@"
@@ -34,6 +34,62 @@ Notes:
 EOF
 }
 
+verify_databases_populated() {
+  local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
+  local mysql_port="${MYSQL_PORT:-3306}"
+  local mysql_user="${MYSQL_USER:-root}"
+  local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
+  local db_auth="${DB_AUTH_NAME:-acore_auth}"
+  local db_world="${DB_WORLD_NAME:-acore_world}"
+  local db_characters="${DB_CHARACTERS_NAME:-acore_characters}"
+
+  if ! command -v mysql >/dev/null 2>&1; then
+    echo "⚠️ mysql client is not available to verify restoration status"
+    return 1
+  fi
+
+  local query="SELECT COUNT(*) FROM information_schema.tables WHERE table_schema IN ('$db_auth','$db_world','$db_characters');"
+  local table_count
+  if ! table_count=$(MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -N -B -e "$query" 2>/dev/null); then
+    echo "⚠️ Unable to query MySQL at ${mysql_host}:${mysql_port} to verify restoration status"
+    return 1
+  fi
+
+  if [ "${table_count:-0}" -gt 0 ]; then
+    return 0
+  fi
+
+  echo "⚠️ MySQL is reachable but no AzerothCore tables were found"
+  return 1
+}
+
+wait_for_mysql(){
+  local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
+  local mysql_port="${MYSQL_PORT:-3306}"
+  local mysql_user="${MYSQL_USER:-root}"
+  local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
+  local max_attempts=30
+  local delay=2
+  while [ $max_attempts -gt 0 ]; do
+    if MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -e "SELECT 1" >/dev/null 2>&1; then
+      return 0
+    fi
+    max_attempts=$((max_attempts - 1))
+    sleep "$delay"
+  done
+  echo "❌ Unable to connect to MySQL at ${mysql_host}:${mysql_port} after multiple attempts"
+  return 1
+}
+
+ensure_dbimport_conf(){
+  local conf="/azerothcore/env/dist/etc/dbimport.conf"
+  local dist="${conf}.dist"
+  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
+    cp "$dist" "$conf"
+  fi
+  mkdir -p /azerothcore/env/dist/temp
+}
+
 case "${1:-}" in
   -h|--help)
     print_help
@@ -50,6 +106,11 @@ esac
 echo "🔧 Conditional AzerothCore Database Import"
 echo "========================================"
 
+if ! wait_for_mysql; then
+  echo "❌ MySQL service is unavailable; aborting database import"
+  exit 1
+fi
+
 # Restoration status markers - use writable location
 RESTORE_STATUS_DIR="/var/lib/mysql-persistent"
 MARKER_STATUS_DIR="/tmp"
@@ -70,10 +131,17 @@ fi
 echo "🔍 Checking restoration status..."
 
 if [ -f "$RESTORE_SUCCESS_MARKER" ]; then
-  echo "✅ Backup restoration completed successfully"
-  cat "$RESTORE_SUCCESS_MARKER" || true
-  echo "🚫 Skipping database import - data already restored from backup"
-  exit 0
+  if verify_databases_populated; then
+    echo "✅ Backup restoration completed successfully"
+    cat "$RESTORE_SUCCESS_MARKER" || true
+    echo "🚫 Skipping database import - data already restored from backup"
+    exit 0
+  fi
+
+  echo "⚠️ Restoration marker found, but databases are empty - forcing re-import"
+  rm -f "$RESTORE_SUCCESS_MARKER" 2>/dev/null || true
+  rm -f "$RESTORE_SUCCESS_MARKER_TMP" 2>/dev/null || true
+  rm -f "$RESTORE_FAILED_MARKER" 2>/dev/null || true
 fi
 
 if [ -f "$RESTORE_FAILED_MARKER" ]; then
@@ -280,9 +348,70 @@ if [ -n "$backup_path" ]; then
   return $([ "$restore_success" = true ] && echo 0 || echo 1)
 }
 
+verify_and_update_restored_databases() {
+  echo "🔍 Verifying restored database integrity..."
+
+  # Check if dbimport is available
+  if [ ! -f "/azerothcore/env/dist/bin/dbimport" ]; then
+    echo "⚠️ dbimport not available, skipping verification"
+    return 0
+  fi
+
+  ensure_dbimport_conf
+
+  cd /azerothcore/env/dist/bin
+  echo "🔄 Running dbimport to apply any missing updates..."
+  if ./dbimport; then
+    echo "✅ Database verification complete - all updates current"
+  else
+    echo "⚠️ dbimport reported issues - check logs"
+    return 1
+  fi
+
+  # Verify critical tables exist
+  echo "🔍 Checking critical tables..."
+  local critical_tables=("account" "characters" "creature" "quest_template")
+  local missing_tables=0
+
+  for table in "${critical_tables[@]}"; do
+    local db_name="$DB_WORLD_NAME"
+    case "$table" in
+      account) db_name="$DB_AUTH_NAME" ;;
+      characters) db_name="$DB_CHARACTERS_NAME" ;;
+    esac
+
+    if ! mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} \
+         -e "SELECT 1 FROM ${db_name}.${table} LIMIT 1" >/dev/null 2>&1; then
+      echo "⚠️ Critical table missing: ${db_name}.${table}"
+      missing_tables=$((missing_tables + 1))
+    fi
+  done

+  if [ "$missing_tables" -gt 0 ]; then
+    echo "⚠️ ${missing_tables} critical tables missing after restore"
+    return 1
+  fi
+
+  echo "✅ All critical tables verified"
+  return 0
+}
+
 if restore_backup "$backup_path"; then
   echo "$(date): Backup successfully restored from $backup_path" > "$RESTORE_SUCCESS_MARKER"
   echo "🎉 Backup restoration completed successfully!"
+
+  # Verify and apply missing updates
+  verify_and_update_restored_databases
+
+  if [ -x "/tmp/restore-and-stage.sh" ]; then
+    echo "🔧 Running restore-time module SQL staging..."
+    MODULES_DIR="/modules" \
+    RESTORE_SOURCE_DIR="$backup_path" \
+    /tmp/restore-and-stage.sh
+  else
+    echo "ℹ️ restore-and-stage helper not available; skipping automatic module SQL staging"
+  fi
+
   exit 0
 else
   echo "$(date): Backup restoration failed - proceeding with fresh setup" > "$RESTORE_FAILED_MARKER"
@@ -302,29 +431,7 @@ CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COL
 SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
 echo "✅ Fresh databases created - proceeding with schema import"
 
-echo "📝 Creating dbimport configuration..."
-mkdir -p /azerothcore/env/dist/etc
-TEMP_DIR="/azerothcore/env/dist/temp"
-mkdir -p "$TEMP_DIR"
-MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
-cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
-LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
-WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
-CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
-Updates.EnableDatabases = 7
-Updates.AutoSetup = 1
-TempDir = "${TEMP_DIR}"
-MySQLExecutable = "${MYSQL_EXECUTABLE}"
-Updates.AllowedModules = "all"
-LoginDatabase.WorkerThreads = 1
-LoginDatabase.SynchThreads = 1
-WorldDatabase.WorkerThreads = 1
-WorldDatabase.SynchThreads = 1
-CharacterDatabase.WorkerThreads = 1
-CharacterDatabase.SynchThreads = 1
-SourceDirectory = "/azerothcore"
-Updates.ExceptionShutdownDelay = 10000
-EOF
+ensure_dbimport_conf
 
 echo "🚀 Running database import..."
 cd /azerothcore/env/dist/bin
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copy user database files or full backup archives from database-import/ to backup system
+# Copy user database files or full backup archives from import/db/ or database-import/ to backup system
 set -euo pipefail
 
 # Source environment variables
@@ -9,10 +9,20 @@ if [ -f ".env" ]; then
   set +a
 fi
 
-IMPORT_DIR="./database-import"
+# Support both new (import/db) and legacy (database-import) directories
+IMPORT_DIR_NEW="./import/db"
+IMPORT_DIR_LEGACY="./database-import"
+
+# Prefer new directory if it has files, otherwise fall back to legacy
+IMPORT_DIR="$IMPORT_DIR_NEW"
+if [ ! -d "$IMPORT_DIR" ] || [ -z "$(ls -A "$IMPORT_DIR" 2>/dev/null)" ]; then
+  IMPORT_DIR="$IMPORT_DIR_LEGACY"
+fi
 STORAGE_PATH="${STORAGE_PATH:-./storage}"
 STORAGE_PATH_LOCAL="${STORAGE_PATH_LOCAL:-./local-storage}"
 BACKUP_ROOT="${STORAGE_PATH}/backups"
+MYSQL_DATA_VOLUME_NAME="${MYSQL_DATA_VOLUME_NAME:-mysql-data}"
+ALPINE_IMAGE="${ALPINE_IMAGE:-alpine:latest}"
 
 shopt -s nullglob
 sql_files=("$IMPORT_DIR"/*.sql "$IMPORT_DIR"/*.sql.gz)
@@ -24,7 +34,25 @@ if [ ! -d "$IMPORT_DIR" ] || [ ${#sql_files[@]} -eq 0 ]; then
 fi
 
 # Exit if backup system already has databases restored
-if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
+has_restore_marker(){
+  # Prefer Docker volume marker (post-migration), fall back to legacy host path
+  if command -v docker >/dev/null 2>&1; then
+    if docker volume inspect "$MYSQL_DATA_VOLUME_NAME" >/dev/null 2>&1; then
+      if docker run --rm \
+           -v "${MYSQL_DATA_VOLUME_NAME}:/var/lib/mysql-persistent" \
+           "$ALPINE_IMAGE" \
+           sh -c 'test -f /var/lib/mysql-persistent/.restore-completed' >/dev/null 2>&1; then
+        return 0
+      fi
+    fi
+  fi
+  if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
+    return 0
+  fi
+  return 1
+}
+
+if has_restore_marker; then
   echo "✅ Database already restored - skipping import"
   exit 0
 fi
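In practice, seeding a deployment with an existing dump is now just a matter of dropping it in the new directory before setup (the file name below is illustrative; the script accepts both .sql and .sql.gz files):

```bash
mkdir -p import/db
cp ~/dumps/my-characters-backup.sql.gz import/db/
# The legacy database-import/ location is still honored as a fallback.
```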
@@ -477,20 +477,11 @@ load_sql_helper(){
   err "SQL helper not found; expected manage-modules-sql.sh to be available"
 }
 
-execute_module_sql(){
-  SQL_EXECUTION_FAILED=0
-  if declare -f execute_module_sql_scripts >/dev/null 2>&1; then
-    echo 'Executing module SQL scripts...'
-    if execute_module_sql_scripts; then
-      echo 'SQL execution complete.'
-    else
-      echo '⚠️ Module SQL scripts reported errors'
-      SQL_EXECUTION_FAILED=1
-    fi
-  else
-    info "SQL helper did not expose execute_module_sql_scripts; skipping module SQL execution"
-  fi
-}
+# REMOVED: stage_module_sql_files() and execute_module_sql()
+# These functions were part of build-time SQL staging that created files in
+# /azerothcore/modules/*/data/sql/updates/ which are NEVER scanned by AzerothCore's DBUpdater.
+# Module SQL is now staged at runtime by stage-modules.sh which copies files to
+# /azerothcore/data/sql/updates/ (core directory) where they ARE scanned and processed.
 
 track_module_state(){
   echo 'Checking for module changes that require rebuild...'
@@ -591,20 +582,11 @@ main(){
   remove_disabled_modules
   install_enabled_modules
   manage_configuration_files
-  info "SQL execution gate: MODULES_SKIP_SQL=${MODULES_SKIP_SQL:-0}"
-  if [ "${MODULES_SKIP_SQL:-0}" = "1" ]; then
-    info "Skipping module SQL execution (MODULES_SKIP_SQL=1)"
-  else
-    info "Initiating module SQL helper"
-    load_sql_helper
-    info "SQL helper loaded from ${SQL_HELPER_PATH:-unknown}"
-    execute_module_sql
-  fi
-  track_module_state
-
-  if [ "${SQL_EXECUTION_FAILED:-0}" = "1" ]; then
-    warn "Module SQL execution reported issues; review logs above."
-  fi
+  # NOTE: Module SQL staging is now handled at runtime by stage-modules.sh
+  # which copies SQL files to /azerothcore/data/sql/updates/ after containers start.
+  # Build-time SQL staging has been removed as it created files that were never processed.
   track_module_state
 
   echo 'Module management complete.'
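To see the runtime staging result this comment describes, the staged files can be listed in the core update directory; the container name and file names here are assumptions for illustration, but the MODULE_*.sql pattern and db_* subdirectories match the db-guard sync shown earlier:

```bash
docker exec ac-worldserver ls /azerothcore/data/sql/updates/db_world/
# MODULE_mod-transmog.sql  MODULE_mod-ahbot.sql  (illustrative output)
```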
139
scripts/bash/repair-storage-permissions.sh
Executable file
139
scripts/bash/repair-storage-permissions.sh
Executable file
@@ -0,0 +1,139 @@
#!/bin/bash
# Normalize permissions across storage/ and local-storage/ so host processes
# (and CI tools) can read/write module metadata without manual chown.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="$PROJECT_ROOT/.env"
TEMPLATE_FILE="$PROJECT_ROOT/.env.template"

usage(){
  cat <<'EOF'
Usage: repair-storage-permissions.sh [options]

Ensures common storage directories are writable by the current host user.

Options:
  --path <dir>   Additional directory to fix (can be passed multiple times)
  --silent       Reduce output (only errors/warnings)
  -h, --help     Show this help message
EOF
}

read_env(){
  local key="$1" default="$2" env_path="$ENV_FILE" value=""
  if [ -f "$env_path" ]; then
    value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ] && [ -f "$TEMPLATE_FILE" ]; then
    value="$(grep -E "^${key}=" "$TEMPLATE_FILE" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
  fi
  if [ -z "$value" ]; then
    value="$default"
  fi
  printf '%s\n' "$value"
}

silent=0
declare -a extra_paths=()
while [ $# -gt 0 ]; do
  case "$1" in
    --path)
      shift
      [ $# -gt 0 ] || { echo "Missing value for --path" >&2; exit 1; }
      extra_paths+=("$1")
      ;;
    --silent)
      silent=1
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1" >&2
      usage
      exit 1
      ;;
  esac
  shift
done

log(){ [ "$silent" -eq 1 ] || echo "$*"; }
warn(){ echo "⚠️ $*" >&2; }

resolve_path(){
  local path="$1"
  if [[ "$path" != /* ]]; then
    path="${path#./}"
    path="$PROJECT_ROOT/$path"
  fi
  printf '%s\n' "$(cd "$(dirname "$path")" 2>/dev/null && pwd 2>/dev/null)/$(basename "$path")"
}

ensure_host_writable(){
  local target="$1"
  [ -n "$target" ] || return 0
  mkdir -p "$target" 2>/dev/null || true
  [ -d "$target" ] || { warn "Path not found: $target"; return 0; }

  local uid gid
  uid="$(id -u)"
  gid="$(id -g)"

  if chown -R "$uid":"$gid" "$target" 2>/dev/null; then
    :
  elif command -v docker >/dev/null 2>&1; then
    local helper_image
    helper_image="$(read_env ALPINE_IMAGE "alpine:latest")"
    if ! docker run --rm -u 0:0 -v "$target":/workspace "$helper_image" \
        sh -c "chown -R ${uid}:${gid} /workspace" >/dev/null 2>&1; then
      warn "Failed to adjust ownership for $target"
      return 1
    fi
  else
    warn "Cannot adjust ownership for $target (docker unavailable)"
    return 1
  fi

  chmod -R ug+rwX "$target" 2>/dev/null || true
  return 0
}

STORAGE_PATH="$(read_env STORAGE_PATH "./storage")"
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"

declare -a targets=(
  "$STORAGE_PATH"
  "$STORAGE_PATH/modules"
  "$STORAGE_PATH/modules/.modules-meta"
  "$STORAGE_PATH/backups"
  "$STORAGE_PATH/logs"
  "$STORAGE_PATH/lua_scripts"
  "$STORAGE_PATH/install-markers"
  "$STORAGE_PATH/client-data"
  "$STORAGE_PATH/config"
  "$LOCAL_STORAGE_PATH"
  "$LOCAL_STORAGE_PATH/modules"
  "$LOCAL_STORAGE_PATH/client-data-cache"
  "$LOCAL_STORAGE_PATH/source"
  "$LOCAL_STORAGE_PATH/images"
)

# Guard the expansion: under `set -u`, expanding an empty array errors on bash < 4.4.
if [ ${#extra_paths[@]} -gt 0 ]; then
  targets+=("${extra_paths[@]}")
fi

declare -A seen=()
for raw in "${targets[@]}"; do
  [ -n "$raw" ] || continue
  resolved="$(resolve_path "$raw")"
  if [ -n "${seen[$resolved]:-}" ]; then
    continue
  fi
  seen["$resolved"]=1
  log "🔧 Fixing permissions for $resolved"
  ensure_host_writable "$resolved"
done

log "✅ Storage permissions refreshed"
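Typical invocations; the standard directory set comes from .env, and --path extends it (the extra directory below is illustrative):

./scripts/bash/repair-storage-permissions.sh                            # fix the standard storage trees
./scripts/bash/repair-storage-permissions.sh --silent --path ./import   # quiet run, plus one extra directory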
scripts/bash/restore-and-stage.sh (Executable file, 22 lines)
@@ -0,0 +1,22 @@
#!/bin/bash
# Refresh the module metadata after a database restore so runtime staging knows
# to re-copy SQL files.
set -euo pipefail

info(){ echo "🔧 [restore-stage] $*"; }
warn(){ echo "⚠️ [restore-stage] $*" >&2; }

MODULES_DIR="${MODULES_DIR:-/modules}"
MODULES_META_DIR="${MODULES_DIR}/.modules-meta"
RESTORE_FLAG="${MODULES_META_DIR}/.restore-prestaged"

if [ ! -d "$MODULES_DIR" ]; then
  warn "Modules directory not found at ${MODULES_DIR}; skipping restore-time staging prep."
  exit 0
fi

mkdir -p "$MODULES_META_DIR" 2>/dev/null || true
# Writing the timestamp also creates the flag file, so a separate touch is unnecessary.
echo "restore_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" > "$RESTORE_FLAG"

info "Flagged ${RESTORE_FLAG} to force staging on next ./scripts/bash/stage-modules.sh run."
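The flag is a one-shot handshake: restore-and-stage.sh drops it and stage-modules.sh consumes it. The sequence, end to end:

./scripts/bash/restore-and-stage.sh   # after a database restore: writes .restore-prestaged
./scripts/bash/stage-modules.sh       # detects the flag, re-stages module SQL, then removes it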
@@ -17,6 +17,32 @@ show_staging_step(){
  printf '%b\n' "${YELLOW}🔧 ${step}: ${message}...${NC}"
}

ensure_host_writable(){
  local target="$1"
  [ -n "$target" ] || return 0
  if [ -d "$target" ] || mkdir -p "$target" 2>/dev/null; then
    local uid gid
    uid="$(id -u)"
    gid="$(id -g)"
    if ! chown -R "$uid":"$gid" "$target" 2>/dev/null; then
      if command -v docker >/dev/null 2>&1; then
        local helper_image
        helper_image="$(read_env ALPINE_IMAGE "alpine:latest")"
        docker run --rm \
          -u 0:0 \
          -v "$target":/workspace \
          "$helper_image" \
          sh -c "chown -R ${uid}:${gid} /workspace" >/dev/null 2>&1 || true
      fi
    fi
    chmod -R u+rwX "$target" 2>/dev/null || true
  fi
}

seed_sql_ledger_if_needed(){
  : # No-op; ledger removed
}

sync_local_staging(){
  local src_root="$LOCAL_STORAGE_PATH"
  local dest_root="$STORAGE_PATH"
@@ -53,8 +79,21 @@ sync_local_staging(){
    return
  fi

  # Ensure both source and destination trees are writable by the host user.
  ensure_host_writable "$src_modules"
  ensure_host_writable "$dest_modules"

  if command -v rsync >/dev/null 2>&1; then
    # rsync may return exit code 23 (permission warnings) in WSL2 - these are harmless
    rsync -a --delete "$src_modules"/ "$dest_modules"/ || {
      local rsync_exit=$?
      if [ $rsync_exit -eq 23 ]; then
        echo "ℹ️ rsync completed with permission warnings (normal in WSL2)"
      else
        echo "⚠️ rsync failed with exit code $rsync_exit"
        return $rsync_exit
      fi
    }
  else
    find "$dest_modules" -mindepth 1 -maxdepth 1 -exec rm -rf {} + 2>/dev/null || true
    (cd "$src_modules" && tar cf - .) | (cd "$dest_modules" && tar xf -)
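If more trees ever need the same tolerance, the pattern factors into a helper; a minimal sketch, assuming set -e is active and only exit code 23 (partial transfer) should be forgiven:

sync_tree(){
  local rc=0
  rsync -a --delete "$1"/ "$2"/ || rc=$?
  # 0 = clean sync; 23 = partial transfer from permission warnings - tolerated
  [ "$rc" -eq 0 ] || [ "$rc" -eq 23 ] || return "$rc"
}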
@@ -219,7 +258,47 @@ if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
  LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi
LOCAL_STORAGE_PATH="$(canonical_path "$LOCAL_STORAGE_PATH")"
STORAGE_PATH_LOCAL="$LOCAL_STORAGE_PATH"
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
MODULES_META_DIR="$STORAGE_PATH/modules/.modules-meta"
RESTORE_PRESTAGED_FLAG="$MODULES_META_DIR/.restore-prestaged"
MODULES_ENABLED_FILE="$MODULES_META_DIR/modules-enabled.txt"
MODULE_SQL_STAGE_PATH="$(read_env MODULE_SQL_STAGE_PATH "$STORAGE_PATH/module-sql-updates")"
MODULE_SQL_STAGE_PATH="$(eval "echo \"$MODULE_SQL_STAGE_PATH\"")"
if [[ "$MODULE_SQL_STAGE_PATH" != /* ]]; then
  MODULE_SQL_STAGE_PATH="$PROJECT_DIR/$MODULE_SQL_STAGE_PATH"
fi
MODULE_SQL_STAGE_PATH="$(canonical_path "$MODULE_SQL_STAGE_PATH")"
mkdir -p "$MODULE_SQL_STAGE_PATH"
ensure_host_writable "$MODULE_SQL_STAGE_PATH"
HOST_STAGE_HELPER_IMAGE="$(read_env ALPINE_IMAGE "alpine:latest")"

declare -A ENABLED_MODULES=()

load_enabled_modules(){
  ENABLED_MODULES=()
  if [ -f "$MODULES_ENABLED_FILE" ]; then
    while IFS= read -r enabled_module; do
      enabled_module="$(echo "$enabled_module" | tr -d '\r')"
      [ -n "$enabled_module" ] || continue
      ENABLED_MODULES["$enabled_module"]=1
    done < "$MODULES_ENABLED_FILE"
  fi
}

module_is_enabled(){
  local module_dir="$1"
  # An absent or empty list means "no filter": treat every module as enabled.
  if [ ${#ENABLED_MODULES[@]} -eq 0 ]; then
    return 0
  fi
  if [ -n "${ENABLED_MODULES[$module_dir]:-}" ]; then
    return 0
  fi
  return 1
}

# Load the enabled module list (if present) so staging respects disabled modules.
load_enabled_modules

# Define module mappings (from rebuild-with-modules.sh)
declare -A MODULE_REPO_MAP=(
@@ -338,6 +417,7 @@ fi
# Stage the services
show_staging_step "Service Orchestration" "Preparing realm services"
sync_local_staging

echo "🎬 Staging services with profile: services-$TARGET_PROFILE"
echo "⏳ Pulling images and starting containers; this can take several minutes on first run."
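For reference, modules-enabled.txt (consumed by load_enabled_modules above) is a plain allow-list with one module directory name per line; when the file is absent or empty, every module is treated as enabled. Illustrative contents:

mod-aoe-loot
mod-transmog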
@@ -360,10 +440,278 @@ case "$TARGET_PROFILE" in
  modules) PROFILE_ARGS+=(--profile client-data) ;;
esac

# Stage module SQL to core updates directory (after containers start).
# The helpers below run a root Alpine container so the host-side staging tree
# can be cleaned and populated regardless of file ownership.
host_stage_clear(){
  docker run --rm \
    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
    "$HOST_STAGE_HELPER_IMAGE" \
    sh -c 'find /host-stage -type f -name "MODULE_*.sql" -delete' >/dev/null 2>&1 || true
}

host_stage_reset_dir(){
  local dir="$1"
  docker run --rm \
    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
    "$HOST_STAGE_HELPER_IMAGE" \
    sh -c "mkdir -p /host-stage/$dir && rm -f /host-stage/$dir/MODULE_*.sql" >/dev/null 2>&1 || true
}

copy_to_host_stage(){
  local file_path="$1"
  local core_dir="$2"
  local target_name="$3"
  local src_dir
  src_dir="$(dirname "$file_path")"
  local base_name
  base_name="$(basename "$file_path")"
  docker run --rm \
    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
    -v "$src_dir":/src \
    "$HOST_STAGE_HELPER_IMAGE" \
    sh -c "mkdir -p /host-stage/$core_dir && cp \"/src/$base_name\" \"/host-stage/$core_dir/$target_name\"" >/dev/null 2>&1
}
stage_module_sql_to_core() {
  show_staging_step "Module SQL Staging" "Preparing module database updates"

  # Start containers first to get access to the worldserver container
  show_staging_step "Realm Activation" "Bringing services online"
  echo "🟢 Starting services-$TARGET_PROFILE profile..."
  docker compose "${PROFILE_ARGS[@]}" up -d

  # Wait for worldserver container to be running
  echo "⏳ Waiting for worldserver container..."
  local max_wait=60
  local waited=0
  while ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver" && [ $waited -lt $max_wait ]; do
    sleep 2
    waited=$((waited + 2))
  done

  if ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver"; then
    echo "⚠️ Worldserver container not found, skipping module SQL staging"
    return 0
  fi

  if [ -f "$RESTORE_PRESTAGED_FLAG" ]; then
    echo "↻ Restore pipeline detected (flag: $RESTORE_PRESTAGED_FLAG); re-staging module SQL so worldserver can apply updates."
    rm -f "$RESTORE_PRESTAGED_FLAG" 2>/dev/null || true
  fi

  echo "📦 Staging module SQL files to core updates directory..."
  host_stage_clear

  # Create core updates directories inside the container
  docker exec ac-worldserver bash -c "
    mkdir -p /azerothcore/data/sql/updates/db_world \
             /azerothcore/data/sql/updates/db_characters \
             /azerothcore/data/sql/updates/db_auth
  " 2>/dev/null || true

  # Stage SQL from all modules
  local staged_count=0
  local total_skipped=0
  local total_failed=0
  # Remove previously staged copies; staged files are named MODULE_<module>_<file>.sql,
  # so the pattern must match the prefix (the old '*_MODULE_*' glob matched nothing).
  docker exec ac-worldserver bash -c "find /azerothcore/data/sql/updates -name 'MODULE_*.sql' -delete" >/dev/null 2>&1 || true

  shopt -s nullglob
  for db_type in db-world db-characters db-auth db-playerbots; do
    local core_dir=""
    local legacy_name=""
    case "$db_type" in
      db-world)
        core_dir="db_world"
        legacy_name="world" # Some modules use 'world' instead of 'db-world'
        ;;
      db-characters)
        core_dir="db_characters"
        legacy_name="characters"
        ;;
      db-auth)
        core_dir="db_auth"
        legacy_name="auth"
        ;;
      db-playerbots)
        core_dir="db_playerbots"
        legacy_name="playerbots"
        ;;
    esac

    docker exec ac-worldserver bash -c "mkdir -p /azerothcore/data/sql/updates/$core_dir" >/dev/null 2>&1 || true
    host_stage_reset_dir "$core_dir"

    local counter=0
    local skipped=0
    local failed=0

    local search_paths=(
      "$MODULES_DIR"/*/data/sql/"$db_type"
      "$MODULES_DIR"/*/data/sql/"$db_type"/base
      "$MODULES_DIR"/*/data/sql/"$db_type"/updates
      "$MODULES_DIR"/*/data/sql/"$legacy_name"
      "$MODULES_DIR"/*/data/sql/"$legacy_name"/base
    )

    for module_dir in "${search_paths[@]}"; do
      for sql_file in "$module_dir"/*.sql; do
        [ -e "$sql_file" ] || continue

        if [ ! -f "$sql_file" ] || [ ! -s "$sql_file" ]; then
          echo "  ⚠️ Skipped empty or invalid: $(basename "$sql_file")"
          skipped=$((skipped + 1))
          continue
        fi

        if grep -qE '^[[:space:]]*(system|exec|shell|!)' "$sql_file" 2>/dev/null; then
          echo "  ❌ Security: Rejected $(basename "$(dirname "$module_dir")")/$(basename "$sql_file") (contains shell commands)"
          failed=$((failed + 1))
          continue
        fi

        local module_name
        module_name="$(echo "$sql_file" | sed 's|.*/modules/||' | cut -d'/' -f1)"
        local base_name
        base_name="$(basename "$sql_file" .sql)"

        if ! module_is_enabled "$module_name"; then
          echo "  ⏭️ Skipped $module_name/$db_type/$(basename "$sql_file") (module disabled)"
          skipped=$((skipped + 1))
          continue
        fi

        local target_name="MODULE_${module_name}_${base_name}.sql"
        if ! copy_to_host_stage "$sql_file" "$core_dir" "$target_name"; then
          echo "  ❌ Failed to copy to host staging: $module_name/$db_type/$(basename "$sql_file")"
          failed=$((failed + 1))
          continue
        fi
        if docker cp "$sql_file" "ac-worldserver:/azerothcore/data/sql/updates/$core_dir/$target_name" >/dev/null; then
          echo "  ✓ Staged $module_name/$db_type/$(basename "$sql_file")"
          counter=$((counter + 1))
        else
          echo "  ❌ Failed to copy: $module_name/$(basename "$sql_file")"
          failed=$((failed + 1))
        fi
      done
    done

    staged_count=$((staged_count + counter))
    total_skipped=$((total_skipped + skipped))
    total_failed=$((total_failed + failed))
  done
  shopt -u nullglob

  echo ""
  if [ "$staged_count" -gt 0 ]; then
    echo "✅ Staged $staged_count module SQL files to core updates directory"
    [ "$total_skipped" -gt 0 ] && echo "⚠️ Skipped $total_skipped empty/invalid file(s)"
    [ "$total_failed" -gt 0 ] && echo "❌ Failed to stage $total_failed file(s)"
    echo "🔄 Restart worldserver to apply: docker restart ac-worldserver"
  else
    echo "ℹ️ No module SQL files found to stage"
  fi
}
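Staged copies are renamed MODULE_<module>_<basename>.sql so AzerothCore's updates table records them per module and the cleanup pass above can delete exactly this set on the next run. For example (module and file names hypothetical):

modules/mod-aoe-loot/data/sql/db-world/base/aoe_loot.sql
  -> /azerothcore/data/sql/updates/db_world/MODULE_mod-aoe-loot_aoe_loot.sql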
get_module_dbc_path(){
  local module_name="$1"
  local manifest_file="$PROJECT_DIR/config/module-manifest.json"

  if [ ! -f "$manifest_file" ]; then
    return 1
  fi

  if command -v jq >/dev/null 2>&1; then
    local dbc_path
    dbc_path=$(jq -r ".modules[] | select(.name == \"$module_name\") | .server_dbc_path // empty" "$manifest_file" 2>/dev/null)
    if [ -n "$dbc_path" ]; then
      echo "$dbc_path"
      return 0
    fi
  fi

  return 1
}
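The same lookup can be run by hand when debugging a module's manifest entry (module name hypothetical; jq assumed to be installed on the host):

jq -r '.modules[] | select(.name == "mod-arac") | .server_dbc_path' config/module-manifest.json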
stage_module_dbc_files(){
  show_staging_step "Module DBC Staging" "Deploying binary DBC files to server"

  if ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver"; then
    echo "⚠️ Worldserver container not found, skipping module DBC staging"
    return 0
  fi

  echo "📦 Staging module DBC files to server data directory..."
  echo "   (Using manifest 'server_dbc_path' field to locate server-side DBC files)"

  local staged_count=0
  local skipped=0
  local failed=0

  shopt -s nullglob
  for module_path in "$MODULES_DIR"/*; do
    [ -d "$module_path" ] || continue
    local module_name
    module_name="$(basename "$module_path")"

    # Skip disabled modules
    if ! module_is_enabled "$module_name"; then
      continue
    fi

    # Get DBC path from manifest
    local dbc_path
    if ! dbc_path=$(get_module_dbc_path "$module_name"); then
      # No server_dbc_path defined in manifest - skip this module
      continue
    fi

    local dbc_dir="$module_path/$dbc_path"
    if [ ! -d "$dbc_dir" ]; then
      echo "  ⚠️ $module_name: DBC directory not found at $dbc_path"
      skipped=$((skipped + 1))
      continue
    fi

    for dbc_file in "$dbc_dir"/*.dbc; do
      [ -e "$dbc_file" ] || continue

      if [ ! -f "$dbc_file" ] || [ ! -s "$dbc_file" ]; then
        echo "  ⚠️ Skipped empty or invalid: $module_name/$(basename "$dbc_file")"
        skipped=$((skipped + 1))
        continue
      fi

      local dbc_filename
      dbc_filename="$(basename "$dbc_file")"

      # Copy to worldserver DBC directory
      if docker cp "$dbc_file" "ac-worldserver:/azerothcore/data/dbc/$dbc_filename" >/dev/null 2>&1; then
        echo "  ✓ Staged $module_name → $dbc_filename"
        staged_count=$((staged_count + 1))
      else
        echo "  ❌ Failed to copy: $module_name/$dbc_filename"
        failed=$((failed + 1))
      fi
    done
  done
  shopt -u nullglob

  echo ""
  if [ "$staged_count" -gt 0 ]; then
    echo "✅ Staged $staged_count module DBC files to server data directory"
    [ "$skipped" -gt 0 ] && echo "⚠️ Skipped $skipped missing or empty DBC file(s)"
    [ "$failed" -gt 0 ] && echo "❌ Failed to stage $failed file(s)"
    echo "🔄 Restart worldserver to load new DBC data: docker restart ac-worldserver"
  else
    echo "ℹ️ No module DBC files found to stage (use 'server_dbc_path' in manifest to enable)"
  fi
}

# Stage module SQL (this will also start the containers)
stage_module_sql_to_core

# Stage module DBC files
stage_module_dbc_files

printf '\n%b\n' "${GREEN}⚔️ Realm staging completed successfully! ⚔️${NC}"
printf '%b\n' "${GREEN}🏰 Profile: services-$TARGET_PROFILE${NC}"
scripts/bash/statusjson.sh (Executable file, 293 lines)
@@ -0,0 +1,293 @@
#!/usr/bin/env python3
import json
import os
import re
import socket
import subprocess
import time
from pathlib import Path

PROJECT_DIR = Path(__file__).resolve().parents[2]
ENV_FILE = PROJECT_DIR / ".env"

def load_env():
    env = {}
    if ENV_FILE.exists():
        for line in ENV_FILE.read_text().splitlines():
            if not line or line.strip().startswith('#'):
                continue
            if '=' not in line:
                continue
            key, val = line.split('=', 1)
            val = val.split('#', 1)[0].strip()
            env[key.strip()] = val
    return env

def read_env(env, key, default=""):
    return env.get(key, default)

def docker_exists(name):
    result = subprocess.run([
        "docker", "ps", "-a", "--format", "{{.Names}}"
    ], capture_output=True, text=True)
    names = set(result.stdout.split())
    return name in names

def docker_inspect(name, template):
    try:
        result = subprocess.run([
            "docker", "inspect", f"--format={template}", name
        ], capture_output=True, text=True, check=True)
        return result.stdout.strip()
    except subprocess.CalledProcessError:
        return ""

def service_snapshot(name, label):
    status = "missing"
    health = "none"
    started = ""
    image = ""
    exit_code = ""
    if docker_exists(name):
        status = docker_inspect(name, "{{.State.Status}}") or status
        health = docker_inspect(name, "{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}") or health
        started = docker_inspect(name, "{{.State.StartedAt}}") or ""
        image = docker_inspect(name, "{{.Config.Image}}") or ""
        exit_code = docker_inspect(name, "{{.State.ExitCode}}") or "0"
    return {
        "name": name,
        "label": label,
        "status": status,
        "health": health,
        "started_at": started,
        "image": image,
        "exit_code": exit_code,
    }

def port_reachable(port):
    if not port:
        return False
    try:
        port = int(port)
    except ValueError:
        return False
    try:
        with socket.create_connection(("127.0.0.1", port), timeout=1):
            return True
    except OSError:
        return False

def module_list(env):
    # Load module manifest (json and Path are already imported at module scope)
    manifest_path = PROJECT_DIR / "config" / "module-manifest.json"
    manifest_map = {}
    if manifest_path.exists():
        try:
            manifest_data = json.loads(manifest_path.read_text())
            for mod in manifest_data.get("modules", []):
                manifest_map[mod["key"]] = mod
        except Exception:
            pass

    modules = []
    pattern = re.compile(r"^MODULE_([A-Z0-9_]+)=1$")
    if ENV_FILE.exists():
        for line in ENV_FILE.read_text().splitlines():
            m = pattern.match(line.strip())
            if m:
                key = "MODULE_" + m.group(1)
                raw = m.group(1).lower().replace('_', ' ')
                title = raw.title()

                # Look up manifest info
                mod_info = manifest_map.get(key, {})
                modules.append({
                    "name": title,
                    "key": key,
                    "description": mod_info.get("description", "No description available"),
                    "category": mod_info.get("category", "unknown"),
                    "type": mod_info.get("type", "unknown")
                })
    return modules

def dir_info(path):
    p = Path(path)
    exists = p.exists()
    size = "--"
    if exists:
        try:
            result = subprocess.run(
                ["du", "-sh", str(p)],
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
                text=True,
                check=False,
            )
            if result.stdout:
                size = result.stdout.split()[0]
        except Exception:
            size = "--"
    return {"path": str(p), "exists": exists, "size": size}

def volume_info(name, fallback=None):
    candidates = [name]
    if fallback:
        candidates.append(fallback)
    for cand in candidates:
        result = subprocess.run(["docker", "volume", "inspect", cand], capture_output=True, text=True)
        if result.returncode == 0:
            try:
                data = json.loads(result.stdout)[0]
                return {
                    "name": cand,
                    "exists": True,
                    "mountpoint": data.get("Mountpoint", "-")
                }
            except Exception:
                pass
    return {"name": name, "exists": False, "mountpoint": "-"}

def expand_path(value, env):
    storage = read_env(env, "STORAGE_PATH", "./storage")
    local_storage = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
    value = value.replace('${STORAGE_PATH}', storage)
    value = value.replace('${STORAGE_PATH_LOCAL}', local_storage)
    return value

def mysql_query(env, database, query):
    password = read_env(env, "MYSQL_ROOT_PASSWORD")
    user = read_env(env, "MYSQL_USER", "root")
    if not password or not database:
        return 0
    cmd = [
        "docker", "exec", "ac-mysql",
        "mysql", "-N", "-B",
        f"-u{user}", f"-p{password}", database,
        "-e", query
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        value = result.stdout.strip().splitlines()[-1]
        return int(value)
    except Exception:
        return 0

def user_stats(env):
    db_auth = read_env(env, "DB_AUTH_NAME", "acore_auth")
    db_characters = read_env(env, "DB_CHARACTERS_NAME", "acore_characters")
    accounts = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account;")
    online = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE online = 1;")
    active = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")
    characters = mysql_query(env, db_characters, "SELECT COUNT(*) FROM characters;")
    return {
        "accounts": accounts,
        "online": online,
        "characters": characters,
        "active7d": active,
    }

def docker_stats():
    """Get CPU and memory stats for running containers"""
    try:
        result = subprocess.run([
            "docker", "stats", "--no-stream", "--no-trunc",
            "--format", "{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}"
        ], capture_output=True, text=True, check=True, timeout=4)

        stats = {}
        for line in result.stdout.strip().splitlines():
            parts = line.split('\t')
            if len(parts) == 4:
                name, cpu, mem_usage, mem_perc = parts
                # Parse CPU percentage (e.g., "0.50%" -> 0.50)
                cpu_val = cpu.replace('%', '').strip()
                try:
                    cpu_float = float(cpu_val)
                except ValueError:
                    cpu_float = 0.0

                # Parse memory percentage
                mem_perc_val = mem_perc.replace('%', '').strip()
                try:
                    mem_perc_float = float(mem_perc_val)
                except ValueError:
                    mem_perc_float = 0.0

                stats[name] = {
                    "cpu": cpu_float,
                    "memory": mem_usage.strip(),
                    "memory_percent": mem_perc_float
                }
        return stats
    except Exception:
        return {}

def main():
    env = load_env()
    project = read_env(env, "COMPOSE_PROJECT_NAME", "acore-compose")
    network = read_env(env, "NETWORK_NAME", "azerothcore")

    services = [
        ("ac-mysql", "MySQL"),
        ("ac-backup", "Backup"),
        ("ac-volume-init", "Volume Init"),
        ("ac-storage-init", "Storage Init"),
        ("ac-db-init", "DB Init"),
        ("ac-db-import", "DB Import"),
        ("ac-authserver", "Auth Server"),
        ("ac-worldserver", "World Server"),
        ("ac-client-data", "Client Data"),
        ("ac-modules", "Module Manager"),
        ("ac-post-install", "Post Install"),
        ("ac-phpmyadmin", "phpMyAdmin"),
        ("ac-keira3", "Keira3"),
    ]

    service_data = [service_snapshot(name, label) for name, label in services]

    port_entries = [
        {"name": "Auth", "port": read_env(env, "AUTH_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "AUTH_EXTERNAL_PORT"))},
        {"name": "World", "port": read_env(env, "WORLD_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "WORLD_EXTERNAL_PORT"))},
        {"name": "SOAP", "port": read_env(env, "SOAP_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "SOAP_EXTERNAL_PORT"))},
        {"name": "MySQL", "port": read_env(env, "MYSQL_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "MYSQL_EXTERNAL_PORT")) if read_env(env, "COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED", "0") == "1" else False},
        {"name": "phpMyAdmin", "port": read_env(env, "PMA_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "PMA_EXTERNAL_PORT"))},
        {"name": "Keira3", "port": read_env(env, "KEIRA3_EXTERNAL_PORT"), "reachable": port_reachable(read_env(env, "KEIRA3_EXTERNAL_PORT"))},
    ]

    storage_path = expand_path(read_env(env, "STORAGE_PATH", "./storage"), env)
    local_storage_path = expand_path(read_env(env, "STORAGE_PATH_LOCAL", "./local-storage"), env)
    client_data_path = expand_path(read_env(env, "CLIENT_DATA_PATH", f"{storage_path}/client-data"), env)

    storage_info = {
        "storage": dir_info(storage_path),
        "local_storage": dir_info(local_storage_path),
        "client_data": dir_info(client_data_path),
        "modules": dir_info(os.path.join(storage_path, "modules")),
        "local_modules": dir_info(os.path.join(local_storage_path, "modules")),
    }

    volumes = {
        "client_cache": volume_info(f"{project}_client-data-cache"),
        "mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"),
    }

    data = {
        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "project": project,
        "network": network,
        "services": service_data,
        "ports": port_entries,
        "modules": module_list(env),
        "storage": storage_info,
        "volumes": volumes,
        "users": user_stats(env),
        "stats": docker_stats(),
    }

    print(json.dumps(data))

if __name__ == "__main__":
    main()
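Since the script emits a single JSON object on stdout, it composes directly with jq; for example, listing every service that is not running (jq assumed installed on the host):

./scripts/bash/statusjson.sh | jq -r '.services[] | select(.status != "running") | .name'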
scripts/bash/test-phase1-integration.sh (Executable file, 342 lines)
@@ -0,0 +1,342 @@
#!/bin/bash
# Phase 1 Integration Test Script
# Tests the complete Phase 1 implementation using build and deploy workflows

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_ERROR="❌"
ICON_INFO="ℹ️"
ICON_TEST="🧪"

# Counters
TESTS_TOTAL=0
TESTS_PASSED=0
TESTS_FAILED=0

info() {
  echo -e "${BLUE}${ICON_INFO}${NC} $*"
}

ok() {
  echo -e "${GREEN}${ICON_SUCCESS}${NC} $*"
  ((TESTS_PASSED+=1))
}

warn() {
  echo -e "${YELLOW}${ICON_WARNING}${NC} $*"
}

err() {
  echo -e "${RED}${ICON_ERROR}${NC} $*"
  ((TESTS_FAILED+=1))
}

test_header() {
  ((TESTS_TOTAL+=1))
  echo ""
  echo -e "${BOLD}${ICON_TEST} Test $TESTS_TOTAL: $*${NC}"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
}

section_header() {
  echo ""
  echo ""
  echo -e "${BOLD}${BLUE}═══════════════════════════════════════════════════════${NC}"
  echo -e "${BOLD}${BLUE}  $*${NC}"
  echo -e "${BOLD}${BLUE}═══════════════════════════════════════════════════════${NC}"
  echo ""
}

# Change to project root
cd "$PROJECT_ROOT"

section_header "Phase 1 Integration Test Suite"

info "Project root: $PROJECT_ROOT"
info "Test started: $(date)"

# Ensure storage directories are writable before generating module state
if [ -x "$PROJECT_ROOT/scripts/bash/repair-storage-permissions.sh" ]; then
  info "Normalizing storage permissions"
  "$PROJECT_ROOT/scripts/bash/repair-storage-permissions.sh" --silent || true
fi

# Test 1: Verify .env exists
test_header "Environment Configuration Check"
if [ -f .env ]; then
  ok ".env file exists"

  # Count enabled modules. grep -c still prints 0 when nothing matches, so only
  # its non-zero exit status needs swallowing (an `|| echo 0` would emit a second line).
  enabled_count=$(grep -c "^MODULE_.*=1" .env || true)
  info "Enabled modules: $enabled_count"

  # Check for playerbots
  if grep -q "^MODULE_PLAYERBOTS=1" .env; then
    info "Playerbots module enabled"
  fi
else
  err ".env file not found"
  echo "Please run ./setup.sh first"
  exit 1
fi

# Test 2: Module manifest validation
test_header "Module Manifest Validation"
if [ -f config/module-manifest.json ]; then
  ok "Module manifest exists"

  # Validate JSON
  if python3 -m json.tool config/module-manifest.json >/dev/null 2>&1; then
    ok "Module manifest is valid JSON"
  else
    err "Module manifest has invalid JSON"
  fi
else
  err "Module manifest not found"
  exit 1
fi

# Test 3: Generate module state with SQL discovery
test_header "Module State Generation (SQL Discovery)"
info "Running: python3 scripts/python/modules.py generate"

if python3 scripts/python/modules.py \
    --env-path .env \
    --manifest config/module-manifest.json \
    generate --output-dir local-storage/modules > /tmp/phase1-modules-generate.log 2>&1; then
  ok "Module state generation successful"
else
  # Check if it's just warnings
  if grep -q "warnings detected" /tmp/phase1-modules-generate.log 2>/dev/null; then
    ok "Module state generation completed with warnings"
  else
    err "Module state generation failed"
  fi
fi

# Test 4: Verify SQL manifest created
test_header "SQL Manifest Verification"
if [ -f local-storage/modules/.sql-manifest.json ]; then
  ok "SQL manifest created: local-storage/modules/.sql-manifest.json"

  # Check manifest structure
  module_count=$(python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0")
  info "Modules with SQL: $module_count"

  if [ "$module_count" -gt 0 ]; then
    ok "SQL manifest contains $module_count module(s)"

    # Show first module
    info "Sample module SQL info:"
    python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); m=data['modules'][0] if data['modules'] else {}; print(f\"  Name: {m.get('name', 'N/A')}\n  SQL files: {len(m.get('sql_files', {}))}\") " 2>/dev/null || true
  else
    warn "No modules with SQL files (expected if modules not yet staged)"
  fi
else
  err "SQL manifest not created"
fi

# Test 5: Verify modules.env created
test_header "Module Environment File Check"
if [ -f local-storage/modules/modules.env ]; then
  ok "modules.env created"

  # Check for key exports
  if grep -q "MODULES_ENABLED=" local-storage/modules/modules.env; then
    ok "MODULES_ENABLED variable present"
  fi

  if grep -q "MODULES_REQUIRES_CUSTOM_BUILD=" local-storage/modules/modules.env; then
    ok "Build requirement flags present"

    # Check if build required
    source local-storage/modules/modules.env
    if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
      info "Custom build required (C++ modules enabled)"
    else
      info "Standard build sufficient (no C++ modules)"
    fi
  fi
else
  err "modules.env not created"
fi

# Test 6: Check build requirement
test_header "Build Requirement Check"
if [ -f local-storage/modules/modules.env ]; then
  source local-storage/modules/modules.env

  info "MODULES_REQUIRES_CUSTOM_BUILD=${MODULES_REQUIRES_CUSTOM_BUILD:-0}"
  info "MODULES_REQUIRES_PLAYERBOT_SOURCE=${MODULES_REQUIRES_PLAYERBOT_SOURCE:-0}"

  if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
    ok "Build system correctly detected C++ modules"
    BUILD_REQUIRED=1
  else
    ok "Build system correctly detected no C++ modules"
    BUILD_REQUIRED=0
  fi
else
  warn "Cannot determine build requirements"
  BUILD_REQUIRED=0
fi

# Test 7: Verify new scripts exist and are executable
test_header "New Script Verification"
scripts=(
  "scripts/bash/verify-sql-updates.sh"
  "scripts/bash/backup-status.sh"
  "scripts/bash/db-health-check.sh"
)

for script in "${scripts[@]}"; do
  if [ -f "$script" ]; then
    if [ -x "$script" ]; then
      ok "$(basename "$script") - exists and executable"
    else
      warn "$(basename "$script") - exists but not executable"
      chmod +x "$script"
      ok "Fixed permissions for $(basename "$script")"
    fi
  else
    err "$(basename "$script") - not found"
  fi
done

# Test 8: Test backup-status.sh (without running containers)
test_header "Backup Status Script Test"
backup_status_log="$(mktemp)"
if ./scripts/bash/backup-status.sh >"$backup_status_log" 2>&1; then
  if grep -q "BACKUP STATUS" "$backup_status_log"; then
    ok "backup-status.sh executes successfully"
  else
    err "backup-status.sh output missing 'BACKUP STATUS' marker"
  fi
else
  err "backup-status.sh failed to execute"
fi
rm -f "$backup_status_log"

# Test 9: Test db-health-check.sh help
test_header "Database Health Check Script Test"
if ./scripts/bash/db-health-check.sh --help | grep -q "Check the health status"; then
  ok "db-health-check.sh help working"
else
  err "db-health-check.sh help failed"
fi

# Test 10: Check modified scripts for new functionality
test_header "Modified Script Verification"

# Check stage-modules.sh has runtime SQL staging function
if grep -q "stage_module_sql_to_core()" scripts/bash/stage-modules.sh; then
  ok "stage-modules.sh contains runtime SQL staging function"
else
  err "stage-modules.sh missing runtime SQL staging function"
fi

# Check db-import-conditional.sh has playerbots support
if grep -q "PlayerbotsDatabaseInfo" scripts/bash/db-import-conditional.sh; then
  ok "db-import-conditional.sh has playerbots database support"
else
  err "db-import-conditional.sh missing playerbots support"
fi

if grep -q "Updates.EnableDatabases = 15" scripts/bash/db-import-conditional.sh; then
  ok "db-import-conditional.sh has correct EnableDatabases value (15)"
else
  warn "db-import-conditional.sh may have incorrect EnableDatabases value"
fi
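The expected value 15 is a database bitmask. Assuming the standard AzerothCore DBUpdater assignments (1 = auth, 2 = characters, 4 = world) plus this deployment's Playerbots bit (8), all four databases come out to:

echo $(( 1 | 2 | 4 | 8 ))   # -> 15: auth + characters + world + playerbots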
# Check for restore marker safety net
if grep -q "verify_databases_populated" scripts/bash/db-import-conditional.sh; then
  ok "db-import-conditional.sh verifies live MySQL state before honoring restore markers"
else
  err "db-import-conditional.sh missing restore marker safety check"
fi

# Check for post-restore verification
if grep -q "verify_and_update_restored_databases" scripts/bash/db-import-conditional.sh; then
  ok "db-import-conditional.sh has post-restore verification"
else
  err "db-import-conditional.sh missing post-restore verification"
fi

# Test 11: Restore + Module Staging Automation
test_header "Restore + Module Staging Automation"
if grep -q "restore-and-stage.sh" docker-compose.yml && \
   grep -q ".restore-prestaged" scripts/bash/restore-and-stage.sh; then
  ok "restore-and-stage.sh wired into compose and flags stage-modules to recopy SQL"
else
  err "restore-and-stage.sh missing compose wiring or flag handling"
fi

# Test 12: Docker Compose configuration check
test_header "Docker Compose Configuration Check"
if [ -f docker-compose.yml ]; then
  ok "docker-compose.yml exists"

  # Check for required services
  if grep -q "ac-mysql:" docker-compose.yml; then
    ok "MySQL service configured"
  fi

  if grep -q "ac-worldserver:" docker-compose.yml; then
    ok "Worldserver service configured"
  fi
else
  err "docker-compose.yml not found"
fi

# Test Summary
section_header "Test Summary"

echo ""
echo -e "${BOLD}Tests Executed: $TESTS_TOTAL${NC}"
echo -e "${GREEN}${BOLD}Passed: $TESTS_PASSED${NC}"
if [ $TESTS_FAILED -gt 0 ]; then
  echo -e "${RED}${BOLD}Failed: $TESTS_FAILED${NC}"
else
  echo -e "${GREEN}${BOLD}Failed: $TESTS_FAILED${NC}"
fi
echo ""

# Calculate success rate
if [ $TESTS_TOTAL -gt 0 ]; then
  success_rate=$((TESTS_PASSED * 100 / TESTS_TOTAL))
  echo -e "${BOLD}Success Rate: ${success_rate}%${NC}"
fi

echo ""

if [ $TESTS_FAILED -eq 0 ]; then
  echo -e "${GREEN}${BOLD}${ICON_SUCCESS} ALL TESTS PASSED${NC}"
  echo ""
  echo "Phase 1 implementation is working correctly!"
  echo ""
  echo "Next steps:"
  echo "  1. Run './build.sh' if C++ modules are enabled"
  echo "  2. Run './deploy.sh' to start containers"
  echo "  3. Verify SQL staging with running containers"
  echo "  4. Check database health with db-health-check.sh"
  exit 0
else
  echo -e "${RED}${BOLD}${ICON_ERROR} SOME TESTS FAILED${NC}"
  echo ""
  echo "Please review the failures above before proceeding."
  exit 1
fi
scripts/bash/verify-sql-updates.sh (Executable file, 348 lines)
@@ -0,0 +1,348 @@
#!/bin/bash
# Verify SQL Updates
# Checks that SQL updates have been applied via the updates table
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_ERROR="❌"
ICON_INFO="ℹ️"

# Default values
MODULE_NAME=""
DATABASE_NAME=""
SHOW_ALL=0
CHECK_HASH=0
CONTAINER_NAME="ac-mysql"

usage() {
  cat <<'EOF'
Usage: ./verify-sql-updates.sh [options]

Verify that SQL updates have been applied via AzerothCore's updates table.

Options:
  --module NAME      Check specific module
  --database NAME    Check specific database (auth/world/characters)
  --all              Show all module updates
  --check-hash       Verify file hashes match database
  --container NAME   MySQL container name (default: ac-mysql)
  -h, --help         Show this help

Examples:
  ./verify-sql-updates.sh --all
  ./verify-sql-updates.sh --module mod-aoe-loot
  ./verify-sql-updates.sh --database acore_world --all
EOF
}

# Parse arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    --module) MODULE_NAME="$2"; shift 2;;
    --database) DATABASE_NAME="$2"; shift 2;;
    --all) SHOW_ALL=1; shift;;
    --check-hash) CHECK_HASH=1; shift;;
    --container) CONTAINER_NAME="$2"; shift 2;;
    -h|--help) usage; exit 0;;
    *) echo "Unknown option: $1"; usage; exit 1;;
  esac
done

# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
  set -a
  # shellcheck disable=SC1091
  source "$PROJECT_ROOT/.env"
  set +a
fi

MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
DB_AUTH_NAME="${DB_AUTH_NAME:-acore_auth}"
DB_WORLD_NAME="${DB_WORLD_NAME:-acore_world}"
DB_CHARACTERS_NAME="${DB_CHARACTERS_NAME:-acore_characters}"
DB_PLAYERBOTS_NAME="${DB_PLAYERBOTS_NAME:-acore_playerbots}"

# Logging functions
info() {
  echo -e "${BLUE}${ICON_INFO}${NC} $*"
}

ok() {
  echo -e "${GREEN}${ICON_SUCCESS}${NC} $*"
}

warn() {
  echo -e "${YELLOW}${ICON_WARNING}${NC} $*"
}

err() {
  echo -e "${RED}${ICON_ERROR}${NC} $*"
}

# MySQL query helper
mysql_query() {
  local database="${1:-}"
  local query="$2"

  if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
    err "MYSQL_ROOT_PASSWORD not set"
    return 1
  fi

  if command -v docker >/dev/null 2>&1; then
    if [ -n "$database" ]; then
      docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
    else
      docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
    fi
  else
    if [ -n "$database" ]; then
      mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
    else
      mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
    fi
  fi
}

# Check if database exists
db_exists() {
  local db_name="$1"
  local count
  count=$(mysql_query "" "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME='$db_name'" 2>/dev/null || echo "0")
  [ "$count" = "1" ]
}

# Verify module SQL in database
verify_module_sql() {
  local module_name="$1"
  local database_name="$2"

  if ! db_exists "$database_name"; then
    err "Database does not exist: $database_name"
    return 1
  fi

  info "Checking module updates in $database_name"

  # Query updates table for module
  local query="SELECT name, hash, state, timestamp, speed FROM updates WHERE name LIKE '%${module_name}%' AND state='MODULE' ORDER BY timestamp DESC"
  local results
  results=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

  if [ -z "$results" ]; then
    warn "No updates found for module: $module_name in $database_name"
    return 0
  fi

  # Display results
  echo
  printf "${BOLD}${CYAN}Module Updates for %s in %s:${NC}\n" "$module_name" "$database_name"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  while IFS=$'\t' read -r name hash state timestamp speed; do
    printf "${GREEN}${ICON_SUCCESS}${NC} %s\n" "$name"
    printf "   Hash: %s\n" "${hash:0:12}..."
    printf "   Applied: %s\n" "$timestamp"
    printf "   Speed: %sms\n" "$speed"
    echo
  done <<< "$results"

  return 0
}

# List all module updates
list_module_updates() {
  local database_name="$1"

  if ! db_exists "$database_name"; then
    err "Database does not exist: $database_name"
    return 1
  fi

  info "Listing all module updates in $database_name"

  # Query all module updates
  local query="SELECT name, state, timestamp FROM updates WHERE state='MODULE' ORDER BY timestamp DESC"
  local results
  results=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

  if [ -z "$results" ]; then
    warn "No module updates found in $database_name"
    return 0
  fi

  # Display results
  echo
  printf "${BOLD}${CYAN}All Module Updates in %s:${NC}\n" "$database_name"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  local count=0
  while IFS=$'\t' read -r name state timestamp; do
    printf "${GREEN}${ICON_SUCCESS}${NC} %s\n" "$name"
    printf "   Applied: %s\n" "$timestamp"
    # Avoid ((count++)): it returns status 1 when count is 0, which trips set -e.
    count=$((count + 1))
  done <<< "$results"

  echo
  ok "Total module updates: $count"
  echo

  return 0
}

# Check whether a single named update was applied (reserved for the parsed
# --check-hash flag; not yet wired into main below)
check_update_applied() {
  local filename="$1"
  local database_name="$2"
  local expected_hash="${3:-}"

  if ! db_exists "$database_name"; then
    err "Database does not exist: $database_name"
    return 2
  fi

  # Query for specific file
  local query="SELECT hash, state, timestamp FROM updates WHERE name='$filename' LIMIT 1"
  local result
  result=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

  if [ -z "$result" ]; then
    warn "Update not found: $filename"
    return 1
  fi

  # Parse result
  IFS=$'\t' read -r hash state timestamp <<< "$result"

  ok "Update applied: $filename"
  printf "   Hash: %s\n" "$hash"
  printf "   State: %s\n" "$state"
  printf "   Applied: %s\n" "$timestamp"

  # Check hash if provided
  if [ -n "$expected_hash" ] && [ "$expected_hash" != "$hash" ]; then
    err "Hash mismatch!"
    printf "   Expected: %s\n" "$expected_hash"
    printf "   Actual:   %s\n" "$hash"
    return 2
  fi

  return 0
}

# Generate verification report
generate_verification_report() {
  echo
  printf "${BOLD}${BLUE}🔍 Module SQL Verification Report${NC}\n"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  local total_updates=0
  local databases=("$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME")

  # Add playerbots if it exists
  if db_exists "$DB_PLAYERBOTS_NAME"; then
    databases+=("$DB_PLAYERBOTS_NAME")
  fi

  for db in "${databases[@]}"; do
    if ! db_exists "$db"; then
      continue
    fi

    # Get count of module updates
    local count
    count=$(mysql_query "$db" "SELECT COUNT(*) FROM updates WHERE state='MODULE'" 2>/dev/null || echo "0")

    if [ "$count" != "0" ]; then
      printf "${GREEN}${ICON_SUCCESS}${NC} ${BOLD}%s:${NC} %s module update(s)\n" "$db" "$count"
      total_updates=$((total_updates + count))

      if [ "$SHOW_ALL" = "1" ]; then
        # Show recent updates
        local query="SELECT name, timestamp FROM updates WHERE state='MODULE' ORDER BY timestamp DESC LIMIT 5"
        local results
        results=$(mysql_query "$db" "$query" 2>/dev/null || echo "")

        if [ -n "$results" ]; then
          while IFS=$'\t' read -r name timestamp; do
            printf "   - %s (%s)\n" "$name" "$timestamp"
          done <<< "$results"
          echo
        fi
      fi
    else
      printf "${YELLOW}${ICON_WARNING}${NC} ${BOLD}%s:${NC} No module updates\n" "$db"
    fi
  done

  echo
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  printf "${BOLD}Total: %s module update(s) applied${NC}\n" "$total_updates"
  echo
}

# Main execution
main() {
  echo
  info "SQL Update Verification"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo

  # Test MySQL connection
  if ! mysql_query "" "SELECT 1" >/dev/null 2>&1; then
    err "Cannot connect to MySQL server"
    printf "   Host: %s:%s\n" "$MYSQL_HOST" "$MYSQL_PORT"
    printf "   User: %s\n" "$MYSQL_USER"
    printf "   Container: %s\n\n" "$CONTAINER_NAME"
    exit 1
  fi

  # Execute based on options
  if [ -n "$MODULE_NAME" ]; then
    # Check specific module
    if [ -n "$DATABASE_NAME" ]; then
      verify_module_sql "$MODULE_NAME" "$DATABASE_NAME"
    else
      # Check all databases for this module
      for db in "$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME"; do
        if db_exists "$db"; then
          verify_module_sql "$MODULE_NAME" "$db"
        fi
      done
      if db_exists "$DB_PLAYERBOTS_NAME"; then
        verify_module_sql "$MODULE_NAME" "$DB_PLAYERBOTS_NAME"
      fi
    fi
  elif [ -n "$DATABASE_NAME" ]; then
    # List all updates in specific database
    list_module_updates "$DATABASE_NAME"
  else
    # Generate full report
    generate_verification_report
  fi

  echo
  ok "Verification complete"
  echo
}

main "$@"
scripts/go/go.mod (Normal file, 10 lines)
@@ -0,0 +1,10 @@
module acore-compose/statusdash

go 1.22.2

require (
	github.com/gizak/termui/v3 v3.1.0 // indirect
	github.com/mattn/go-runewidth v0.0.2 // indirect
	github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
	github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect
)
scripts/go/go.sum (Normal file, 8 lines)
@@ -0,0 +1,8 @@
github.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc=
github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY=
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
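A plausible build-and-run sequence (a Go 1.22 toolchain assumed; the dashboard shells out to ./scripts/bash/statusjson.sh, so it must be launched from the repo root):

( cd scripts/go && go build -o statusdash . )
./scripts/go/statusdash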
scripts/go/statusdash.go (Normal file, 373 lines)
@@ -0,0 +1,373 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
ui "github.com/gizak/termui/v3"
|
||||
"github.com/gizak/termui/v3/widgets"
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
Name string `json:"name"`
|
||||
Label string `json:"label"`
|
||||
Status string `json:"status"`
|
||||
Health string `json:"health"`
|
||||
StartedAt string `json:"started_at"`
|
||||
Image string `json:"image"`
|
||||
ExitCode string `json:"exit_code"`
|
||||
}
|
||||
|
||||
type ContainerStats struct {
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory string `json:"memory"`
|
||||
MemoryPercent float64 `json:"memory_percent"`
|
||||
}
|
||||
|
||||
type Port struct {
|
||||
Name string `json:"name"`
|
||||
Port string `json:"port"`
|
||||
Reachable bool `json:"reachable"`
|
||||
}
|
||||
|
||||
type DirInfo struct {
|
||||
Path string `json:"path"`
|
||||
Exists bool `json:"exists"`
|
||||
Size string `json:"size"`
|
||||
}
|
||||
|
||||
type VolumeInfo struct {
|
||||
Name string `json:"name"`
|
||||
Exists bool `json:"exists"`
|
||||
Mountpoint string `json:"mountpoint"`
|
||||
}
|
||||
|
||||
type UserStats struct {
|
||||
Accounts int `json:"accounts"`
|
||||
Online int `json:"online"`
|
||||
Characters int `json:"characters"`
|
||||
Active7d int `json:"active7d"`
|
||||
}
|
||||
|
||||
type Module struct {
|
||||
Name string `json:"name"`
|
||||
Key string `json:"key"`
|
||||
Description string `json:"description"`
|
||||
Category string `json:"category"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
type Snapshot struct {
|
||||
Timestamp string `json:"timestamp"`
|
||||
Project string `json:"project"`
|
||||
Network string `json:"network"`
|
||||
Services []Service `json:"services"`
|
||||
Ports []Port `json:"ports"`
|
||||
Modules []Module `json:"modules"`
|
||||
Storage map[string]DirInfo `json:"storage"`
|
||||
Volumes map[string]VolumeInfo `json:"volumes"`
|
||||
Users UserStats `json:"users"`
|
||||
Stats map[string]ContainerStats `json:"stats"`
|
||||
}
|
||||
|
||||
func runSnapshot() (*Snapshot, error) {
|
||||
cmd := exec.Command("./scripts/bash/statusjson.sh")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
snap := &Snapshot{}
|
||||
if err := json.Unmarshal(output, snap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snap, nil
|
||||
}
|
||||

func buildServicesTable(s *Snapshot) *TableNoCol {
	table := NewTableNoCol()
	rows := [][]string{{"Service", "Status", "Health", "CPU%", "Memory"}}
	for _, svc := range s.Services {
		cpu := "-"
		mem := "-"
		if stats, ok := s.Stats[svc.Name]; ok {
			cpu = fmt.Sprintf("%.1f", stats.CPU)
			mem = strings.Split(stats.Memory, " / ")[0] // Just show used, not total
		}
		// Combine health with exit code for stopped containers
		health := svc.Health
		if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
			health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
		}
		rows = append(rows, []string{svc.Label, svc.Status, health, cpu, mem})
	}
	table.Rows = rows
	table.RowSeparator = false
	table.Border = true
	table.Title = "Services"
	return table
}

func buildPortsTable(s *Snapshot) *TableNoCol {
	table := NewTableNoCol()
	rows := [][]string{{"Port", "Number", "Reachable"}}
	for _, p := range s.Ports {
		state := "down"
		if p.Reachable {
			state = "up"
		}
		rows = append(rows, []string{p.Name, p.Port, state})
	}
	table.Rows = rows
	table.RowSeparator = true
	table.Border = true
	table.Title = "Ports"
	return table
}

func buildModulesList(s *Snapshot) *widgets.List {
	list := widgets.NewList()
	list.Title = fmt.Sprintf("Modules (%d)", len(s.Modules))
	rows := make([]string, len(s.Modules))
	for i, mod := range s.Modules {
		rows[i] = mod.Name
	}
	list.Rows = rows
	list.WrapText = false
	list.Border = true
	list.BorderStyle = ui.NewStyle(ui.ColorCyan)
	list.SelectedRowStyle = ui.NewStyle(ui.ColorCyan)
	return list
}

func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
	var b strings.Builder
	fmt.Fprintf(&b, "STORAGE:\n")
	entries := []struct {
		Key   string
		Label string
	}{
		{"storage", "Storage"},
		{"local_storage", "Local Storage"},
		{"client_data", "Client Data"},
		{"modules", "Modules"},
		{"local_modules", "Local Modules"},
	}
	for _, item := range entries {
		info, ok := s.Storage[item.Key]
		if !ok {
			continue
		}
		mark := "○"
		if info.Exists {
			mark = "●"
		}
		fmt.Fprintf(&b, " %-15s %s %s (%s)\n", item.Label, mark, info.Path, info.Size)
	}
	par := widgets.NewParagraph()
	par.Title = "Storage"
	par.Text = b.String()
	par.Border = true
	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
	return par
}

func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
	var b strings.Builder
	fmt.Fprintf(&b, "VOLUMES:\n")
	entries := []struct {
		Key   string
		Label string
	}{
		{"client_cache", "Client Cache"},
		{"mysql_data", "MySQL Data"},
	}
	for _, item := range entries {
		info, ok := s.Volumes[item.Key]
		if !ok {
			continue
		}
		mark := "○"
		if info.Exists {
			mark = "●"
		}
		fmt.Fprintf(&b, " %-13s %s %s\n", item.Label, mark, info.Mountpoint)
	}
	par := widgets.NewParagraph()
	par.Title = "Volumes"
	par.Text = b.String()
	par.Border = true
	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
	return par
}

func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
	servicesTable := buildServicesTable(s)
	for i := 1; i < len(servicesTable.Rows); i++ {
		if servicesTable.RowStyles == nil {
			servicesTable.RowStyles = make(map[int]ui.Style)
		}
		// Color by both columns: status is column 1, health is column 2. The
		// previous matching checked "healthy"/"unhealthy" against the status
		// column, where those values never occur.
		status := strings.ToLower(servicesTable.Rows[i][1])
		health := strings.ToLower(servicesTable.Rows[i][2])
		switch {
		case status == "restarting" || strings.HasPrefix(health, "unhealthy"):
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
		case status == "running" || health == "healthy":
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
		case status == "exited":
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
		default:
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorWhite)
		}
	}
	portsTable := buildPortsTable(s)
	for i := 1; i < len(portsTable.Rows); i++ {
		if portsTable.RowStyles == nil {
			portsTable.RowStyles = make(map[int]ui.Style)
		}
		if portsTable.Rows[i][2] == "up" {
			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
		} else {
			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
		}
	}
	modulesList := buildModulesList(s)
	if selectedModule >= 0 && selectedModule < len(modulesList.Rows) {
		modulesList.SelectedRow = selectedModule
	}

	helpPar := widgets.NewParagraph()
	helpPar.Title = "Controls"
	helpPar.Text = " ↓ : Down\n ↑ : Up"
	helpPar.Border = true
	helpPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)

	moduleInfoPar := widgets.NewParagraph()
	moduleInfoPar.Title = "Module Info"
	if selectedModule >= 0 && selectedModule < len(s.Modules) {
		mod := s.Modules[selectedModule]
		moduleInfoPar.Text = fmt.Sprintf("%s\n\nCategory: %s\nType: %s", mod.Description, mod.Category, mod.Type)
	} else {
		moduleInfoPar.Text = "Select a module to view info"
	}
	moduleInfoPar.Border = true
	moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)

	// Border and style are already set inside the build helpers.
	storagePar := buildStorageParagraph(s)
	storagePar.PaddingLeft = 1
	storagePar.PaddingRight = 1
	volumesPar := buildVolumesParagraph(s)

	header := widgets.NewParagraph()
	header.Text = fmt.Sprintf("Project: %s\nNetwork: %s\nUpdated: %s", s.Project, s.Network, s.Timestamp)
	header.Border = true

	usersPar := widgets.NewParagraph()
	usersPar.Text = fmt.Sprintf("USERS:\n Accounts: %d\n Online: %d\n Characters: %d\n Active 7d: %d", s.Users.Accounts, s.Users.Online, s.Users.Characters, s.Users.Active7d)
	usersPar.Border = true

	grid := ui.NewGrid()
	termWidth, termHeight := ui.TerminalDimensions()
	grid.SetRect(0, 0, termWidth, termHeight)
	grid.Set(
		ui.NewRow(0.18,
			ui.NewCol(0.6, header),
			ui.NewCol(0.4, usersPar),
		),
		ui.NewRow(0.42,
			ui.NewCol(0.6, servicesTable),
			ui.NewCol(0.4, portsTable),
		),
		ui.NewRow(0.40,
			ui.NewCol(0.25, modulesList),
			ui.NewCol(0.15,
				ui.NewRow(0.30, helpPar),
				ui.NewRow(0.70, moduleInfoPar),
			),
			ui.NewCol(0.6,
				ui.NewRow(0.55,
					ui.NewCol(1.0, storagePar),
				),
				ui.NewRow(0.45,
					ui.NewCol(1.0, volumesPar),
				),
			),
		),
	)
	ui.Render(grid)
	return modulesList, grid
}

func main() {
	if err := ui.Init(); err != nil {
		log.Fatalf("failed to init termui: %v", err)
	}
	defer ui.Close()

	snapshot, err := runSnapshot()
	if err != nil {
		log.Fatalf("failed to fetch snapshot: %v", err)
	}
	selectedModule := 0
	modulesWidget, currentGrid := renderSnapshot(snapshot, selectedModule)

	// Refresh the snapshot every five seconds in the background; the buffered
	// channel drops a tick if the UI loop has not consumed the previous one.
	snapCh := make(chan *Snapshot, 1)
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for range ticker.C {
			snap, err := runSnapshot()
			if err != nil {
				log.Printf("snapshot error: %v", err)
				continue
			}
			select {
			case snapCh <- snap:
			default:
			}
		}
	}()

	events := ui.PollEvents()
	for {
		select {
		case e := <-events:
			switch e.ID {
			case "q", "<C-c>":
				return
			case "<Down>", "j":
				if selectedModule < len(snapshot.Modules)-1 {
					selectedModule++
					modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
				}
			case "<Up>", "k":
				if selectedModule > 0 {
					selectedModule--
					modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
				}
			case "<Resize>":
				modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
				continue
			}
			if modulesWidget != nil {
				if selectedModule >= 0 && selectedModule < len(modulesWidget.Rows) {
					modulesWidget.SelectedRow = selectedModule
				}
			}
			if currentGrid != nil {
				ui.Render(currentGrid)
			}
		case snap := <-snapCh:
			snapshot = snap
			// Clamp the selection if the module list shrank.
			if selectedModule >= len(snapshot.Modules) {
				selectedModule = len(snapshot.Modules) - 1
				if selectedModule < 0 {
					selectedModule = 0
				}
			}
			modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
		}
	}
}
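Since runSnapshot resolves statusjson.sh relative to the working directory, the dashboard presumably has to be launched from the repository root. A hedged invocation sketch, assuming statusdash.go and table_nocol.go build as one package under scripts/go/:

    cd AzerothCore-RealmMaster
    go run ./scripts/go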
101 scripts/go/table_nocol.go Normal file
@@ -0,0 +1,101 @@
package main

import (
	"image"

	ui "github.com/gizak/termui/v3"
	"github.com/gizak/termui/v3/widgets"
)

// TableNoCol is a modified table widget that doesn't draw column separators
type TableNoCol struct {
	widgets.Table
}

func NewTableNoCol() *TableNoCol {
	t := &TableNoCol{}
	t.Table = *widgets.NewTable()
	return t
}

// Draw overrides the default Draw to skip column separators
func (self *TableNoCol) Draw(buf *ui.Buffer) {
	self.Block.Draw(buf)

	if len(self.Rows) == 0 {
		return
	}

	self.ColumnResizer()

	columnWidths := self.ColumnWidths
	if len(columnWidths) == 0 {
		columnCount := len(self.Rows[0])
		columnWidth := self.Inner.Dx() / columnCount
		for i := 0; i < columnCount; i++ {
			columnWidths = append(columnWidths, columnWidth)
		}
	}

	yCoordinate := self.Inner.Min.Y

	// draw rows
	for i := 0; i < len(self.Rows) && yCoordinate < self.Inner.Max.Y; i++ {
		row := self.Rows[i]
		colXCoordinate := self.Inner.Min.X

		rowStyle := self.TextStyle
		// get the row style if one exists
		if style, ok := self.RowStyles[i]; ok {
			rowStyle = style
		}

		if self.FillRow {
			blankCell := ui.NewCell(' ', rowStyle)
			buf.Fill(blankCell, image.Rect(self.Inner.Min.X, yCoordinate, self.Inner.Max.X, yCoordinate+1))
		}

		// draw row cells
		for j := 0; j < len(row); j++ {
			col := ui.ParseStyles(row[j], rowStyle)
			// draw row cell
			if len(col) > columnWidths[j] || self.TextAlignment == ui.AlignLeft {
				for _, cx := range ui.BuildCellWithXArray(col) {
					k, cell := cx.X, cx.Cell
					if k == columnWidths[j] || colXCoordinate+k == self.Inner.Max.X {
						cell.Rune = ui.ELLIPSES
						buf.SetCell(cell, image.Pt(colXCoordinate+k-1, yCoordinate))
						break
					} else {
						buf.SetCell(cell, image.Pt(colXCoordinate+k, yCoordinate))
					}
				}
			} else if self.TextAlignment == ui.AlignCenter {
				xCoordinateOffset := (columnWidths[j] - len(col)) / 2
				stringXCoordinate := xCoordinateOffset + colXCoordinate
				for _, cx := range ui.BuildCellWithXArray(col) {
					k, cell := cx.X, cx.Cell
					buf.SetCell(cell, image.Pt(stringXCoordinate+k, yCoordinate))
				}
			} else if self.TextAlignment == ui.AlignRight {
				stringXCoordinate := ui.MinInt(colXCoordinate+columnWidths[j], self.Inner.Max.X) - len(col)
				for _, cx := range ui.BuildCellWithXArray(col) {
					k, cell := cx.X, cx.Cell
					buf.SetCell(cell, image.Pt(stringXCoordinate+k, yCoordinate))
				}
			}
			colXCoordinate += columnWidths[j] + 1
		}

		// SKIP drawing vertical separators - this is the key change

		yCoordinate++

		// draw horizontal separator
		horizontalCell := ui.NewCell(ui.HORIZONTAL_LINE, self.Block.BorderStyle)
		if self.RowSeparator && yCoordinate < self.Inner.Max.Y && i != len(self.Rows)-1 {
			buf.Fill(horizontalCell, image.Rect(self.Inner.Min.X, yCoordinate, self.Inner.Max.X, yCoordinate+1))
			yCoordinate++
		}
	}
}
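Apart from the missing column separators, TableNoCol behaves like a stock termui Table, so a minimal usage sketch mirrors buildPortsTable above (the rect size is arbitrary):

    t := NewTableNoCol()
    t.Rows = [][]string{
        {"Port", "Number", "Reachable"},
        {"Auth", "3724", "up"},
    }
    t.RowSeparator = true
    t.Border = true
    t.Title = "Ports"
    t.SetRect(0, 0, 40, 8) // x, y, width, height in terminal cells
    ui.Render(t)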
BIN scripts/python/__pycache__/modules.cpython-312.pyc Normal file
Binary file not shown.
@@ -82,6 +82,64 @@ def load_manifest(manifest_path: Path) -> List[Dict[str, object]]:
    return validated


def discover_sql_files(module_path: Path, module_name: str) -> Dict[str, List[str]]:
    """
    Scan a module checkout for SQL files.

    Returns:
        Dict mapping the canonical database name to module-relative SQL file
        paths (as strings), for example: {
            'db_auth': ['data/sql/db-auth/file1.sql', ...],
            'db_world': ['data/sql/db-world/file2.sql', ...],
            'db_characters': ['data/sql/updates/db_characters/file3.sql', ...]
        }
    """
    sql_files: Dict[str, List[str]] = {}
    sql_base = module_path / 'data' / 'sql'

    if not sql_base.exists():
        return sql_files

    # Map to support both underscore and hyphen naming conventions
    db_types = {
        'db_auth': ['db_auth', 'db-auth'],
        'db_world': ['db_world', 'db-world'],
        'db_characters': ['db_characters', 'db-characters'],
        'db_playerbots': ['db_playerbots', 'db-playerbots']
    }

    for canonical_name, variants in db_types.items():
        # Check base/ with all variants; sorted() keeps the SQL apply order deterministic
        for variant in variants:
            base_dir = sql_base / 'base' / variant
            if base_dir.exists():
                for sql_file in sorted(base_dir.glob('*.sql')):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

        # Check updates/ with all variants
        for variant in variants:
            updates_dir = sql_base / 'updates' / variant
            if updates_dir.exists():
                for sql_file in sorted(updates_dir.glob('*.sql')):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

        # Check custom/ with all variants
        for variant in variants:
            custom_dir = sql_base / 'custom' / variant
            if custom_dir.exists():
                for sql_file in sorted(custom_dir.glob('*.sql')):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

        # ALSO check direct db-type directories (legacy format used by many modules)
        for variant in variants:
            direct_dir = sql_base / variant
            if direct_dir.exists():
                for sql_file in sorted(direct_dir.glob('*.sql')):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

    return sql_files
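
# Hedged example: given a hypothetical checkout 'mod-example/' containing
#   data/sql/db-world/npc.sql                    (legacy flat layout)
#   data/sql/updates/db_characters/01_add.sql
# discover_sql_files(Path('mod-example'), 'mod-example') returns
#   {'db_world': ['data/sql/db-world/npc.sql'],
#    'db_characters': ['data/sql/updates/db_characters/01_add.sql']}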


@dataclass
class ModuleState:
    key: str
@@ -103,6 +161,7 @@ class ModuleState:
    dependency_issues: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    sql_files: Dict[str, List[str]] = field(default_factory=dict)

    @property
    def blocked(self) -> bool:
@@ -340,6 +399,30 @@ def write_outputs(state: ModuleCollectionState, output_dir: Path) -> None:
        encoding="utf-8",
    )

    # Discover SQL files for all modules in output directory
    for module in state.modules:
        module_path = output_dir / module.name
        if module_path.exists():
            module.sql_files = discover_sql_files(module_path, module.name)

    # Generate SQL manifest for enabled modules with SQL files
    sql_manifest = {
        "modules": [
            {
                "name": module.name,
                "key": module.key,
                "sql_files": module.sql_files
            }
            for module in state.enabled_modules()
            if module.sql_files
        ]
    }
    sql_manifest_path = output_dir / ".sql-manifest.json"
    sql_manifest_path.write_text(
        json.dumps(sql_manifest, indent=2) + "\n",
        encoding="utf-8",
    )
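
    # Hedged sketch of the resulting .sql-manifest.json (names illustrative):
    #   {
    #     "modules": [
    #       {"name": "mod-example", "key": "MODULE_MOD_EXAMPLE",
    #        "sql_files": {"db_world": ["data/sql/db-world/npc.sql"]}}
    #     ]
    #   }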


def print_list(state: ModuleCollectionState, selector: str) -> None:
    if selector == "compile":
298 scripts/python/update_module_manifest.py Executable file
@@ -0,0 +1,298 @@
#!/usr/bin/env python3
"""Generate or update config/module-manifest.json from GitHub topics.

The script queries the GitHub Search API for repositories tagged with
AzerothCore-specific topics (for example ``azerothcore-module`` or
``azerothcore-lua``) and merges the discovered projects into the existing
module manifest. It intentionally keeps all user-defined fields intact so the
script can be run safely in CI or locally to add new repositories as they are
published.
"""

from __future__ import annotations

import argparse
import json
import os
import re
import sys
import time
from dataclasses import dataclass
from typing import Dict, Iterable, List, Optional, Sequence
from urllib import error, parse, request

API_ROOT = "https://api.github.com"
DEFAULT_TOPICS = [
    "azerothcore-module",
    "azerothcore-module+ac-premium",
    "azerothcore-tools",
    "azerothcore-lua",
    "azerothcore-sql",
]
# Map topic keywords to module ``type`` values used in the manifest.
TOPIC_TYPE_HINTS = {
    "azerothcore-lua": "lua",
    "lua": "lua",
    "azerothcore-sql": "sql",
    "sql": "sql",
    "azerothcore-tools": "tool",
    "tools": "tool",
}
CATEGORY_BY_TYPE = {
    "lua": "scripting",
    "sql": "database",
    "tool": "tooling",
    "data": "data",
    "cpp": "uncategorized",
}
USER_AGENT = "acore-compose-module-manifest"

def parse_args(argv: Sequence[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--manifest",
        default="config/module-manifest.json",
        help="Path to manifest JSON file (default: %(default)s)",
    )
    parser.add_argument(
        "--topic",
        action="append",
        default=[],
        dest="topics",
        help="GitHub topic (or '+' separated topics) to scan. Defaults to core topics if not provided.",
    )
    parser.add_argument(
        "--token",
        help="GitHub API token (defaults to $GITHUB_TOKEN or $GITHUB_API_TOKEN)",
    )
    parser.add_argument(
        "--max-pages",
        type=int,
        default=10,
        help="Maximum pages (x100 results) to fetch per topic (default: %(default)s)",
    )
    parser.add_argument(
        "--refresh-existing",
        action="store_true",
        help="Refresh name/description/type for repos already present in manifest",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Fetch and display the summary without writing to disk",
    )
    parser.add_argument(
        "--log",
        action="store_true",
        help="Print verbose progress information",
    )
    return parser.parse_args(argv)


@dataclass
class RepoRecord:
    data: dict
    topic_expr: str
    module_type: str


class GitHubClient:
    def __init__(self, token: Optional[str], verbose: bool = False) -> None:
        self.token = token
        self.verbose = verbose

    def _request(self, url: str) -> dict:
        req = request.Request(url)
        req.add_header("Accept", "application/vnd.github+json")
        req.add_header("User-Agent", USER_AGENT)
        if self.token:
            req.add_header("Authorization", f"Bearer {self.token}")
        try:
            with request.urlopen(req) as resp:
                payload = resp.read().decode("utf-8")
                return json.loads(payload)
        except error.HTTPError as exc:  # pragma: no cover - network failure path
            detail = exc.read().decode("utf-8", errors="ignore")
            raise RuntimeError(f"GitHub API request failed: {exc.code} {exc.reason}: {detail}") from exc

    def search_repositories(self, topic_expr: str, max_pages: int) -> List[dict]:
        query = build_topic_query(topic_expr)
        results: List[dict] = []
        for page in range(1, max_pages + 1):
            url = (
                f"{API_ROOT}/search/repositories?"
                f"q={parse.quote(query)}&per_page=100&page={page}&sort=updated&order=desc"
            )
            data = self._request(url)
            items = data.get("items", [])
            if self.verbose:
                print(f"Fetched {len(items)} repos for '{topic_expr}' (page {page})")
            results.extend(items)
            if len(items) < 100:
                break
            # Avoid secondary rate-limits.
            time.sleep(0.5)
        return results


def build_topic_query(expr: str) -> str:
    parts = [part.strip() for part in expr.split("+") if part.strip()]
    if not parts:
        raise ValueError("Topic expression must contain at least one topic")
    return "+".join(f"topic:{part}" for part in parts)
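
# For example, the compound default topic expands into an AND query
# (hedged illustration of build_topic_query above):
#   build_topic_query('azerothcore-module+ac-premium')
#   -> 'topic:azerothcore-module+topic:ac-premium'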


def guess_module_type(expr: str) -> str:
    parts = [part.strip().lower() for part in expr.split("+") if part.strip()]
    for part in parts:
        hint = TOPIC_TYPE_HINTS.get(part)
        if hint:
            return hint
    return "cpp"


def normalize_repo_url(url: str) -> str:
    if url.endswith(".git"):
        return url[:-4]
    return url


def repo_name_to_key(name: str) -> str:
    sanitized = re.sub(r"[^A-Za-z0-9]+", "_", name).strip("_")
    sanitized = sanitized.upper()
    if not sanitized:
        sanitized = "MODULE_UNKNOWN"
    if not sanitized.startswith("MODULE_"):
        sanitized = f"MODULE_{sanitized}"
    return sanitized
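
# Key derivation examples (hedged, following the regex above):
#   repo_name_to_key('mod-transmog') -> 'MODULE_MOD_TRANSMOG'
#   repo_name_to_key('StygianCore')  -> 'MODULE_STYGIANCORE'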


def load_manifest(path: str) -> Dict[str, List[dict]]:
    manifest_path = os.path.abspath(path)
    if not os.path.exists(manifest_path):
        return {"modules": []}
    try:
        with open(manifest_path, "r", encoding="utf-8") as handle:
            return json.load(handle)
    except json.JSONDecodeError as exc:
        raise RuntimeError(f"Unable to parse manifest {path}: {exc}") from exc


def ensure_defaults(entry: dict) -> None:
    entry.setdefault("type", "cpp")
    entry.setdefault("status", "active")
    entry.setdefault("order", 5000)
    entry.setdefault("requires", [])
    entry.setdefault("post_install_hooks", [])
    entry.setdefault("config_cleanup", [])


def update_entry_from_repo(entry: dict, repo: dict, repo_type: str, topic_expr: str, refresh: bool) -> None:
    # Only overwrite descriptive fields when refresh is enabled or when they are missing.
    if refresh or not entry.get("name"):
        entry["name"] = repo.get("name") or entry.get("name")
    if refresh or not entry.get("repo"):
        entry["repo"] = repo.get("clone_url") or repo.get("html_url", entry.get("repo"))
    if refresh or not entry.get("description"):
        entry["description"] = repo.get("description") or entry.get("description", "")
    if refresh or not entry.get("type"):
        entry["type"] = repo_type
    if refresh or not entry.get("category"):
        entry["category"] = CATEGORY_BY_TYPE.get(repo_type, entry.get("category", "uncategorized"))
    ensure_defaults(entry)
    notes = entry.get("notes") or ""
    tag_note = f"Discovered via GitHub topic '{topic_expr}'"
    if tag_note not in notes:
        entry["notes"] = (notes + " \n" + tag_note).strip()


def merge_repositories(
    manifest: Dict[str, List[dict]],
    repos: Iterable[RepoRecord],
    refresh_existing: bool,
) -> tuple[int, int]:
    modules = manifest.setdefault("modules", [])
    by_key = {module.get("key"): module for module in modules if module.get("key")}
    by_repo = {
        normalize_repo_url(str(module.get("repo", ""))): module
        for module in modules
        if module.get("repo")
    }
    added = 0
    updated = 0

    for record in repos:
        repo = record.data
        repo_url = normalize_repo_url(repo.get("clone_url") or repo.get("html_url") or "")
        existing = by_repo.get(repo_url)
        key = repo_name_to_key(repo.get("name", ""))
        if not existing:
            existing = by_key.get(key)
        if not existing:
            existing = {
                "key": key,
                "name": repo.get("name", key),
                "repo": repo.get("clone_url") or repo.get("html_url", ""),
                "description": repo.get("description") or "",
                "type": record.module_type,
                "category": CATEGORY_BY_TYPE.get(record.module_type, "uncategorized"),
                "notes": "",
            }
            ensure_defaults(existing)
            modules.append(existing)
            by_key[key] = existing
            if repo_url:
                by_repo[repo_url] = existing
            added += 1
        else:
            updated += 1
        update_entry_from_repo(existing, repo, record.module_type, record.topic_expr, refresh_existing)

    return added, updated


def collect_repositories(
    client: GitHubClient, topics: Sequence[str], max_pages: int
) -> List[RepoRecord]:
    seen: Dict[str, RepoRecord] = {}
    for expr in topics:
        repos = client.search_repositories(expr, max_pages)
        repo_type = guess_module_type(expr)
        for repo in repos:
            full_name = repo.get("full_name")
            if not full_name:
                continue
            record = seen.get(full_name)
            if record is None:
                seen[full_name] = RepoRecord(repo, expr, repo_type)
            else:
                # Prefer the most specific type (non-default) if available.
                if record.module_type == "cpp" and repo_type != "cpp":
                    record.module_type = repo_type
    return list(seen.values())


def main(argv: Sequence[str]) -> int:
    args = parse_args(argv)
    topics = args.topics or DEFAULT_TOPICS
    token = args.token or os.environ.get("GITHUB_TOKEN") or os.environ.get("GITHUB_API_TOKEN")
    client = GitHubClient(token, verbose=args.log)

    manifest = load_manifest(args.manifest)
    repos = collect_repositories(client, topics, args.max_pages)
    added, updated = merge_repositories(manifest, repos, args.refresh_existing)
    if args.dry_run:
        print(f"Discovered {len(repos)} repositories (added={added}, updated={updated})")
        return 0

    with open(args.manifest, "w", encoding="utf-8") as handle:
        json.dump(manifest, handle, indent=2)
        handle.write("\n")

    print(f"Updated manifest {args.manifest}: added {added}, refreshed {updated}")
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
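A hedged invocation sketch (the flags come from parse_args above; a token is optional but avoids Search API rate limits):

    GITHUB_TOKEN=<token> python3 scripts/python/update_module_manifest.py \
        --topic azerothcore-module --max-pages 2 --dry-run --log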