Mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git (synced 2026-02-03 02:43:50 +00:00)
refactor module db importing
@@ -109,6 +109,8 @@ For complete spawn commands, coordinates, and functionality details, see **[docs
 
 For common workflows, management commands, and database operations, see **[docs/GETTING_STARTED.md](docs/GETTING_STARTED.md)**.
 
+- Keep the module catalog current with `scripts/python/update_module_manifest.py` or trigger the scheduled **Sync Module Manifest** GitHub Action to auto-open a PR with the latest AzerothCore topic repos.
+
 ---
 
 ## Advanced Configuration
@@ -73,7 +73,9 @@
       "config_cleanup": [
         "mod_ahbot.conf*"
       ],
-      "category": "economy"
+      "category": "economy",
+      "status": "blocked",
+      "block_reason": "Linker error: Missing Addmod_ahbotScripts() function (use MODULE_LUA_AH_BOT instead)"
     },
     {
       "key": "MODULE_AUTOBALANCE",
@@ -343,6 +345,8 @@
       "name": "mod-quest-count-level",
       "repo": "https://github.com/michaeldelago/mod-quest-count-level.git",
       "type": "cpp",
+      "status": "blocked",
+      "block_reason": "Uses removed ConfigMgr::GetBoolDefault API; fails to compile on modern cores",
       "post_install_hooks": [],
       "config_cleanup": [
         "levelGrant.conf*"
@@ -399,9 +403,11 @@
       "name": "mod-challenge-modes",
       "repo": "https://github.com/ZhengPeiRu21/mod-challenge-modes.git",
       "type": "cpp",
+      "block_reason": "Compilation error: Override signature mismatch on OnGiveXP",
       "post_install_hooks": [],
       "description": "Implements keystone-style timed runs with leaderboards and scaling modifiers",
-      "category": "gameplay-enhancement"
+      "category": "gameplay-enhancement",
+      "status": "blocked"
     },
     {
       "key": "MODULE_OLLAMA_CHAT",
@@ -475,8 +481,10 @@
       "repo": "https://github.com/azerothcore/mod-azerothshard.git",
       "type": "cpp",
       "post_install_hooks": [],
+      "block_reason": "Compilation error: Method name mismatch (getLevel vs GetLevel)",
       "description": "Bundles AzerothShard tweaks: utility NPCs, scripted events, and gameplay improvements",
-      "category": "content"
+      "category": "content",
+      "status": "blocked"
     },
     {
       "key": "MODULE_WORGOBLIN",
@@ -680,7 +688,9 @@
         "copy-standard-lua"
       ],
       "description": "Enables multiple NPC merchants with database integration",
-      "category": "npc-service"
+      "category": "npc-service",
+      "status": "blocked",
+      "block_reason": "Linker error: Missing script loader function"
     },
     {
       "key": "MODULE_TREASURE_CHEST_SYSTEM",
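Note: the `status`/`block_reason` fields introduced above make blocked modules machine-readable. A minimal sketch of filtering on them, assuming the manifest's top-level `modules` array (the top-level shape is not shown in this diff):

```bash
# List blocked modules and why (manifest structure assumed, not confirmed by this diff)
python3 -c "
import json
for m in json.load(open('config/module-manifest.json')).get('modules', []):
    if m.get('status') == 'blocked':
        print(m.get('key', m.get('name', '?')), '-', m.get('block_reason', ''))
"
```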
@@ -400,7 +400,7 @@ services:
       AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
       AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
       AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
-      AC_UPDATES_ENABLE_DATABASES: "0"
+      AC_UPDATES_ENABLE_DATABASES: "7"
       AC_BIND_IP: "0.0.0.0"
       AC_DATA_DIR: "/azerothcore/data"
       AC_SOAP_PORT: "7878"
@@ -489,7 +489,7 @@ services:
         condition: service_completed_successfully
     environment:
       AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
-      AC_UPDATES_ENABLE_DATABASES: "0"
+      AC_UPDATES_ENABLE_DATABASES: "7"
       AC_BIND_IP: "0.0.0.0"
       AC_LOG_LEVEL: "1"
       AC_LOGGER_ROOT_CONFIG: "1,Console"
@@ -526,7 +526,7 @@ services:
       AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
       AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
       AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
-      AC_UPDATES_ENABLE_DATABASES: "0"
+      AC_UPDATES_ENABLE_DATABASES: "7"
       AC_BIND_IP: "0.0.0.0"
       AC_DATA_DIR: "/azerothcore/data"
       AC_SOAP_PORT: "7878"
@@ -579,7 +579,7 @@ services:
       AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
       AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
       AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
-      AC_UPDATES_ENABLE_DATABASES: "0"
+      AC_UPDATES_ENABLE_DATABASES: "7"
       AC_BIND_IP: "0.0.0.0"
       AC_DATA_DIR: "/azerothcore/data"
       AC_SOAP_PORT: "7878"
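For context: AzerothCore's `Updates.EnableDatabases` (surfaced here as `AC_UPDATES_ENABLE_DATABASES`) is a bitmask, so `"7"` turns the built-in updater on for all three core databases instead of disabling it (`"0"`). A quick sketch of the arithmetic:

```bash
# Bit values as documented in worldserver.conf.dist
LOGIN=1; CHARACTER=2; WORLD=4
echo $((LOGIN | CHARACTER | WORLD))   # 7 — the new value set above
```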
@@ -182,6 +182,22 @@ Central module registry and management system:
 
 This centralized approach eliminates duplicate module definitions across scripts.
 
+#### `scripts/python/update_module_manifest.py` - GitHub Topic Sync
+Automates manifest population directly from the official AzerothCore GitHub topics.
+
+```bash
+# Preview new modules across all default topics
+python3 scripts/python/update_module_manifest.py --dry-run --log
+
+# Update config/module-manifest.json with latest repos (requires GITHUB_TOKEN)
+GITHUB_TOKEN=ghp_yourtoken python3 scripts/python/update_module_manifest.py --refresh-existing
+```
+
+- Queries `azerothcore-module`, `azerothcore-lua`, `azerothcore-sql`, `azerothcore-tools`, and `azerothcore-module+ac-premium`
+- Merges new repositories without touching existing customizations
+- Optional `--refresh-existing` flag rehydrates names/descriptions from GitHub
+- Designed for both local execution and the accompanying GitHub Action workflow
+
 #### `scripts/bash/manage-modules-sql.sh` - Module Database Integration
 Executes module-specific SQL scripts for database schema updates.
 
421	scripts/bash/backup-status.sh	Executable file
@@ -0,0 +1,421 @@
#!/bin/bash
# Backup Status Dashboard
# Displays comprehensive backup system status and statistics
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_BACKUP="📦"
ICON_TIME="🕐"
ICON_SIZE="💾"
ICON_CHART="📊"
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_SCHEDULE="📅"

# Default values
SHOW_DETAILS=0
SHOW_TRENDS=0

usage() {
    cat <<'EOF'
Usage: ./backup-status.sh [options]

Display backup system status and statistics.

Options:
  -d, --details    Show detailed backup listing
  -t, --trends     Show size trends over time
  -h, --help       Show this help

Examples:
  ./backup-status.sh
  ./backup-status.sh --details
  ./backup-status.sh --details --trends

EOF
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        -d|--details) SHOW_DETAILS=1; shift;;
        -t|--trends) SHOW_TRENDS=1; shift;;
        -h|--help) usage; exit 0;;
        *) echo "Unknown option: $1"; usage; exit 1;;
    esac
done

# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
    set -a
    # shellcheck disable=SC1091
    source "$PROJECT_ROOT/.env"
    set +a
fi

BACKUP_PATH="${BACKUP_PATH:-$PROJECT_ROOT/storage/backups}"
BACKUP_INTERVAL_MINUTES="${BACKUP_INTERVAL_MINUTES:-60}"
BACKUP_RETENTION_HOURS="${BACKUP_RETENTION_HOURS:-6}"
BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-3}"
BACKUP_DAILY_TIME="${BACKUP_DAILY_TIME:-09}"

# Format bytes to human readable
format_bytes() {
    local bytes=$1
    if [ "$bytes" -lt 1024 ]; then
        echo "${bytes}B"
    elif [ "$bytes" -lt 1048576 ]; then
        echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1024}")KB"
    elif [ "$bytes" -lt 1073741824 ]; then
        echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1048576}")MB"
    else
        echo "$(awk "BEGIN {printf \"%.2f\", $bytes/1073741824}")GB"
    fi
}

# Get directory size
get_dir_size() {
    local dir="$1"
    if [ -d "$dir" ]; then
        du -sb "$dir" 2>/dev/null | cut -f1
    else
        echo "0"
    fi
}

# Count backups in directory
count_backups() {
    local dir="$1"
    if [ -d "$dir" ]; then
        find "$dir" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l
    else
        echo "0"
    fi
}

# Get latest backup timestamp
get_latest_backup() {
    local dir="$1"
    if [ -d "$dir" ]; then
        ls -1t "$dir" 2>/dev/null | head -n1 || echo ""
    else
        echo ""
    fi
}

# Parse timestamp from backup directory name
parse_timestamp() {
    local backup_name="$1"
    # Format: YYYYMMDD_HHMMSS or ExportBackup_YYYYMMDD_HHMMSS
    local timestamp
    if [[ "$backup_name" =~ ([0-9]{8})_([0-9]{6}) ]]; then
        timestamp="${BASH_REMATCH[1]}_${BASH_REMATCH[2]}"
        echo "$timestamp"
    else
        echo ""
    fi
}

# Calculate time ago from timestamp
time_ago() {
    local timestamp="$1"
    if [ -z "$timestamp" ]; then
        echo "Unknown"
        return
    fi

    # Parse timestamp: YYYYMMDD_HHMMSS
    local year="${timestamp:0:4}"
    local month="${timestamp:4:2}"
    local day="${timestamp:6:2}"
    local hour="${timestamp:9:2}"
    local minute="${timestamp:11:2}"
    local second="${timestamp:13:2}"

    local backup_epoch
    backup_epoch=$(date -d "$year-$month-$day $hour:$minute:$second" +%s 2>/dev/null || echo "0")

    if [ "$backup_epoch" = "0" ]; then
        echo "Unknown"
        return
    fi

    local now_epoch
    now_epoch=$(date +%s)
    local diff=$((now_epoch - backup_epoch))

    if [ "$diff" -lt 60 ]; then
        echo "${diff} seconds ago"
    elif [ "$diff" -lt 3600 ]; then
        local minutes=$((diff / 60))
        echo "${minutes} minute(s) ago"
    elif [ "$diff" -lt 86400 ]; then
        local hours=$((diff / 3600))
        echo "${hours} hour(s) ago"
    else
        local days=$((diff / 86400))
        echo "${days} day(s) ago"
    fi
}

# Calculate next scheduled backup
next_backup_time() {
    local interval_minutes="$1"
    local now_epoch
    now_epoch=$(date +%s)

    local next_epoch=$((now_epoch + (interval_minutes * 60)))
    local in_minutes=$(((next_epoch - now_epoch) / 60))

    if [ "$in_minutes" -lt 60 ]; then
        echo "in ${in_minutes} minute(s)"
    else
        local in_hours=$((in_minutes / 60))
        local remaining_minutes=$((in_minutes % 60))
        echo "in ${in_hours} hour(s) ${remaining_minutes} minute(s)"
    fi
}

# Calculate next daily backup
next_daily_backup() {
    local daily_hour="$1"
    local now_epoch
    now_epoch=$(date +%s)

    local today_backup_epoch
    today_backup_epoch=$(date -d "today ${daily_hour}:00:00" +%s)

    local next_epoch
    if [ "$now_epoch" -lt "$today_backup_epoch" ]; then
        next_epoch=$today_backup_epoch
    else
        next_epoch=$(date -d "tomorrow ${daily_hour}:00:00" +%s)
    fi

    local diff=$((next_epoch - now_epoch))
    local hours=$((diff / 3600))
    local minutes=$(((diff % 3600) / 60))

    echo "in ${hours} hour(s) ${minutes} minute(s)"
}

# Show backup tier status
show_backup_tier() {
    local tier_name="$1"
    local tier_dir="$2"
    local retention="$3"

    if [ ! -d "$tier_dir" ]; then
        printf " ${ICON_WARNING} ${YELLOW}%s:${NC} No backups found\n" "$tier_name"
        return
    fi

    local count size latest
    count=$(count_backups "$tier_dir")
    size=$(get_dir_size "$tier_dir")
    latest=$(get_latest_backup "$tier_dir")

    if [ "$count" = "0" ]; then
        printf " ${ICON_WARNING} ${YELLOW}%s:${NC} No backups found\n" "$tier_name"
        return
    fi

    local latest_timestamp
    latest_timestamp=$(parse_timestamp "$latest")
    local ago
    ago=$(time_ago "$latest_timestamp")

    printf " ${GREEN}${ICON_SUCCESS} %s:${NC} %s backup(s), %s total\n" "$tier_name" "$count" "$(format_bytes "$size")"
    printf " ${ICON_TIME} Latest: %s (%s)\n" "$latest" "$ago"
    printf " ${ICON_SCHEDULE} Retention: %s\n" "$retention"

    if [ "$SHOW_DETAILS" = "1" ]; then
        printf " ${ICON_BACKUP} Available backups:\n"
        local backup_list
        backup_list=$(ls -1t "$tier_dir" 2>/dev/null || true)
        while IFS= read -r backup; do
            if [ -n "$backup" ]; then
                local backup_size
                backup_size=$(get_dir_size "$tier_dir/$backup")
                local backup_timestamp
                backup_timestamp=$(parse_timestamp "$backup")
                local backup_ago
                backup_ago=$(time_ago "$backup_timestamp")
                printf " - %s: %s (%s)\n" "$backup" "$(format_bytes "$backup_size")" "$backup_ago"
            fi
        done <<< "$backup_list"
    fi
}

# Show size trends
show_trends() {
    printf "${BOLD}${ICON_CHART} Backup Size Trends${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

    local daily_dir="$BACKUP_PATH/daily"
    if [ ! -d "$daily_dir" ]; then
        printf " ${ICON_WARNING} No daily backups found for trend analysis\n\n"
        return
    fi

    # Get last 7 daily backups
    local backup_list
    backup_list=$(ls -1t "$daily_dir" 2>/dev/null | head -7 | tac)

    if [ -z "$backup_list" ]; then
        printf " ${ICON_WARNING} Not enough backups for trend analysis\n\n"
        return
    fi

    # Find max size for scaling
    local max_size=0
    while IFS= read -r backup; do
        if [ -n "$backup" ]; then
            local size
            size=$(get_dir_size "$daily_dir/$backup")
            if [ "$size" -gt "$max_size" ]; then
                max_size=$size
            fi
        fi
    done <<< "$backup_list"

    # Display trend chart
    while IFS= read -r backup; do
        if [ -n "$backup" ]; then
            local size
            size=$(get_dir_size "$daily_dir/$backup")
            local timestamp
            timestamp=$(parse_timestamp "$backup")
            local date_str="${timestamp:0:4}-${timestamp:4:2}-${timestamp:6:2}"

            # Calculate bar length (max 30 chars)
            local bar_length=0
            if [ "$max_size" -gt 0 ]; then
                bar_length=$((size * 30 / max_size))
            fi

            # Create bar
            local bar=""
            for ((i=0; i<bar_length; i++)); do
                bar+="█"
            done
            for ((i=bar_length; i<30; i++)); do
                bar+="░"
            done

            printf " %s: %s %s\n" "$date_str" "$(format_bytes "$size" | awk '{printf "%-8s", $0}')" "$bar"
        fi
    done <<< "$backup_list"
    echo
}

# Main status display
main() {
    echo
    printf "${BOLD}${BLUE}${ICON_BACKUP} AZEROTHCORE BACKUP STATUS${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo

    # Check if backup directory exists
    if [ ! -d "$BACKUP_PATH" ]; then
        printf "${RED}${ICON_WARNING} Backup directory not found: %s${NC}\n\n" "$BACKUP_PATH"
        printf "Backup system may not be initialized yet.\n\n"
        exit 1
    fi

    # Show current backup tiers
    printf "${BOLD}${ICON_BACKUP} Backup Tiers${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

    show_backup_tier "Hourly Backups" "$BACKUP_PATH/hourly" "${BACKUP_RETENTION_HOURS} hours"
    show_backup_tier "Daily Backups" "$BACKUP_PATH/daily" "${BACKUP_RETENTION_DAYS} days"

    # Check for manual backups
    local manual_count=0
    local manual_size=0
    if [ -d "$PROJECT_ROOT/manual-backups" ]; then
        manual_count=$(count_backups "$PROJECT_ROOT/manual-backups")
        manual_size=$(get_dir_size "$PROJECT_ROOT/manual-backups")
    fi

    # Also check for export backups in main backup dir
    local export_count=0
    if [ -d "$BACKUP_PATH" ]; then
        export_count=$(find "$BACKUP_PATH" -maxdepth 1 -type d -name "ExportBackup_*" 2>/dev/null | wc -l)
        if [ "$export_count" -gt 0 ]; then
            local export_size=0
            while IFS= read -r export_dir; do
                if [ -n "$export_dir" ]; then
                    local size
                    size=$(get_dir_size "$export_dir")
                    export_size=$((export_size + size))
                fi
            done < <(find "$BACKUP_PATH" -maxdepth 1 -type d -name "ExportBackup_*" 2>/dev/null)
            manual_size=$((manual_size + export_size))
            manual_count=$((manual_count + export_count))
        fi
    fi

    if [ "$manual_count" -gt 0 ]; then
        printf " ${GREEN}${ICON_SUCCESS} Manual/Export Backups:${NC} %s backup(s), %s total\n" "$manual_count" "$(format_bytes "$manual_size")"
    fi

    echo

    # Show next scheduled backups
    printf "${BOLD}${ICON_SCHEDULE} Backup Schedule${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    printf " ${ICON_TIME} Hourly interval: every %s minutes\n" "$BACKUP_INTERVAL_MINUTES"
    printf " ${ICON_TIME} Next hourly backup: %s\n" "$(next_backup_time "$BACKUP_INTERVAL_MINUTES")"
    printf " ${ICON_TIME} Daily backup time: %s:00\n" "$BACKUP_DAILY_TIME"
    printf " ${ICON_TIME} Next daily backup: %s\n" "$(next_daily_backup "$BACKUP_DAILY_TIME")"
    echo

    # Calculate total storage
    local total_size=0
    for tier_dir in "$BACKUP_PATH/hourly" "$BACKUP_PATH/daily"; do
        if [ -d "$tier_dir" ]; then
            local size
            size=$(get_dir_size "$tier_dir")
            total_size=$((total_size + size))
        fi
    done
    total_size=$((total_size + manual_size))

    printf "${BOLD}${ICON_SIZE} Total Backup Storage: %s${NC}\n" "$(format_bytes "$total_size")"
    echo

    # Show trends if requested
    if [ "$SHOW_TRENDS" = "1" ]; then
        show_trends
    fi

    # Show backup configuration
    if [ "$SHOW_DETAILS" = "1" ]; then
        printf "${BOLD}⚙️ Backup Configuration${NC}\n"
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        printf " Backup directory: %s\n" "$BACKUP_PATH"
        printf " Hourly retention: %s hours\n" "$BACKUP_RETENTION_HOURS"
        printf " Daily retention: %s days\n" "$BACKUP_RETENTION_DAYS"
        printf " Interval: every %s minutes\n" "$BACKUP_INTERVAL_MINUTES"
        printf " Daily backup time: %s:00\n" "$BACKUP_DAILY_TIME"
        echo
    fi

    printf "${GREEN}${ICON_SUCCESS} Backup status check complete!${NC}\n"
    echo
}

main "$@"
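A possible way to run the new dashboard periodically; the install path and log file here are illustrative assumptions, not part of this commit:

```bash
# Crontab sketch: emit a detailed status report every morning at 08:00
0 8 * * * /opt/AzerothCore-RealmMaster/scripts/bash/backup-status.sh --details >> /var/log/azerothcore-backup-status.log 2>&1
```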
389	scripts/bash/db-health-check.sh	Executable file
@@ -0,0 +1,389 @@
#!/bin/bash
# Database Health Check Script
# Provides comprehensive health status of AzerothCore databases
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_ERROR="❌"
ICON_INFO="ℹ️"
ICON_DB="🗄️"
ICON_SIZE="💾"
ICON_TIME="🕐"
ICON_MODULE="📦"
ICON_UPDATE="🔄"

# Default values
VERBOSE=0
SHOW_PENDING=0
SHOW_MODULES=1
CONTAINER_NAME="ac-mysql"

usage() {
    cat <<'EOF'
Usage: ./db-health-check.sh [options]

Check the health status of AzerothCore databases.

Options:
  -v, --verbose         Show detailed information
  -p, --pending         Show pending updates
  -m, --no-modules      Hide module update information
  -c, --container NAME  MySQL container name (default: ac-mysql)
  -h, --help            Show this help

Examples:
  ./db-health-check.sh
  ./db-health-check.sh --verbose --pending
  ./db-health-check.sh --container ac-mysql-custom

EOF
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        -v|--verbose) VERBOSE=1; shift;;
        -p|--pending) SHOW_PENDING=1; shift;;
        -m|--no-modules) SHOW_MODULES=0; shift;;
        -c|--container) CONTAINER_NAME="$2"; shift 2;;
        -h|--help) usage; exit 0;;
        *) echo "Unknown option: $1"; usage; exit 1;;
    esac
done

# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
    set -a
    # shellcheck disable=SC1091
    source "$PROJECT_ROOT/.env"
    set +a
fi

MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
DB_AUTH_NAME="${DB_AUTH_NAME:-acore_auth}"
DB_WORLD_NAME="${DB_WORLD_NAME:-acore_world}"
DB_CHARACTERS_NAME="${DB_CHARACTERS_NAME:-acore_characters}"
DB_PLAYERBOTS_NAME="${DB_PLAYERBOTS_NAME:-acore_playerbots}"

# MySQL query helper
mysql_query() {
    local database="${1:-}"
    local query="$2"

    if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
        echo "Error: MYSQL_ROOT_PASSWORD not set" >&2
        return 1
    fi

    if command -v docker >/dev/null 2>&1; then
        if [ -n "$database" ]; then
            docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
        else
            docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
        fi
    else
        if [ -n "$database" ]; then
            mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
        else
            mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
        fi
    fi
}

# Format bytes to human readable
format_bytes() {
    local bytes=$1
    if [ "$bytes" -lt 1024 ]; then
        echo "${bytes}B"
    elif [ "$bytes" -lt 1048576 ]; then
        echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1024}")KB"
    elif [ "$bytes" -lt 1073741824 ]; then
        echo "$(awk "BEGIN {printf \"%.1f\", $bytes/1048576}")MB"
    else
        echo "$(awk "BEGIN {printf \"%.2f\", $bytes/1073741824}")GB"
    fi
}

# Check if database exists
db_exists() {
    local db_name="$1"
    local count
    count=$(mysql_query "" "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME='$db_name'" 2>/dev/null || echo "0")
    [ "$count" = "1" ]
}

# Get database size
get_db_size() {
    local db_name="$1"
    mysql_query "" "SELECT IFNULL(SUM(data_length + index_length), 0) FROM information_schema.TABLES WHERE table_schema='$db_name'" 2>/dev/null || echo "0"
}

# Get update count
get_update_count() {
    local db_name="$1"
    local state="${2:-}"

    if [ -n "$state" ]; then
        mysql_query "$db_name" "SELECT COUNT(*) FROM updates WHERE state='$state'" 2>/dev/null || echo "0"
    else
        mysql_query "$db_name" "SELECT COUNT(*) FROM updates" 2>/dev/null || echo "0"
    fi
}

# Get last update timestamp
get_last_update() {
    local db_name="$1"
    mysql_query "$db_name" "SELECT IFNULL(MAX(timestamp), 'Never') FROM updates" 2>/dev/null || echo "Never"
}

# Get table count
get_table_count() {
    local db_name="$1"
    mysql_query "" "SELECT COUNT(*) FROM information_schema.TABLES WHERE table_schema='$db_name'" 2>/dev/null || echo "0"
}

# Get character count
get_character_count() {
    mysql_query "$DB_CHARACTERS_NAME" "SELECT COUNT(*) FROM characters" 2>/dev/null || echo "0"
}

# Get active players (logged in last 24 hours)
get_active_players() {
    mysql_query "$DB_CHARACTERS_NAME" "SELECT COUNT(*) FROM characters WHERE logout_time > UNIX_TIMESTAMP(NOW() - INTERVAL 1 DAY)" 2>/dev/null || echo "0"
}

# Get account count
get_account_count() {
    mysql_query "$DB_AUTH_NAME" "SELECT COUNT(*) FROM account" 2>/dev/null || echo "0"
}

# Get pending updates
get_pending_updates() {
    local db_name="$1"
    mysql_query "$db_name" "SELECT name FROM updates WHERE state='PENDING' ORDER BY name" 2>/dev/null || true
}

# Check database health
check_database() {
    local db_name="$1"
    local display_name="$2"

    if ! db_exists "$db_name"; then
        printf " ${RED}${ICON_ERROR} %s (%s)${NC}\n" "$display_name" "$db_name"
        printf " ${RED}Database does not exist${NC}\n"
        return 1
    fi

    printf " ${GREEN}${ICON_SUCCESS} %s (%s)${NC}\n" "$display_name" "$db_name"

    local update_count module_count last_update db_size table_count
    update_count=$(get_update_count "$db_name" "RELEASED")
    module_count=$(get_update_count "$db_name" "MODULE")
    last_update=$(get_last_update "$db_name")
    db_size=$(get_db_size "$db_name")
    table_count=$(get_table_count "$db_name")

    printf " ${ICON_UPDATE} Updates: %s applied" "$update_count"
    if [ "$module_count" != "0" ] && [ "$SHOW_MODULES" = "1" ]; then
        printf " (%s module)" "$module_count"
    fi
    printf "\n"

    printf " ${ICON_TIME} Last update: %s\n" "$last_update"
    printf " ${ICON_SIZE} Size: %s (%s tables)\n" "$(format_bytes "$db_size")" "$table_count"

    if [ "$VERBOSE" = "1" ]; then
        local custom_count archived_count
        custom_count=$(get_update_count "$db_name" "CUSTOM")
        archived_count=$(get_update_count "$db_name" "ARCHIVED")

        if [ "$custom_count" != "0" ]; then
            printf " ${ICON_INFO} Custom updates: %s\n" "$custom_count"
        fi
        if [ "$archived_count" != "0" ]; then
            printf " ${ICON_INFO} Archived updates: %s\n" "$archived_count"
        fi
    fi

    # Show pending updates if requested
    if [ "$SHOW_PENDING" = "1" ]; then
        local pending_updates
        pending_updates=$(get_pending_updates "$db_name")
        if [ -n "$pending_updates" ]; then
            printf " ${YELLOW}${ICON_WARNING} Pending updates:${NC}\n"
            while IFS= read -r update; do
                printf " - %s\n" "$update"
            done <<< "$pending_updates"
        fi
    fi

    echo
}

# Show module updates summary
show_module_updates() {
    if [ "$SHOW_MODULES" = "0" ]; then
        return
    fi

    printf "${BOLD}${ICON_MODULE} Module Updates${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

    # Get module updates from world database (most modules update world DB)
    local module_updates
    module_updates=$(mysql_query "$DB_WORLD_NAME" "SELECT SUBSTRING_INDEX(name, '_', 1) as module, COUNT(*) as count FROM updates WHERE state='MODULE' GROUP BY module ORDER BY module" 2>/dev/null || echo "")

    if [ -z "$module_updates" ]; then
        printf " ${ICON_INFO} No module updates detected\n\n"
        return
    fi

    while IFS=$'\t' read -r module count; do
        printf " ${GREEN}${ICON_SUCCESS}${NC} %s: %s update(s)\n" "$module" "$count"
    done <<< "$module_updates"
    echo
}

# Get backup information
get_backup_info() {
    local backup_dir="$PROJECT_ROOT/storage/backups"

    if [ ! -d "$backup_dir" ]; then
        printf " ${ICON_INFO} No backups directory found\n"
        return
    fi

    # Check for latest backup (initialized empty so set -u is safe when a tier is missing)
    local latest_hourly="" latest_daily=""
    if [ -d "$backup_dir/hourly" ]; then
        latest_hourly=$(ls -1t "$backup_dir/hourly" 2>/dev/null | head -n1 || echo "")
    fi
    if [ -d "$backup_dir/daily" ]; then
        latest_daily=$(ls -1t "$backup_dir/daily" 2>/dev/null | head -n1 || echo "")
    fi

    if [ -n "$latest_hourly" ]; then
        # Calculate time ago
        local backup_timestamp="${latest_hourly:0:8}_${latest_hourly:9:6}"
        local backup_epoch
        backup_epoch=$(date -d "${backup_timestamp:0:4}-${backup_timestamp:4:2}-${backup_timestamp:6:2} ${backup_timestamp:9:2}:${backup_timestamp:11:2}:${backup_timestamp:13:2}" +%s 2>/dev/null || echo "0")
        local now_epoch
        now_epoch=$(date +%s)
        local diff=$((now_epoch - backup_epoch))
        local hours=$((diff / 3600))
        local minutes=$(((diff % 3600) / 60))

        if [ "$hours" -gt 0 ]; then
            printf " ${ICON_TIME} Last hourly backup: %s hours ago\n" "$hours"
        else
            printf " ${ICON_TIME} Last hourly backup: %s minutes ago\n" "$minutes"
        fi
    fi

    if [ -n "$latest_daily" ] && [ "$latest_daily" != "$latest_hourly" ]; then
        local backup_timestamp="${latest_daily:0:8}_${latest_daily:9:6}"
        local backup_epoch
        backup_epoch=$(date -d "${backup_timestamp:0:4}-${backup_timestamp:4:2}-${backup_timestamp:6:2} ${backup_timestamp:9:2}:${backup_timestamp:11:2}:${backup_timestamp:13:2}" +%s 2>/dev/null || echo "0")
        local now_epoch
        now_epoch=$(date +%s)
        local diff=$((now_epoch - backup_epoch))
        local days=$((diff / 86400))

        printf " ${ICON_TIME} Last daily backup: %s days ago\n" "$days"
    fi
}

# Main health check
main() {
    echo
    printf "${BOLD}${BLUE}${ICON_DB} AZEROTHCORE DATABASE HEALTH CHECK${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo

    # Test MySQL connection
    if ! mysql_query "" "SELECT 1" >/dev/null 2>&1; then
        printf "${RED}${ICON_ERROR} Cannot connect to MySQL server${NC}\n"
        printf " Host: %s:%s\n" "$MYSQL_HOST" "$MYSQL_PORT"
        printf " User: %s\n" "$MYSQL_USER"
        printf " Container: %s\n\n" "$CONTAINER_NAME"
        exit 1
    fi

    printf "${BOLD}${ICON_DB} Database Status${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo

    # Check each database
    check_database "$DB_AUTH_NAME" "Auth DB"
    check_database "$DB_WORLD_NAME" "World DB"
    check_database "$DB_CHARACTERS_NAME" "Characters DB"

    # Optional: Check playerbots database
    if db_exists "$DB_PLAYERBOTS_NAME"; then
        check_database "$DB_PLAYERBOTS_NAME" "Playerbots DB"
    fi

    # Show character/account statistics
    printf "${BOLD}${CYAN}📊 Server Statistics${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

    local account_count character_count active_count
    account_count=$(get_account_count)
    character_count=$(get_character_count)
    active_count=$(get_active_players)

    printf " ${ICON_INFO} Accounts: %s\n" "$account_count"
    printf " ${ICON_INFO} Characters: %s\n" "$character_count"
    printf " ${ICON_INFO} Active (24h): %s\n" "$active_count"
    echo

    # Show module updates
    show_module_updates

    # Show backup information
    printf "${BOLD}${ICON_SIZE} Backup Information${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    get_backup_info
    echo

    # Calculate total database size
    local total_size=0
    for db in "$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME"; do
        if db_exists "$db"; then
            local size
            size=$(get_db_size "$db")
            total_size=$((total_size + size))
        fi
    done

    if db_exists "$DB_PLAYERBOTS_NAME"; then
        local size
        size=$(get_db_size "$DB_PLAYERBOTS_NAME")
        total_size=$((total_size + size))
    fi

    printf "${BOLD}💾 Total Database Storage: %s${NC}\n" "$(format_bytes "$total_size")"
    echo

    printf "${GREEN}${ICON_SUCCESS} Health check complete!${NC}\n"
    echo
}

main "$@"
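The per-state counts the script derives (`RELEASED`, `MODULE`, `CUSTOM`, `ARCHIVED`, `PENDING`) can also be pulled ad hoc; a one-off equivalent of the script's queries, using the same container and credentials:

```bash
# Same data db-health-check.sh aggregates, as a single query against the world DB
docker exec ac-mysql mysql -uroot -p"$MYSQL_ROOT_PASSWORD" acore_world \
  -N -B -e "SELECT state, COUNT(*) FROM updates GROUP BY state"
```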
@@ -280,9 +280,78 @@ if [ -n "$backup_path" ]; then
     return $([ "$restore_success" = true ] && echo 0 || echo 1)
 }
+
+verify_and_update_restored_databases() {
+    echo "🔍 Verifying restored database integrity..."
+
+    # Check if dbimport is available
+    if [ ! -f "/azerothcore/env/dist/bin/dbimport" ]; then
+        echo "⚠️ dbimport not available, skipping verification"
+        return 0
+    fi
+
+    # Create dbimport config for verification
+    echo "📝 Creating dbimport configuration for verification..."
+    mkdir -p /azerothcore/env/dist/etc
+    TEMP_DIR="/azerothcore/env/dist/temp"
+    mkdir -p "$TEMP_DIR"
+    MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
+    cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
+LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
+WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
+CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
+PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
+Updates.EnableDatabases = 15
+Updates.AutoSetup = 1
+TempDir = "${TEMP_DIR}"
+MySQLExecutable = "${MYSQL_EXECUTABLE}"
+Updates.AllowedModules = "all"
+SourceDirectory = "/azerothcore"
+EOF
+
+    cd /azerothcore/env/dist/bin
+    echo "🔄 Running dbimport to apply any missing updates..."
+    if ./dbimport; then
+        echo "✅ Database verification complete - all updates current"
+    else
+        echo "⚠️ dbimport reported issues - check logs"
+        return 1
+    fi
+
+    # Verify critical tables exist
+    echo "🔍 Checking critical tables..."
+    local critical_tables=("account" "characters" "creature" "quest_template")
+    local missing_tables=0
+
+    for table in "${critical_tables[@]}"; do
+        local db_name="$DB_WORLD_NAME"
+        case "$table" in
+            account) db_name="$DB_AUTH_NAME" ;;
+            characters) db_name="$DB_CHARACTERS_NAME" ;;
+        esac
+
+        if ! mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} \
+            -e "SELECT 1 FROM ${db_name}.${table} LIMIT 1" >/dev/null 2>&1; then
+            echo "⚠️ Critical table missing: ${db_name}.${table}"
+            missing_tables=$((missing_tables + 1))
+        fi
+    done
+
+    if [ "$missing_tables" -gt 0 ]; then
+        echo "⚠️ ${missing_tables} critical tables missing after restore"
+        return 1
+    fi
+
+    echo "✅ All critical tables verified"
+    return 0
+}
+
 if restore_backup "$backup_path"; then
     echo "$(date): Backup successfully restored from $backup_path" > "$RESTORE_SUCCESS_MARKER"
     echo "🎉 Backup restoration completed successfully!"
+
+    # Verify and apply missing updates
+    verify_and_update_restored_databases
+
     exit 0
 else
     echo "$(date): Backup restoration failed - proceeding with fresh setup" > "$RESTORE_FAILED_MARKER"
@@ -311,7 +380,8 @@ cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
 LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
 WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
 CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
-Updates.EnableDatabases = 7
+PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
+Updates.EnableDatabases = 15
 Updates.AutoSetup = 1
 TempDir = "${TEMP_DIR}"
 MySQLExecutable = "${MYSQL_EXECUTABLE}"
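Raising `Updates.EnableDatabases` from 7 to 15 adds one more bit on top of auth (1), characters (2), and world (4); the extra bit (8) is assumed here to be the playerbots database flag of the playerbots-enabled core, matching the new `PlayerbotsDatabaseInfo` line. A sketch of the arithmetic:

```bash
LOGIN=1; CHARACTER=2; WORLD=4; PLAYERBOTS=8      # PLAYERBOTS bit is an assumption from the fork
echo $((LOGIN | CHARACTER | WORLD))              # 7  — previous value
echo $((LOGIN | CHARACTER | WORLD | PLAYERBOTS)) # 15 — new value
```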
@@ -477,19 +477,83 @@ load_sql_helper(){
 err "SQL helper not found; expected manage-modules-sql.sh to be available"
 }
 
-execute_module_sql(){
-    SQL_EXECUTION_FAILED=0
-    if declare -f execute_module_sql_scripts >/dev/null 2>&1; then
-        echo 'Executing module SQL scripts...'
-        if execute_module_sql_scripts; then
-            echo 'SQL execution complete.'
-        else
-            echo '⚠️ Module SQL scripts reported errors'
-            SQL_EXECUTION_FAILED=1
-        fi
-    else
-        info "SQL helper did not expose execute_module_sql_scripts; skipping module SQL execution"
-    fi
+stage_module_sql_files(){
+    # Stage SQL files to AzerothCore's native update directory structure
+    # This replaces manual SQL execution with AzerothCore's built-in updater
+
+    local staging_dir="${MODULE_STAGING_DIR:-$MODULES_ROOT}"
+    local sql_manifest="$STATE_DIR/.sql-manifest.json"
+
+    if [ ! -f "$sql_manifest" ]; then
+        info "No SQL manifest found - no SQL files to stage"
+        return 0
+    fi
+
+    # Check if manifest has any modules with SQL
+    local module_count
+    module_count=$(python3 -c "import json; data=json.load(open('$sql_manifest')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0")
+
+    if [ "$module_count" = "0" ]; then
+        info "No modules with SQL files to stage"
+        return 0
+    fi
+
+    info "Staging SQL for $module_count module(s)"
+
+    # Read each module from manifest and stage its SQL
+    local modules_json
+    modules_json=$(python3 -c "import json; data=json.load(open('$sql_manifest')); print('\n'.join(m['name'] for m in data['modules']))" 2>/dev/null || echo "")
+
+    if [ -z "$modules_json" ]; then
+        warn "Failed to parse SQL manifest"
+        return 1
+    fi
+
+    local staged_count=0
+    while IFS= read -r module_name; do
+        if [ -z "$module_name" ]; then
+            continue
+        fi
+
+        local module_path="$staging_dir/$module_name"
+        local acore_modules="/azerothcore/modules/$module_name"
+
+        if [ ! -d "$module_path" ]; then
+            warn "Module path not found: $module_path"
+            continue
+        fi
+
+        # Call stage-module-sql.sh for this module
+        local stage_script="${PROJECT_ROOT}/scripts/bash/stage-module-sql.sh"
+        if [ ! -f "$stage_script" ]; then
+            # Try container location
+            stage_script="/scripts/bash/stage-module-sql.sh"
+        fi
+
+        if [ -f "$stage_script" ]; then
+            if "$stage_script" \
+                --module-name "$module_name" \
+                --module-path "$module_path" \
+                --acore-path "$acore_modules"; then
+                ((staged_count++))
+            fi
+        else
+            warn "SQL staging script not found: $stage_script"
+        fi
+    done <<< "$modules_json"
+
+    if [ "$staged_count" -gt 0 ]; then
+        ok "Staged SQL for $staged_count module(s)"
+        info "SQL will be applied by AzerothCore's updater on next server startup"
+    fi
+
+    return 0
+}
+
+execute_module_sql(){
+    # Legacy function - now calls staging instead of direct execution
+    SQL_EXECUTION_FAILED=0
+    stage_module_sql_files || SQL_EXECUTION_FAILED=1
 }
 
 track_module_state(){
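For reference, a hypothetical `.sql-manifest.json` matching the fields the function reads (`modules`, each with a `name`); the keys inside `sql_files` mirror the db-world/db-characters/db-auth layout used elsewhere in this commit, but the exact inner shape is an assumption:

```bash
cat > "$STATE_DIR/.sql-manifest.json" <<'EOF'
{
  "modules": [
    {
      "name": "mod-transmog",
      "sql_files": {
        "db-world": ["base/transmog.sql"],
        "db-characters": ["base/transmog_chars.sql"]
      }
    }
  ]
}
EOF
```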
@@ -591,13 +655,11 @@ main(){
     remove_disabled_modules
     install_enabled_modules
     manage_configuration_files
-    info "SQL execution gate: MODULES_SKIP_SQL=${MODULES_SKIP_SQL:-0}"
+    info "SQL staging gate: MODULES_SKIP_SQL=${MODULES_SKIP_SQL:-0}"
     if [ "${MODULES_SKIP_SQL:-0}" = "1" ]; then
-        info "Skipping module SQL execution (MODULES_SKIP_SQL=1)"
+        info "Skipping module SQL staging (MODULES_SKIP_SQL=1)"
     else
-        info "Initiating module SQL helper"
-        load_sql_helper
-        info "SQL helper loaded from ${SQL_HELPER_PATH:-unknown}"
+        info "Staging module SQL files for AzerothCore updater"
         execute_module_sql
     fi
     track_module_state
@@ -369,10 +369,85 @@ case "$TARGET_PROFILE" in
     modules) PROFILE_ARGS+=(--profile client-data) ;;
 esac
 
-# Start the target profile
-show_staging_step "Realm Activation" "Bringing services online"
-echo "🟢 Starting services-$TARGET_PROFILE profile..."
-docker compose "${PROFILE_ARGS[@]}" up -d
+# Stage module SQL to core updates directory (after containers start)
+stage_module_sql_to_core() {
+    show_staging_step "Module SQL Staging" "Preparing module database updates"
+
+    # Start containers first to get access to worldserver container
+    show_staging_step "Realm Activation" "Bringing services online"
+    echo "🟢 Starting services-$TARGET_PROFILE profile..."
+    docker compose "${PROFILE_ARGS[@]}" up -d
+
+    # Wait for worldserver container to be running
+    echo "⏳ Waiting for worldserver container..."
+    local max_wait=60
+    local waited=0
+    while ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver" && [ $waited -lt $max_wait ]; do
+        sleep 2
+        waited=$((waited + 2))
+    done
+
+    if ! docker ps --format '{{.Names}}' | grep -q "ac-worldserver"; then
+        echo "⚠️ Worldserver container not found, skipping module SQL staging"
+        return 0
+    fi
+
+    echo "📦 Staging module SQL files to core updates directory..."
+
+    # Create core updates directories inside container
+    docker exec ac-worldserver bash -c "
+        mkdir -p /azerothcore/data/sql/updates/db_world \
+                 /azerothcore/data/sql/updates/db_characters \
+                 /azerothcore/data/sql/updates/db_auth
+    " 2>/dev/null || true
+
+    # Stage SQL from all modules
+    local staged_count=0
+    local timestamp=$(date +"%Y_%m_%d_%H%M%S")
+
+    # Find all modules with SQL files
+    for db_type in db-world db-characters db-auth; do
+        local core_dir=""
+        case "$db_type" in
+            db-world) core_dir="db_world" ;;
+            db-characters) core_dir="db_characters" ;;
+            db-auth) core_dir="db_auth" ;;
+        esac
+
+        # Copy SQL files from each module
+        docker exec ac-worldserver bash -c "
+            counter=0
+            for module_dir in /azerothcore/modules/*/data/sql/$db_type; do
+                if [ -d \"\$module_dir\" ]; then
+                    module_name=\$(basename \$(dirname \$(dirname \$module_dir)))
+                    for sql_file in \"\$module_dir\"/*.sql; do
+                        if [ -f \"\$sql_file\" ]; then
+                            base_name=\$(basename \"\$sql_file\" .sql)
+                            target_name=\"${timestamp}_\${counter}_MODULE_\${module_name}_\${base_name}.sql\"
+                            cp \"\$sql_file\" \"/azerothcore/data/sql/updates/$core_dir/\$target_name\"
+                            echo \" ✓ Staged \$module_name/$db_type/\$(basename \$sql_file)\"
+                            counter=\$((counter + 1))
+                        fi
+                    done
+                fi
+            done
+            echo \$counter
+        " 2>/dev/null | tee /tmp/stage-sql-output.txt || true
+
+        local count=$(tail -1 /tmp/stage-sql-output.txt 2>/dev/null || echo "0")
+        staged_count=$((staged_count + count))
+    done
+
+    if [ "$staged_count" -gt 0 ]; then
+        echo "✅ Staged $staged_count module SQL files to core updates directory"
+        echo "🔄 Restart worldserver to apply: docker restart ac-worldserver"
+    else
+        echo "ℹ️ No module SQL files found to stage"
+    fi
+}
+
+# Stage module SQL (this will also start the containers)
+stage_module_sql_to_core
 
 printf '\n%b\n' "${GREEN}⚔️ Realm staging completed successfully! ⚔️${NC}"
 printf '%b\n' "${GREEN}🏰 Profile: services-$TARGET_PROFILE${NC}"
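Illustration of the naming scheme above: with timestamp `2025_01_15_120000`, a module's `data/sql/db-world/base.sql` would be staged as `2025_01_15_120000_0_MODULE_<module>_base.sql` (module name hypothetical), so the timestamped prefix keeps module updates ordered for the core's updater. To inspect what was staged:

```bash
docker exec ac-worldserver ls /azerothcore/data/sql/updates/db_world | grep MODULE_
```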
315	scripts/bash/test-phase1-integration.sh	Executable file
@@ -0,0 +1,315 @@
|
#!/bin/bash
|
||||||
|
# Phase 1 Integration Test Script
|
||||||
|
# Tests the complete Phase 1 implementation using build and deploy workflows
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||||
|
|
||||||
|
# Colors
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
BOLD='\033[1m'
|
||||||
|
NC='\033[0m'
|
||||||
|
|
||||||
|
# Icons
|
||||||
|
ICON_SUCCESS="✅"
|
||||||
|
ICON_WARNING="⚠️"
|
||||||
|
ICON_ERROR="❌"
|
||||||
|
ICON_INFO="ℹ️"
|
||||||
|
ICON_TEST="🧪"
|
||||||
|
|
||||||
|
# Counters
|
||||||
|
TESTS_TOTAL=0
|
||||||
|
TESTS_PASSED=0
|
||||||
|
TESTS_FAILED=0
|
||||||
|
|
||||||
|
info() {
|
||||||
|
echo -e "${BLUE}${ICON_INFO}${NC} $*"
|
||||||
|
}
|
||||||
|
|
||||||
|
ok() {
|
||||||
|
echo -e "${GREEN}${ICON_SUCCESS}${NC} $*"
|
||||||
|
((TESTS_PASSED++))
|
||||||
|
}
|
||||||
|
|
||||||
|
warn() {
|
||||||
|
echo -e "${YELLOW}${ICON_WARNING}${NC} $*"
|
||||||
|
}
|
||||||
|
|
||||||
|
err() {
|
||||||
|
echo -e "${RED}${ICON_ERROR}${NC} $*"
|
||||||
|
((TESTS_FAILED++))
|
||||||
|
}
|
||||||
|
|
||||||
|
test_header() {
|
||||||
|
((TESTS_TOTAL++))
|
||||||
|
echo ""
|
||||||
|
echo -e "${BOLD}${ICON_TEST} Test $TESTS_TOTAL: $*${NC}"
|
||||||
|
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||||
|
}
|
||||||
|
|
||||||
|
section_header() {
|
||||||
|
echo ""
|
||||||
|
echo ""
|
||||||
|
echo -e "${BOLD}${BLUE}═══════════════════════════════════════════════════════${NC}"
|
||||||
|
echo -e "${BOLD}${BLUE} $*${NC}"
|
||||||
|
echo -e "${BOLD}${BLUE}═══════════════════════════════════════════════════════${NC}"
|
||||||
|
echo ""
|
||||||
|
}

# Change to project root
cd "$PROJECT_ROOT"

section_header "Phase 1 Integration Test Suite"

info "Project root: $PROJECT_ROOT"
info "Test started: $(date)"

# Test 1: Verify .env exists
test_header "Environment Configuration Check"
if [ -f .env ]; then
    ok ".env file exists"

    # Count enabled modules. grep -c already prints 0 when nothing matches;
    # `|| true` only guards the non-zero exit status under `set -e` (a bare
    # `|| echo "0"` would append a second "0" to the captured output).
    enabled_count=$(grep -c "^MODULE_.*=1" .env || true)
    info "Enabled modules: $enabled_count"

    # Check for playerbots
    if grep -q "^MODULE_PLAYERBOTS=1" .env; then
        info "Playerbots module enabled"
    fi
else
    err ".env file not found"
    echo "Please run ./setup.sh first"
    exit 1
fi

# Test 2: Module manifest validation
test_header "Module Manifest Validation"
if [ -f config/module-manifest.json ]; then
    ok "Module manifest exists"

    # Validate JSON
    if python3 -m json.tool config/module-manifest.json >/dev/null 2>&1; then
        ok "Module manifest is valid JSON"
    else
        err "Module manifest has invalid JSON"
    fi
else
    err "Module manifest not found"
    exit 1
fi

# Test 3: Generate module state with SQL discovery
test_header "Module State Generation (SQL Discovery)"
info "Running: python3 scripts/python/modules.py generate"

if python3 scripts/python/modules.py \
    --env-path .env \
    --manifest config/module-manifest.json \
    generate --output-dir local-storage/modules > /tmp/phase1-modules-generate.log 2>&1; then
    ok "Module state generation successful"
else
    # Check if it's just warnings
    if grep -q "warnings detected" /tmp/phase1-modules-generate.log 2>/dev/null; then
        ok "Module state generation completed with warnings"
    else
        err "Module state generation failed"
    fi
fi

# Test 4: Verify SQL manifest created
test_header "SQL Manifest Verification"
if [ -f local-storage/modules/.sql-manifest.json ]; then
    ok "SQL manifest created: local-storage/modules/.sql-manifest.json"

    # Check manifest structure
    module_count=$(python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0")
    info "Modules with SQL: $module_count"

    if [ "$module_count" -gt 0 ]; then
        ok "SQL manifest contains $module_count module(s)"

        # Show first module
        info "Sample module SQL info:"
        python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); m=data['modules'][0] if data['modules'] else {}; print(f\" Name: {m.get('name', 'N/A')}\n SQL files: {len(m.get('sql_files', {}))}\")" 2>/dev/null || true
    else
        warn "No modules with SQL files (expected if modules not yet staged)"
    fi
else
    err "SQL manifest not created"
fi
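
# For reference, the manifest parsed above has this shape (a sketch based on
# what scripts/python/modules.py writes; the module name is illustrative):
#   {
#     "modules": [
#       { "name": "mod-example", "key": "MODULE_EXAMPLE",
#         "sql_files": { "db_world": ["data/sql/db-world/base.sql"] } }
#     ]
#   }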

# Test 5: Verify modules.env created
test_header "Module Environment File Check"
if [ -f local-storage/modules/modules.env ]; then
    ok "modules.env created"

    # Check for key exports
    if grep -q "MODULES_ENABLED=" local-storage/modules/modules.env; then
        ok "MODULES_ENABLED variable present"
    fi

    if grep -q "MODULES_REQUIRES_CUSTOM_BUILD=" local-storage/modules/modules.env; then
        ok "Build requirement flags present"

        # Check if build required
        source local-storage/modules/modules.env
        if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
            info "Custom build required (C++ modules enabled)"
        else
            info "Standard build sufficient (no C++ modules)"
        fi
    fi
else
    err "modules.env not created"
fi

# Test 6: Check build requirement
test_header "Build Requirement Check"
if [ -f local-storage/modules/modules.env ]; then
    source local-storage/modules/modules.env

    info "MODULES_REQUIRES_CUSTOM_BUILD=${MODULES_REQUIRES_CUSTOM_BUILD:-0}"
    info "MODULES_REQUIRES_PLAYERBOT_SOURCE=${MODULES_REQUIRES_PLAYERBOT_SOURCE:-0}"

    if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
        ok "Build system correctly detected C++ modules"
        BUILD_REQUIRED=1
    else
        ok "Build system correctly detected no C++ modules"
        BUILD_REQUIRED=0
    fi
else
    warn "Cannot determine build requirements"
    BUILD_REQUIRED=0
fi

# Test 7: Verify new scripts exist and are executable
test_header "New Script Verification"
scripts=(
    "scripts/bash/stage-module-sql.sh"
    "scripts/bash/verify-sql-updates.sh"
    "scripts/bash/backup-status.sh"
    "scripts/bash/db-health-check.sh"
)

for script in "${scripts[@]}"; do
    if [ -f "$script" ]; then
        if [ -x "$script" ]; then
            ok "$(basename "$script") - exists and executable"
        else
            warn "$(basename "$script") - exists but not executable"
            chmod +x "$script"
            ok "Fixed permissions for $(basename "$script")"
        fi
    else
        err "$(basename "$script") - not found"
    fi
done

# Test 8: Test backup-status.sh (without running containers)
test_header "Backup Status Script Test"
if ./scripts/bash/backup-status.sh 2>&1 | head -10 | grep -q "BACKUP STATUS"; then
    ok "backup-status.sh executes successfully"
else
    err "backup-status.sh failed to execute"
fi

# Test 9: Test db-health-check.sh help
test_header "Database Health Check Script Test"
if ./scripts/bash/db-health-check.sh --help | grep -q "Check the health status"; then
    ok "db-health-check.sh help working"
else
    err "db-health-check.sh help failed"
fi

# Test 10: Check modified scripts for new functionality
test_header "Modified Script Verification"

# Check manage-modules.sh has staging function
if grep -q "stage_module_sql_files()" scripts/bash/manage-modules.sh; then
    ok "manage-modules.sh contains SQL staging function"
else
    err "manage-modules.sh missing SQL staging function"
fi

# Check db-import-conditional.sh has playerbots support
if grep -q "PlayerbotsDatabaseInfo" scripts/bash/db-import-conditional.sh; then
    ok "db-import-conditional.sh has playerbots database support"
else
    err "db-import-conditional.sh missing playerbots support"
fi

if grep -q "Updates.EnableDatabases = 15" scripts/bash/db-import-conditional.sh; then
    ok "db-import-conditional.sh has correct EnableDatabases value (15)"
else
    warn "db-import-conditional.sh may have incorrect EnableDatabases value"
fi

# Check for post-restore verification
if grep -q "verify_and_update_restored_databases" scripts/bash/db-import-conditional.sh; then
    ok "db-import-conditional.sh has post-restore verification"
else
    err "db-import-conditional.sh missing post-restore verification"
fi

# Test 11: Docker Compose configuration check
test_header "Docker Compose Configuration Check"
if [ -f docker-compose.yml ]; then
    ok "docker-compose.yml exists"

    # Check for required services
    if grep -q "ac-mysql:" docker-compose.yml; then
        ok "MySQL service configured"
    fi

    if grep -q "ac-worldserver:" docker-compose.yml; then
        ok "Worldserver service configured"
    fi
else
    err "docker-compose.yml not found"
fi

# Test Summary
section_header "Test Summary"

echo ""
echo -e "${BOLD}Tests Executed: $TESTS_TOTAL${NC}"
echo -e "${GREEN}${BOLD}Passed: $TESTS_PASSED${NC}"
if [ $TESTS_FAILED -gt 0 ]; then
    echo -e "${RED}${BOLD}Failed: $TESTS_FAILED${NC}"
else
    echo -e "${GREEN}${BOLD}Failed: $TESTS_FAILED${NC}"
fi
echo ""

# Calculate success rate
if [ $TESTS_TOTAL -gt 0 ]; then
    success_rate=$((TESTS_PASSED * 100 / TESTS_TOTAL))
    echo -e "${BOLD}Success Rate: ${success_rate}%${NC}"
fi

echo ""

if [ $TESTS_FAILED -eq 0 ]; then
    echo -e "${GREEN}${BOLD}${ICON_SUCCESS} ALL TESTS PASSED${NC}"
    echo ""
    echo "Phase 1 implementation is working correctly!"
    echo ""
    echo "Next steps:"
    echo "  1. Run './build.sh' if C++ modules are enabled"
    echo "  2. Run './deploy.sh' to start containers"
    echo "  3. Verify SQL staging with running containers"
    echo "  4. Check database health with db-health-check.sh"
    exit 0
else
    echo -e "${RED}${BOLD}${ICON_ERROR} SOME TESTS FAILED${NC}"
    echo ""
    echo "Please review the failures above before proceeding."
    exit 1
fi

348  scripts/bash/verify-sql-updates.sh  Executable file
@@ -0,0 +1,348 @@
#!/bin/bash
# Verify SQL Updates
# Checks that SQL updates have been applied via the updates table
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

# Icons
ICON_SUCCESS="✅"
ICON_WARNING="⚠️"
ICON_ERROR="❌"
ICON_INFO="ℹ️"

# Default values
MODULE_NAME=""
DATABASE_NAME=""
SHOW_ALL=0
CHECK_HASH=0
CONTAINER_NAME="ac-mysql"

usage() {
    cat <<'EOF'
Usage: ./verify-sql-updates.sh [options]

Verify that SQL updates have been applied via AzerothCore's updates table.

Options:
  --module NAME      Check specific module
  --database NAME    Check specific database (auth/world/characters)
  --all              Show all module updates
  --check-hash       Verify file hashes match database
  --container NAME   MySQL container name (default: ac-mysql)
  -h, --help         Show this help

Examples:
  ./verify-sql-updates.sh --all
  ./verify-sql-updates.sh --module mod-aoe-loot
  ./verify-sql-updates.sh --database acore_world --all

EOF
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case "$1" in
        --module) MODULE_NAME="$2"; shift 2;;
        --database) DATABASE_NAME="$2"; shift 2;;
        --all) SHOW_ALL=1; shift;;
        --check-hash) CHECK_HASH=1; shift;;
        --container) CONTAINER_NAME="$2"; shift 2;;
        -h|--help) usage; exit 0;;
        *) echo "Unknown option: $1"; usage; exit 1;;
    esac
done

# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
    set -a
    # shellcheck disable=SC1091
    source "$PROJECT_ROOT/.env"
    set +a
fi

MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
DB_AUTH_NAME="${DB_AUTH_NAME:-acore_auth}"
DB_WORLD_NAME="${DB_WORLD_NAME:-acore_world}"
DB_CHARACTERS_NAME="${DB_CHARACTERS_NAME:-acore_characters}"
DB_PLAYERBOTS_NAME="${DB_PLAYERBOTS_NAME:-acore_playerbots}"

# Logging functions
info() {
    echo -e "${BLUE}${ICON_INFO}${NC} $*"
}

ok() {
    echo -e "${GREEN}${ICON_SUCCESS}${NC} $*"
}

warn() {
    echo -e "${YELLOW}${ICON_WARNING}${NC} $*"
}

err() {
    echo -e "${RED}${ICON_ERROR}${NC} $*"
}

# MySQL query helper: runs the query inside the MySQL container when docker
# is available, otherwise against a local mysql client. Pass "" as the first
# argument to run without selecting a database.
mysql_query() {
    local database="${1:-}"
    local query="$2"

    if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
        err "MYSQL_ROOT_PASSWORD not set"
        return 1
    fi

    if command -v docker >/dev/null 2>&1; then
        if [ -n "$database" ]; then
            docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
        else
            docker exec "$CONTAINER_NAME" mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
        fi
    else
        if [ -n "$database" ]; then
            mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" "$database" -N -B -e "$query" 2>/dev/null
        else
            mysql -h"$MYSQL_HOST" -P"$MYSQL_PORT" -u"$MYSQL_USER" -p"$MYSQL_ROOT_PASSWORD" -N -B -e "$query" 2>/dev/null
        fi
    fi
}
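
# Example (a sketch; assumes the container and credentials above are live):
#   mysql_query "$DB_WORLD_NAME" "SELECT COUNT(*) FROM updates WHERE state='MODULE'"
# prints a single number thanks to the -N -B (skip column names, batch) flags.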

# Check if database exists
db_exists() {
    local db_name="$1"
    local count
    count=$(mysql_query "" "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME='$db_name'" 2>/dev/null || echo "0")
    [ "$count" = "1" ]
}

# Verify module SQL in database
verify_module_sql() {
    local module_name="$1"
    local database_name="$2"

    if ! db_exists "$database_name"; then
        err "Database does not exist: $database_name"
        return 1
    fi

    info "Checking module updates in $database_name"

    # Query updates table for module
    local query="SELECT name, hash, state, timestamp, speed FROM updates WHERE name LIKE '%${module_name}%' AND state='MODULE' ORDER BY timestamp DESC"
    local results
    results=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

    if [ -z "$results" ]; then
        warn "No updates found for module: $module_name in $database_name"
        return 0
    fi

    # Display results
    echo
    printf "${BOLD}${CYAN}Module Updates for %s in %s:${NC}\n" "$module_name" "$database_name"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

    while IFS=$'\t' read -r name hash state timestamp speed; do
        printf "${GREEN}${ICON_SUCCESS}${NC} %s\n" "$name"
        printf "   Hash: %s\n" "${hash:0:12}..."
        printf "   Applied: %s\n" "$timestamp"
        printf "   Speed: %sms\n" "$speed"
        echo
    done <<< "$results"

    return 0
}
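
# Example (illustrative module name, matching the usage text above):
#   verify_module_sql "mod-aoe-loot" "$DB_WORLD_NAME"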

# List all module updates
list_module_updates() {
    local database_name="$1"

    if ! db_exists "$database_name"; then
        err "Database does not exist: $database_name"
        return 1
    fi

    info "Listing all module updates in $database_name"

    # Query all module updates
    local query="SELECT name, state, timestamp FROM updates WHERE state='MODULE' ORDER BY timestamp DESC"
    local results
    results=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

    if [ -z "$results" ]; then
        warn "No module updates found in $database_name"
        return 0
    fi

    # Display results
    echo
    printf "${BOLD}${CYAN}All Module Updates in %s:${NC}\n" "$database_name"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

    local count=0
    while IFS=$'\t' read -r name state timestamp; do
        printf "${GREEN}${ICON_SUCCESS}${NC} %s\n" "$name"
        printf "   Applied: %s\n" "$timestamp"
        # Pre-increment keeps the arithmetic non-zero under `set -e`
        ((++count))
    done <<< "$results"

    echo
    ok "Total module updates: $count"
    echo

    return 0
}

# Check whether a specific update file has been applied, optionally comparing
# its hash. Returns 0 when applied, 1 when missing, 2 on mismatch or error.
check_update_applied() {
    local filename="$1"
    local database_name="$2"
    local expected_hash="${3:-}"

    if ! db_exists "$database_name"; then
        err "Database does not exist: $database_name"
        return 2
    fi

    # Query for specific file
    local query="SELECT hash, state, timestamp FROM updates WHERE name='$filename' LIMIT 1"
    local result
    result=$(mysql_query "$database_name" "$query" 2>/dev/null || echo "")

    if [ -z "$result" ]; then
        warn "Update not found: $filename"
        return 1
    fi

    # Parse result
    IFS=$'\t' read -r hash state timestamp <<< "$result"

    ok "Update applied: $filename"
    printf "   Hash: %s\n" "$hash"
    printf "   State: %s\n" "$state"
    printf "   Applied: %s\n" "$timestamp"

    # Check hash if provided
    if [ -n "$expected_hash" ] && [ "$expected_hash" != "$hash" ]; then
        err "Hash mismatch!"
        printf "   Expected: %s\n" "$expected_hash"
        printf "   Actual:   %s\n" "$hash"
        return 2
    fi

    return 0
}
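
# Example (hypothetical filename and hash, for illustration only):
#   check_update_applied "mod_example.sql" "$DB_WORLD_NAME" "0123abcd..."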

# Generate verification report
generate_verification_report() {
    echo
    printf "${BOLD}${BLUE}🔍 Module SQL Verification Report${NC}\n"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo

    local total_updates=0
    local databases=("$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME")

    # Add playerbots if it exists
    if db_exists "$DB_PLAYERBOTS_NAME"; then
        databases+=("$DB_PLAYERBOTS_NAME")
    fi

    for db in "${databases[@]}"; do
        if ! db_exists "$db"; then
            continue
        fi

        # Get count of module updates
        local count
        count=$(mysql_query "$db" "SELECT COUNT(*) FROM updates WHERE state='MODULE'" 2>/dev/null || echo "0")

        if [ "$count" != "0" ]; then
            printf "${GREEN}${ICON_SUCCESS}${NC} ${BOLD}%s:${NC} %s module update(s)\n" "$db" "$count"
            total_updates=$((total_updates + count))

            if [ "$SHOW_ALL" = "1" ]; then
                # Show recent updates
                local query="SELECT name, timestamp FROM updates WHERE state='MODULE' ORDER BY timestamp DESC LIMIT 5"
                local results
                results=$(mysql_query "$db" "$query" 2>/dev/null || echo "")

                if [ -n "$results" ]; then
                    while IFS=$'\t' read -r name timestamp; do
                        printf "   - %s (%s)\n" "$name" "$timestamp"
                    done <<< "$results"
                    echo
                fi
            fi
        else
            printf "${YELLOW}${ICON_WARNING}${NC} ${BOLD}%s:${NC} No module updates\n" "$db"
        fi
    done

    echo
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    printf "${BOLD}Total: %s module update(s) applied${NC}\n" "$total_updates"
    echo
}

# Main execution
main() {
    echo
    info "SQL Update Verification"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo

    # Test MySQL connection
    if ! mysql_query "" "SELECT 1" >/dev/null 2>&1; then
        err "Cannot connect to MySQL server"
        printf "   Host: %s:%s\n" "$MYSQL_HOST" "$MYSQL_PORT"
        printf "   User: %s\n" "$MYSQL_USER"
        printf "   Container: %s\n\n" "$CONTAINER_NAME"
        exit 1
    fi

    # Execute based on options
    if [ -n "$MODULE_NAME" ]; then
        # Check specific module
        if [ -n "$DATABASE_NAME" ]; then
            verify_module_sql "$MODULE_NAME" "$DATABASE_NAME"
        else
            # Check all databases for this module
            for db in "$DB_AUTH_NAME" "$DB_WORLD_NAME" "$DB_CHARACTERS_NAME"; do
                if db_exists "$db"; then
                    verify_module_sql "$MODULE_NAME" "$db"
                fi
            done
            if db_exists "$DB_PLAYERBOTS_NAME"; then
                verify_module_sql "$MODULE_NAME" "$DB_PLAYERBOTS_NAME"
            fi
        fi
    elif [ -n "$DATABASE_NAME" ]; then
        # List all updates in specific database
        list_module_updates "$DATABASE_NAME"
    else
        # Generate full report
        generate_verification_report
    fi

    echo
    ok "Verification complete"
    echo
}

main "$@"

BIN  scripts/python/__pycache__/modules.cpython-312.pyc  Normal file
Binary file not shown.
@@ -82,6 +82,64 @@ def load_manifest(manifest_path: Path) -> List[Dict[str, object]]:
    return validated


def discover_sql_files(module_path: Path, module_name: str) -> Dict[str, List[str]]:
    """
    Scan a module checkout for SQL files.

    Returns:
        Dict mapping database type to a list of SQL file paths, as strings
        relative to the module root.
        Example: {
            'db_auth': ['data/sql/db-auth/file1.sql', ...],
            'db_world': ['data/sql/db-world/file2.sql', ...],
            'db_characters': ['data/sql/db-characters/file3.sql', ...]
        }
    """
    sql_files: Dict[str, List[str]] = {}
    sql_base = module_path / 'data' / 'sql'

    if not sql_base.exists():
        return sql_files

    # Map to support both underscore and hyphen naming conventions
    db_types = {
        'db_auth': ['db_auth', 'db-auth'],
        'db_world': ['db_world', 'db-world'],
        'db_characters': ['db_characters', 'db-characters'],
        'db_playerbots': ['db_playerbots', 'db-playerbots']
    }

    for canonical_name, variants in db_types.items():
        # Check base/ with all variants
        for variant in variants:
            base_dir = sql_base / 'base' / variant
            if base_dir.exists():
                for sql_file in base_dir.glob('*.sql'):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

        # Check updates/ with all variants
        for variant in variants:
            updates_dir = sql_base / 'updates' / variant
            if updates_dir.exists():
                for sql_file in updates_dir.glob('*.sql'):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

        # Check custom/ with all variants
        for variant in variants:
            custom_dir = sql_base / 'custom' / variant
            if custom_dir.exists():
                for sql_file in custom_dir.glob('*.sql'):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

        # ALSO check direct db-type directories (legacy format used by many modules)
        for variant in variants:
            direct_dir = sql_base / variant
            if direct_dir.exists():
                for sql_file in direct_dir.glob('*.sql'):
                    sql_files.setdefault(canonical_name, []).append(str(sql_file.relative_to(module_path)))

    return sql_files
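
# For orientation, discover_sql_files() scans layouts like the following
# (paths relative to the module root; the file names are illustrative):
#   data/sql/base/db_world/create.sql
#   data/sql/updates/db-characters/2024_01_01_00.sql
#   data/sql/db-world/legacy.sql        (legacy direct layout)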


@dataclass
class ModuleState:
    key: str
@@ -103,6 +161,7 @@ class ModuleState:
    dependency_issues: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    sql_files: Dict[str, List[str]] = field(default_factory=dict)

    @property
    def blocked(self) -> bool:
@@ -340,6 +399,30 @@ def write_outputs(state: ModuleCollectionState, output_dir: Path) -> None:
        encoding="utf-8",
    )

    # Discover SQL files for all modules in output directory
    for module in state.modules:
        module_path = output_dir / module.name
        if module_path.exists():
            module.sql_files = discover_sql_files(module_path, module.name)

    # Generate SQL manifest for enabled modules with SQL files
    sql_manifest = {
        "modules": [
            {
                "name": module.name,
                "key": module.key,
                "sql_files": module.sql_files
            }
            for module in state.enabled_modules()
            if module.sql_files
        ]
    }
    sql_manifest_path = output_dir / ".sql-manifest.json"
    sql_manifest_path.write_text(
        json.dumps(sql_manifest, indent=2) + "\n",
        encoding="utf-8",
    )


def print_list(state: ModuleCollectionState, selector: str) -> None:
    if selector == "compile":

298  scripts/python/update_module_manifest.py  Executable file
@@ -0,0 +1,298 @@
#!/usr/bin/env python3
"""Generate or update config/module-manifest.json from GitHub topics.

The script queries the GitHub Search API for repositories tagged with
AzerothCore-specific topics (for example ``azerothcore-module`` or
``azerothcore-lua``) and merges the discovered projects into the existing
module manifest. It intentionally keeps all user-defined fields intact so the
script can be run safely in CI or locally to add new repositories as they are
published.
"""

from __future__ import annotations

import argparse
import json
import os
import re
import sys
import time
from dataclasses import dataclass
from typing import Dict, Iterable, List, Optional, Sequence
from urllib import error, parse, request

API_ROOT = "https://api.github.com"
DEFAULT_TOPICS = [
    "azerothcore-module",
    "azerothcore-module+ac-premium",
    "azerothcore-tools",
    "azerothcore-lua",
    "azerothcore-sql",
]
# Map topic keywords to module ``type`` values used in the manifest.
TOPIC_TYPE_HINTS = {
    "azerothcore-lua": "lua",
    "lua": "lua",
    "azerothcore-sql": "sql",
    "sql": "sql",
    "azerothcore-tools": "tool",
    "tools": "tool",
}
CATEGORY_BY_TYPE = {
    "lua": "scripting",
    "sql": "database",
    "tool": "tooling",
    "data": "data",
    "cpp": "uncategorized",
}
USER_AGENT = "acore-compose-module-manifest"


def parse_args(argv: Sequence[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--manifest",
        default="config/module-manifest.json",
        help="Path to manifest JSON file (default: %(default)s)",
    )
    parser.add_argument(
        "--topic",
        action="append",
        default=[],
        dest="topics",
        help="GitHub topic (or '+' separated topics) to scan. Defaults to core topics if not provided.",
    )
    parser.add_argument(
        "--token",
        help="GitHub API token (defaults to $GITHUB_TOKEN or $GITHUB_API_TOKEN)",
    )
    parser.add_argument(
        "--max-pages",
        type=int,
        default=10,
        help="Maximum pages (x100 results) to fetch per topic (default: %(default)s)",
    )
    parser.add_argument(
        "--refresh-existing",
        action="store_true",
        help="Refresh name/description/type for repos already present in manifest",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Fetch and display the summary without writing to disk",
    )
    parser.add_argument(
        "--log",
        action="store_true",
        help="Print verbose progress information",
    )
    return parser.parse_args(argv)


@dataclass
class RepoRecord:
    data: dict
    topic_expr: str
    module_type: str


class GitHubClient:
    def __init__(self, token: Optional[str], verbose: bool = False) -> None:
        self.token = token
        self.verbose = verbose

    def _request(self, url: str) -> dict:
        req = request.Request(url)
        req.add_header("Accept", "application/vnd.github+json")
        req.add_header("User-Agent", USER_AGENT)
        if self.token:
            req.add_header("Authorization", f"Bearer {self.token}")
        try:
            with request.urlopen(req) as resp:
                payload = resp.read().decode("utf-8")
                return json.loads(payload)
        except error.HTTPError as exc:  # pragma: no cover - network failure path
            detail = exc.read().decode("utf-8", errors="ignore")
            raise RuntimeError(f"GitHub API request failed: {exc.code} {exc.reason}: {detail}") from exc

    def search_repositories(self, topic_expr: str, max_pages: int) -> List[dict]:
        query = build_topic_query(topic_expr)
        results: List[dict] = []
        for page in range(1, max_pages + 1):
            url = (
                f"{API_ROOT}/search/repositories?"
                f"q={parse.quote(query)}&per_page=100&page={page}&sort=updated&order=desc"
            )
            data = self._request(url)
            items = data.get("items", [])
            if self.verbose:
                print(f"Fetched {len(items)} repos for '{topic_expr}' (page {page})")
            results.extend(items)
            if len(items) < 100:
                break
            # Avoid secondary rate-limits.
            time.sleep(0.5)
        return results


def build_topic_query(expr: str) -> str:
    parts = [part.strip() for part in expr.split("+") if part.strip()]
    if not parts:
        raise ValueError("Topic expression must contain at least one topic")
    return "+".join(f"topic:{part}" for part in parts)


def guess_module_type(expr: str) -> str:
    parts = [part.strip().lower() for part in expr.split("+") if part.strip()]
    for part in parts:
        hint = TOPIC_TYPE_HINTS.get(part)
        if hint:
            return hint
    return "cpp"


def normalize_repo_url(url: str) -> str:
    if url.endswith(".git"):
        return url[:-4]
    return url


def repo_name_to_key(name: str) -> str:
    sanitized = re.sub(r"[^A-Za-z0-9]+", "_", name).strip("_")
    sanitized = sanitized.upper()
    if not sanitized:
        sanitized = "MODULE_UNKNOWN"
    if not sanitized.startswith("MODULE_"):
        sanitized = f"MODULE_{sanitized}"
    return sanitized
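
# e.g. (illustrative inputs):
#   repo_name_to_key("mod-aoe-loot")  -> "MODULE_MOD_AOE_LOOT"
#   repo_name_to_key("StygianCore")   -> "MODULE_STYGIANCORE"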


def load_manifest(path: str) -> Dict[str, List[dict]]:
    manifest_path = os.path.abspath(path)
    if not os.path.exists(manifest_path):
        return {"modules": []}
    try:
        with open(manifest_path, "r", encoding="utf-8") as handle:
            return json.load(handle)
    except json.JSONDecodeError as exc:
        raise RuntimeError(f"Unable to parse manifest {path}: {exc}") from exc


def ensure_defaults(entry: dict) -> None:
    entry.setdefault("type", "cpp")
    entry.setdefault("status", "active")
    entry.setdefault("order", 5000)
    entry.setdefault("requires", [])
    entry.setdefault("post_install_hooks", [])
    entry.setdefault("config_cleanup", [])


def update_entry_from_repo(entry: dict, repo: dict, repo_type: str, topic_expr: str, refresh: bool) -> None:
    # Only overwrite descriptive fields when refresh is enabled or when they are missing.
    if refresh or not entry.get("name"):
        entry["name"] = repo.get("name") or entry.get("name")
    if refresh or not entry.get("repo"):
        entry["repo"] = repo.get("clone_url") or repo.get("html_url", entry.get("repo"))
    if refresh or not entry.get("description"):
        entry["description"] = repo.get("description") or entry.get("description", "")
    if refresh or not entry.get("type"):
        entry["type"] = repo_type
    if refresh or not entry.get("category"):
        entry["category"] = CATEGORY_BY_TYPE.get(repo_type, entry.get("category", "uncategorized"))
    ensure_defaults(entry)
    notes = entry.get("notes") or ""
    tag_note = f"Discovered via GitHub topic '{topic_expr}'"
    if tag_note not in notes:
        entry["notes"] = (notes + " \n" + tag_note).strip()


def merge_repositories(
    manifest: Dict[str, List[dict]],
    repos: Iterable[RepoRecord],
    refresh_existing: bool,
) -> tuple[int, int]:
    modules = manifest.setdefault("modules", [])
    by_key = {module.get("key"): module for module in modules if module.get("key")}
    by_repo = {
        normalize_repo_url(str(module.get("repo", ""))): module
        for module in modules
        if module.get("repo")
    }
    added = 0
    updated = 0

    for record in repos:
        repo = record.data
        repo_url = normalize_repo_url(repo.get("clone_url") or repo.get("html_url") or "")
        existing = by_repo.get(repo_url)
        key = repo_name_to_key(repo.get("name", ""))
        if not existing:
            existing = by_key.get(key)
        if not existing:
            existing = {
                "key": key,
                "name": repo.get("name", key),
                "repo": repo.get("clone_url") or repo.get("html_url", ""),
                "description": repo.get("description") or "",
                "type": record.module_type,
                "category": CATEGORY_BY_TYPE.get(record.module_type, "uncategorized"),
                "notes": "",
            }
            ensure_defaults(existing)
            modules.append(existing)
            by_key[key] = existing
            if repo_url:
                by_repo[repo_url] = existing
            added += 1
        else:
            updated += 1
        update_entry_from_repo(existing, repo, record.module_type, record.topic_expr, refresh_existing)

    return added, updated


def collect_repositories(
    client: GitHubClient, topics: Sequence[str], max_pages: int
) -> List[RepoRecord]:
    seen: Dict[str, RepoRecord] = {}
    for expr in topics:
        repos = client.search_repositories(expr, max_pages)
        repo_type = guess_module_type(expr)
        for repo in repos:
            full_name = repo.get("full_name")
            if not full_name:
                continue
            record = seen.get(full_name)
            if record is None:
                seen[full_name] = RepoRecord(repo, expr, repo_type)
            else:
                # Prefer the most specific type (non-default) if available.
                if record.module_type == "cpp" and repo_type != "cpp":
                    record.module_type = repo_type
    return list(seen.values())


def main(argv: Sequence[str]) -> int:
    args = parse_args(argv)
    topics = args.topics or DEFAULT_TOPICS
    token = args.token or os.environ.get("GITHUB_TOKEN") or os.environ.get("GITHUB_API_TOKEN")
    client = GitHubClient(token, verbose=args.log)

    manifest = load_manifest(args.manifest)
    repos = collect_repositories(client, topics, args.max_pages)
    added, updated = merge_repositories(manifest, repos, args.refresh_existing)
    if args.dry_run:
        print(f"Discovered {len(repos)} repositories (added={added}, updated={updated})")
        return 0

    with open(args.manifest, "w", encoding="utf-8") as handle:
        json.dump(manifest, handle, indent=2)
        handle.write("\n")

    print(f"Updated manifest {args.manifest}: added {added}, refreshed {updated}")
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))