cleanup: validation and integrations for importing data

This commit is contained in:
uprightbass360
2025-11-22 16:49:01 -05:00
committed by Deckard
parent e6231bb4a4
commit 6ddfe9b2c7
17 changed files with 6797 additions and 369 deletions

View File

@@ -0,0 +1,265 @@
#!/usr/bin/env bash
#
# cleanup-orphaned-sql.sh
#
# Cleans up orphaned SQL update entries from the database.
# These are entries in the 'updates' table that reference files no longer on disk.
#
# This happens when:
# - Modules are removed/uninstalled
# - Modules are updated and old SQL files are deleted
# - Manual SQL cleanup occurs
#
# NOTE: These warnings are informational and don't affect server operation.
# This script is optional - it just cleans up the logs.
#
set -euo pipefail
# Configuration (each value may be overridden via the environment)
MYSQL_CONTAINER="${MYSQL_CONTAINER:-ac-mysql}"
WORLDSERVER_CONTAINER="${WORLDSERVER_CONTAINER:-ac-worldserver}"
MYSQL_USER="${MYSQL_USER:-root}"
# NOTE: seeded from MYSQL_ROOT_PASSWORD (not MYSQL_PASSWORD); --password overrides
MYSQL_PASSWORD="${MYSQL_ROOT_PASSWORD:-}"
DRY_RUN=false
VERBOSE=false
# Default: clean all three AzerothCore databases; --database narrows to one
DATABASES=("acore_world" "acore_characters" "acore_auth")
# Colors (ANSI escapes; printed with echo -e)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Usage
# Print usage/help text and exit.
#
# Arguments:
#   $1 - optional exit status (default 0). Pass a non-zero status when the
#        help text is shown in response to invalid invocation, so scripted
#        callers can detect the failure.
usage() {
    cat << EOF
Usage: $0 [OPTIONS]
Clean up orphaned SQL update entries from AzerothCore databases.
OPTIONS:
    -p, --password PASSWORD   MySQL root password (or use MYSQL_ROOT_PASSWORD env var)
    -c, --container NAME      MySQL container name (default: ac-mysql)
    -w, --worldserver NAME    Worldserver container name (default: ac-worldserver)
    -d, --database DB         Clean only specific database (world, characters, auth)
    -n, --dry-run             Show what would be cleaned without making changes
    -v, --verbose             Show detailed output
    -h, --help                Show this help message
EXAMPLES:
    # Dry run to see what would be cleaned
    $0 --dry-run
    # Clean all databases
    $0 --password yourpassword
    # Clean only world database
    $0 --password yourpassword --database world
    # Verbose output
    $0 --password yourpassword --verbose
NOTES:
    - This script only removes entries from the 'updates' table
    - It does NOT remove any actual data or tables
    - It does NOT reverse any SQL that was applied
    - This is safe to run and only cleans up tracking metadata
    - Orphaned entries occur when modules are removed/updated
EOF
    exit "${1:-0}"
}
# Parse arguments
# Parse command-line options. Wrapped in a function so the logic is
# re-usable and testable; all assignments are to script globals.
#
# Side effects: sets MYSQL_PASSWORD, MYSQL_CONTAINER, WORLDSERVER_CONTAINER,
# DATABASES, DRY_RUN and VERBOSE. Exits non-zero on invalid input
# (previously an unknown option fell through to usage, which exited 0 and
# hid the failure from scripted callers).
parse_args() {
    while [[ $# -gt 0 ]]; do
        case $1 in
            -p|--password)
                # ${2:?} gives a clear error instead of a bare set -u abort
                MYSQL_PASSWORD="${2:?Error: $1 requires a value}"
                shift 2
                ;;
            -c|--container)
                MYSQL_CONTAINER="${2:?Error: $1 requires a value}"
                shift 2
                ;;
            -w|--worldserver)
                WORLDSERVER_CONTAINER="${2:?Error: $1 requires a value}"
                shift 2
                ;;
            -d|--database)
                case ${2:-} in
                    world)      DATABASES=("acore_world") ;;
                    characters) DATABASES=("acore_characters") ;;
                    auth)       DATABASES=("acore_auth") ;;
                    *)
                        echo -e "${RED}Error: Invalid database '${2:-}'${NC}" >&2
                        exit 1
                        ;;
                esac
                shift 2
                ;;
            -n|--dry-run)
                DRY_RUN=true
                shift
                ;;
            -v|--verbose)
                VERBOSE=true
                shift
                ;;
            -h|--help)
                usage
                ;;
            *)
                # Unknown option: report on stderr and fail (exit 1)
                echo -e "${RED}Error: Unknown option '$1'${NC}" >&2
                echo "Use --help for usage information" >&2
                exit 1
                ;;
        esac
    done
}
parse_args "$@"
# Check password (required for every DB operation below).
# All error diagnostics go to stderr so scripted callers can separate
# them from normal output.
if [[ -z "$MYSQL_PASSWORD" ]]; then
    echo -e "${RED}Error: MySQL password required${NC}" >&2
    echo "Use --password or set MYSQL_ROOT_PASSWORD environment variable" >&2
    exit 1
fi
# Check that both containers exist and are running
if ! docker ps --format '{{.Names}}' | grep -q "^${MYSQL_CONTAINER}$"; then
    echo -e "${RED}Error: MySQL container '$MYSQL_CONTAINER' not found or not running${NC}" >&2
    exit 1
fi
if ! docker ps --format '{{.Names}}' | grep -q "^${WORLDSERVER_CONTAINER}$"; then
    echo -e "${RED}Error: Worldserver container '$WORLDSERVER_CONTAINER' not found or not running${NC}" >&2
    exit 1
fi
echo -e "${BLUE}╔════════════════════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ AzerothCore Orphaned SQL Cleanup ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════════════════════════╝${NC}"
echo
if [[ "$DRY_RUN" == true ]]; then
    echo -e "${YELLOW}DRY RUN MODE - No changes will be made${NC}"
    echo
fi
# Function to get SQL files from worldserver container
# List the basenames of all *.sql update files shipped inside the
# worldserver container for the given database type (db_world,
# db_characters, db_auth). Prints one filename per line; prints nothing
# (and still succeeds) when the directory is missing or empty.
#
# Previously piped through `xargs -I {} basename {}`, which forks one
# basename process per file; a single sed strips the directory prefix
# for the whole stream at once.
get_sql_files() {
    local db_type=$1
    docker exec "$WORLDSERVER_CONTAINER" find "/azerothcore/data/sql/updates/${db_type}/" -name "*.sql" -type f 2>/dev/null | \
        sed 's|.*/||' || true
}
# Function to clean orphaned entries
# Remove rows from <database>.updates that reference SQL files which no
# longer exist on disk inside the worldserver container.
#
# Arguments:
#   $1 - database name (e.g. acore_world)
#   $2 - SQL update directory type (db_world, db_characters, db_auth)
# Globals read:
#   MYSQL_CONTAINER, MYSQL_USER, MYSQL_PASSWORD, DRY_RUN, VERBOSE
clean_orphaned_entries() {
    local database=$1
    local db_type=$2
    echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    echo -e "${GREEN}Processing: $database${NC}"
    echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    # Get list of SQL file basenames on disk
    local sql_files
    sql_files=$(get_sql_files "$db_type")
    if [[ -z "$sql_files" ]]; then
        echo -e "${YELLOW}⚠ No SQL files found in /azerothcore/data/sql/updates/${db_type}/${NC}"
        echo
        return
    fi
    local file_count
    file_count=$(echo "$sql_files" | wc -l)
    echo -e "📁 Found ${file_count} SQL files on disk"
    # Total number of tracked updates in the database
    local total_updates
    total_updates=$(docker exec "$MYSQL_CONTAINER" mysql -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$database" -sN \
        -e "SELECT COUNT(*) FROM updates" 2>/dev/null || echo "0")
    echo -e "📊 Total updates in database: ${total_updates}"
    if [[ "$total_updates" == "0" ]]; then
        echo -e "${YELLOW}⚠ No updates found in database${NC}"
        echo
        return
    fi
    # Find orphaned entries (present in the DB but missing on disk)
    local orphaned_count=0
    local orphaned_list=""
    # All tracked update names from the DB
    local db_updates
    db_updates=$(docker exec "$MYSQL_CONTAINER" mysql -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$database" -sN \
        -e "SELECT name FROM updates ORDER BY name" 2>/dev/null || true)
    if [[ -n "$db_updates" ]]; then
        # Check each DB entry against the on-disk file list
        while IFS= read -r update_name; do
            # -x forces a whole-line match so 'a.sql' cannot falsely match
            # a file named 'data.sql' (substring match would hide orphans)
            if ! echo "$sql_files" | grep -qxF "$update_name"; then
                # Do NOT use ((orphaned_count++)) here: its exit status is 1
                # when the pre-increment value is 0, which would abort the
                # whole script under 'set -e' on the very first orphan.
                orphaned_count=$((orphaned_count + 1))
                if [[ "$VERBOSE" == true ]] || [[ "$DRY_RUN" == true ]]; then
                    orphaned_list="${orphaned_list}${update_name}\n"
                fi
                # Delete if not dry run
                # NOTE(review): names are filenames; one containing a single
                # quote would break this statement - confirm upstream naming
                if [[ "$DRY_RUN" == false ]]; then
                    docker exec "$MYSQL_CONTAINER" mysql -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$database" -e \
                        "DELETE FROM updates WHERE name='${update_name}'" 2>/dev/null
                fi
            fi
        done <<< "$db_updates"
    fi
    # Report results
    if [[ $orphaned_count -gt 0 ]]; then
        echo -e "${YELLOW}🗑️ Orphaned entries: ${orphaned_count}${NC}"
        if [[ "$VERBOSE" == true ]] || [[ "$DRY_RUN" == true ]]; then
            echo
            echo -e "${YELLOW}Orphaned files:${NC}"
            echo -e "$orphaned_list" | head -20
            if [[ $orphaned_count -gt 20 ]]; then
                echo -e "${YELLOW}... and $((orphaned_count - 20)) more${NC}"
            fi
        fi
        if [[ "$DRY_RUN" == false ]]; then
            echo -e "${GREEN}✅ Cleaned ${orphaned_count} orphaned entries${NC}"
        else
            echo -e "${YELLOW}Would clean ${orphaned_count} orphaned entries${NC}"
        fi
    else
        echo -e "${GREEN}✅ No orphaned entries found${NC}"
    fi
    echo
}
# Process each configured database, mapping the database name to the
# directory type used for its SQL update files.
for db in "${DATABASES[@]}"; do
    case "$db" in
        acore_world)      db_type="db_world" ;;
        acore_characters) db_type="db_characters" ;;
        acore_auth)       db_type="db_auth" ;;
        *)                continue ;;
    esac
    clean_orphaned_entries "$db" "$db_type"
done
# Final summary banner
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${GREEN}Cleanup Complete${NC}"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
if [[ "$DRY_RUN" == true ]]; then
    echo
    echo -e "${YELLOW}This was a dry run. To actually clean orphaned entries, run:${NC}"
    echo -e "${YELLOW} $0 --password yourpassword${NC}"
fi

View File

@@ -1,57 +1,167 @@
#!/bin/bash
# Fix item import for backup-merged characters
#
# Usage:
# fix-item-import.sh [OPTIONS]
#
# Options:
# --backup-dir DIR Path to backup directory (required)
# --account-ids IDS Comma-separated account IDs (e.g., "451,452")
# --char-guids GUIDS Comma-separated character GUIDs (e.g., "4501,4502,4503")
# --mysql-password PW MySQL root password (or use MYSQL_ROOT_PASSWORD env var)
# --mysql-container NAME MySQL container name (default: ac-mysql)
# --auth-db NAME Auth database name (default: acore_auth)
# --characters-db NAME Characters database name (default: acore_characters)
# -h, --help Show this help message
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
COLOR_RED='\033[0;31m'
COLOR_GREEN='\033[0;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_BLUE='\033[0;34m'
COLOR_CYAN='\033[0;36m'
COLOR_RESET='\033[0m'
# Source common library
if [ -f "$SCRIPT_DIR/lib/common.sh" ]; then
source "$SCRIPT_DIR/lib/common.sh"
else
echo "ERROR: Common library not found at $SCRIPT_DIR/lib/common.sh" >&2
exit 1
fi
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
info(){ printf '%b\n' "${COLOR_CYAN}$*${COLOR_RESET}"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }
# Default values (can be overridden by environment or command line)
BACKUP_DIR="${BACKUP_DIR:-}"
ACCOUNT_IDS="${ACCOUNT_IDS:-}"
CHAR_GUIDS="${CHAR_GUIDS:-}"
MYSQL_PW="${MYSQL_ROOT_PASSWORD:-}"
MYSQL_CONTAINER="${MYSQL_CONTAINER:-ac-mysql}"
AUTH_DB="${AUTH_DB:-acore_auth}"
CHARACTERS_DB="${CHARACTERS_DB:-acore_characters}"
MYSQL_PW="azerothcore123"
BACKUP_DIR="/nfs/containers/ac-backup"
AUTH_DB="acore_auth"
CHARACTERS_DB="acore_characters"
# Show help message
show_help() {
cat << EOF
Fix item import for backup-merged characters
# Verify parameters
[[ -d "$BACKUP_DIR" ]] || fatal "Backup directory not found: $BACKUP_DIR"
Usage:
fix-item-import.sh [OPTIONS]
Options:
--backup-dir DIR Path to backup directory (required)
--account-ids IDS Comma-separated account IDs (e.g., "451,452")
--char-guids GUIDS Comma-separated character GUIDs (e.g., "4501,4502,4503")
--mysql-password PW MySQL root password (or use MYSQL_ROOT_PASSWORD env var)
--mysql-container NAME MySQL container name (default: ac-mysql)
--auth-db NAME Auth database name (default: acore_auth)
--characters-db NAME Characters database name (default: acore_characters)
-h, --help Show this help message
Environment Variables:
BACKUP_DIR Alternative to --backup-dir
ACCOUNT_IDS Alternative to --account-ids
CHAR_GUIDS Alternative to --char-guids
MYSQL_ROOT_PASSWORD Alternative to --mysql-password
MYSQL_CONTAINER Alternative to --mysql-container
AUTH_DB Alternative to --auth-db
CHARACTERS_DB Alternative to --characters-db
Example:
fix-item-import.sh \\
--backup-dir /path/to/backup \\
--account-ids "451,452" \\
--char-guids "4501,4502,4503" \\
--mysql-password "azerothcore123"
EOF
exit 0
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--backup-dir)
BACKUP_DIR="$2"
shift 2
;;
--account-ids)
ACCOUNT_IDS="$2"
shift 2
;;
--char-guids)
CHAR_GUIDS="$2"
shift 2
;;
--mysql-password)
MYSQL_PW="$2"
shift 2
;;
--mysql-container)
MYSQL_CONTAINER="$2"
shift 2
;;
--auth-db)
AUTH_DB="$2"
shift 2
;;
--characters-db)
CHARACTERS_DB="$2"
shift 2
;;
-h|--help)
show_help
;;
*)
fatal "Unknown option: $1\nUse --help for usage information"
;;
esac
done
# Validate required parameters
if [ -z "$BACKUP_DIR" ]; then
fatal "Backup directory not specified. Use --backup-dir or set BACKUP_DIR environment variable."
fi
if [ ! -d "$BACKUP_DIR" ]; then
fatal "Backup directory not found: $BACKUP_DIR"
fi
if [ -z "$ACCOUNT_IDS" ]; then
fatal "Account IDs not specified. Use --account-ids or set ACCOUNT_IDS environment variable."
fi
if [ -z "$CHAR_GUIDS" ]; then
fatal "Character GUIDs not specified. Use --char-guids or set CHAR_GUIDS environment variable."
fi
if [ -z "$MYSQL_PW" ]; then
fatal "MySQL password not specified. Use --mysql-password or set MYSQL_ROOT_PASSWORD environment variable."
fi
# Setup temp directory
TEMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TEMP_DIR"' EXIT
# MySQL connection helper
mysql_exec(){
# MySQL connection helpers (override common.sh defaults with script-specific values)
mysql_exec_local(){
local db="$1"
docker exec -i ac-mysql mysql -uroot -p"$MYSQL_PW" "$db" 2>/dev/null
docker exec -i "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" "$db" 2>/dev/null
}
mysql_query(){
mysql_query_local(){
local db="$1"
local query="$2"
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B "$db" -e "$query" 2>/dev/null
docker exec "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" -N -B "$db" -e "$query" 2>/dev/null
}
log "═══════════════════════════════════════════════════════════"
log " FIXING ITEM IMPORT FOR BACKUP-MERGED CHARACTERS"
log "═══════════════════════════════════════════════════════════"
# Find characters that were imported from the backup (accounts 451, 452)
# Find characters that were imported from the backup
log "Finding characters that need item restoration..."
IMPORTED_CHARS=$(mysql_query "$CHARACTERS_DB" "SELECT name, guid FROM characters WHERE account IN (451, 452);")
info "Looking for characters with account IDs: $ACCOUNT_IDS"
IMPORTED_CHARS=$(mysql_query_local "$CHARACTERS_DB" "SELECT name, guid FROM characters WHERE account IN ($ACCOUNT_IDS);")
if [[ -z "$IMPORTED_CHARS" ]]; then
fatal "No imported characters found (accounts 451, 452)"
fatal "No imported characters found with account IDs: $ACCOUNT_IDS"
fi
info "Found imported characters:"
@@ -60,7 +170,8 @@ echo "$IMPORTED_CHARS" | while read -r char_name char_guid; do
done
# Check current item count for these characters
CURRENT_ITEM_COUNT=$(mysql_query "$CHARACTERS_DB" "SELECT COUNT(*) FROM item_instance WHERE owner_guid IN (4501, 4502, 4503);")
info "Checking existing items for character GUIDs: $CHAR_GUIDS"
CURRENT_ITEM_COUNT=$(mysql_query_local "$CHARACTERS_DB" "SELECT COUNT(*) FROM item_instance WHERE owner_guid IN ($CHAR_GUIDS);")
info "Current items for imported characters: $CURRENT_ITEM_COUNT"
if [[ "$CURRENT_ITEM_COUNT" != "0" ]]; then
@@ -94,26 +205,26 @@ log "Creating staging database..."
STAGE_CHARS_DB="fix_stage_chars_$$"
# Drop any existing staging database
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -e "DROP DATABASE IF EXISTS $STAGE_CHARS_DB;" 2>/dev/null || true
docker exec "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" -e "DROP DATABASE IF EXISTS $STAGE_CHARS_DB;" 2>/dev/null || true
# Create staging database
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -e "CREATE DATABASE $STAGE_CHARS_DB;" 2>/dev/null
docker exec "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" -e "CREATE DATABASE $STAGE_CHARS_DB;" 2>/dev/null
# Cleanup staging database on exit
cleanup_staging(){
if [[ -n "${STAGE_CHARS_DB:-}" ]]; then
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -e "DROP DATABASE IF EXISTS $STAGE_CHARS_DB;" 2>/dev/null || true
docker exec "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" -e "DROP DATABASE IF EXISTS $STAGE_CHARS_DB;" 2>/dev/null || true
fi
}
trap 'cleanup_staging; rm -rf "$TEMP_DIR"' EXIT
# Load backup into staging database
info "Loading backup into staging database..."
sed "s/\`acore_characters\`/\`$STAGE_CHARS_DB\`/g; s/USE \`acore_characters\`;/USE \`$STAGE_CHARS_DB\`;/g" "$TEMP_DIR/characters.sql" | \
docker exec -i ac-mysql mysql -uroot -p"$MYSQL_PW" 2>/dev/null
sed "s/\`$CHARACTERS_DB\`/\`$STAGE_CHARS_DB\`/g; s/USE \`$CHARACTERS_DB\`;/USE \`$STAGE_CHARS_DB\`;/g" "$TEMP_DIR/characters.sql" | \
docker exec -i "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" 2>/dev/null
# Get current database state
CURRENT_MAX_ITEM_GUID=$(mysql_query "$CHARACTERS_DB" "SELECT COALESCE(MAX(guid), 0) FROM item_instance;")
CURRENT_MAX_ITEM_GUID=$(mysql_query_local "$CHARACTERS_DB" "SELECT COALESCE(MAX(guid), 0) FROM item_instance;")
ITEM_OFFSET=$((CURRENT_MAX_ITEM_GUID + 10000))
info "Current max item GUID: $CURRENT_MAX_ITEM_GUID"
@@ -121,22 +232,32 @@ info "Item GUID offset: +$ITEM_OFFSET"
# Create character mapping for the imported characters
log "Creating character mapping..."
mysql_exec "$STAGE_CHARS_DB" <<EOF
info "Building character GUID mapping from staging database..."
# Create mapping table dynamically based on imported characters
mysql_exec_local "$STAGE_CHARS_DB" <<EOF
CREATE TABLE character_guid_map (
old_guid INT UNSIGNED PRIMARY KEY,
new_guid INT UNSIGNED,
name VARCHAR(12)
);
EOF
# Populate mapping by matching character names from staging to current database
# This assumes character names are unique identifiers
mysql_exec_local "$STAGE_CHARS_DB" <<EOF
INSERT INTO character_guid_map (old_guid, new_guid, name)
VALUES
(1, 4501, 'Artimage'),
(2, 4502, 'Flombey'),
(3, 4503, 'Hammertime');
SELECT
s.guid as old_guid,
c.guid as new_guid,
c.name
FROM $STAGE_CHARS_DB.characters s
JOIN $CHARACTERS_DB.characters c ON s.name = c.name
WHERE c.account IN ($ACCOUNT_IDS);
EOF
# Create item GUID mapping
mysql_exec "$STAGE_CHARS_DB" <<EOF
mysql_exec_local "$STAGE_CHARS_DB" <<EOF
CREATE TABLE item_guid_map (
old_guid INT UNSIGNED PRIMARY KEY,
new_guid INT UNSIGNED,
@@ -153,7 +274,7 @@ INNER JOIN character_guid_map cm ON i.owner_guid = cm.old_guid;
EOF
# Check how many items will be imported
ITEMS_TO_IMPORT=$(mysql_query "$STAGE_CHARS_DB" "SELECT COUNT(*) FROM item_guid_map;")
ITEMS_TO_IMPORT=$(mysql_query_local "$STAGE_CHARS_DB" "SELECT COUNT(*) FROM item_guid_map;")
info "Items to import: $ITEMS_TO_IMPORT"
if [[ "$ITEMS_TO_IMPORT" == "0" ]]; then
@@ -195,7 +316,7 @@ EOSQL
)
ITEM_SQL_EXPANDED=$(echo "$ITEM_SQL" | sed "s/STAGE_CHARS_DB/$STAGE_CHARS_DB/g")
ITEM_RESULT=$(echo "$ITEM_SQL_EXPANDED" | docker exec -i ac-mysql mysql -uroot -p"$MYSQL_PW" "$CHARACTERS_DB" 2>&1)
ITEM_RESULT=$(echo "$ITEM_SQL_EXPANDED" | docker exec -i "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" "$CHARACTERS_DB" 2>&1)
if echo "$ITEM_RESULT" | grep -q "ERROR"; then
err "Item import failed:"
echo "$ITEM_RESULT" | grep "ERROR" >&2
@@ -217,7 +338,7 @@ EOSQL
)
INV_SQL_EXPANDED=$(echo "$INV_SQL" | sed "s/STAGE_CHARS_DB/$STAGE_CHARS_DB/g")
INV_RESULT=$(echo "$INV_SQL_EXPANDED" | docker exec -i ac-mysql mysql -uroot -p"$MYSQL_PW" "$CHARACTERS_DB" 2>&1)
INV_RESULT=$(echo "$INV_SQL_EXPANDED" | docker exec -i "$MYSQL_CONTAINER" mysql -uroot -p"$MYSQL_PW" "$CHARACTERS_DB" 2>&1)
if echo "$INV_RESULT" | grep -q "ERROR"; then
err "Inventory import failed:"
echo "$INV_RESULT" | grep "ERROR" >&2
@@ -225,8 +346,8 @@ if echo "$INV_RESULT" | grep -q "ERROR"; then
fi
# Report counts
ITEMS_IMPORTED=$(mysql_query "$CHARACTERS_DB" "SELECT COUNT(*) FROM item_instance WHERE owner_guid IN (4501, 4502, 4503);")
INV_IMPORTED=$(mysql_query "$CHARACTERS_DB" "SELECT COUNT(*) FROM character_inventory WHERE guid IN (4501, 4502, 4503);")
ITEMS_IMPORTED=$(mysql_query_local "$CHARACTERS_DB" "SELECT COUNT(*) FROM item_instance WHERE owner_guid IN ($CHAR_GUIDS);")
INV_IMPORTED=$(mysql_query_local "$CHARACTERS_DB" "SELECT COUNT(*) FROM character_inventory WHERE guid IN ($CHAR_GUIDS);")
info "Items imported: $ITEMS_IMPORTED"
info "Inventory slots imported: $INV_IMPORTED"

423
scripts/bash/lib/common.sh Normal file
View File

@@ -0,0 +1,423 @@
#!/bin/bash
#
# Common utilities library for AzerothCore RealmMaster scripts
# This library provides shared functions for environment variable reading,
# logging, error handling, and other common operations.
#
# Usage: source /path/to/scripts/bash/lib/common.sh
# Prevent multiple sourcing (makes the library safe to source from several
# scripts that also source each other)
if [ -n "${_COMMON_LIB_LOADED:-}" ]; then
return 0
fi
_COMMON_LIB_LOADED=1
# =============================================================================
# COLOR DEFINITIONS (Standardized across all scripts)
# =============================================================================
# ANSI escape sequences; print with printf '%b' so escapes are interpreted
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Legacy color names for backward compatibility with older scripts that
# reference the COLOR_* spellings
COLOR_BLUE="$BLUE"
COLOR_GREEN="$GREEN"
COLOR_YELLOW="$YELLOW"
COLOR_RED="$RED"
COLOR_CYAN="$CYAN"
COLOR_RESET="$NC"
# =============================================================================
# LOGGING FUNCTIONS (Standardized with emoji)
# =============================================================================
# ---------------------------------------------------------------------------
# Logging helpers. Each takes the message as "$*" and prints a single line.
# err writes to stderr; fatal writes to stderr and exits with status 1.
# ---------------------------------------------------------------------------

# Informational message (blue)
info() { printf '%b\n' "${BLUE} $*${NC}"; }

# Success message (green)
ok() { printf '%b\n' "${GREEN}$*${NC}"; }

# General message (green, no icon - for clean output)
log() { printf '%b\n' "${GREEN}$*${NC}"; }

# Warning message (yellow with warning icon)
warn() { printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }

# Error message on stderr; execution continues
err() { printf '%b\n' "${RED}$*${NC}" >&2; }

# Error message on stderr, then terminate with exit code 1
fatal() {
  err "$*"
  exit 1
}
# =============================================================================
# ENVIRONMENT VARIABLE READING
# =============================================================================
# Read environment variable from .env file with fallback to default
# Handles various quote styles, comments, and whitespace
#
# Usage:
# read_env KEY [DEFAULT_VALUE]
# value=$(read_env "MYSQL_PASSWORD" "default_password")
#
# Features:
# - Reads from file specified by $ENV_PATH (or $DEFAULT_ENV_PATH)
# - Strips leading/trailing whitespace
# - Removes inline comments (everything after #)
# - Handles double quotes, single quotes, and unquoted values
# - Returns default value if key not found
# - Returns value from environment variable if already set
#
read_env() {
local key="$1"
local default="${2:-}"
local value=""
# Check if variable is already set in environment (takes precedence)
if [ -n "${!key:-}" ]; then
echo "${!key}"
return 0
fi
# Determine which .env file to use
local env_file="${ENV_PATH:-${DEFAULT_ENV_PATH:-}}"
# Read from .env file if it exists
if [ -f "$env_file" ]; then
# Extract value using grep and cut, handling various formats
# tail -n1: the last assignment wins; tr -d '\r': tolerate CRLF files
value="$(grep -E "^${key}=" "$env_file" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
# Remove inline comments (everything after # that's not inside quotes)
# This is a simplified approach - doesn't handle quotes perfectly but works for most cases
# NOTE(review): a '#' inside a quoted value is stripped too - known limitation
value="$(echo "$value" | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
# Strip quotes if present
# (the ${value:1:-1} negative-length slice requires bash 4.2+)
if [[ "$value" == \"*\" && "$value" == *\" ]]; then
# Double quotes
value="${value:1:-1}"
elif [[ "$value" == \'*\' && "$value" == *\' ]]; then
# Single quotes
value="${value:1:-1}"
fi
fi
# Use default if still empty (also applies when the key is missing entirely)
if [ -z "${value:-}" ]; then
value="$default"
fi
printf '%s\n' "${value}"
}
# Read value from .env.template file (used during setup)
# This is similar to read_env but specifically for template files
#
# Usage:
# get_template_value KEY [TEMPLATE_FILE]
# value=$(get_template_value "MYSQL_PASSWORD")
#
# Look up KEY in a .env.template file and print its effective value.
# For entries written as KEY=${VAR:-default}, the default is returned.
#
# Arguments:
#   $1 - key name
#   $2 - template file (default: $TEMPLATE_FILE, $TEMPLATE_PATH or .env.template)
# Returns: 0 on success, 1 if the key is absent; exits via fatal() when the
#          template file itself is missing.
get_template_value() {
  local key="$1"
  local tpl="${2:-${TEMPLATE_FILE:-${TEMPLATE_PATH:-.env.template}}}"

  [ -f "$tpl" ] || fatal "Template file not found: $tpl"

  local line
  line=$(grep "^${key}=" "$tpl" 2>/dev/null | head -1)
  if [ -z "$line" ]; then
    err "Key '$key' not found in template: $tpl"
    return 1
  fi

  # Everything after the first '='; strip one pair of surrounding quotes
  local val="${line#*=}"
  val=$(printf '%s\n' "$val" | sed 's/^"\(.*\)"$/\1/')

  # ${VAR:-default} syntax: return the embedded default
  if [[ "$val" =~ ^\$\{[^}]*:-([^}]*)\}$ ]]; then
    val="${BASH_REMATCH[1]}"
  fi
  echo "$val"
}
# Update or add environment variable in .env file
# Creates file if it doesn't exist
#
# Usage:
# update_env_value KEY VALUE [ENV_FILE]
# update_env_value "MYSQL_PASSWORD" "new_password"
#
# Set KEY=VALUE in an .env file: updates the line in place when the key
# already exists, appends otherwise, and creates the file when missing.
#
# Arguments:
#   $1 - key, $2 - value, $3 - env file (default: $ENV_PATH/$DEFAULT_ENV_PATH/.env)
update_env_value() {
  local key="$1" value="$2"
  local env_file="${3:-${ENV_PATH:-${DEFAULT_ENV_PATH:-.env}}}"

  # Nothing to do when no target file is configured
  [ -n "$env_file" ] || return 0

  # Missing file: create it with just this assignment
  if [ ! -f "$env_file" ]; then
    printf '%s=%s\n' "$key" "$value" >> "$env_file"
    return 0
  fi

  # Key absent: append (with a separating blank line)
  if ! grep -q "^${key}=" "$env_file"; then
    printf '\n%s=%s\n' "$key" "$value" >> "$env_file"
    return 0
  fi

  # Key present: rewrite in place; BSD (macOS) sed needs -i ''
  if [[ "$OSTYPE" == "darwin"* ]]; then
    sed -i '' "s|^${key}=.*|${key}=${value}|" "$env_file"
  else
    sed -i "s|^${key}=.*|${key}=${value}|" "$env_file"
  fi
}
# =============================================================================
# VALIDATION & REQUIREMENTS
# =============================================================================
# Require command to be available in PATH, exit with error if not found
#
# Usage:
# require_cmd docker
# require_cmd python3 jq git
#
require_cmd() {
  # 'local' keeps the loop variable from leaking into (or clobbering a
  # same-named variable in) the calling script - the original left 'cmd'
  # global after every call.
  local cmd
  for cmd in "$@"; do
    if ! command -v "$cmd" >/dev/null 2>&1; then
      fatal "Missing required command: $cmd"
    fi
  done
}
# Check if command exists (returns 0 if exists, 1 if not)
#
# Usage:
# if has_cmd docker; then
# echo "Docker is available"
# fi
#
# True (0) when the named command is resolvable in PATH, 1 otherwise.
has_cmd() { command -v "$1" >/dev/null 2>&1; }
# =============================================================================
# MYSQL/DATABASE HELPERS
# =============================================================================
# Execute MySQL command in Docker container
# Reads MYSQL_PW and container name from environment
#
# Usage:
# mysql_exec DATABASE_NAME < script.sql
# echo "SELECT 1;" | mysql_exec acore_auth
#
# Stream stdin into the mysql client inside the Docker container.
# Container and root password come from MYSQL_CONTAINER and
# MYSQL_ROOT_PASSWORD / MYSQL_PW (falling back to 'azerothcore').
mysql_exec() {
  local db="$1"
  local root_pw="${MYSQL_ROOT_PASSWORD:-${MYSQL_PW:-azerothcore}}"
  local mysql_ct="${MYSQL_CONTAINER:-ac-mysql}"
  docker exec -i "$mysql_ct" mysql -uroot -p"$root_pw" "$db"
}
# Execute MySQL query and return result
# Outputs in non-tabular format suitable for parsing
#
# Usage:
# count=$(mysql_query "acore_characters" "SELECT COUNT(*) FROM characters")
#
# Run a single query in the container and print the raw (-N -B) result,
# suitable for command-substitution parsing. mysql's stderr is discarded.
mysql_query() {
  local db="$1" query="$2"
  local root_pw="${MYSQL_ROOT_PASSWORD:-${MYSQL_PW:-azerothcore}}"
  local mysql_ct="${MYSQL_CONTAINER:-ac-mysql}"
  docker exec "$mysql_ct" mysql -uroot -p"$root_pw" -N -B "$db" -e "$query" 2>/dev/null
}
# Check if MySQL container is healthy and accepting connections
#
# Usage:
# if mysql_is_ready; then
# echo "MySQL is ready"
# fi
#
# Return 0 when the containerized MySQL answers mysqladmin ping, 1 otherwise.
mysql_is_ready() {
  local mysql_ct="${MYSQL_CONTAINER:-ac-mysql}"
  local root_pw="${MYSQL_ROOT_PASSWORD:-${MYSQL_PW:-azerothcore}}"
  docker exec "$mysql_ct" mysqladmin ping -uroot -p"$root_pw" >/dev/null 2>&1
}
# Wait for MySQL to be ready with timeout
#
# Usage:
# mysql_wait_ready 60 # Wait up to 60 seconds
#
# Wait for MySQL to accept connections, polling every 2 seconds.
#
# Arguments:
#   $1 - timeout in seconds (default 30)
# Returns:
#   0 once MySQL answers a ping, 1 if it never does within the timeout.
#
# Fixes over the previous version: always performs at least one readiness
# check (the old loop never checked at all when timeout <= 0), and does not
# sleep an extra 2s after the final failed check.
mysql_wait_ready() {
  local timeout="${1:-30}"
  local elapsed=0
  info "Waiting for MySQL to be ready..."
  while :; do
    if mysql_is_ready; then
      ok "MySQL is ready"
      return 0
    fi
    # Stop once the time budget is exhausted - no pointless trailing sleep
    if [ "$elapsed" -ge "$timeout" ]; then
      break
    fi
    sleep 2
    elapsed=$((elapsed + 2))
  done
  err "MySQL did not become ready within ${timeout}s"
  return 1
}
# =============================================================================
# FILE & DIRECTORY HELPERS
# =============================================================================
# Ensure directory exists and is writable
# Creates directory if needed and sets permissions
#
# Usage:
# ensure_writable_dir /path/to/directory
#
# Make sure a directory exists and is writable, creating it and adding
# the user write bit as needed. Returns non-zero (after err) on failure.
ensure_writable_dir() {
  local target="$1"
  if [ ! -d "$target" ] && ! mkdir -p "$target" 2>/dev/null; then
    err "Failed to create directory: $target"
    return 1
  fi
  if [ ! -w "$target" ] && ! chmod u+w "$target" 2>/dev/null; then
    err "Directory not writable: $target"
    return 1
  fi
  return 0
}
# Create backup of file before modification
#
# Usage:
# backup_file /path/to/important.conf
# # Creates /path/to/important.conf.backup.TIMESTAMP
#
# Copy FILE to FILE.backup.<timestamp> before it gets modified.
# A missing source file is only a warning (returns 0); a failed copy
# is an error (returns 1).
backup_file() {
  local src="$1"
  if [ ! -f "$src" ]; then
    warn "File does not exist, skipping backup: $src"
    return 0
  fi
  local dest
  dest="${src}.backup.$(date +%Y%m%d_%H%M%S)"
  if ! cp "$src" "$dest"; then
    err "Failed to create backup: $dest"
    return 1
  fi
  info "Created backup: $dest"
  return 0
}
# =============================================================================
# GIT HELPERS
# =============================================================================
# Configure git identity if not already set
#
# Usage:
# setup_git_config [USERNAME] [EMAIL]
#
# Configure a global git identity, but only when none is set yet.
# Defaults come from GIT_USERNAME / GIT_EMAIL or built-in fallbacks.
setup_git_config() {
  local user="${1:-${GIT_USERNAME:-AzerothCore RealmMaster}}"
  local email="${2:-${GIT_EMAIL:-noreply@azerothcore.org}}"
  # An existing user.name means identity is already configured - leave it
  git config --global user.name >/dev/null 2>&1 && return 0
  info "Configuring git identity: $user <$email>"
  git config --global user.name "$user" || true
  git config --global user.email "$email" || true
}
# =============================================================================
# ERROR HANDLING UTILITIES
# =============================================================================
# Retry command with exponential backoff
#
# Usage:
# retry 5 docker pull myimage:latest
# retry 3 2 mysql_query "acore_auth" "SELECT 1" # 3 retries with 2s initial delay
#
# Retry a command with exponential backoff.
#
# Usage:
#   retry 5 docker pull myimage:latest
#   retry 3 2 some_cmd args   # 3 attempts, 2s initial delay
#
# Arguments:
#   $1 - maximum number of attempts
#   $2 - optional initial delay in seconds (default 1); only consumed when
#        it is purely numeric, otherwise treated as part of the command
# Returns: 0 on the first success; otherwise the failing command's last
#          exit status.
#
# Fixes over the previous version: the old code read `exit_code=$?` after
# `if "$@"; then return 0; fi` - at that point $? is the exit status of the
# *if statement* (0 when no else branch ran), so after exhausting all
# attempts retry returned 0 and silently swallowed the failure. It also ran
# a bare `shift` that errors out when no command follows the delay.
retry() {
  local max_attempts="$1"
  shift
  local delay=1
  # Optional numeric second argument is the initial delay
  if [[ "${1:-}" =~ ^[0-9]+$ ]]; then
    delay="$1"
    shift
  fi
  local attempt=1
  local rc=0
  while [ "$attempt" -le "$max_attempts" ]; do
    if "$@"; then
      return 0
    else
      # Capture the command's own failure status, not the if-statement's
      rc=$?
    fi
    if [ "$attempt" -lt "$max_attempts" ]; then
      warn "Command failed (attempt $attempt/$max_attempts), retrying in ${delay}s..."
      sleep "$delay"
      delay=$((delay * 2)) # Exponential backoff
    fi
    attempt=$((attempt + 1))
  done
  err "Command failed after $max_attempts attempts"
  return "$rc"
}
# =============================================================================
# INITIALIZATION
# =============================================================================
# Library loaded successfully
# Scripts can check for $_COMMON_LIB_LOADED to verify library is loaded

View File

@@ -7,52 +7,36 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Source common library for shared functions
if [ -f "$SCRIPT_DIR/lib/common.sh" ]; then
source "$SCRIPT_DIR/lib/common.sh"
else
echo "ERROR: Common library not found at $SCRIPT_DIR/lib/common.sh" >&2
exit 1
fi
# Source project name helper
source "$PROJECT_ROOT/scripts/bash/project_name.sh"
# Module-specific configuration
MODULE_HELPER="$PROJECT_ROOT/scripts/python/modules.py"
DEFAULT_ENV_PATH="$PROJECT_ROOT/.env"
ENV_PATH="${MODULES_ENV_PATH:-$DEFAULT_ENV_PATH}"
TEMPLATE_FILE="$PROJECT_ROOT/.env.template"
source "$PROJECT_ROOT/scripts/bash/project_name.sh"
# Default project name (read from .env or template)
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_PATH" "$TEMPLATE_FILE")"
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
# Module-specific state
PLAYERBOTS_DB_UPDATE_LOGGED=0
info(){ printf '%b\n' "${BLUE} $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}$*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}$*${NC}"; exit 1; }
# Declare module metadata arrays globally at script level
declare -A MODULE_NAME MODULE_REPO MODULE_REF MODULE_TYPE MODULE_ENABLED MODULE_NEEDS_BUILD MODULE_BLOCKED MODULE_POST_INSTALL MODULE_REQUIRES MODULE_CONFIG_CLEANUP MODULE_NOTES MODULE_STATUS MODULE_BLOCK_REASON
declare -a MODULE_KEYS
read_env_value(){
local key="$1" default="${2:-}" value="${!key:-}"
if [ -n "$value" ]; then
echo "$value"
return
fi
if [ -f "$ENV_PATH" ]; then
value="$(grep -E "^${key}=" "$ENV_PATH" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
value="$(echo "$value" | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
if [[ "$value" == \"*\" && "$value" == *\" ]]; then
value="${value:1:-1}"
elif [[ "$value" == \'*\' && "$value" == *\' ]]; then
value="${value:1:-1}"
fi
fi
if [ -z "${value:-}" ]; then
value="$default"
fi
printf '%s\n' "${value}"
}
ensure_python(){
if ! command -v python3 >/dev/null 2>&1; then
err "python3 is required but not installed in PATH"
fi
}
# Ensure Python is available
require_cmd python3
resolve_manifest_path(){
if [ -n "${MODULES_MANIFEST_PATH:-}" ] && [ -f "${MODULES_MANIFEST_PATH}" ]; then
@@ -567,10 +551,10 @@ track_module_state(){
}
main(){
ensure_python
# Python is already checked at script start via require_cmd
if [ "${MODULES_LOCAL_RUN:-0}" != "1" ]; then
cd /modules || err "Modules directory /modules not found"
cd /modules || fatal "Modules directory /modules not found"
fi
MODULES_ROOT="$(pwd)"

View File

@@ -39,10 +39,6 @@ ensure_host_writable(){
fi
}
seed_sql_ledger_if_needed(){
: # No-op; ledger removed
}
sync_local_staging(){
local src_root="$LOCAL_STORAGE_PATH"
local dest_root="$STORAGE_PATH"

View File

@@ -31,54 +31,127 @@ def parse_bool(value: str) -> bool:
def load_env_file(env_path: Path) -> Dict[str, str]:
    """
    Load environment variables from a .env file.

    Args:
        env_path: Path to the .env file.

    Returns:
        Dictionary of environment variable key-value pairs.

    Note:
        Returns an empty dict if the file doesn't exist or can't be read
        (not an error). Handles quotes, comments, and 'export' statements.
        Malformed lines are reported to stderr and skipped.
    """
    if not env_path.exists():
        return {}
    env: Dict[str, str] = {}
    try:
        content = env_path.read_text(encoding="utf-8")
    except Exception as e:
        print(f"Warning: Failed to read environment file {env_path}: {e}", file=sys.stderr)
        return {}
    for line_num, raw_line in enumerate(content.splitlines(), start=1):
        line = raw_line.strip()
        # Skip empty lines and comments
        if not line or line.startswith("#"):
            continue
        # Remove 'export' prefix if present
        if line.startswith("export "):
            line = line[len("export ") :].strip()
        # Skip lines without '='
        if "=" not in line:
            continue
        try:
            key, value = line.split("=", 1)
            key = key.strip()
            value = value.strip()
            # Strip one matching pair of surrounding quotes
            if value.startswith('"') and value.endswith('"'):
                value = value[1:-1]
            elif value.startswith("'") and value.endswith("'"):
                value = value[1:-1]
            env[key] = value
        except Exception as e:
            # Report the offending line but keep processing the rest of the file.
            print(
                f"Warning: Failed to parse line {line_num} in {env_path}: {raw_line}\n"
                f"  Error: {e}",
                file=sys.stderr
            )
            continue
    return env
def load_manifest(manifest_path: Path) -> List[Dict[str, object]]:
    """
    Load and validate the module manifest from a JSON file.

    Args:
        manifest_path: Path to module-manifest.json file.

    Returns:
        List of validated module dictionaries (each has 'key', 'name', 'repo').

    Raises:
        FileNotFoundError: If the manifest file doesn't exist.
        ValueError: If the manifest is not valid JSON or its structure is invalid
            (missing/duplicate keys, non-object entries, missing fields).
    """
    if not manifest_path.exists():
        raise FileNotFoundError(f"Manifest file not found: {manifest_path}")
    try:
        with manifest_path.open("r", encoding="utf-8") as fh:
            manifest = json.load(fh)
    except json.JSONDecodeError as e:
        # Surface the exact location of the syntax error for easier fixing.
        raise ValueError(
            f"Invalid JSON in manifest file {manifest_path}:\n"
            f"  Line {e.lineno}, Column {e.colno}: {e.msg}"
        ) from e
    except Exception as e:
        raise ValueError(f"Failed to read manifest file {manifest_path}: {e}") from e
    # Guard against a valid-JSON but non-object top level (e.g. a bare array),
    # which would otherwise raise an opaque AttributeError below.
    if not isinstance(manifest, dict):
        raise ValueError("Manifest must be a JSON object with a top-level 'modules' array")
    modules = manifest.get("modules")
    if not isinstance(modules, list):
        raise ValueError("Manifest must define a top-level 'modules' array")
    validated: List[Dict[str, object]] = []
    seen_keys: set[str] = set()
    for idx, entry in enumerate(modules):
        if not isinstance(entry, dict):
            raise ValueError(f"Manifest entry at index {idx} must be an object")
        key = entry.get("key")
        name = entry.get("name")
        repo = entry.get("repo")
        if not key or not isinstance(key, str):
            raise ValueError(f"Manifest entry at index {idx} missing 'key'")
        if key in seen_keys:
            raise ValueError(f"Duplicate manifest key detected: '{key}' (at index {idx})")
        seen_keys.add(key)
        if not name or not isinstance(name, str):
            raise ValueError(f"Manifest entry '{key}' missing 'name' field")
        if not repo or not isinstance(repo, str):
            raise ValueError(f"Manifest entry '{key}' missing 'repo' field")
        validated.append(entry)
    return validated

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env python3
"""Generate a categorized list of GitHub modules missing from the manifest.
The script reuses the discovery logic from ``update_module_manifest.py`` to
fetch repositories by topic, filters out entries already tracked in
``config/module-manifest.json`` and writes the remainder (including type,
category, and inferred dependency hints) to a JSON file.
"""
from __future__ import annotations
import argparse
import json
import os
import sys
from pathlib import Path
from typing import Dict, Iterable, List, Sequence, Tuple
from update_module_manifest import ( # type: ignore
CATEGORY_BY_TYPE,
DEFAULT_TOPICS,
GitHubClient,
collect_repositories,
load_manifest,
normalize_repo_url,
repo_name_to_key,
)
# heuristics used to surface potential dependency hints
DEPENDENCY_KEYWORDS: Tuple[Tuple[str, str], ...] = (
    ("playerbot", "MODULE_PLAYERBOTS"),
    ("ah-bot", "MODULE_PLAYERBOTS"),
    ("eluna", "MODULE_ELUNA"),
)

# keywords that help categorize entries that should probably stay hidden by default
SUPPRESSION_KEYWORDS: Tuple[Tuple[str, str], ...] = (
    ("virtual machine", "vm"),
    (" vm ", "vm"),
    (" docker", "docker"),
    ("container", "docker"),
    ("vagrant", "vagrant"),
    ("ansible", "automation"),
    ("terraform", "automation"),
    ("client", "client-distribution"),
    ("launcher", "client-distribution"),
)


def implied_dependencies(module_type: str, text: str) -> List[str]:
    """Infer module dependency hints from the module type and free-form text.

    Lua modules always imply MODULE_ELUNA; additional hints come from
    case-insensitive keyword matches in DEPENDENCY_KEYWORDS. The result is
    ordered and de-duplicated.
    """
    hints: List[str] = ["MODULE_ELUNA"] if module_type == "lua" else []
    haystack = text.lower()
    for needle, dependency in DEPENDENCY_KEYWORDS:
        if needle in haystack and dependency not in hints:
            hints.append(dependency)
    return hints


def suppression_flags(category: str, text: str) -> List[str]:
    """Collect flags suggesting an entry should stay hidden by default.

    'tooling'-category entries are always flagged; further flags come from
    case-insensitive keyword matches in SUPPRESSION_KEYWORDS. The result is
    ordered and de-duplicated.
    """
    flags: List[str] = ["tooling"] if category == "tooling" else []
    haystack = text.lower()
    for needle, flag in SUPPRESSION_KEYWORDS:
        if needle in haystack and flag not in flags:
            flags.append(flag)
    return flags
def make_missing_entries(
    manifest_modules: List[dict],
    repos: Iterable,
) -> List[dict]:
    """Build report entries for discovered repositories absent from the manifest.

    Args:
        manifest_modules: Module dicts already tracked in the manifest.
        repos: Discovered repository records; each is expected to expose
            ``.data`` (the GitHub repo payload dict), ``.module_type`` and
            ``.topic_expr`` — presumably the record type produced by
            ``collect_repositories`` in update_module_manifest (not visible
            here; confirm against that module).

    Returns:
        Report entries sorted by ``key``, one per repository not matched to an
        existing manifest module by either normalized repo URL or derived key.
    """
    # Two lookup indexes over the manifest: by module key, and by normalized repo URL.
    by_key: Dict[str, dict] = {module.get("key"): module for module in manifest_modules if module.get("key")}
    by_repo: Dict[str, dict] = {
        normalize_repo_url(str(module.get("repo", ""))): module
        for module in manifest_modules
        if module.get("repo")
    }
    missing: List[dict] = []
    for record in repos:
        repo = record.data
        repo_url = normalize_repo_url(repo.get("clone_url") or repo.get("html_url") or "")
        # Match by URL first, then fall back to the key derived from the repo name.
        existing = by_repo.get(repo_url)
        key = repo_name_to_key(repo.get("name", ""))
        if not existing:
            existing = by_key.get(key)
        if existing:
            continue  # already tracked in the manifest
        module_type = record.module_type
        category = CATEGORY_BY_TYPE.get(module_type, "uncategorized")
        description = repo.get("description") or ""
        # Concatenate name, description and topics for keyword heuristics below.
        combined_text = " ".join(
            filter(
                None,
                [
                    repo.get("full_name"),
                    description,
                    " ".join(repo.get("topics") or []),
                ],
            )
        )
        entry = {
            "key": key,
            "repo_name": repo.get("full_name"),
            "topic": record.topic_expr,
            "repo_url": repo.get("html_url") or repo.get("clone_url"),
            "description": description,
            "topics": repo.get("topics") or [],
            "type": module_type,
            "category": category,
            "implied_dependencies": implied_dependencies(module_type, combined_text),
            "flags": suppression_flags(category, combined_text),
        }
        missing.append(entry)
    # Deterministic output order for stable diffs of the generated report.
    missing.sort(key=lambda item: item["key"])
    return missing
def main(argv: Sequence[str]) -> int:
    """Entry point: discover repos by topic, diff against the manifest, write report.

    Returns 0 on success (exceptions from helpers propagate to the caller).
    """
    options = parse_args(argv)
    topic_exprs = options.topics or DEFAULT_TOPICS
    # Token precedence: CLI flag, then GITHUB_TOKEN, then GITHUB_API_TOKEN.
    api_token = (
        options.token
        or os.environ.get("GITHUB_TOKEN")
        or os.environ.get("GITHUB_API_TOKEN")
    )
    if not api_token:
        print(
            "Warning: no GitHub token provided, falling back to anonymous rate limit",
            file=sys.stderr,
        )
    client = GitHubClient(api_token, verbose=options.log)
    manifest = load_manifest(options.manifest)
    discovered = collect_repositories(client, topic_exprs, options.max_pages)
    missing = make_missing_entries(manifest.get("modules", []), discovered)
    report_path = Path(options.output)
    report_path.write_text(json.dumps(missing, indent=2))
    print(f"Wrote {len(missing)} entries to {report_path}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main(sys.argv[1:]))