feat: add db-guard watchdog, harden dbimport.conf handling, and add storage permission repair

This commit is contained in:
uprightbass360
2025-11-20 02:11:24 -05:00
parent 9deff01441
commit 5f7bdcb7e7
25 changed files with 1502 additions and 777 deletions

View File

@@ -100,7 +100,14 @@ else
# Skip core config files (already handled)
case "$filename" in
authserver.conf|worldserver.conf|dbimport.conf)
authserver.conf|worldserver.conf)
continue
;;
dbimport.conf)
if [ ! -f "$conffile" ] || grep -q "Updates.ExceptionShutdownDelay" "$conffile"; then
echo " 📝 Creating/refreshing $filename from $(basename "$file")"
cp "$file" "$conffile"
fi
continue
;;
esac
@@ -140,6 +147,14 @@ else
sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/worldserver.conf || true
sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/worldserver.conf || true
sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/worldserver.conf || true
if [ -f "/azerothcore/config/dbimport.conf" ]; then
sed -i "s|^LoginDatabaseInfo *=.*|LoginDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^WorldDatabaseInfo *=.*|WorldDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^CharacterDatabaseInfo *=.*|CharacterDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^PlayerbotsDatabaseInfo *=.*|PlayerbotsDatabaseInfo = \"${MYSQL_HOST};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_PLAYERBOTS_NAME}\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^MySQLExecutable *=.*|MySQLExecutable = \"/usr/bin/mysql\"|" /azerothcore/config/dbimport.conf || true
sed -i "s|^TempDir *=.*|TempDir = \"/azerothcore/env/dist/temp\"|" /azerothcore/config/dbimport.conf || true
fi
update_playerbots_conf /azerothcore/config/modules/playerbots.conf
update_playerbots_conf /azerothcore/config/modules/playerbots.conf.dist

View File

@@ -165,14 +165,6 @@ EOF
EOF
fi
# Capture module SQL ledger snapshot if available
local ledger_src="/modules-meta/module-sql-ledger.txt"
if [ -f "$ledger_src" ]; then
cp "$ledger_src" "$target_dir/module-sql-ledger.txt"
else
log " Module SQL ledger not found (modules/meta missing); snapshot not included in this backup"
fi
# Create completion marker to indicate backup is finished
touch "$target_dir/.backup_complete"

178
scripts/bash/db-guard.sh Normal file
View File

@@ -0,0 +1,178 @@
#!/bin/bash
# Continuously ensure the MySQL runtime tmpfs contains the restored data.
# If the runtime tables are missing (for example after a host reboot),
# automatically rerun db-import-conditional to hydrate from backups.
set -euo pipefail

# Logging helpers: informational output on stdout, warnings/errors on stderr.
log() { printf '🛡️ [db-guard] %s\n' "$*"; }
warn() { printf '⚠️ [db-guard] %s\n' "$*" >&2; }
err() { printf '❌ [db-guard] %s\n' "$*" >&2; }
# Connection settings and guard tunables (all overridable via environment).
MYSQL_HOST="${CONTAINER_MYSQL:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_PASS="${MYSQL_ROOT_PASSWORD:-root}"
IMPORT_SCRIPT="${DB_GUARD_IMPORT_SCRIPT:-/tmp/db-import-conditional.sh}"
RECHECK_SECONDS="${DB_GUARD_RECHECK_SECONDS:-120}"
RETRY_SECONDS="${DB_GUARD_RETRY_SECONDS:-10}"
WAIT_ATTEMPTS="${DB_GUARD_WAIT_ATTEMPTS:-60}"
VERIFY_INTERVAL="${DB_GUARD_VERIFY_INTERVAL_SECONDS:-0}"
VERIFY_FILE="${DB_GUARD_VERIFY_FILE:-/tmp/db-guard.last-verify}"
HEALTH_FILE="${DB_GUARD_HEALTH_FILE:-/tmp/db-guard.ready}"
STATUS_FILE="${DB_GUARD_STATUS_FILE:-/tmp/db-guard.status}"
ERROR_FILE="${DB_GUARD_ERROR_FILE:-/tmp/db-guard.error}"
MODULE_SQL_HOST_PATH="${MODULE_SQL_HOST_PATH:-/modules-sql}"

# Build the list of schemas to watch: the standard DB_*_NAME variables first,
# then any comma-separated extras from DB_GUARD_EXTRA_DATABASES.
declare -a DB_SCHEMAS=()
for schema_var in DB_AUTH_NAME DB_WORLD_NAME DB_CHARACTERS_NAME DB_PLAYERBOTS_NAME; do
  schema_name="${!schema_var:-}"
  [ -z "$schema_name" ] || DB_SCHEMAS+=("$schema_name")
done
if [ -n "${DB_GUARD_EXTRA_DATABASES:-}" ]; then
  IFS=',' read -ra extra <<< "${DB_GUARD_EXTRA_DATABASES}"
  for db in "${extra[@]}"; do
    trimmed="${db// }"
    [ -z "$trimmed" ] || DB_SCHEMAS+=("$trimmed")
  done
fi
# Fall back to the stock AzerothCore schema names when nothing is configured.
[ "${#DB_SCHEMAS[@]}" -gt 0 ] || DB_SCHEMAS=(acore_auth acore_world acore_characters)

# Render the schema list as a quoted, comma-separated fragment for SQL IN (...).
SCHEMA_LIST_SQL="$(printf "'%s'," "${DB_SCHEMAS[@]}")"
SCHEMA_LIST_SQL="${SCHEMA_LIST_SQL%,}"
mark_ready(){
  # Record a healthy state: write timestamp+message to STATUS_FILE, clear the
  # error file, and (re)create the health marker consumed by healthchecks.
  local msg="$*"
  mkdir -p "$(dirname "$HEALTH_FILE")" 2>/dev/null || true
  printf '%s\t%s\n' "$(date -Iseconds)" "$msg" > "$STATUS_FILE"
  : > "$ERROR_FILE"
  printf '%s\n' "$msg" > "$HEALTH_FILE"
}
mark_unhealthy(){
  # Record a failure: write timestamp+message to ERROR_FILE, echo it on
  # stderr, and drop the health marker so healthchecks start failing.
  local line
  line="$(printf '%s\t%s' "$(date -Iseconds)" "$*")"
  printf '%s\n' "$line" > "$ERROR_FILE"
  printf '%s\n' "$line" >&2
  rm -f "$HEALTH_FILE" 2>/dev/null || true
}
wait_for_mysql(){
  # Poll the server up to WAIT_ATTEMPTS times (RETRY_SECONDS apart) until a
  # trivial query succeeds. Returns 0 once reachable, 1 when attempts run out.
  local i
  for ((i = WAIT_ATTEMPTS; i > 0; i--)); do
    if MYSQL_PWD="$MYSQL_PASS" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -e "SELECT 1" >/dev/null 2>&1; then
      return 0
    fi
    sleep "$RETRY_SECONDS"
  done
  return 1
}
table_count(){
  # Print the number of tables present across every tracked schema.
  MYSQL_PWD="$MYSQL_PASS" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -N -B \
    -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema IN (${SCHEMA_LIST_SQL});"
}
rehydrate(){
  # Re-run the conditional import script to restore databases from backups.
  # Fails fast when the script is missing or not executable.
  if [ -x "$IMPORT_SCRIPT" ]; then
    "$IMPORT_SCRIPT"
  else
    err "Import script not found at ${IMPORT_SCRIPT}"
    return 1
  fi
}
ensure_dbimport_conf(){
  # Make sure a dbimport.conf exists next to the dist binaries by seeding it
  # from the packaged .dist template, and create the temp dir dbimport needs.
  # The dist root is overridable via AC_DIST_ROOT (defaults to the stock
  # container layout) so the function is testable and reusable elsewhere.
  local dist_root="${AC_DIST_ROOT:-/azerothcore/env/dist}"
  local conf="${dist_root}/etc/dbimport.conf"
  local dist="${conf}.dist"
  # Never clobber an existing, possibly hand-edited config.
  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
    cp "$dist" "$conf"
  fi
  mkdir -p "${dist_root}/temp"
}
sync_host_stage_files(){
  # Mirror module SQL staged on the host (MODULE_SQL_HOST_PATH) into the core
  # updates tree so dbimport picks the files up. Stale MODULE_*.sql files are
  # removed first so deleted modules do not leave orphans behind.
  # The destination root is overridable via MODULE_SQL_DEST_ROOT for testing.
  local host_root="${MODULE_SQL_HOST_PATH}"
  local dest_root="${MODULE_SQL_DEST_ROOT:-/azerothcore/data/sql/updates}"
  [ -d "$host_root" ] || return 0
  local dir src dest
  for dir in db_world db_characters db_auth db_playerbots; do
    src="$host_root/$dir"
    dest="$dest_root/$dir"
    mkdir -p "$dest"
    # Best-effort cleanup/copy: a missing glob match is not an error here.
    rm -f "$dest"/MODULE_*.sql >/dev/null 2>&1 || true
    if [ -d "$src" ]; then
      cp -a "$src"/MODULE_*.sql "$dest"/ >/dev/null 2>&1 || true
    fi
  done
}
dbimport_verify(){
  # Run the dbimport binary once to verify/apply pending database updates.
  # Returns non-zero when the binary is missing or reports a failure.
  local bin_dir="/azerothcore/env/dist/bin"
  ensure_dbimport_conf
  sync_host_stage_files
  if [ ! -x "${bin_dir}/dbimport" ]; then
    warn "dbimport binary not found at ${bin_dir}/dbimport"
    return 1
  fi
  log "Running dbimport verification sweep..."
  if ! (cd "$bin_dir" && ./dbimport); then
    warn "dbimport verification reported issues - review dbimport logs"
    return 1
  fi
  log "dbimport verification finished successfully"
  return 0
}
maybe_run_verification(){
  # Periodically run dbimport_verify, throttled by VERIFY_INTERVAL:
  #   < 0 : verification disabled
  #   = 0 : verify once (skip whenever a previous run stamped VERIFY_FILE)
  #   > 0 : verify at most once every VERIFY_INTERVAL seconds
  [ "${VERIFY_INTERVAL}" -ge 0 ] || return 0
  local now
  now="$(date +%s)"
  if [ -f "$VERIFY_FILE" ]; then
    local last_run
    last_run="$(cat "$VERIFY_FILE" 2>/dev/null || echo 0)"
    # A stamp already exists: in run-once mode we are done; otherwise skip
    # until a full interval has elapsed since the last successful sweep.
    [ "$VERIFY_INTERVAL" -ne 0 ] || return 0
    [ $((now - last_run)) -ge "${VERIFY_INTERVAL}" ] || return 0
  fi
  if dbimport_verify; then
    echo "$now" > "$VERIFY_FILE"
  else
    warn "dbimport verification failed; will retry in ${VERIFY_INTERVAL}s"
  fi
}
# Main guard loop: wait for MySQL, count tables across the tracked schemas,
# and rehydrate from backups whenever the runtime data set is empty/missing.
log "Watching MySQL (${MYSQL_HOST}:${MYSQL_PORT}) for ${#DB_SCHEMAS[@]} schemas: ${DB_SCHEMAS[*]}"
while true; do
  # Block until the server answers; flag unhealthy if it never does.
  if ! wait_for_mysql; then
    mark_unhealthy "MySQL is unreachable after ${WAIT_ATTEMPTS} attempts"
    sleep "$RETRY_SECONDS"
    continue
  fi
  # Empty output means the COUNT query itself failed; fall through to the
  # "no tables" path in that case rather than trusting a bad number.
  count="$(table_count 2>/dev/null || echo "")"
  if [ -n "$count" ]; then
    if [ "$count" -gt 0 ] 2>/dev/null; then
      # Data present: report healthy, optionally run a dbimport sweep,
      # then sleep until the next routine re-check.
      mark_ready "Detected ${count} tables across tracked schemas"
      maybe_run_verification
      sleep "$RECHECK_SECONDS"
      continue
    fi
  fi
  # No tables found: attempt to restore from backups and re-check shortly.
  warn "No tables detected across ${DB_SCHEMAS[*]}; running rehydrate workflow..."
  if rehydrate; then
    log "Rehydrate complete - rechecking tables"
    sleep 5
    continue
  fi
  mark_unhealthy "Rehydrate workflow failed - retrying in ${RETRY_SECONDS}s"
  sleep "$RETRY_SECONDS"
done

View File

@@ -63,6 +63,33 @@ verify_databases_populated() {
return 1
}
wait_for_mysql(){
  # Poll MySQL (connection parameters from the environment with stock
  # defaults) until "SELECT 1" succeeds; up to 30 attempts, 2 seconds apart.
  local host="${CONTAINER_MYSQL:-ac-mysql}"
  local port="${MYSQL_PORT:-3306}"
  local user="${MYSQL_USER:-root}"
  local pass="${MYSQL_ROOT_PASSWORD:-root}"
  local attempt
  for ((attempt = 0; attempt < 30; attempt++)); do
    if MYSQL_PWD="$pass" mysql -h "$host" -P "$port" -u "$user" -e "SELECT 1" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "❌ Unable to connect to MySQL at ${host}:${port} after multiple attempts"
  return 1
}
ensure_dbimport_conf(){
  # Seed dbimport.conf from its packaged .dist template when absent, and
  # create the temp dir the importer needs. The dist root is overridable via
  # AC_DIST_ROOT (defaults to the stock container layout) for testability.
  local dist_root="${AC_DIST_ROOT:-/azerothcore/env/dist}"
  local conf="${dist_root}/etc/dbimport.conf"
  local dist="${conf}.dist"
  # Never clobber an existing, possibly hand-edited config.
  if [ ! -f "$conf" ] && [ -f "$dist" ]; then
    cp "$dist" "$conf"
  fi
  mkdir -p "${dist_root}/temp"
}
case "${1:-}" in
-h|--help)
print_help
@@ -79,6 +106,11 @@ esac
echo "🔧 Conditional AzerothCore Database Import"
echo "========================================"
if ! wait_for_mysql; then
echo "❌ MySQL service is unavailable; aborting database import"
exit 1
fi
# Restoration status markers - use writable location
RESTORE_STATUS_DIR="/var/lib/mysql-persistent"
MARKER_STATUS_DIR="/tmp"
@@ -325,24 +357,7 @@ if [ -n "$backup_path" ]; then
return 0
fi
# Create dbimport config for verification
echo "📝 Creating dbimport configuration for verification..."
mkdir -p /azerothcore/env/dist/etc
TEMP_DIR="/azerothcore/env/dist/temp"
mkdir -p "$TEMP_DIR"
MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
Updates.EnableDatabases = 15
Updates.AutoSetup = 1
TempDir = "${TEMP_DIR}"
MySQLExecutable = "${MYSQL_EXECUTABLE}"
Updates.AllowedModules = "all"
SourceDirectory = "/azerothcore"
EOF
ensure_dbimport_conf
cd /azerothcore/env/dist/bin
echo "🔄 Running dbimport to apply any missing updates..."
@@ -416,30 +431,7 @@ CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COL
SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
echo "✅ Fresh databases created - proceeding with schema import"
echo "📝 Creating dbimport configuration..."
mkdir -p /azerothcore/env/dist/etc
TEMP_DIR="/azerothcore/env/dist/temp"
mkdir -p "$TEMP_DIR"
MYSQL_EXECUTABLE="$(command -v mysql || echo '/usr/bin/mysql')"
cat > /azerothcore/env/dist/etc/dbimport.conf <<EOF
LoginDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
WorldDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
CharacterDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
PlayerbotsDatabaseInfo = "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};acore_playerbots"
Updates.EnableDatabases = 15
Updates.AutoSetup = 1
TempDir = "${TEMP_DIR}"
MySQLExecutable = "${MYSQL_EXECUTABLE}"
Updates.AllowedModules = "all"
LoginDatabase.WorkerThreads = 1
LoginDatabase.SynchThreads = 1
WorldDatabase.WorkerThreads = 1
WorldDatabase.SynchThreads = 1
CharacterDatabase.WorkerThreads = 1
CharacterDatabase.SynchThreads = 1
SourceDirectory = "/azerothcore"
Updates.ExceptionShutdownDelay = 10000
EOF
ensure_dbimport_conf
echo "🚀 Running database import..."
cd /azerothcore/env/dist/bin

View File

@@ -21,6 +21,8 @@ fi
STORAGE_PATH="${STORAGE_PATH:-./storage}"
STORAGE_PATH_LOCAL="${STORAGE_PATH_LOCAL:-./local-storage}"
BACKUP_ROOT="${STORAGE_PATH}/backups"
MYSQL_DATA_VOLUME_NAME="${MYSQL_DATA_VOLUME_NAME:-mysql-data}"
ALPINE_IMAGE="${ALPINE_IMAGE:-alpine:latest}"
shopt -s nullglob
sql_files=("$IMPORT_DIR"/*.sql "$IMPORT_DIR"/*.sql.gz)
@@ -32,7 +34,25 @@ if [ ! -d "$IMPORT_DIR" ] || [ ${#sql_files[@]} -eq 0 ]; then
fi
# Exit if backup system already has databases restored
if [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]; then
has_restore_marker(){
  # Detect a completed database restore. The Docker-volume marker (post-
  # migration layout) is checked first via a throwaway helper container;
  # the legacy host-path marker is the fallback.
  if command -v docker >/dev/null 2>&1 \
     && docker volume inspect "$MYSQL_DATA_VOLUME_NAME" >/dev/null 2>&1 \
     && docker run --rm \
          -v "${MYSQL_DATA_VOLUME_NAME}:/var/lib/mysql-persistent" \
          "$ALPINE_IMAGE" \
          sh -c 'test -f /var/lib/mysql-persistent/.restore-completed' >/dev/null 2>&1; then
    return 0
  fi
  [ -f "${STORAGE_PATH_LOCAL}/mysql-data/.restore-completed" ]
}
if has_restore_marker; then
echo "✅ Database already restored - skipping import"
exit 0
fi

View File

@@ -75,13 +75,6 @@ for db in "${dbs[@]}"; do
echo "[manual] ✅ ${db}"
done
ledger_src="/modules-meta/module-sql-ledger.txt"
if [ -f "${ledger_src}" ]; then
cp "${ledger_src}" "${TARGET_DIR}/module-sql-ledger.txt"
else
echo "[manual] Module SQL ledger not found; snapshot not included"
fi
size="$(du -sh "${TARGET_DIR}" | cut -f1)"
cat > "${TARGET_DIR}/manifest.json" <<EOF
{

View File

@@ -0,0 +1,139 @@
#!/bin/bash
# Normalize permissions across storage/ and local-storage/ so host processes
# (and CI tools) can read/write module metadata without manual chown.
set -euo pipefail

# Resolve the repository root (two levels above this script) and the env
# files that provide STORAGE_PATH / ALPINE_IMAGE configuration.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd -- "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="$PROJECT_ROOT/.env"
TEMPLATE_FILE="$PROJECT_ROOT/.env.template"
# Print CLI help to stdout. The heredoc delimiter is quoted so the text is
# emitted verbatim (no variable/command expansion).
usage(){
cat <<'EOF'
Usage: repair-storage-permissions.sh [options]
Ensures common storage directories are writable by the current host user.
Options:
--path <dir> Additional directory to fix (can be passed multiple times)
--silent Reduce output (only errors/warnings)
-h, --help Show this help message
EOF
}
read_env(){
  # Look up KEY first in .env, then in .env.template, then fall back to
  # DEFAULT. Prints the resolved value (last matching line wins; carriage
  # returns are stripped for Windows-edited env files).
  local key="$1" default="$2" value=""
  local source_file
  for source_file in "$ENV_FILE" "$TEMPLATE_FILE"; do
    [ -f "$source_file" ] || continue
    value="$(grep -E "^${key}=" "$source_file" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
    [ -z "$value" ] || break
  done
  printf '%s\n' "${value:-$default}"
}
# --- Command-line parsing ---------------------------------------------------
silent=0
declare -a extra_paths=()
while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help)
      usage
      exit 0
      ;;
    --silent)
      silent=1
      ;;
    --path)
      # --path takes a mandatory value; consume it as an extra target dir.
      shift
      [ $# -gt 0 ] || { echo "Missing value for --path" >&2; exit 1; }
      extra_paths+=("$1")
      ;;
    *)
      echo "Unknown option: $1" >&2
      usage
      exit 1
      ;;
  esac
  shift
done
# Informational output honours --silent; warnings always go to stderr.
log(){ if [ "$silent" -ne 1 ]; then printf '%s\n' "$*"; fi; }
warn(){ printf '⚠️ %s\n' "$*" >&2; }
resolve_path(){
  # Convert PATH to an absolute, symlink-resolved form. Relative inputs are
  # taken relative to PROJECT_ROOT. When the parent directory does not exist
  # yet we cannot canonicalize via cd, so return the absolute path unchanged
  # instead of the previous broken "/<basename>" result (which made the
  # caller try to create the directory at filesystem root).
  local path="$1"
  if [[ "$path" != /* ]]; then
    path="${path#./}"
    path="$PROJECT_ROOT/$path"
  fi
  local parent
  if parent="$(cd "$(dirname "$path")" 2>/dev/null && pwd)"; then
    printf '%s/%s\n' "$parent" "$(basename "$path")"
  else
    printf '%s\n' "$path"
  fi
}
ensure_host_writable(){
  # Make TARGET (created if absent) owned by the invoking user and
  # user/group-writable. Falls back to a root helper container when plain
  # chown is not permitted; warns and returns 1 when ownership cannot be
  # adjusted at all.
  local target="$1"
  [ -n "$target" ] || return 0
  mkdir -p "$target" 2>/dev/null || true
  if [ ! -d "$target" ]; then
    warn "Path not found: $target"
    return 0
  fi
  local uid gid
  uid="$(id -u)"
  gid="$(id -g)"
  if ! chown -R "$uid":"$gid" "$target" 2>/dev/null; then
    if command -v docker >/dev/null 2>&1; then
      local helper_image
      helper_image="$(read_env ALPINE_IMAGE "alpine:latest")"
      docker run --rm -u 0:0 -v "$target":/workspace "$helper_image" \
        sh -c "chown -R ${uid}:${gid} /workspace" >/dev/null 2>&1 || {
          warn "Failed to adjust ownership for $target"
          return 1
        }
    else
      warn "Cannot adjust ownership for $target (docker unavailable)"
      return 1
    fi
  fi
  chmod -R ug+rwX "$target" 2>/dev/null || true
  return 0
}
# --- Main: build the target list and repair each path exactly once ----------
STORAGE_PATH="$(read_env STORAGE_PATH "./storage")"
LOCAL_STORAGE_PATH="$(read_env STORAGE_PATH_LOCAL "./local-storage")"

# Standard layout under storage/ and local-storage/ that containers write to.
declare -a targets=(
  "$STORAGE_PATH"
  "$STORAGE_PATH/modules"
  "$STORAGE_PATH/modules/.modules-meta"
  "$STORAGE_PATH/backups"
  "$STORAGE_PATH/logs"
  "$STORAGE_PATH/lua_scripts"
  "$STORAGE_PATH/install-markers"
  "$STORAGE_PATH/client-data"
  "$STORAGE_PATH/config"
  "$LOCAL_STORAGE_PATH"
  "$LOCAL_STORAGE_PATH/modules"
  "$LOCAL_STORAGE_PATH/client-data-cache"
  "$LOCAL_STORAGE_PATH/source"
  "$LOCAL_STORAGE_PATH/images"
)
# Guard the expansion: under `set -u`, expanding an empty array with
# "${arr[@]}" is an "unbound variable" error on bash < 4.4, which would make
# the script die whenever no --path options were given.
targets+=("${extra_paths[@]+"${extra_paths[@]}"}")

# De-duplicate on the resolved path so each directory is fixed at most once.
declare -A seen=()
for raw in "${targets[@]}"; do
  [ -n "$raw" ] || continue
  resolved="$(resolve_path "$raw")"
  if [ -n "${seen[$resolved]:-}" ]; then
    continue
  fi
  seen["$resolved"]=1
  log "🔧 Fixing permissions for $resolved"
  ensure_host_writable "$resolved"
done
log "✅ Storage permissions refreshed"

View File

@@ -1,103 +1,22 @@
#!/bin/bash
# Refresh the module SQL ledger after a database restore so the runtime staging
# flow knows exactly which files to copy into /azerothcore/data/sql/updates/*.
# Refresh the module metadata after a database restore so runtime staging knows
# to re-copy SQL files.
set -euo pipefail
info(){ echo "🔧 [restore-stage] $*"; }
warn(){ echo "⚠️ [restore-stage] $*" >&2; }
MODULES_DIR="${MODULES_DIR:-/modules}"
RESTORE_SOURCE_DIR="${RESTORE_SOURCE_DIR:-}"
MODULES_META_DIR="${MODULES_DIR}/.modules-meta"
LEDGER_FILE="${MODULES_META_DIR}/module-sql-ledger.txt"
RESTORE_FLAG="${MODULES_META_DIR}/.restore-prestaged"
SNAPSHOT_FILE=""
ensure_modules_dir(){
if [ ! -d "$MODULES_DIR" ]; then
warn "Modules directory not found at ${MODULES_DIR}; skipping restore-time staging prep."
exit 0
fi
}
hash_sql_file(){
local sql_file="$1"
if command -v sha1sum >/dev/null 2>&1; then
sha1sum "$sql_file" | awk '{print $1}'
elif command -v md5sum >/dev/null 2>&1; then
md5sum "$sql_file" | awk '{print $1}'
else
return 1
fi
}
collect_sql_files(){
local db_type="$1" legacy="$2"
local -a patterns=(
"$MODULES_DIR"/*/data/sql/"$db_type"/*.sql
"$MODULES_DIR"/*/data/sql/"$db_type"/base/*.sql
"$MODULES_DIR"/*/data/sql/"$db_type"/updates/*.sql
"$MODULES_DIR"/*/data/sql/"$legacy"/*.sql
"$MODULES_DIR"/*/data/sql/"$legacy"/base/*.sql
"$MODULES_DIR"/*/data/sql/"$legacy"/updates/*.sql
)
declare -A seen=()
local -a files=()
for pattern in "${patterns[@]}"; do
for path in $pattern; do
[ -f "$path" ] || continue
if [ -z "${seen[$path]:-}" ]; then
seen["$path"]=1
files+=("$path")
fi
done
done
if [ ${#files[@]} -eq 0 ]; then
return 0
fi
printf '%s\n' "${files[@]}" | sort
}
rebuild_ledger(){
local tmp_file
tmp_file="$(mktemp)"
for db_type in db-world db-characters db-auth; do
local legacy=""
case "$db_type" in
db-world) legacy="world" ;;
db-characters) legacy="characters" ;;
db-auth) legacy="auth" ;;
esac
while IFS= read -r sql_file; do
[ -n "$sql_file" ] || continue
[ -f "$sql_file" ] || continue
local module_name base_name hash
module_name="$(echo "$sql_file" | sed 's|.*/modules/||' | cut -d'/' -f1)"
base_name="$(basename "$sql_file" .sql)"
if ! hash="$(hash_sql_file "$sql_file")"; then
continue
fi
printf '%s|%s|%s|%s\n' "$db_type" "$module_name" "$base_name" "$hash" >> "$tmp_file"
done < <(collect_sql_files "$db_type" "$legacy")
done
sort -u "$tmp_file" > "$LEDGER_FILE"
rm -f "$tmp_file"
}
ensure_modules_dir
mkdir -p "$MODULES_META_DIR" 2>/dev/null || true
if [ -n "$RESTORE_SOURCE_DIR" ] && [ -f "${RESTORE_SOURCE_DIR}/module-sql-ledger.txt" ]; then
SNAPSHOT_FILE="${RESTORE_SOURCE_DIR}/module-sql-ledger.txt"
info "Snapshot found in backup (${SNAPSHOT_FILE}); syncing to host ledger."
cp "$SNAPSHOT_FILE" "$LEDGER_FILE"
else
warn "Module SQL snapshot not found in backup; rebuilding ledger from module sources."
rebuild_ledger
if [ ! -d "$MODULES_DIR" ]; then
warn "Modules directory not found at ${MODULES_DIR}; skipping restore-time staging prep."
exit 0
fi
mkdir -p "$MODULES_META_DIR" 2>/dev/null || true
touch "$RESTORE_FLAG"
echo "restore_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" > "$RESTORE_FLAG"
info "Ledger ready at ${LEDGER_FILE}; runtime staging will copy SQL before worldserver starts."
info "Flagged ${RESTORE_FLAG} to force staging on next ./scripts/bash/stage-modules.sh run."

View File

@@ -40,72 +40,7 @@ ensure_host_writable(){
}
seed_sql_ledger_if_needed(){
local sentinel="$1" ledger="$2"
mkdir -p "$(dirname "$ledger")" 2>/dev/null || true
local need_seed=0
local reason=""
if [ ! -f "$ledger" ] || [ ! -s "$ledger" ]; then
need_seed=1
reason="Module SQL ledger missing; rebuilding."
elif [ -f "$sentinel" ] && [ "$sentinel" -nt "$ledger" ]; then
need_seed=1
reason="Database restore detected; seeding module SQL ledger."
fi
if [ "$need_seed" -ne 1 ]; then
touch "$ledger" 2>/dev/null || true
return 0
fi
echo "${reason}"
local tmp_file="${ledger}.tmp"
> "$tmp_file"
shopt -s nullglob
for db_type in db-world db-characters db-auth; do
local legacy_name=""
case "$db_type" in
db-world) legacy_name="world" ;;
db-characters) legacy_name="characters" ;;
db-auth) legacy_name="auth" ;;
esac
local search_paths=(
"$MODULES_DIR"/*/data/sql/"$db_type"
"$MODULES_DIR"/*/data/sql/"$db_type"/base
"$MODULES_DIR"/*/data/sql/"$db_type"/updates
"$MODULES_DIR"/*/data/sql/"$legacy_name"
"$MODULES_DIR"/*/data/sql/"$legacy_name"/base
)
for module_dir in "${search_paths[@]}"; do
for sql_file in "$module_dir"/*.sql; do
[ -e "$sql_file" ] || continue
local module_name
module_name="$(echo "$sql_file" | sed 's|.*/modules/||' | cut -d'/' -f1)"
local base_name
base_name="$(basename "$sql_file" .sql)"
local hash_cmd=""
if command -v sha1sum >/dev/null 2>&1; then
hash_cmd="sha1sum"
elif command -v md5sum >/dev/null 2>&1; then
hash_cmd="md5sum"
fi
local file_hash=""
if [ -n "$hash_cmd" ]; then
file_hash=$($hash_cmd "$sql_file" | awk '{print $1}')
fi
[ -n "$file_hash" ] || continue
printf '%s|%s|%s|%s\n' "$db_type" "$module_name" "$base_name" "$file_hash" >> "$tmp_file"
done
done
done
shopt -u nullglob
sort -u "$tmp_file" > "$ledger"
rm -f "$tmp_file"
: # No-op; ledger removed
}
sync_local_staging(){
@@ -323,11 +258,20 @@ if [[ "$LOCAL_STORAGE_PATH" != /* ]]; then
LOCAL_STORAGE_PATH="$PROJECT_DIR/$LOCAL_STORAGE_PATH"
fi
LOCAL_STORAGE_PATH="$(canonical_path "$LOCAL_STORAGE_PATH")"
STORAGE_PATH_LOCAL="$LOCAL_STORAGE_PATH"
SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
MODULES_META_DIR="$STORAGE_PATH/modules/.modules-meta"
MODULES_SQL_LEDGER_HOST="$MODULES_META_DIR/module-sql-ledger.txt"
RESTORE_PRESTAGED_FLAG="$MODULES_META_DIR/.restore-prestaged"
MODULES_ENABLED_FILE="$MODULES_META_DIR/modules-enabled.txt"
MODULE_SQL_STAGE_PATH="$(read_env MODULE_SQL_STAGE_PATH "$STORAGE_PATH/module-sql-updates")"
MODULE_SQL_STAGE_PATH="$(eval "echo \"$MODULE_SQL_STAGE_PATH\"")"
if [[ "$MODULE_SQL_STAGE_PATH" != /* ]]; then
MODULE_SQL_STAGE_PATH="$PROJECT_DIR/$MODULE_SQL_STAGE_PATH"
fi
MODULE_SQL_STAGE_PATH="$(canonical_path "$MODULE_SQL_STAGE_PATH")"
mkdir -p "$MODULE_SQL_STAGE_PATH"
ensure_host_writable "$MODULE_SQL_STAGE_PATH"
HOST_STAGE_HELPER_IMAGE="$(read_env ALPINE_IMAGE "alpine:latest")"
declare -A ENABLED_MODULES=()
@@ -353,6 +297,9 @@ module_is_enabled(){
return 1
}
# Load the enabled module list (if present) so staging respects disabled modules.
load_enabled_modules
# Define module mappings (from rebuild-with-modules.sh)
declare -A MODULE_REPO_MAP=(
[MODULE_AOE_LOOT]=mod-aoe-loot
@@ -474,8 +421,6 @@ sync_local_staging
echo "🎬 Staging services with profile: services-$TARGET_PROFILE"
echo "⏳ Pulling images and starting containers; this can take several minutes on first run."
load_enabled_modules
# Stop any currently running services
echo "🛑 Stopping current services..."
docker compose \
@@ -496,6 +441,36 @@ case "$TARGET_PROFILE" in
esac
# Stage module SQL to core updates directory (after containers start)
host_stage_clear(){
  # Best-effort removal of every staged MODULE_*.sql in the host staging
  # volume; runs inside a throwaway helper container so host-side ownership
  # never blocks the cleanup.
  local -a helper_cmd=(
    docker run --rm
    -v "$MODULE_SQL_STAGE_PATH":/host-stage
    "$HOST_STAGE_HELPER_IMAGE"
    sh -c 'find /host-stage -type f -name "MODULE_*.sql" -delete'
  )
  "${helper_cmd[@]}" >/dev/null 2>&1 || true
}
host_stage_reset_dir(){
  # Ensure the per-database staging subdirectory exists inside the host
  # staging volume and clear any previously staged MODULE_*.sql files from
  # it, using a helper container so host-side ownership never interferes.
  # NOTE(review): "$dir" is interpolated into the sh -c command string, so
  # callers must pass simple directory names (db_world, db_auth, ...) —
  # values with spaces or shell metacharacters would break the inner shell.
  local dir="$1"
  docker run --rm \
    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
    "$HOST_STAGE_HELPER_IMAGE" \
    sh -c "mkdir -p /host-stage/$dir && rm -f /host-stage/$dir/MODULE_*.sql" >/dev/null 2>&1 || true
}
copy_to_host_stage(){
  # Copy one module SQL file into the host staging volume under core_dir,
  # renamed to target_name, via a helper container (avoids host ownership
  # issues on the staging mount). Returns the docker run exit status.
  # NOTE(review): $core_dir, $base_name and $target_name are interpolated
  # into the inner sh -c string; they are expected to be simple file/dir
  # names without spaces or shell metacharacters — confirm at call sites.
  local file_path="$1"
  local core_dir="$2"
  local target_name="$3"
  # Mount only the file's parent directory into /src so the helper does not
  # need access to the whole modules tree.
  local src_dir
  src_dir="$(dirname "$file_path")"
  local base_name
  base_name="$(basename "$file_path")"
  docker run --rm \
    -v "$MODULE_SQL_STAGE_PATH":/host-stage \
    -v "$src_dir":/src \
    "$HOST_STAGE_HELPER_IMAGE" \
    sh -c "mkdir -p /host-stage/$core_dir && cp \"/src/$base_name\" \"/host-stage/$core_dir/$target_name\"" >/dev/null 2>&1
}
stage_module_sql_to_core() {
show_staging_step "Module SQL Staging" "Preparing module database updates"
@@ -524,6 +499,7 @@ stage_module_sql_to_core() {
fi
echo "📦 Staging module SQL files to core updates directory..."
host_stage_clear
# Create core updates directories inside container
docker exec ac-worldserver bash -c "
@@ -536,13 +512,10 @@ stage_module_sql_to_core() {
local staged_count=0
local total_skipped=0
local total_failed=0
local RESTORE_SENTINEL="$LOCAL_STORAGE_PATH/mysql-data/.restore-completed"
ensure_host_writable "$MODULES_META_DIR"
seed_sql_ledger_if_needed "$RESTORE_SENTINEL" "$MODULES_SQL_LEDGER_HOST"
docker exec ac-worldserver bash -c "find /azerothcore/data/sql/updates -name '*_MODULE_*.sql' -delete" >/dev/null 2>&1 || true
shopt -s nullglob
for db_type in db-world db-characters db-auth; do
for db_type in db-world db-characters db-auth db-playerbots; do
local core_dir=""
local legacy_name=""
case "$db_type" in
@@ -558,9 +531,14 @@ stage_module_sql_to_core() {
core_dir="db_auth"
legacy_name="auth"
;;
db-playerbots)
core_dir="db_playerbots"
legacy_name="playerbots"
;;
esac
docker exec ac-worldserver bash -c "mkdir -p /azerothcore/data/sql/updates/$core_dir" >/dev/null 2>&1 || true
host_stage_reset_dir "$core_dir"
local counter=0
local skipped=0
@@ -602,28 +580,15 @@ stage_module_sql_to_core() {
continue
fi
local hash_cmd=""
if command -v sha1sum >/dev/null 2>&1; then
hash_cmd="sha1sum"
elif command -v md5sum >/dev/null 2>&1; then
hash_cmd="md5sum"
fi
local file_hash=""
if [ -n "$hash_cmd" ]; then
file_hash=$($hash_cmd "$sql_file" | awk '{print $1}')
fi
local ledger_key="$db_type|$module_name|$base_name"
local target_name="MODULE_${module_name}_${base_name}.sql"
if ! copy_to_host_stage "$sql_file" "$core_dir" "$target_name"; then
echo " ❌ Failed to copy to host staging: $module_name/$db_type/$(basename "$sql_file")"
failed=$((failed + 1))
continue
fi
if docker cp "$sql_file" "ac-worldserver:/azerothcore/data/sql/updates/$core_dir/$target_name" >/dev/null; then
echo " ✓ Staged $module_name/$db_type/$(basename "$sql_file")"
counter=$((counter + 1))
if [ -n "$file_hash" ]; then
local tmp_file="${MODULES_SQL_LEDGER_HOST}.tmp"
grep -Fv "${ledger_key}|" "$MODULES_SQL_LEDGER_HOST" > "$tmp_file" 2>/dev/null || true
printf '%s|%s\n' "$ledger_key" "$file_hash" >> "$tmp_file"
mv "$tmp_file" "$MODULES_SQL_LEDGER_HOST" 2>/dev/null || true
fi
else
echo " ❌ Failed to copy: $module_name/$(basename "$sql_file")"
failed=$((failed + 1))

293
scripts/bash/statusjson.sh Executable file
View File

@@ -0,0 +1,293 @@
#!/usr/bin/env python3
import json
import os
import re
import socket
import subprocess
import time
from pathlib import Path
# Repository root (this file lives two directory levels below it) and the
# .env file that configures the stack.
PROJECT_DIR = Path(__file__).resolve().parents[2]
ENV_FILE = PROJECT_DIR / ".env"
def load_env():
    """Parse ENV_FILE into a dict, skipping blanks, comment lines and any
    inline ``#`` notes after a value."""
    env = {}
    if not ENV_FILE.exists():
        return env
    for raw in ENV_FILE.read_text().splitlines():
        if not raw or raw.strip().startswith('#') or '=' not in raw:
            continue
        key, _, val = raw.partition('=')
        env[key.strip()] = val.split('#', 1)[0].strip()
    return env
def read_env(env, key, default=""):
    """Return env[key] when present, otherwise the supplied default."""
    if key in env:
        return env[key]
    return default
def docker_exists(name):
    """Return True if a container with the given name exists (any state).

    Tolerates a missing docker binary by reporting False instead of
    raising FileNotFoundError (the original behavior crashed the script
    on hosts without docker on PATH).
    """
    try:
        result = subprocess.run(
            ["docker", "ps", "-a", "--format", "{{.Names}}"],
            capture_output=True,
            text=True,
        )
    except OSError:  # docker binary not installed / not on PATH
        return False
    return name in set(result.stdout.split())
def docker_inspect(name, template):
    """Render a Go-template ``docker inspect`` for name.

    Returns "" on any failure: unknown container (non-zero exit) or a
    missing docker binary (previously an uncaught FileNotFoundError).
    """
    try:
        result = subprocess.run(
            ["docker", "inspect", f"--format={template}", name],
            capture_output=True,
            text=True,
            check=True,
        )
    except (subprocess.CalledProcessError, OSError):
        return ""
    return result.stdout.strip()
def service_snapshot(name, label):
    """Build a status dict for one container: state, health, start time,
    image and exit code. Missing containers keep the placeholder values."""
    snapshot = {
        "name": name,
        "label": label,
        "status": "missing",
        "health": "none",
        "started_at": "",
        "image": "",
        "exit_code": "",
    }
    if docker_exists(name):
        snapshot["status"] = docker_inspect(name, "{{.State.Status}}") or "missing"
        snapshot["health"] = docker_inspect(
            name, "{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}"
        ) or "none"
        snapshot["started_at"] = docker_inspect(name, "{{.State.StartedAt}}") or ""
        snapshot["image"] = docker_inspect(name, "{{.Config.Image}}") or ""
        snapshot["exit_code"] = docker_inspect(name, "{{.State.ExitCode}}") or "0"
    return snapshot
def port_reachable(port):
    """True when a TCP connection to 127.0.0.1:port succeeds within 1s.

    Empty or non-numeric port strings are treated as unreachable.
    """
    if not port:
        return False
    try:
        port_num = int(port)
    except ValueError:
        return False
    try:
        conn = socket.create_connection(("127.0.0.1", port_num), timeout=1)
    except OSError:
        return False
    conn.close()
    return True
def module_list(env):
    """List enabled MODULE_* flags from .env, enriched with manifest data.

    A module is considered enabled by a literal ``MODULE_<NAME>=1`` line.
    Descriptions/categories come from config/module-manifest.json when it is
    present and parseable; otherwise placeholder values are used.
    """
    # json/re/Path are already imported at module level; the previous
    # function-local re-imports were redundant and have been removed.
    manifest_path = PROJECT_DIR / "config" / "module-manifest.json"
    manifest_map = {}
    if manifest_path.exists():
        try:
            manifest_data = json.loads(manifest_path.read_text())
            for mod in manifest_data.get("modules", []):
                manifest_map[mod["key"]] = mod
        except Exception:
            # Best effort: a broken manifest only loses the enrichment.
            pass
    modules = []
    pattern = re.compile(r"^MODULE_([A-Z0-9_]+)=1$")
    if ENV_FILE.exists():
        for line in ENV_FILE.read_text().splitlines():
            m = pattern.match(line.strip())
            if not m:
                continue
            key = "MODULE_" + m.group(1)
            # Human-readable title, e.g. MODULE_AOE_LOOT -> "Aoe Loot".
            title = m.group(1).lower().replace('_', ' ').title()
            mod_info = manifest_map.get(key, {})
            modules.append({
                "name": title,
                "key": key,
                "description": mod_info.get("description", "No description available"),
                "category": mod_info.get("category", "unknown"),
                "type": mod_info.get("type", "unknown"),
            })
    return modules
def dir_info(path):
    """Describe a directory: path string, existence flag, human-readable size.

    The size comes from ``du -sh``; "--" when the path is missing or du fails.
    """
    p = Path(path)
    info = {"path": str(p), "exists": p.exists(), "size": "--"}
    if not info["exists"]:
        return info
    try:
        du = subprocess.run(
            ["du", "-sh", str(p)],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            text=True,
            check=False,
        )
        if du.stdout:
            info["size"] = du.stdout.split()[0]
    except Exception:
        info["size"] = "--"
    return info
def volume_info(name, fallback=None):
    """Inspect a docker volume, optionally trying ``fallback`` as an
    alternate name.

    Returns {"name", "exists", "mountpoint"}. A missing docker binary is
    treated the same as a missing volume instead of raising
    FileNotFoundError (the original behavior crashed on docker-less hosts).
    """
    candidates = [name]
    if fallback:
        candidates.append(fallback)
    for cand in candidates:
        try:
            result = subprocess.run(
                ["docker", "volume", "inspect", cand],
                capture_output=True,
                text=True,
            )
        except OSError:  # docker not installed / not on PATH
            break
        if result.returncode == 0:
            try:
                data = json.loads(result.stdout)[0]
                return {
                    "name": cand,
                    "exists": True,
                    "mountpoint": data.get("Mountpoint", "-"),
                }
            except Exception:
                # Unparseable inspect output: fall through to "not found".
                pass
    return {"name": name, "exists": False, "mountpoint": "-"}
def expand_path(value, env):
    """Substitute ${STORAGE_PATH} / ${STORAGE_PATH_LOCAL} placeholders in
    value using the env dict, with the stock relative defaults."""
    substitutions = {
        '${STORAGE_PATH}': env.get("STORAGE_PATH", "./storage"),
        '${STORAGE_PATH_LOCAL}': env.get("STORAGE_PATH_LOCAL", "./local-storage"),
    }
    for placeholder, replacement in substitutions.items():
        value = value.replace(placeholder, replacement)
    return value
def mysql_query(env, database, query):
    """Run a scalar SQL query inside the ac-mysql container.

    Returns the last output line parsed as int, or 0 whenever credentials or
    the database name are missing, or anything goes wrong (docker absent,
    query failure, non-integer output).
    """
    password = env.get("MYSQL_ROOT_PASSWORD", "")
    user = env.get("MYSQL_USER", "root")
    if not password or not database:
        return 0
    cmd = [
        "docker", "exec", "ac-mysql",
        "mysql", "-N", "-B",
        f"-u{user}", f"-p{password}", database,
        "-e", query,
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return int(result.stdout.strip().splitlines()[-1])
    except Exception:
        return 0
def user_stats(env):
    """Aggregate account/character counts from the auth and characters DBs."""
    auth_db = read_env(env, "DB_AUTH_NAME", "acore_auth")
    chars_db = read_env(env, "DB_CHARACTERS_NAME", "acore_characters")
    account_total = mysql_query(env, auth_db, "SELECT COUNT(*) FROM account;")
    online_now = mysql_query(env, auth_db, "SELECT COUNT(*) FROM account WHERE online = 1;")
    active_week = mysql_query(env, auth_db, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")
    character_total = mysql_query(env, chars_db, "SELECT COUNT(*) FROM characters;")
    return {
        "accounts": account_total,
        "online": online_now,
        "characters": character_total,
        "active7d": active_week,
    }
def docker_stats():
    """Snapshot per-container CPU/memory usage via ``docker stats``.

    Returns a mapping of container name -> {cpu, memory, memory_percent};
    {} on any failure (docker missing, timeout, parse error).
    """
    def _pct(text):
        # "12.34%" -> 12.34; anything unparsable -> 0.0
        try:
            return float(text.replace('%', '').strip())
        except ValueError:
            return 0.0

    try:
        result = subprocess.run(
            ["docker", "stats", "--no-stream", "--no-trunc",
             "--format", "{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}"],
            capture_output=True, text=True, check=True, timeout=4,
        )
    except Exception:
        return {}
    stats = {}
    for line in result.stdout.strip().splitlines():
        fields = line.split('\t')
        if len(fields) != 4:
            continue
        name, cpu, mem_usage, mem_perc = fields
        stats[name] = {
            "cpu": _pct(cpu),
            "memory": mem_usage.strip(),
            "memory_percent": _pct(mem_perc),
        }
    return stats
def main():
    """Assemble the full status snapshot and print it as one JSON document."""
    env = load_env()
    project = read_env(env, "COMPOSE_PROJECT_NAME", "acore-compose")
    network = read_env(env, "NETWORK_NAME", "azerothcore")

    # (container name, human-readable label) pairs for every stack service.
    service_defs = [
        ("ac-mysql", "MySQL"),
        ("ac-backup", "Backup"),
        ("ac-volume-init", "Volume Init"),
        ("ac-storage-init", "Storage Init"),
        ("ac-db-init", "DB Init"),
        ("ac-db-import", "DB Import"),
        ("ac-authserver", "Auth Server"),
        ("ac-worldserver", "World Server"),
        ("ac-client-data", "Client Data"),
        ("ac-modules", "Module Manager"),
        ("ac-post-install", "Post Install"),
        ("ac-phpmyadmin", "phpMyAdmin"),
        ("ac-keira3", "Keira3"),
    ]
    service_data = [service_snapshot(cname, label) for cname, label in service_defs]

    def port_entry(label, key, probe=True):
        # One entry for the "ports" array; the reachability probe is skipped
        # when the port is known to be unexposed.
        port = read_env(env, key)
        return {
            "name": label,
            "port": port,
            "reachable": port_reachable(port) if probe else False,
        }

    mysql_exposed = read_env(env, "COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED", "0") == "1"
    port_entries = [
        port_entry("Auth", "AUTH_EXTERNAL_PORT"),
        port_entry("World", "WORLD_EXTERNAL_PORT"),
        port_entry("SOAP", "SOAP_EXTERNAL_PORT"),
        port_entry("MySQL", "MYSQL_EXTERNAL_PORT", probe=mysql_exposed),
        port_entry("phpMyAdmin", "PMA_EXTERNAL_PORT"),
        port_entry("Keira3", "KEIRA3_EXTERNAL_PORT"),
    ]

    storage_path = expand_path(read_env(env, "STORAGE_PATH", "./storage"), env)
    local_storage_path = expand_path(read_env(env, "STORAGE_PATH_LOCAL", "./local-storage"), env)
    client_data_path = expand_path(read_env(env, "CLIENT_DATA_PATH", f"{storage_path}/client-data"), env)
    storage_info = {
        "storage": dir_info(storage_path),
        "local_storage": dir_info(local_storage_path),
        "client_data": dir_info(client_data_path),
        "modules": dir_info(os.path.join(storage_path, "modules")),
        "local_modules": dir_info(os.path.join(local_storage_path, "modules")),
    }
    volumes = {
        "client_cache": volume_info(f"{project}_client-data-cache"),
        "mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"),
    }

    snapshot = {
        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "project": project,
        "network": network,
        "services": service_data,
        "ports": port_entries,
        "modules": module_list(env),
        "storage": storage_info,
        "volumes": volumes,
        "users": user_stats(env),
        "stats": docker_stats(),
    }
    print(json.dumps(snapshot))


if __name__ == "__main__":
    main()

View File

@@ -69,6 +69,12 @@ section_header "Phase 1 Integration Test Suite"
info "Project root: $PROJECT_ROOT"
info "Test started: $(date)"
# Ensure storage directories are writable before generating module state
if [ -x "$PROJECT_ROOT/scripts/bash/repair-storage-permissions.sh" ]; then
info "Normalizing storage permissions"
"$PROJECT_ROOT/scripts/bash/repair-storage-permissions.sh" --silent || true
fi
# Test 1: Verify .env exists
test_header "Environment Configuration Check"
if [ -f .env ]; then
@@ -273,11 +279,10 @@ fi
# Test 11: Restore + Module Staging Automation
test_header "Restore + Module Staging Automation"
if grep -q "restore-and-stage.sh" docker-compose.yml && \
grep -q ".restore-prestaged" scripts/bash/restore-and-stage.sh && \
grep -q "module-sql-ledger" scripts/bash/restore-and-stage.sh; then
ok "restore-and-stage.sh wired into compose, refreshes ledger snapshot, and flags staging"
grep -q ".restore-prestaged" scripts/bash/restore-and-stage.sh; then
ok "restore-and-stage.sh wired into compose and flags stage-modules to recopy SQL"
else
err "restore-and-stage.sh missing compose wiring or ledger/flag handling"
err "restore-and-stage.sh missing compose wiring or flag handling"
fi
# Test 12: Docker Compose configuration check

10
scripts/go/go.mod Normal file
View File

@@ -0,0 +1,10 @@
module acore-compose/statusdash

go 1.22.2

// termui is imported directly by statusdash.go / table_nocol.go, so it
// must not carry the "// indirect" marker (this is what `go mod tidy`
// would produce).
require github.com/gizak/termui/v3 v3.1.0

require (
	github.com/mattn/go-runewidth v0.0.2 // indirect
	github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
	github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect
)

8
scripts/go/go.sum Normal file
View File

@@ -0,0 +1,8 @@
github.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc=
github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY=
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=

373
scripts/go/statusdash.go Normal file
View File

@@ -0,0 +1,373 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os/exec"
"strings"
"time"
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
)
// Service mirrors one entry of the "services" array emitted by the
// Python status script (docker container state per service).
type Service struct {
	Name      string `json:"name"`
	Label     string `json:"label"`
	Status    string `json:"status"`
	Health    string `json:"health"`
	StartedAt string `json:"started_at"`
	Image     string `json:"image"`
	ExitCode  string `json:"exit_code"`
}

// ContainerStats holds live resource usage for one container, keyed by
// container name in Snapshot.Stats.
type ContainerStats struct {
	CPU           float64 `json:"cpu"`
	Memory        string  `json:"memory"`
	MemoryPercent float64 `json:"memory_percent"`
}

// Port describes one externally exposed port and whether it answered a
// reachability probe.
type Port struct {
	Name      string `json:"name"`
	Port      string `json:"port"`
	Reachable bool   `json:"reachable"`
}

// DirInfo describes one storage directory (path, existence, on-disk size).
type DirInfo struct {
	Path   string `json:"path"`
	Exists bool   `json:"exists"`
	Size   string `json:"size"`
}

// VolumeInfo describes one docker volume.
type VolumeInfo struct {
	Name       string `json:"name"`
	Exists     bool   `json:"exists"`
	Mountpoint string `json:"mountpoint"`
}

// UserStats aggregates account/character counts from the game databases.
type UserStats struct {
	Accounts   int `json:"accounts"`
	Online     int `json:"online"`
	Characters int `json:"characters"`
	Active7d   int `json:"active7d"`
}

// Module is one installed/configured AzerothCore module.
type Module struct {
	Name        string `json:"name"`
	Key         string `json:"key"`
	Description string `json:"description"`
	Category    string `json:"category"`
	Type        string `json:"type"`
}

// Snapshot is the top-level JSON document produced by
// scripts/bash/statusjson.sh and consumed by the dashboard.
type Snapshot struct {
	Timestamp string                    `json:"timestamp"`
	Project   string                    `json:"project"`
	Network   string                    `json:"network"`
	Services  []Service                 `json:"services"`
	Ports     []Port                    `json:"ports"`
	Modules   []Module                  `json:"modules"`
	Storage   map[string]DirInfo        `json:"storage"`
	Volumes   map[string]VolumeInfo     `json:"volumes"`
	Users     UserStats                 `json:"users"`
	Stats     map[string]ContainerStats `json:"stats"`
}
// runSnapshot shells out to the status helper script (relative to the
// current working directory) and decodes its JSON output into a Snapshot.
func runSnapshot() (*Snapshot, error) {
	raw, err := exec.Command("./scripts/bash/statusjson.sh").Output()
	if err != nil {
		return nil, err
	}
	var snap Snapshot
	if err := json.Unmarshal(raw, &snap); err != nil {
		return nil, err
	}
	return &snap, nil
}
// buildServicesTable renders one row per service: label, status, health,
// and (when live stats are available) CPU%% and memory usage.
func buildServicesTable(s *Snapshot) *TableNoCol {
	rows := [][]string{{"Service", "Status", "Health", "CPU%", "Memory"}}
	for _, svc := range s.Services {
		cpuCol, memCol := "-", "-"
		if live, ok := s.Stats[svc.Name]; ok {
			cpuCol = fmt.Sprintf("%.1f", live.CPU)
			// Show only the "used" half of docker's "used / total".
			memCol = strings.Split(live.Memory, " / ")[0]
		}
		// Annotate non-running containers with their non-zero exit code.
		health := svc.Health
		if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
			health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
		}
		rows = append(rows, []string{svc.Label, svc.Status, health, cpuCol, memCol})
	}
	tbl := NewTableNoCol()
	tbl.Rows = rows
	tbl.RowSeparator = false
	tbl.Border = true
	tbl.Title = "Services"
	return tbl
}
// buildPortsTable renders one row per exposed port with its
// reachability state ("up"/"down").
func buildPortsTable(s *Snapshot) *TableNoCol {
	rows := make([][]string, 0, len(s.Ports)+1)
	rows = append(rows, []string{"Port", "Number", "Reachable"})
	for _, entry := range s.Ports {
		reach := "down"
		if entry.Reachable {
			reach = "up"
		}
		rows = append(rows, []string{entry.Name, entry.Port, reach})
	}
	tbl := NewTableNoCol()
	tbl.Rows = rows
	tbl.RowSeparator = true
	tbl.Border = true
	tbl.Title = "Ports"
	return tbl
}
// buildModulesList renders the module names as a selectable list; the
// title carries the total module count.
func buildModulesList(s *Snapshot) *widgets.List {
	names := make([]string, len(s.Modules))
	for idx, mod := range s.Modules {
		names[idx] = mod.Name
	}
	lst := widgets.NewList()
	lst.Title = fmt.Sprintf("Modules (%d)", len(s.Modules))
	lst.Rows = names
	lst.WrapText = false
	lst.Border = true
	lst.BorderStyle = ui.NewStyle(ui.ColorCyan)
	lst.SelectedRowStyle = ui.NewStyle(ui.ColorCyan)
	return lst
}
// buildStorageParagraph summarises the known storage directories, marking
// existing ones with ● and missing ones with ○.
func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
	type entry struct{ key, label string }
	order := []entry{
		{"storage", "Storage"},
		{"local_storage", "Local Storage"},
		{"client_data", "Client Data"},
		{"modules", "Modules"},
		{"local_modules", "Local Modules"},
	}
	var sb strings.Builder
	sb.WriteString("STORAGE:\n")
	for _, e := range order {
		info, ok := s.Storage[e.key]
		if !ok {
			continue
		}
		mark := "○"
		if info.Exists {
			mark = "●"
		}
		fmt.Fprintf(&sb, " %-15s %s %s (%s)\n", e.label, mark, info.Path, info.Size)
	}
	par := widgets.NewParagraph()
	par.Title = "Storage"
	par.Text = sb.String()
	par.Border = true
	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
	return par
}
// buildVolumesParagraph summarises the docker volumes, marking existing
// ones with ● and missing ones with ○.
func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
	type entry struct{ key, label string }
	order := []entry{
		{"client_cache", "Client Cache"},
		{"mysql_data", "MySQL Data"},
	}
	var sb strings.Builder
	sb.WriteString("VOLUMES:\n")
	for _, e := range order {
		info, ok := s.Volumes[e.key]
		if !ok {
			continue
		}
		mark := "○"
		if info.Exists {
			mark = "●"
		}
		fmt.Fprintf(&sb, " %-13s %s %s\n", e.label, mark, info.Mountpoint)
	}
	par := widgets.NewParagraph()
	par.Title = "Volumes"
	par.Text = sb.String()
	par.Border = true
	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
	return par
}
// renderSnapshot builds every widget from the snapshot, arranges them in a
// full-terminal grid, renders the grid once, and returns the modules list
// (so the caller can move its selection) plus the grid (so the caller can
// re-render without rebuilding).
func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
	servicesTable := buildServicesTable(s)
	// Colour each service row by its Status column (index 1).
	for i := 1; i < len(servicesTable.Rows); i++ {
		// RowStyles is lazily created on first use.
		if servicesTable.RowStyles == nil {
			servicesTable.RowStyles = make(map[int]ui.Style)
		}
		state := strings.ToLower(servicesTable.Rows[i][1])
		switch state {
		case "running", "healthy":
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
		case "restarting", "unhealthy":
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
		case "exited":
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
		default:
			servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorWhite)
		}
	}
	portsTable := buildPortsTable(s)
	// Colour each port row green/red by the up/down column (index 2).
	for i := 1; i < len(portsTable.Rows); i++ {
		if portsTable.RowStyles == nil {
			portsTable.RowStyles = make(map[int]ui.Style)
		}
		if portsTable.Rows[i][2] == "up" {
			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
		} else {
			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
		}
	}
	modulesList := buildModulesList(s)
	// Restore the caller's selection if it is still in range.
	if selectedModule >= 0 && selectedModule < len(modulesList.Rows) {
		modulesList.SelectedRow = selectedModule
	}
	helpPar := widgets.NewParagraph()
	helpPar.Title = "Controls"
	helpPar.Text = " ↓ : Down\n ↑ : Up"
	helpPar.Border = true
	helpPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
	// Detail pane for the currently selected module.
	moduleInfoPar := widgets.NewParagraph()
	moduleInfoPar.Title = "Module Info"
	if selectedModule >= 0 && selectedModule < len(s.Modules) {
		mod := s.Modules[selectedModule]
		moduleInfoPar.Text = fmt.Sprintf("%s\n\nCategory: %s\nType: %s", mod.Description, mod.Category, mod.Type)
	} else {
		moduleInfoPar.Text = "Select a module to view info"
	}
	moduleInfoPar.Border = true
	moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
	storagePar := buildStorageParagraph(s)
	// NOTE(review): Border/BorderStyle were already set inside
	// buildStorageParagraph; these assignments are redundant but harmless.
	storagePar.Border = true
	storagePar.BorderStyle = ui.NewStyle(ui.ColorYellow)
	storagePar.PaddingLeft = 1
	storagePar.PaddingRight = 1
	volumesPar := buildVolumesParagraph(s)
	header := widgets.NewParagraph()
	header.Text = fmt.Sprintf("Project: %s\nNetwork: %s\nUpdated: %s", s.Project, s.Network, s.Timestamp)
	header.Border = true
	usersPar := widgets.NewParagraph()
	usersPar.Text = fmt.Sprintf("USERS:\n Accounts: %d\n Online: %d\n Characters: %d\n Active 7d: %d", s.Users.Accounts, s.Users.Online, s.Users.Characters, s.Users.Active7d)
	usersPar.Border = true
	// Layout: three rows (18% header, 42% services/ports, 40% modules +
	// storage/volumes); column ratios within each row sum to 1.0.
	grid := ui.NewGrid()
	termWidth, termHeight := ui.TerminalDimensions()
	grid.SetRect(0, 0, termWidth, termHeight)
	grid.Set(
		ui.NewRow(0.18,
			ui.NewCol(0.6, header),
			ui.NewCol(0.4, usersPar),
		),
		ui.NewRow(0.42,
			ui.NewCol(0.6, servicesTable),
			ui.NewCol(0.4, portsTable),
		),
		ui.NewRow(0.40,
			ui.NewCol(0.25, modulesList),
			ui.NewCol(0.15,
				ui.NewRow(0.30, helpPar),
				ui.NewRow(0.70, moduleInfoPar),
			),
			ui.NewCol(0.6,
				ui.NewRow(0.55,
					ui.NewCol(1.0, storagePar),
				),
				ui.NewRow(0.45,
					ui.NewCol(1.0, volumesPar),
				),
			),
		),
	)
	ui.Render(grid)
	return modulesList, grid
}
// main drives the dashboard: renders an initial snapshot, refreshes it
// from a background ticker goroutine every 5 seconds, and handles
// keyboard/resize events until the user quits with q or Ctrl-C.
func main() {
	if err := ui.Init(); err != nil {
		log.Fatalf("failed to init termui: %v", err)
	}
	defer ui.Close()
	snapshot, err := runSnapshot()
	if err != nil {
		log.Fatalf("failed to fetch snapshot: %v", err)
	}
	selectedModule := 0
	modulesWidget, currentGrid := renderSnapshot(snapshot, selectedModule)
	// Buffered (size 1) so the producer never blocks; a stale snapshot is
	// simply dropped if the UI loop hasn't consumed the previous one.
	snapCh := make(chan *Snapshot, 1)
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for range ticker.C {
			snap, err := runSnapshot()
			if err != nil {
				// NOTE(review): log.Printf writes to stderr while termui
				// owns the terminal, which may garble the display — confirm
				// whether this should be suppressed or shown in-UI.
				log.Printf("snapshot error: %v", err)
				continue
			}
			select {
			case snapCh <- snap:
			default:
			}
		}
	}()
	events := ui.PollEvents()
	for {
		select {
		case e := <-events:
			switch e.ID {
			case "q", "<C-c>":
				return
			case "<Down>", "j":
				// Move selection down and re-render with the new highlight.
				if selectedModule < len(snapshot.Modules)-1 {
					selectedModule++
					modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
				}
			case "<Up>", "k":
				if selectedModule > 0 {
					selectedModule--
					modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
				}
			case "<Resize>":
				// renderSnapshot already rendered at the new dimensions;
				// skip the extra Render below.
				modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
				continue
			}
			// Keep the list's highlighted row in sync, then repaint.
			if modulesWidget != nil {
				if selectedModule >= 0 && selectedModule < len(modulesWidget.Rows) {
					modulesWidget.SelectedRow = selectedModule
				}
			}
			if currentGrid != nil {
				ui.Render(currentGrid)
			}
		case snap := <-snapCh:
			// Periodic refresh: adopt the new snapshot and clamp the
			// selection in case the module list shrank.
			snapshot = snap
			if selectedModule >= len(snapshot.Modules) {
				selectedModule = len(snapshot.Modules) - 1
				if selectedModule < 0 {
					selectedModule = 0
				}
			}
			modulesWidget, currentGrid = renderSnapshot(snapshot, selectedModule)
		}
	}
}

101
scripts/go/table_nocol.go Normal file
View File

@@ -0,0 +1,101 @@
package main
import (
"image"
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
)
// TableNoCol is a modified table widget that doesn't draw column separators.
// It embeds widgets.Table and overrides only Draw.
type TableNoCol struct {
	widgets.Table
}

// NewTableNoCol returns a TableNoCol wrapping a freshly initialised
// widgets.Table, so all of the stock Table defaults apply.
func NewTableNoCol() *TableNoCol {
	return &TableNoCol{Table: *widgets.NewTable()}
}
// Draw overrides the default Draw to skip column separators.
// Apart from omitting the vertical separator pass, the cell-placement
// logic follows widgets.Table: rows are laid out top-down inside the
// inner rect, each cell truncated with an ellipsis when it overflows
// its column width or the right edge.
func (self *TableNoCol) Draw(buf *ui.Buffer) {
	self.Block.Draw(buf)
	if len(self.Rows) == 0 {
		return
	}
	self.ColumnResizer()
	columnWidths := self.ColumnWidths
	// Fall back to equal-width columns when none were configured.
	if len(columnWidths) == 0 {
		columnCount := len(self.Rows[0])
		columnWidth := self.Inner.Dx() / columnCount
		for i := 0; i < columnCount; i++ {
			columnWidths = append(columnWidths, columnWidth)
		}
	}
	yCoordinate := self.Inner.Min.Y
	// draw rows (stop once we run past the bottom of the inner rect)
	for i := 0; i < len(self.Rows) && yCoordinate < self.Inner.Max.Y; i++ {
		row := self.Rows[i]
		colXCoordinate := self.Inner.Min.X
		rowStyle := self.TextStyle
		// get the row style if one exists
		if style, ok := self.RowStyles[i]; ok {
			rowStyle = style
		}
		// Optionally paint the whole row background before the cells.
		if self.FillRow {
			blankCell := ui.NewCell(' ', rowStyle)
			buf.Fill(blankCell, image.Rect(self.Inner.Min.X, yCoordinate, self.Inner.Max.X, yCoordinate+1))
		}
		// draw row cells
		for j := 0; j < len(row); j++ {
			col := ui.ParseStyles(row[j], rowStyle)
			// draw row cell: left-aligned (or overflowing) text is written
			// cell-by-cell and truncated with an ellipsis at the column
			// boundary or the widget's right edge.
			if len(col) > columnWidths[j] || self.TextAlignment == ui.AlignLeft {
				for _, cx := range ui.BuildCellWithXArray(col) {
					k, cell := cx.X, cx.Cell
					if k == columnWidths[j] || colXCoordinate+k == self.Inner.Max.X {
						cell.Rune = ui.ELLIPSES
						buf.SetCell(cell, image.Pt(colXCoordinate+k-1, yCoordinate))
						break
					} else {
						buf.SetCell(cell, image.Pt(colXCoordinate+k, yCoordinate))
					}
				}
			} else if self.TextAlignment == ui.AlignCenter {
				// Centered: offset by half the free space in the column.
				xCoordinateOffset := (columnWidths[j] - len(col)) / 2
				stringXCoordinate := xCoordinateOffset + colXCoordinate
				for _, cx := range ui.BuildCellWithXArray(col) {
					k, cell := cx.X, cx.Cell
					buf.SetCell(cell, image.Pt(stringXCoordinate+k, yCoordinate))
				}
			} else if self.TextAlignment == ui.AlignRight {
				// Right-aligned: start so the text ends at the column edge
				// (clamped to the widget's right edge).
				stringXCoordinate := ui.MinInt(colXCoordinate+columnWidths[j], self.Inner.Max.X) - len(col)
				for _, cx := range ui.BuildCellWithXArray(col) {
					k, cell := cx.X, cx.Cell
					buf.SetCell(cell, image.Pt(stringXCoordinate+k, yCoordinate))
				}
			}
			// Advance past this column plus the one-cell gap where the
			// separator would normally be drawn.
			colXCoordinate += columnWidths[j] + 1
		}
		// SKIP drawing vertical separators - this is the key change
		yCoordinate++
		// draw horizontal separator (between rows only, never after the last)
		horizontalCell := ui.NewCell(ui.HORIZONTAL_LINE, self.Block.BorderStyle)
		if self.RowSeparator && yCoordinate < self.Inner.Max.Y && i != len(self.Rows)-1 {
			buf.Fill(horizontalCell, image.Rect(self.Inner.Min.X, yCoordinate, self.Inner.Max.X, yCoordinate+1))
			yCoordinate++
		}
	}
}