17 Commits

Author SHA1 Message Date
uprightbass360
3b11e23546 refactor and compress code 2025-12-02 21:43:05 -05:00
uprightbass360
4596320856 add log bind mounts 2025-12-02 01:26:14 -05:00
uprightbass360
d11b9f4089 break apart paths for easier management 2025-11-30 23:21:09 -05:00
uprightbass360
82a5104e87 profile updates 2025-11-27 01:06:48 -05:00
uprightbass360
251b5d8f9f update port display for clarity 2025-11-26 15:37:41 -05:00
uprightbass360
5620fbae91 fix size computing for nested container 2025-11-26 15:19:41 -05:00
uprightbass360
319da1a553 remove test config 2025-11-26 15:00:08 -05:00
uprightbass360
681da2767b exclude bots from stats 2025-11-26 01:58:06 -05:00
uprightbass360
d38c7557e0 status info 2025-11-26 01:31:00 -05:00
uprightbass360
df7689f26a cleanup 2025-11-25 22:11:47 -05:00
uprightbass360
b62e33bb03 docs 2025-11-25 17:45:42 -05:00
uprightbass360
44f9beff71 cleanup hard-coded vars 2025-11-25 17:45:17 -05:00
uprightbass360
e1dc98f1e7 deploy updates 2025-11-23 16:42:50 -05:00
uprightbass360
7e9e6e1b4f setup hardening 2025-11-23 16:05:00 -05:00
uprightbass360
3d0e88e9f6 add status info for new containers 2025-11-23 16:04:29 -05:00
uprightbass360
b3019eb603 directory staging 2025-11-23 13:05:08 -05:00
uprightbass360
327774c0df tagging new modules and images 2025-11-22 22:08:07 -05:00
48 changed files with 4107 additions and 801 deletions

View File

@@ -21,6 +21,15 @@ COMPOSE_PROJECT_NAME=azerothcore-stack
# ===================== # =====================
STORAGE_PATH=./storage STORAGE_PATH=./storage
STORAGE_PATH_LOCAL=./local-storage STORAGE_PATH_LOCAL=./local-storage
STORAGE_CONFIG_PATH=${STORAGE_PATH}/config
STORAGE_LOGS_PATH=${STORAGE_PATH}/logs
STORAGE_MODULES_PATH=${STORAGE_PATH}/modules
STORAGE_LUA_SCRIPTS_PATH=${STORAGE_PATH}/lua_scripts
STORAGE_MODULES_META_PATH=${STORAGE_MODULES_PATH}/.modules-meta
STORAGE_MODULE_SQL_PATH=${STORAGE_PATH}/module-sql-updates
STORAGE_INSTALL_MARKERS_PATH=${STORAGE_PATH}/install-markers
STORAGE_CLIENT_DATA_PATH=${STORAGE_PATH}/client-data
STORAGE_LOCAL_SOURCE_PATH=${STORAGE_PATH_LOCAL}/source
BACKUP_PATH=${STORAGE_PATH}/backups BACKUP_PATH=${STORAGE_PATH}/backups
HOST_ZONEINFO_PATH=/usr/share/zoneinfo HOST_ZONEINFO_PATH=/usr/share/zoneinfo
TZ=UTC TZ=UTC
@@ -65,12 +74,12 @@ DB_GUARD_VERIFY_INTERVAL_SECONDS=86400
# ===================== # =====================
# Module SQL staging # Module SQL staging
# ===================== # =====================
MODULE_SQL_STAGE_PATH=${STORAGE_PATH_LOCAL}/module-sql-updates STAGE_PATH_MODULE_SQL=${STORAGE_MODULE_SQL_PATH}
# ===================== # =====================
# SQL Source Overlay # SQL Source Overlay
# ===================== # =====================
AC_SQL_SOURCE_PATH=${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql AC_SQL_SOURCE_PATH=${STORAGE_LOCAL_SOURCE_PATH}/azerothcore-playerbots/data/sql
# ===================== # =====================
# Images # Images
@@ -141,7 +150,7 @@ MYSQL_INNODB_LOG_FILE_SIZE=64M
MYSQL_INNODB_REDO_LOG_CAPACITY=512M MYSQL_INNODB_REDO_LOG_CAPACITY=512M
MYSQL_RUNTIME_TMPFS_SIZE=8G MYSQL_RUNTIME_TMPFS_SIZE=8G
MYSQL_DISABLE_BINLOG=1 MYSQL_DISABLE_BINLOG=1
MYSQL_CONFIG_DIR=${STORAGE_PATH}/config/mysql/conf.d MYSQL_CONFIG_DIR=${STORAGE_CONFIG_PATH}/mysql/conf.d
DB_WAIT_RETRIES=60 DB_WAIT_RETRIES=60
DB_WAIT_SLEEP=10 DB_WAIT_SLEEP=10
@@ -180,6 +189,7 @@ DB_CHARACTER_SYNCH_THREADS=1
BACKUP_RETENTION_DAYS=3 BACKUP_RETENTION_DAYS=3
BACKUP_RETENTION_HOURS=6 BACKUP_RETENTION_HOURS=6
BACKUP_DAILY_TIME=09 BACKUP_DAILY_TIME=09
BACKUP_INTERVAL_MINUTES=60
# Optional comma/space separated schemas to include in automated backups # Optional comma/space separated schemas to include in automated backups
BACKUP_EXTRA_DATABASES= BACKUP_EXTRA_DATABASES=
BACKUP_HEALTHCHECK_MAX_MINUTES=1440 BACKUP_HEALTHCHECK_MAX_MINUTES=1440
@@ -217,7 +227,6 @@ CLIENT_DATA_VERSION=
# Available: none, blizzlike, fast-leveling, hardcore-pvp, casual-pve # Available: none, blizzlike, fast-leveling, hardcore-pvp, casual-pve
SERVER_CONFIG_PRESET=none SERVER_CONFIG_PRESET=none
CLIENT_DATA_CACHE_PATH=${STORAGE_PATH_LOCAL}/client-data-cache CLIENT_DATA_CACHE_PATH=${STORAGE_PATH_LOCAL}/client-data-cache
CLIENT_DATA_PATH=${STORAGE_PATH}/client-data
# ===================== # =====================
# Module toggles (0/1) # Module toggles (0/1)

1
.gitignore vendored
View File

@@ -20,3 +20,4 @@ todo.md
.gocache/ .gocache/
.module-ledger/ .module-ledger/
deploy.log deploy.log
statusdash

372
CLEANUP_TODO.md Normal file
View File

@@ -0,0 +1,372 @@
# AzerothCore RealmMaster - Cleanup TODO
## Overview
This document outlines systematic cleanup opportunities using the proven methodology from our successful consolidation. Each phase must be validated and tested incrementally without breaking existing functionality.
## Methodology
1. **Analyze** - Map dependencies and usage patterns
2. **Consolidate** - Create shared libraries/templates
3. **Replace** - Update scripts to use centralized versions
4. **Test** - Validate each change incrementally
5. **Document** - Track changes and dependencies
---
## Phase 1: Complete Script Function Consolidation
**Priority: HIGH** | **Risk: LOW** | **Impact: HIGH**
### Status
**Completed**: Master scripts (deploy.sh, build.sh, cleanup.sh) + 4 critical scripts
🔄 **Remaining**: 10+ scripts with duplicate logging functions
### Remaining Scripts to Consolidate
```bash
# Root level scripts
./changelog.sh # Has: info(), warn(), err()
./update-latest.sh # Has: info(), ok(), warn(), err()
# Backup system scripts
./scripts/bash/backup-export.sh # Has: info(), ok(), warn(), err()
./scripts/bash/backup-import.sh # Has: info(), ok(), warn(), err()
# Database scripts
./scripts/bash/db-guard.sh # Has: info(), warn(), err()
./scripts/bash/db-health-check.sh # Has: info(), ok(), warn(), err()
# Module & verification scripts
./scripts/bash/verify-sql-updates.sh # Has: info(), warn(), err()
./scripts/bash/manage-modules.sh # Has: info(), ok(), warn(), err()
./scripts/bash/repair-storage-permissions.sh # Has: info(), warn(), err()
./scripts/bash/test-phase1-integration.sh # Has: info(), ok(), warn(), err()
```
### Implementation Plan
**Step 1.1**: Consolidate Root Level Scripts (changelog.sh, update-latest.sh)
- Add lib/common.sh sourcing with error handling
- Remove duplicate function definitions
- Test functionality with `--help` flags
**Step 1.2**: Consolidate Backup System Scripts
- Update backup-export.sh and backup-import.sh
- Ensure backup operations still work correctly
- Test with dry-run flags where available
**Step 1.3**: Consolidate Database Scripts
- Update db-guard.sh and db-health-check.sh
- **CRITICAL**: These run in containers - verify mount paths work
- Test with existing database connections
**Step 1.4**: Consolidate Module & Verification Scripts
- Update manage-modules.sh, verify-sql-updates.sh, repair-storage-permissions.sh
- Test module staging and SQL verification workflows
- Verify test-phase1-integration.sh still functions
### Validation Tests
```bash
# Test each script category after consolidation
./changelog.sh --help
./update-latest.sh --help
./scripts/bash/backup-export.sh --dry-run
./scripts/bash/manage-modules.sh --list
```
---
## Phase 2: Docker Compose YAML Anchor Completion
**Priority: HIGH** | **Risk: MEDIUM** | **Impact: HIGH**
### Status
**Completed**: Basic YAML anchors, 2 authserver services consolidated
🔄 **Remaining**: 4 worldserver services, database services, volume patterns
### Current Docker Compose Analysis
```yaml
# Services needing consolidation:
- ac-worldserver-standard # ~45 lines → can reduce to ~10
- ac-worldserver-playerbots # ~45 lines → can reduce to ~10
- ac-worldserver-modules # ~45 lines → can reduce to ~10
- ac-authserver-modules # ~30 lines → can reduce to ~8
# Database services with repeated patterns:
- ac-db-import # Repeated volume mounts
- ac-db-guard # Similar environment variables
- ac-db-init # Similar MySQL connection patterns
# Volume mount patterns repeated 15+ times:
- ${STORAGE_CONFIG_PATH}:/azerothcore/env/dist/etc
- ${STORAGE_LOGS_PATH}:/azerothcore/logs
- ${BACKUP_PATH}:/backups
```
### Implementation Plan
**Step 2.1**: Complete Worldserver Service Consolidation
- Extend x-worldserver-common anchor to cover all variants
- Consolidate ac-worldserver-standard, ac-worldserver-playerbots, ac-worldserver-modules
- Test each Docker profile: `docker compose --profile services-standard config`
**Step 2.2**: Database Services Consolidation
- Create x-database-common anchor for shared database configurations
- Create x-database-volumes anchor for repeated volume patterns
- Update ac-db-import, ac-db-guard, ac-db-init services
**Step 2.3**: Complete Authserver Consolidation
- Consolidate remaining ac-authserver-modules service
- Verify all three profiles work: standard, playerbots, modules
### Validation Tests
```bash
# Test all profiles generate valid configurations
docker compose --profile services-standard config --quiet
docker compose --profile services-playerbots config --quiet
docker compose --profile services-modules config --quiet
# Test actual deployment (non-destructive)
docker compose --profile services-standard up --dry-run
```
---
## Phase 3: Utility Function Libraries
**Priority: MEDIUM** | **Risk: MEDIUM** | **Impact: MEDIUM**
### Status
**Completed**: All three utility libraries created and tested
**Completed**: Integration with backup-import.sh as proof of concept
🔄 **Remaining**: Update remaining 14+ scripts to use new libraries
### Created Libraries
**✅ scripts/bash/lib/mysql-utils.sh** - COMPLETED
- MySQL connection management: `mysql_test_connection()`, `mysql_wait_for_connection()`
- Query execution: `mysql_exec_with_retry()`, `mysql_query()`, `docker_mysql_query()`
- Database utilities: `mysql_database_exists()`, `mysql_get_table_count()`
- Backup/restore: `mysql_backup_database()`, `mysql_restore_database()`
- Configuration: `mysql_validate_configuration()`, `mysql_print_configuration()`
**✅ scripts/bash/lib/docker-utils.sh** - COMPLETED
- Container management: `docker_get_container_status()`, `docker_wait_for_container_state()`
- Execution: `docker_exec_with_retry()`, `docker_is_container_running()`
- Project management: `docker_get_project_name()`, `docker_list_project_containers()`
- Image operations: `docker_get_container_image()`, `docker_pull_image_with_retry()`
- Compose integration: `docker_compose_validate()`, `docker_compose_deploy()`
- System utilities: `docker_check_daemon()`, `docker_cleanup_system()`
**✅ scripts/bash/lib/env-utils.sh** - COMPLETED
- Environment management: `env_read_with_fallback()`, `env_read_typed()`, `env_update_value()`
- Path utilities: `path_resolve_absolute()`, `file_ensure_writable_dir()`
- File operations: `file_create_backup()`, `file_set_permissions()`
- Configuration: `config_read_template_value()`, `config_validate_env()`
- System detection: `system_detect_os()`, `system_check_requirements()`
### Integration Status
**✅ Proof of Concept**: backup-import.sh updated with fallback compatibility
- Uses new utility functions when available
- Maintains backward compatibility with graceful fallbacks
- Tested and functional
### Remaining Implementation
**Step 3.4**: Update High-Priority Scripts
- backup-export.sh: Use mysql-utils and env-utils functions
- db-guard.sh: Use mysql-utils for database operations
- deploy-tools.sh: Use docker-utils for container management
- verify-deployment.sh: Use docker-utils for status checking
**Step 3.5**: Update Database Scripts
- db-health-check.sh: Use mysql-utils for health validation
- db-import-conditional.sh: Use mysql-utils and env-utils
- manual-backup.sh: Use mysql-utils backup functions
**Step 3.6**: Update Deployment Scripts
- migrate-stack.sh: Use docker-utils for remote operations
- stage-modules.sh: Use env-utils for path management
- rebuild-with-modules.sh: Use docker-utils for build operations
### Validation Tests - COMPLETED ✅
```bash
# Test MySQL utilities
source scripts/bash/lib/mysql-utils.sh
mysql_print_configuration # ✅ PASSED
# Test Docker utilities
source scripts/bash/lib/docker-utils.sh
docker_print_system_info # ✅ PASSED
# Test Environment utilities
source scripts/bash/lib/env-utils.sh
env_utils_validate # ✅ PASSED
# Test integrated script
./backup-import.sh --help # ✅ PASSED with new libraries
```
### Next Steps
- Continue with Step 3.4: Update backup-export.sh, db-guard.sh, deploy-tools.sh
- Implement progressive rollout with testing after each script update
- Complete remaining 11 scripts in dependency order
---
## Phase 4: Error Handling Standardization
**Priority: MEDIUM** | **Risk: LOW** | **Impact: MEDIUM**
### Analysis
**Current State**: Mixed error handling patterns across scripts
```bash
# Found patterns:
set -e # 45 scripts
set -euo pipefail # 23 scripts
set -eu # 8 scripts
(no error handling) # 12 scripts
```
### Implementation Plan
**Step 4.1**: Standardize Error Handling
- Add `set -euo pipefail` to all scripts where safe
- Add error traps for cleanup in critical scripts
- Implement consistent exit codes
**Step 4.2**: Add Script Validation Framework
- Create validation helper functions
- Add dependency checking to critical scripts
- Implement graceful degradation where possible
### Target Pattern
```bash
#!/bin/bash
set -euo pipefail
# Error handling setup
trap 'echo "❌ Error on line $LINENO" >&2' ERR
trap 'cleanup_on_exit' EXIT
# Source libraries with validation
source_lib_or_exit() {
local lib_path="$1"
if ! source "$lib_path" 2>/dev/null; then
echo "❌ FATAL: Cannot load $lib_path" >&2
exit 1
fi
}
```
---
## Phase 5: Configuration Template Consolidation
**Priority: LOW** | **Risk: LOW** | **Impact: LOW**
### Analysis
**Found**: 71 instances of duplicate color definitions across scripts
**Found**: Multiple .env template patterns that could be standardized
### Implementation Plan
**Step 5.1**: Color Definition Consolidation
- Ensure all scripts use lib/common.sh colors exclusively
- Remove remaining duplicate color definitions
- Add color theme support (optional)
**Step 5.2**: Configuration Template Cleanup
- Consolidate environment variable patterns
- Create shared configuration validation
- Standardize default value patterns
---
## Implementation Priority Order
### **Week 1: High Impact, Low Risk**
- [ ] Phase 1.1-1.2: Consolidate remaining root and backup scripts
- [ ] Phase 2.1: Complete worldserver YAML anchor consolidation
- [ ] Validate: All major scripts and Docker profiles work
### **Week 2: Complete Core Consolidation**
- [ ] Phase 1.3-1.4: Consolidate database and module scripts
- [ ] Phase 2.2-2.3: Complete database service and authserver consolidation
- [ ] Validate: Full deployment pipeline works end-to-end
### **Week 3: Utility Libraries**
- [ ] Phase 3.1: Create and implement MySQL utility library
- [ ] Phase 3.2: Create and implement Docker utility library
- [ ] Validate: Scripts using new libraries function correctly
### **Week 4: Polish and Standardization**
- [ ] Phase 3.3: Complete environment utility library
- [ ] Phase 4.1-4.2: Standardize error handling
- [ ] Phase 5.1-5.2: Final cleanup of colors and configs
- [ ] Validate: Complete system testing
---
## Validation Framework
### **Incremental Testing**
Each phase must pass these tests before proceeding:
**Script Functionality Tests:**
```bash
# Master scripts
./deploy.sh --help && ./build.sh --help && ./cleanup.sh --help
# Docker compose validation
docker compose config --quiet
# Profile validation
for profile in services-standard services-playerbots services-modules; do
docker compose --profile $profile config --quiet
done
```
**Integration Tests:**
```bash
# End-to-end validation (non-destructive)
./deploy.sh --profile services-standard --dry-run --no-watch
./scripts/bash/verify-deployment.sh --profile services-standard
```
**Regression Prevention:**
- Git commit after each completed phase
- Tag successful consolidations
- Maintain rollback procedures
---
## Risk Mitigation
### **Container Script Dependencies**
- **High Risk**: Scripts mounted into containers (db-guard.sh, backup-scheduler.sh)
- **Mitigation**: Test container mounting before consolidating
- **Validation**: Verify scripts work inside container environment
### **Remote Deployment Impact**
- **Medium Risk**: SSH deployment scripts (migrate-stack.sh)
- **Mitigation**: Test remote deployment on non-production host
- **Validation**: Verify remote script sourcing works correctly
### **Docker Compose Version Compatibility**
- **Medium Risk**: Advanced YAML anchors may not work on older versions
- **Mitigation**: Add version detection and warnings
- **Validation**: Test on minimum supported Docker Compose version
---
## Success Metrics
### **Quantitative Goals**
- Reduce duplicate logging functions from 14 → 0 scripts
- Reduce Docker compose file from ~1000 → ~600 lines
- Reduce color definitions from 71 → 1 centralized location
- Consolidate MySQL connection patterns from 22 → 1 library
### **Qualitative Goals**
- Single source of truth for common functionality
- Consistent user experience across all scripts
- Maintainable and extensible architecture
- Clear dependency relationships
- Robust error handling and validation
### **Completion Criteria**
- [ ] All scripts source centralized libraries exclusively
- [ ] No duplicate function definitions remain
- [ ] Docker compose uses YAML anchors for all repeated patterns
- [ ] Comprehensive test suite validates all functionality
- [ ] Documentation updated to reflect new architecture

View File

@@ -4,7 +4,7 @@
# AzerothCore RealmMaster # AzerothCore RealmMaster
A complete containerized deployment of AzerothCore WoW 3.3.5a (Wrath of the Lich King) private server with 93+ enhanced modules and intelligent automation. A complete containerized deployment of AzerothCore WoW 3.3.5a (Wrath of the Lich King) private server with **hundreds** of supported modules and intelligent automations to allow for easy setup, deployment and management.
## Table of Contents ## Table of Contents
@@ -23,10 +23,10 @@ A complete containerized deployment of AzerothCore WoW 3.3.5a (Wrath of the Lich
## Quick Start ## Quick Start
### Prerequisites ### Recommendations
- **Docker** with Docker Compose - **Docker** with Docker Compose 2
- **16GB+ RAM** and **32GB+ storage** - **16GB+ RAM** and **64GB+ storage**
- **Linux/macOS/WSL2** (Windows with WSL2 recommended) - **Linux/macOS/WSL2** Fully tested with Ubuntu 24.04 and Debian 12
### Three Simple Steps ### Three Simple Steps
@@ -50,17 +50,15 @@ See [Getting Started](#getting-started) for detailed walkthrough.
## What You Get ## What You Get
### ✅ Core Server Components ### ✅ Core Server Components
- **AzerothCore 3.3.5a** - WotLK server application with 93+ enhanced modules - **AzerothCore 3.3.5a** - WotLK server application with 348 modules in the manifest (221 currently supported)
- **MySQL 8.0** - Database with intelligent initialization and restoration - **MySQL 8.0** - Database with intelligent initialization and restoration
- **Smart Module System** - Automated module management and source builds - **Smart Module System** - Automated module management and source builds
- **phpMyAdmin** - Web-based database administration - **phpMyAdmin** - Web-based database administration
- **Keira3** - Game content editor and developer tools - **Keira3** - Game content editor and developer tools
### ✅ Automated Configuration ### ✅ Automated Configuration
- **Intelligent Database Setup** - Smart backup detection, restoration, and conditional schema import - **Intelligent Database Setup** - Smart backup detection, restoration, and conditional schema import (details in [docs/DATABASE_MANAGEMENT.md](docs/DATABASE_MANAGEMENT.md))
- **Restore Safety Checks** - The import job now validates the live MySQL runtime before honoring restore markers so stale tmpfs volumes can't trick automation into skipping a needed restore (see [docs/DATABASE_MANAGEMENT.md](docs/DATABASE_MANAGEMENT.md)) - **Restore-Aware Backups & SQL** - Restore-aware SQL staging and snapshot safety checks keep modules in sync after restores ([docs/DATABASE_MANAGEMENT.md](docs/DATABASE_MANAGEMENT.md))
- **Backup Management** - Automated hourly/daily backups with intelligent restoration
- **Restore-Aware Module SQL** - After a backup restore, the ledger snapshot from that backup is synced into shared storage and `stage-modules.sh` recopies every enabled SQL file into `/azerothcore/data/sql/updates/*` so the worldserver's built-in updater reapplies anything the database still needs (see [docs/DATABASE_MANAGEMENT.md](docs/DATABASE_MANAGEMENT.md))
- **Module Integration** - Automatic source builds when C++ modules are enabled - **Module Integration** - Automatic source builds when C++ modules are enabled
- **Service Orchestration** - Profile-based deployment (standard/playerbots/modules) - **Service Orchestration** - Profile-based deployment (standard/playerbots/modules)
@@ -79,7 +77,9 @@ For complete local and remote deployment guides, see **[docs/GETTING_STARTED.md]
## Complete Module Catalog ## Complete Module Catalog
Choose from **93+ enhanced modules** spanning automation, quality-of-life improvements, gameplay enhancements, PvP features, and more. All modules are automatically downloaded, configured, and integrated during deployment. Choose from **hundreds of enhanced modules** spanning automation, quality-of-life improvements, gameplay enhancements, PvP features, and more. The manifest contains 348 modules (221 marked supported/active); the default RealmMaster preset enables 33 that are exercised in testing. All modules are automatically downloaded, configured, and integrated during deployment when selected.
Want a shortcut? Use a preset (`RealmMaster`, `suggested-modules`, `playerbots-suggested-modules`, `azerothcore-vanilla`, `playerbots-only`, `all-modules`) from `config/module-profiles/`—see [docs/GETTING_STARTED.md#module-presets](docs/GETTING_STARTED.md#module-presets).
**Popular Categories:** **Popular Categories:**
- **Automation** - Playerbots, AI chat, level management - **Automation** - Playerbots, AI chat, level management
@@ -93,23 +93,13 @@ Browse the complete catalog with descriptions at **[docs/MODULES.md](docs/MODULE
## Custom NPCs Guide ## Custom NPCs Guide
The server includes **14 custom NPCs** providing enhanced functionality including profession training, enchantments, arena services, and more. All NPCs are spawnable through GM commands and designed for permanent placement. The server includes **14 custom NPCs** spanning services, buffs, PvP, and guild support. Full spawn commands, coordinates, and functions are in **[docs/NPCS.md](docs/NPCS.md)**.
**Available NPCs:**
- **Service NPCs** - Profession training, reagent banking, instance resets
- **Enhancement NPCs** - Enchanting, buffing, pet management, transmog
- **PvP NPCs** - 1v1 arena battlemaster
- **Guild House NPCs** - Property management and services
For complete spawn commands, coordinates, and functionality details, see **[docs/NPCS.md](docs/NPCS.md)**.
--- ---
## Management & Operations ## Management & Operations
For common workflows, management commands, and database operations, see **[docs/GETTING_STARTED.md](docs/GETTING_STARTED.md)**. For common workflows, management commands, and database operations, see **[docs/GETTING_STARTED.md](docs/GETTING_STARTED.md)**. For script details (including module manifest auto-sync), see **[docs/SCRIPTS.md](docs/SCRIPTS.md)**.
- Keep the module catalog current with `scripts/python/update_module_manifest.py` or trigger the scheduled **Sync Module Manifest** GitHub Action to auto-open a PR with the latest AzerothCore topic repos.
--- ---
@@ -149,10 +139,8 @@ This project builds upon:
-**Comprehensive Documentation** - Clear setup and troubleshooting guides -**Comprehensive Documentation** - Clear setup and troubleshooting guides
### Next Steps After Installation ### Next Steps After Installation
**Essential First Steps:**
1. **Create admin account**: `docker attach ac-worldserver` → `account create admin password` → `account set gmlevel admin 3 -1`
2. **Test your setup**: Connect with WoW 3.3.5a client using `set realmlist 127.0.0.1`
3. **Access web tools**: phpMyAdmin (port 8081) and Keira3 (port 4201)
**For detailed server administration, monitoring, backup configuration, and performance tuning, see [docs/GETTING_STARTED.md](docs/GETTING_STARTED.md).** **For detailed server administration, monitoring, backup configuration, and performance tuning, see [docs/GETTING_STARTED.md](docs/GETTING_STARTED.md).**
- **Create admin account** - Attach to worldserver and create a GM user (commands in **[docs/GETTING_STARTED.md#post-installation-steps](docs/GETTING_STARTED.md#post-installation-steps)**).
- **Point your client** - Update `realmlist.wtf` to your host/ports (defaults in the same section above).
- **Open services** - phpMyAdmin and Keira3 URLs/ports are listed in **[docs/GETTING_STARTED.md#post-installation-steps](docs/GETTING_STARTED.md#post-installation-steps)**.

View File

@@ -9,6 +9,13 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_PATH="$ROOT_DIR/.env" ENV_PATH="$ROOT_DIR/.env"
TEMPLATE_PATH="$ROOT_DIR/.env.template" TEMPLATE_PATH="$ROOT_DIR/.env.template"
# Source common library with proper error handling
if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
echo "This library is required for build.sh to function." >&2
exit 1
fi
source "$ROOT_DIR/scripts/bash/project_name.sh" source "$ROOT_DIR/scripts/bash/project_name.sh"
# Default project name (read from .env or template) # Default project name (read from .env or template)
@@ -17,11 +24,7 @@ ASSUME_YES=0
FORCE_REBUILD=0 FORCE_REBUILD=0
SKIP_SOURCE_SETUP=0 SKIP_SOURCE_SETUP=0
CUSTOM_SOURCE_PATH="" CUSTOM_SOURCE_PATH=""
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m' # Color definitions and logging functions now provided by lib/common.sh
info(){ printf '%b\n' "${BLUE} $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}$*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}$*${NC}"; }
show_build_header(){ show_build_header(){
printf '\n%b\n' "${BLUE}🔨 AZEROTHCORE BUILD SYSTEM 🔨${NC}" printf '\n%b\n' "${BLUE}🔨 AZEROTHCORE BUILD SYSTEM 🔨${NC}"
@@ -137,11 +140,18 @@ generate_module_state(){
# Check if blocked modules were detected in warnings # Check if blocked modules were detected in warnings
if echo "$validation_output" | grep -q "is blocked:"; then if echo "$validation_output" | grep -q "is blocked:"; then
# Gather blocked module keys for display
local blocked_modules
blocked_modules=$(echo "$validation_output" | grep -oE 'MODULE_[A-Za-z0-9_]+' | sort -u | tr '\n' ' ')
# Blocked modules detected - show warning and ask for confirmation # Blocked modules detected - show warning and ask for confirmation
echo echo
warn "════════════════════════════════════════════════════════════════" warn "════════════════════════════════════════════════════════════════"
warn "⚠️ BLOCKED MODULES DETECTED ⚠️" warn "⚠️ BLOCKED MODULES DETECTED ⚠️"
warn "════════════════════════════════════════════════════════════════" warn "════════════════════════════════════════════════════════════════"
if [ -n "$blocked_modules" ]; then
warn "Affected modules: ${blocked_modules}"
fi
warn "Some enabled modules are marked as blocked due to compatibility" warn "Some enabled modules are marked as blocked due to compatibility"
warn "issues. These modules will be SKIPPED during the build process." warn "issues. These modules will be SKIPPED during the build process."
warn "" warn ""

View File

@@ -7,6 +7,12 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$SCRIPT_DIR" PROJECT_ROOT="$SCRIPT_DIR"
cd "$PROJECT_ROOT" cd "$PROJECT_ROOT"
# Source common library for standardized logging
if ! source "$SCRIPT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $SCRIPT_DIR/scripts/bash/lib/common.sh" >&2
exit 1
fi
# Load environment configuration (available on deployed servers) # Load environment configuration (available on deployed servers)
if [ -f ".env" ]; then if [ -f ".env" ]; then
set -a set -a
@@ -20,11 +26,10 @@ OUTPUT_DIR="${CHANGELOG_OUTPUT_DIR:-./changelogs}"
DAYS_BACK="${CHANGELOG_DAYS_BACK:-7}" DAYS_BACK="${CHANGELOG_DAYS_BACK:-7}"
FORMAT="${CHANGELOG_FORMAT:-markdown}" FORMAT="${CHANGELOG_FORMAT:-markdown}"
# Colors for output # Specialized logging with timestamp for changelog context
GREEN='\033[0;32m'; BLUE='\033[0;34m'; YELLOW='\033[1;33m'; NC='\033[0m' log() { info "[$(date '+%H:%M:%S')] $*"; }
log() { echo -e "${BLUE}[$(date '+%H:%M:%S')]${NC} $*" >&2; } success() { ok "$*"; }
success() { echo -e "${GREEN}${NC} $*" >&2; } # warn() function already provided by lib/common.sh
warn() { echo -e "${YELLOW}⚠️${NC} $*" >&2; }
usage() { usage() {
cat <<EOF cat <<EOF

View File

@@ -14,6 +14,13 @@ PROJECT_DIR="${SCRIPT_DIR}"
DEFAULT_COMPOSE_FILE="${PROJECT_DIR}/docker-compose.yml" DEFAULT_COMPOSE_FILE="${PROJECT_DIR}/docker-compose.yml"
ENV_FILE="${PROJECT_DIR}/.env" ENV_FILE="${PROJECT_DIR}/.env"
TEMPLATE_FILE="${PROJECT_DIR}/.env.template" TEMPLATE_FILE="${PROJECT_DIR}/.env.template"
# Source common library with proper error handling
if ! source "${PROJECT_DIR}/scripts/bash/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load ${PROJECT_DIR}/scripts/bash/lib/common.sh" >&2
echo "This library is required for cleanup.sh to function." >&2
exit 1
fi
source "${PROJECT_DIR}/scripts/bash/project_name.sh" source "${PROJECT_DIR}/scripts/bash/project_name.sh"
# Default project name (read from .env or template) # Default project name (read from .env or template)
@@ -21,17 +28,16 @@ DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
source "${PROJECT_DIR}/scripts/bash/compose_overrides.sh" source "${PROJECT_DIR}/scripts/bash/compose_overrides.sh"
declare -a COMPOSE_FILE_ARGS=() declare -a COMPOSE_FILE_ARGS=()
# Colors # Color definitions now provided by lib/common.sh
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; BLUE='\033[0;34m'; MAGENTA='\033[0;35m'; NC='\033[0m' # Legacy print_status function for cleanup.sh compatibility
print_status() { print_status() {
case "$1" in case "$1" in
INFO) echo -e "${BLUE} ${2}${NC}";; INFO) info "${2}";;
SUCCESS) echo -e "${GREEN}${2}${NC}";; SUCCESS) ok "${2}";;
WARNING) echo -e "${YELLOW}⚠️ ${2}${NC}";; WARNING) warn "${2}";;
ERROR) echo -e "${RED}${2}${NC}";; ERROR) err "${2}";;
DANGER) echo -e "${RED}💀 ${2}${NC}";; DANGER) printf '%b\n' "${RED}💀 ${2}${NC}";;
HEADER) echo -e "\n${MAGENTA}=== ${2} ===${NC}";; HEADER) printf '\n%b\n' "${CYAN}=== ${2} ===${NC}";;
esac esac
} }

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,6 @@
"MODULE_ACCOUNT_ACHIEVEMENTS", "MODULE_ACCOUNT_ACHIEVEMENTS",
"MODULE_AUTO_REVIVE", "MODULE_AUTO_REVIVE",
"MODULE_GAIN_HONOR_GUARD", "MODULE_GAIN_HONOR_GUARD",
"MODULE_ELUNA",
"MODULE_TIME_IS_TIME", "MODULE_TIME_IS_TIME",
"MODULE_RANDOM_ENCHANTS", "MODULE_RANDOM_ENCHANTS",
"MODULE_SOLOCRAFT", "MODULE_SOLOCRAFT",
@@ -24,6 +23,7 @@
"MODULE_REAGENT_BANK", "MODULE_REAGENT_BANK",
"MODULE_BLACK_MARKET_AUCTION_HOUSE", "MODULE_BLACK_MARKET_AUCTION_HOUSE",
"MODULE_ELUNA_TS", "MODULE_ELUNA_TS",
"MODULE_ELUNA",
"MODULE_AIO", "MODULE_AIO",
"MODULE_ELUNA_SCRIPTS", "MODULE_ELUNA_SCRIPTS",
"MODULE_EVENT_SCRIPTS", "MODULE_EVENT_SCRIPTS",
@@ -34,7 +34,7 @@
"MODULE_ITEM_LEVEL_UP", "MODULE_ITEM_LEVEL_UP",
"MODULE_GLOBAL_CHAT" "MODULE_GLOBAL_CHAT"
], ],
"label": "\ud83e\udde9 Sam", "label": "\ud83e\udde9 RealmMaster",
"description": "Sam's playerbot-centric preset (use high bot counts)", "description": "RealmMaster suggested build (33 enabled modules)",
"order": 7 "order": 0
} }

View File

@@ -342,6 +342,6 @@
"MODULE_CLASSIC_MODE" "MODULE_CLASSIC_MODE"
], ],
"label": "\ud83e\udde9 All Modules", "label": "\ud83e\udde9 All Modules",
"description": "Enable every optional module in the repository", "description": "Enable every optional module in the repository - NOT RECOMMENDED",
"order": 5 "order": 7
} }

View File

@@ -0,0 +1,8 @@
{
"modules": [
],
"label": "\u2b50 AzerothCore Main - Mod Free",
"description": "Pure AzerothCore with no optional modules enabled",
"order": 3
}

View File

@@ -6,5 +6,5 @@
], ],
"label": "\ud83e\udde9 Playerbots Only", "label": "\ud83e\udde9 Playerbots Only",
"description": "Minimal preset that only enables playerbot prerequisites", "description": "Minimal preset that only enables playerbot prerequisites",
"order": 6 "order": 4
} }

View File

@@ -7,9 +7,12 @@
"MODULE_TRANSMOG", "MODULE_TRANSMOG",
"MODULE_NPC_BUFFER", "MODULE_NPC_BUFFER",
"MODULE_LEARN_SPELLS", "MODULE_LEARN_SPELLS",
"MODULE_FIREWORKS" "MODULE_FIREWORKS",
"MODULE_ELUNA_TS",
"MODULE_ELUNA",
"MODULE_AIO"
], ],
"label": "\ud83e\udd16 Playerbots + Suggested modules", "label": "\ud83e\udd16 Playerbots + Suggested modules",
"description": "Suggested stack plus playerbots enabled", "description": "Suggested stack plus playerbots enabled",
"order": 2 "order": 1
} }

View File

@@ -1,6 +1,8 @@
{ {
"modules": [ "modules": [
"MODULE_ELUNA_TS",
"MODULE_ELUNA", "MODULE_ELUNA",
"MODULE_AIO",
"MODULE_SOLO_LFG", "MODULE_SOLO_LFG",
"MODULE_SOLOCRAFT", "MODULE_SOLOCRAFT",
"MODULE_AUTOBALANCE", "MODULE_AUTOBALANCE",
@@ -10,6 +12,6 @@
"MODULE_FIREWORKS" "MODULE_FIREWORKS"
], ],
"label": "\u2b50 Suggested Modules", "label": "\u2b50 Suggested Modules",
"description": "Baseline solo-friendly quality of life mix", "description": "Baseline solo-friendly quality of life mix (no playerbots)",
"order": 1 "order": 2
} }

136
deploy.sh
View File

@@ -12,6 +12,13 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml" DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
ENV_PATH="$ROOT_DIR/.env" ENV_PATH="$ROOT_DIR/.env"
TEMPLATE_PATH="$ROOT_DIR/.env.template" TEMPLATE_PATH="$ROOT_DIR/.env.template"
# Source common library with proper error handling
if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
echo "This library is required for deploy.sh to function." >&2
exit 1
fi
source "$ROOT_DIR/scripts/bash/project_name.sh" source "$ROOT_DIR/scripts/bash/project_name.sh"
# Default project name (read from .env or template) # Default project name (read from .env or template)
@@ -34,18 +41,19 @@ REMOTE_SKIP_STORAGE=0
REMOTE_COPY_SOURCE=0 REMOTE_COPY_SOURCE=0
REMOTE_ARGS_PROVIDED=0 REMOTE_ARGS_PROVIDED=0
REMOTE_AUTO_DEPLOY=0 REMOTE_AUTO_DEPLOY=0
REMOTE_AUTO_DEPLOY=0 REMOTE_CLEAN_CONTAINERS=0
REMOTE_STORAGE_OVERRIDE=""
REMOTE_CONTAINER_USER_OVERRIDE=""
REMOTE_ENV_FILE=""
REMOTE_SKIP_ENV=0
REMOTE_PRESERVE_CONTAINERS=0
MODULE_HELPER="$ROOT_DIR/scripts/python/modules.py" MODULE_HELPER="$ROOT_DIR/scripts/python/modules.py"
MODULE_STATE_INITIALIZED=0 MODULE_STATE_INITIALIZED=0
declare -a MODULES_COMPILE_LIST=() declare -a MODULES_COMPILE_LIST=()
declare -a COMPOSE_FILE_ARGS=() declare -a COMPOSE_FILE_ARGS=()
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m' # Color definitions and logging functions now provided by lib/common.sh
info(){ printf '%b\n' "${BLUE} $*${NC}"; }
ok(){ printf '%b\n' "${GREEN}$*${NC}"; }
warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
err(){ printf '%b\n' "${RED}$*${NC}"; }
show_deployment_header(){ show_deployment_header(){
printf '\n%b\n' "${BLUE}⚔️ AZEROTHCORE REALM DEPLOYMENT ⚔️${NC}" printf '\n%b\n' "${BLUE}⚔️ AZEROTHCORE REALM DEPLOYMENT ⚔️${NC}"
@@ -164,6 +172,43 @@ collect_remote_details(){
*) REMOTE_SKIP_STORAGE=0 ;; *) REMOTE_SKIP_STORAGE=0 ;;
esac esac
fi fi
if [ "$interactive" -eq 1 ] && [ "$REMOTE_ARGS_PROVIDED" -eq 0 ]; then
local cleanup_answer
read -rp "Stop/remove remote containers & project images during migration? [y/N]: " cleanup_answer
cleanup_answer="${cleanup_answer:-n}"
case "${cleanup_answer,,}" in
y|yes) REMOTE_CLEAN_CONTAINERS=1 ;;
*)
REMOTE_CLEAN_CONTAINERS=0
# Offer explicit preservation when declining cleanup
local preserve_answer
read -rp "Preserve remote containers/images (skip cleanup)? [Y/n]: " preserve_answer
preserve_answer="${preserve_answer:-Y}"
case "${preserve_answer,,}" in
n|no) REMOTE_PRESERVE_CONTAINERS=0 ;;
*) REMOTE_PRESERVE_CONTAINERS=1 ;;
esac
;;
esac
fi
# Optional remote env overrides (default to current values)
local storage_default container_user_default
storage_default="$(read_env STORAGE_PATH "./storage")"
container_user_default="$(read_env CONTAINER_USER "$(id -u):$(id -g)")"
if [ -z "$REMOTE_STORAGE_OVERRIDE" ] && [ "$interactive" -eq 1 ]; then
local storage_input
read -rp "Remote storage path (STORAGE_PATH) [${storage_default}]: " storage_input
REMOTE_STORAGE_OVERRIDE="${storage_input:-$storage_default}"
fi
if [ -z "$REMOTE_CONTAINER_USER_OVERRIDE" ] && [ "$interactive" -eq 1 ]; then
local cu_input
read -rp "Remote container user (CONTAINER_USER) [${container_user_default}]: " cu_input
REMOTE_CONTAINER_USER_OVERRIDE="${cu_input:-$container_user_default}"
fi
} }
validate_remote_configuration(){ validate_remote_configuration(){
@@ -220,6 +265,11 @@ Options:
--remote-skip-storage Skip syncing the storage directory during migration --remote-skip-storage Skip syncing the storage directory during migration
--remote-copy-source Copy the local project directory to remote instead of relying on git --remote-copy-source Copy the local project directory to remote instead of relying on git
--remote-auto-deploy Run './deploy.sh --yes --no-watch' on the remote host after migration --remote-auto-deploy Run './deploy.sh --yes --no-watch' on the remote host after migration
--remote-clean-containers Stop/remove remote containers & project images during migration
--remote-storage-path PATH Override STORAGE_PATH/STORAGE_PATH_LOCAL in the remote .env
--remote-container-user USER[:GROUP] Override CONTAINER_USER in the remote .env
--remote-skip-env Do not upload .env to the remote host
--remote-preserve-containers Skip stopping/removing remote containers during migration
--skip-config Skip applying server configuration preset --skip-config Skip applying server configuration preset
-h, --help Show this help -h, --help Show this help
@@ -248,12 +298,22 @@ while [[ $# -gt 0 ]]; do
--remote-skip-storage) REMOTE_SKIP_STORAGE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;; --remote-skip-storage) REMOTE_SKIP_STORAGE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-copy-source) REMOTE_COPY_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;; --remote-copy-source) REMOTE_COPY_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-auto-deploy) REMOTE_AUTO_DEPLOY=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;; --remote-auto-deploy) REMOTE_AUTO_DEPLOY=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-clean-containers) REMOTE_CLEAN_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-storage-path) REMOTE_STORAGE_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
--remote-container-user) REMOTE_CONTAINER_USER_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
--remote-skip-env) REMOTE_SKIP_ENV=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--remote-preserve-containers) REMOTE_PRESERVE_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
--skip-config) SKIP_CONFIG=1; shift;; --skip-config) SKIP_CONFIG=1; shift;;
-h|--help) usage; exit 0;; -h|--help) usage; exit 0;;
*) err "Unknown option: $1"; usage; exit 1;; *) err "Unknown option: $1"; usage; exit 1;;
esac esac
done done
if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ] && [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
err "Cannot combine --remote-clean-containers with --remote-preserve-containers."
exit 1
fi
require_cmd(){ require_cmd(){
command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; } command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; }
} }
@@ -515,6 +575,27 @@ prompt_build_if_needed(){
local build_reasons_output local build_reasons_output
build_reasons_output=$(detect_build_needed) build_reasons_output=$(detect_build_needed)
if [ -z "$build_reasons_output" ]; then
# Belt-and-suspenders: if C++ modules are enabled but module images missing, warn
ensure_module_state
if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
local authserver_modules_image
local worldserver_modules_image
authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(resolve_project_image "authserver-modules-latest")")"
worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")"
local missing_images=()
if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then
missing_images+=("$authserver_modules_image")
fi
if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then
missing_images+=("$worldserver_modules_image")
fi
if [ ${#missing_images[@]} -gt 0 ]; then
build_reasons_output=$(printf "C++ modules enabled but module images missing: %s\n" "${missing_images[*]}")
fi
fi
fi
if [ -z "$build_reasons_output" ]; then if [ -z "$build_reasons_output" ]; then
return 0 # No build needed return 0 # No build needed
fi fi
@@ -607,6 +688,33 @@ determine_profile(){
} }
run_remote_migration(){ run_remote_migration(){
if [ -z "$REMOTE_ENV_FILE" ] && { [ -n "$REMOTE_STORAGE_OVERRIDE" ] || [ -n "$REMOTE_CONTAINER_USER_OVERRIDE" ]; }; then
local base_env=""
if [ -f "$ENV_PATH" ]; then
base_env="$ENV_PATH"
elif [ -f "$TEMPLATE_PATH" ]; then
base_env="$TEMPLATE_PATH"
fi
REMOTE_ENV_FILE="$(mktemp)"
if [ -n "$base_env" ]; then
cp "$base_env" "$REMOTE_ENV_FILE"
else
: > "$REMOTE_ENV_FILE"
fi
if [ -n "$REMOTE_STORAGE_OVERRIDE" ]; then
{
echo
echo "STORAGE_PATH=$REMOTE_STORAGE_OVERRIDE"
} >>"$REMOTE_ENV_FILE"
fi
if [ -n "$REMOTE_CONTAINER_USER_OVERRIDE" ]; then
{
echo
echo "CONTAINER_USER=$REMOTE_CONTAINER_USER_OVERRIDE"
} >>"$REMOTE_ENV_FILE"
fi
fi
local args=(--host "$REMOTE_HOST" --user "$REMOTE_USER") local args=(--host "$REMOTE_HOST" --user "$REMOTE_USER")
if [ -n "$REMOTE_PORT" ] && [ "$REMOTE_PORT" != "22" ]; then if [ -n "$REMOTE_PORT" ] && [ "$REMOTE_PORT" != "22" ]; then
@@ -629,10 +737,26 @@ run_remote_migration(){
args+=(--copy-source) args+=(--copy-source)
fi fi
if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ]; then
args+=(--clean-containers)
fi
if [ "$ASSUME_YES" -eq 1 ]; then if [ "$ASSUME_YES" -eq 1 ]; then
args+=(--yes) args+=(--yes)
fi fi
if [ "$REMOTE_SKIP_ENV" -eq 1 ]; then
args+=(--skip-env)
fi
if [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
args+=(--preserve-containers)
fi
if [ -n "$REMOTE_ENV_FILE" ]; then
args+=(--env-file "$REMOTE_ENV_FILE")
fi
(cd "$ROOT_DIR" && ./scripts/bash/migrate-stack.sh "${args[@]}") (cd "$ROOT_DIR" && ./scripts/bash/migrate-stack.sh "${args[@]}")
} }

View File

@@ -1,4 +1,110 @@
name: ${COMPOSE_PROJECT_NAME} name: ${COMPOSE_PROJECT_NAME}
# =============================================================================
# YAML ANCHORS - Shared Configuration Templates
# =============================================================================
x-logging: &logging-default
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Common database connection environment variables
x-database-config: &database-config
CONTAINER_MYSQL: ${CONTAINER_MYSQL}
MYSQL_PORT: ${MYSQL_PORT}
MYSQL_USER: ${MYSQL_USER}
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
DB_AUTH_NAME: ${DB_AUTH_NAME}
DB_WORLD_NAME: ${DB_WORLD_NAME}
DB_CHARACTERS_NAME: ${DB_CHARACTERS_NAME}
DB_RECONNECT_SECONDS: ${DB_RECONNECT_SECONDS}
DB_RECONNECT_ATTEMPTS: ${DB_RECONNECT_ATTEMPTS}
# AzerothCore database connection strings
x-azerothcore-databases: &azerothcore-databases
AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
# Common storage volume mounts
x-storage-volumes: &storage-volumes
- ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
- ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
- ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
# Authserver common configuration
x-authserver-common: &authserver-common
user: "${CONTAINER_USER}"
environment:
<<: *azerothcore-databases
AC_UPDATES_ENABLE_DATABASES: "0"
AC_BIND_IP: "0.0.0.0"
AC_LOG_LEVEL: "1"
AC_LOGGER_ROOT_CONFIG: "1,Console"
AC_LOGGER_SERVER_CONFIG: "1,Console"
AC_APPENDER_CONSOLE_CONFIG: "1,2,0"
volumes: *storage-volumes
ports:
- "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
restart: unless-stopped
logging: *logging-default
networks:
- azerothcore
cap_add: ["SYS_NICE"]
healthcheck: &auth-healthcheck
test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
interval: ${AUTH_HEALTHCHECK_INTERVAL}
timeout: ${AUTH_HEALTHCHECK_TIMEOUT}
retries: ${AUTH_HEALTHCHECK_RETRIES}
start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
# Worldserver common configuration
x-worldserver-common: &worldserver-common
user: "${CONTAINER_USER}"
stdin_open: true
tty: true
environment:
<<: *azerothcore-databases
AC_UPDATES_ENABLE_DATABASES: "7"
AC_BIND_IP: "0.0.0.0"
AC_DATA_DIR: "/azerothcore/data"
AC_SOAP_PORT: "${SOAP_PORT}"
AC_PROCESS_PRIORITY: "0"
AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}"
AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}"
AC_ELUNA_AUTO_RELOAD: "${AC_ELUNA_AUTO_RELOAD}"
AC_ELUNA_BYTECODE_CACHE: "${AC_ELUNA_BYTECODE_CACHE}"
AC_ELUNA_SCRIPT_PATH: "${AC_ELUNA_SCRIPT_PATH}"
AC_ELUNA_REQUIRE_PATHS: "${AC_ELUNA_REQUIRE_PATHS}"
AC_ELUNA_REQUIRE_CPATHS: "${AC_ELUNA_REQUIRE_CPATHS}"
AC_ELUNA_AUTO_RELOAD_INTERVAL: "${AC_ELUNA_AUTO_RELOAD_INTERVAL}"
PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}"
PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
AC_LOG_LEVEL: "2"
ports:
- "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
- "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
- ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
- ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
- ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
- ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/azerothcore/modules
- ${STORAGE_LUA_SCRIPTS_PATH:-${STORAGE_PATH}/lua_scripts}:/azerothcore/lua_scripts
restart: unless-stopped
logging: *logging-default
networks:
- azerothcore
cap_add: ["SYS_NICE"]
healthcheck: &world-healthcheck
test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
interval: ${WORLD_HEALTHCHECK_INTERVAL}
timeout: ${WORLD_HEALTHCHECK_TIMEOUT}
retries: ${WORLD_HEALTHCHECK_RETRIES}
start_period: ${WORLD_HEALTHCHECK_START_PERIOD}
services: services:
# ===================== # =====================
# Database Layer (db) # Database Layer (db)
@@ -26,7 +132,7 @@ services:
- mysql-data:/var/lib/mysql-persistent - mysql-data:/var/lib/mysql-persistent
- ${BACKUP_PATH}:/backups - ${BACKUP_PATH}:/backups
- ${HOST_ZONEINFO_PATH}:/usr/share/zoneinfo:ro - ${HOST_ZONEINFO_PATH}:/usr/share/zoneinfo:ro
- ${MYSQL_CONFIG_DIR:-${STORAGE_PATH}/config/mysql/conf.d}:/etc/mysql/conf.d - ${MYSQL_CONFIG_DIR:-${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}/mysql/conf.d}:/etc/mysql/conf.d
tmpfs: tmpfs:
- /var/lib/mysql-runtime:size=${MYSQL_RUNTIME_TMPFS_SIZE} - /var/lib/mysql-runtime:size=${MYSQL_RUNTIME_TMPFS_SIZE}
command: command:
@@ -40,7 +146,7 @@ services:
- --innodb-log-file-size=${MYSQL_INNODB_LOG_FILE_SIZE} - --innodb-log-file-size=${MYSQL_INNODB_LOG_FILE_SIZE}
- --innodb-redo-log-capacity=${MYSQL_INNODB_REDO_LOG_CAPACITY} - --innodb-redo-log-capacity=${MYSQL_INNODB_REDO_LOG_CAPACITY}
restart: unless-stopped restart: unless-stopped
logging: logging: *logging-default
healthcheck: healthcheck:
test: ["CMD", "sh", "-c", "mysqladmin ping -h localhost -u ${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} --silent || exit 1"] test: ["CMD", "sh", "-c", "mysqladmin ping -h localhost -u ${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} --silent || exit 1"]
interval: ${MYSQL_HEALTHCHECK_INTERVAL} interval: ${MYSQL_HEALTHCHECK_INTERVAL}
@@ -64,14 +170,16 @@ services:
networks: networks:
- azerothcore - azerothcore
volumes: volumes:
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
- ${STORAGE_PATH}/logs:/azerothcore/logs - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
- ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
- ${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH}/module-sql-updates}:/modules-sql - ${AC_SQL_SOURCE_PATH:-${STORAGE_LOCAL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source}/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
- ${STAGE_PATH_MODULE_SQL:-${STORAGE_MODULE_SQL_PATH:-${STORAGE_PATH}/module-sql-updates}}:/modules-sql
- mysql-data:/var/lib/mysql-persistent - mysql-data:/var/lib/mysql-persistent
- ${STORAGE_PATH}/modules:/modules - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/modules
- ${BACKUP_PATH}:/backups - ${BACKUP_PATH}:/backups
- ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
- ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
- ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro - ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro
environment: environment:
AC_DATA_DIR: "/azerothcore/data" AC_DATA_DIR: "/azerothcore/data"
@@ -128,14 +236,16 @@ services:
networks: networks:
- azerothcore - azerothcore
volumes: volumes:
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
- ${STORAGE_PATH}/logs:/azerothcore/logs - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
- ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
- ${MODULE_SQL_STAGE_PATH:-${STORAGE_PATH}/module-sql-updates}:/modules-sql - ${AC_SQL_SOURCE_PATH:-${STORAGE_LOCAL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source}/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
- ${STAGE_PATH_MODULE_SQL:-${STORAGE_MODULE_SQL_PATH:-${STORAGE_PATH}/module-sql-updates}}:/modules-sql
- mysql-data:/var/lib/mysql-persistent - mysql-data:/var/lib/mysql-persistent
- ${STORAGE_PATH}/modules:/modules - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/modules
- ${BACKUP_PATH}:/backups - ${BACKUP_PATH}:/backups
- ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
- ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
- ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro - ./scripts/bash/restore-and-stage.sh:/tmp/restore-and-stage.sh:ro
- ./scripts/bash/db-guard.sh:/tmp/db-guard.sh:ro - ./scripts/bash/db-guard.sh:/tmp/db-guard.sh:ro
environment: environment:
@@ -258,7 +368,7 @@ services:
CONTAINER_USER: ${CONTAINER_USER} CONTAINER_USER: ${CONTAINER_USER}
volumes: volumes:
- ${BACKUP_PATH}:/backups - ${BACKUP_PATH}:/backups
- ${STORAGE_PATH}/modules/.modules-meta:/modules-meta:ro - ${STORAGE_MODULES_META_PATH:-${STORAGE_PATH}/modules/.modules-meta}:/modules-meta:ro
- ./scripts:/tmp/scripts:ro - ./scripts:/tmp/scripts:ro
working_dir: /tmp working_dir: /tmp
command: command:
@@ -325,9 +435,9 @@ services:
profiles: ["client-data", "client-data-bots"] profiles: ["client-data", "client-data-bots"]
image: ${ALPINE_IMAGE} image: ${ALPINE_IMAGE}
container_name: ac-volume-init container_name: ac-volume-init
user: "${CONTAINER_USER}" user: "0:0"
volumes: volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
- client-data-cache:/cache - client-data-cache:/cache
command: command:
- sh - sh
@@ -351,10 +461,20 @@ services:
profiles: ["db", "modules"] profiles: ["db", "modules"]
image: ${ALPINE_IMAGE} image: ${ALPINE_IMAGE}
container_name: ac-storage-init container_name: ac-storage-init
user: "${CONTAINER_USER}" user: "0:0"
volumes: volumes:
- ${STORAGE_PATH}:/storage-root - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/storage-root/config
- ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/storage-root/logs
- ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/storage-root/modules
- ${STORAGE_LUA_SCRIPTS_PATH:-${STORAGE_PATH}/lua_scripts}:/storage-root/lua_scripts
- ${STORAGE_INSTALL_MARKERS_PATH:-${STORAGE_PATH}/install-markers}:/storage-root/install-markers
- ${STORAGE_MODULE_SQL_PATH:-${STORAGE_PATH}/module-sql-updates}:/storage-root/module-sql-updates
- ${STORAGE_MODULES_META_PATH:-${STORAGE_PATH}/modules/.modules-meta}:/storage-root/modules/.modules-meta
- ${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/storage-root/client-data
- ${BACKUP_PATH}:/storage-root/backups
- ${STORAGE_PATH_LOCAL}:/local-storage-root - ${STORAGE_PATH_LOCAL}:/local-storage-root
- ${STORAGE_LOCAL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source}:/local-storage-root/source
- ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
command: command:
- sh - sh
- -c - -c
@@ -362,13 +482,51 @@ services:
echo "🔧 Initializing storage directories with proper permissions..." echo "🔧 Initializing storage directories with proper permissions..."
mkdir -p /storage-root/config /storage-root/logs /storage-root/modules /storage-root/lua_scripts /storage-root/install-markers mkdir -p /storage-root/config /storage-root/logs /storage-root/modules /storage-root/lua_scripts /storage-root/install-markers
mkdir -p /storage-root/config/mysql/conf.d mkdir -p /storage-root/config/mysql/conf.d
mkdir -p /storage-root/module-sql-updates /storage-root/modules/.modules-meta
mkdir -p /storage-root/client-data mkdir -p /storage-root/client-data
mkdir -p /storage-root/backups mkdir -p /storage-root/backups
# Copy core config files if they don't exist
if [ -f "/local-storage-root/source/azerothcore-playerbots/src/tools/dbimport/dbimport.conf.dist" ] && [ ! -f "/storage-root/config/dbimport.conf.dist" ]; then # Copy core AzerothCore config template files (.dist) to config directory
echo "📄 Copying dbimport.conf.dist..." echo "📄 Copying AzerothCore configuration templates..."
cp /local-storage-root/source/azerothcore-playerbots/src/tools/dbimport/dbimport.conf.dist /storage-root/config/ SOURCE_DIR="${SOURCE_DIR:-/local-storage-root/source/azerothcore-playerbots}"
if [ ! -d "$SOURCE_DIR" ] && [ -d "/local-storage-root/source/azerothcore-wotlk" ]; then
SOURCE_DIR="/local-storage-root/source/azerothcore-wotlk"
fi fi
# Seed dbimport.conf with a shared helper (fallback to a simple copy if missing)
if [ -f "/tmp/seed-dbimport-conf.sh" ]; then
echo "🧩 Seeding dbimport.conf"
DBIMPORT_CONF_DIR="/storage-root/config" \
DBIMPORT_SOURCE_ROOT="$SOURCE_DIR" \
sh -c '. /tmp/seed-dbimport-conf.sh && seed_dbimport_conf' || true
else
if [ -f "$SOURCE_DIR/src/tools/dbimport/dbimport.conf.dist" ]; then
cp -n "$SOURCE_DIR/src/tools/dbimport/dbimport.conf.dist" /storage-root/config/ 2>/dev/null || true
if [ ! -f "/storage-root/config/dbimport.conf" ]; then
cp "$SOURCE_DIR/src/tools/dbimport/dbimport.conf.dist" /storage-root/config/dbimport.conf
echo " ✓ Created dbimport.conf"
fi
fi
fi
# Copy authserver.conf.dist
if [ -f "$SOURCE_DIR/env/dist/etc/authserver.conf.dist" ]; then
cp -n "$SOURCE_DIR/env/dist/etc/authserver.conf.dist" /storage-root/config/ 2>/dev/null || true
if [ ! -f "/storage-root/config/authserver.conf" ]; then
cp "$SOURCE_DIR/env/dist/etc/authserver.conf.dist" /storage-root/config/authserver.conf
echo " ✓ Created authserver.conf"
fi
fi
# Copy worldserver.conf.dist
if [ -f "$SOURCE_DIR/env/dist/etc/worldserver.conf.dist" ]; then
cp -n "$SOURCE_DIR/env/dist/etc/worldserver.conf.dist" /storage-root/config/ 2>/dev/null || true
if [ ! -f "/storage-root/config/worldserver.conf" ]; then
cp "$SOURCE_DIR/env/dist/etc/worldserver.conf.dist" /storage-root/config/worldserver.conf
echo " ✓ Created worldserver.conf"
fi
fi
mkdir -p /storage-root/config/temp
# Fix ownership of root directories and all contents # Fix ownership of root directories and all contents
if [ "$(id -u)" -eq 0 ]; then if [ "$(id -u)" -eq 0 ]; then
chown -R ${CONTAINER_USER} /storage-root /local-storage-root chown -R ${CONTAINER_USER} /storage-root /local-storage-root
@@ -393,7 +551,7 @@ services:
ac-volume-init: ac-volume-init:
condition: service_completed_successfully condition: service_completed_successfully
volumes: volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
- client-data-cache:/cache - client-data-cache:/cache
- ./scripts:/tmp/scripts:ro - ./scripts:/tmp/scripts:ro
working_dir: /tmp working_dir: /tmp
@@ -424,7 +582,7 @@ services:
ac-volume-init: ac-volume-init:
condition: service_completed_successfully condition: service_completed_successfully
volumes: volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
- client-data-cache:/cache - client-data-cache:/cache
- ./scripts:/tmp/scripts:ro - ./scripts:/tmp/scripts:ro
working_dir: /tmp working_dir: /tmp
@@ -456,10 +614,10 @@ services:
# Services - Standard (services-standard) # Services - Standard (services-standard)
# ===================== # =====================
ac-authserver-standard: ac-authserver-standard:
<<: *authserver-common
profiles: ["services-standard"] profiles: ["services-standard"]
image: ${AC_AUTHSERVER_IMAGE} image: ${AC_AUTHSERVER_IMAGE}
container_name: ac-authserver container_name: ac-authserver
user: "${CONTAINER_USER}"
depends_on: depends_on:
ac-mysql: ac-mysql:
condition: service_healthy condition: service_healthy
@@ -467,91 +625,26 @@ services:
condition: service_completed_successfully condition: service_completed_successfully
ac-db-init: ac-db-init:
condition: service_completed_successfully condition: service_completed_successfully
environment:
AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
AC_UPDATES_ENABLE_DATABASES: "0"
AC_BIND_IP: "0.0.0.0"
AC_LOG_LEVEL: "1"
AC_LOGGER_ROOT_CONFIG: "1,Console"
AC_LOGGER_SERVER_CONFIG: "1,Console"
AC_APPENDER_CONSOLE_CONFIG: "1,2,0"
ports:
- "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
restart: unless-stopped
logging:
networks:
- azerothcore
volumes:
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
cap_add: ["SYS_NICE"]
healthcheck:
test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
interval: ${AUTH_HEALTHCHECK_INTERVAL}
timeout: ${AUTH_HEALTHCHECK_TIMEOUT}
retries: ${AUTH_HEALTHCHECK_RETRIES}
start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
ac-worldserver-standard: ac-worldserver-standard:
<<: *worldserver-common
profiles: ["services-standard"] profiles: ["services-standard"]
image: ${AC_WORLDSERVER_IMAGE} image: ${AC_WORLDSERVER_IMAGE}
container_name: ac-worldserver container_name: ac-worldserver
user: "${CONTAINER_USER}"
stdin_open: true
tty: true
depends_on: depends_on:
ac-authserver-standard: ac-authserver-standard:
condition: service_healthy condition: service_healthy
ac-client-data-standard: ac-client-data-standard:
condition: service_completed_successfully condition: service_completed_successfully
environment:
AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
AC_UPDATES_ENABLE_DATABASES: "7"
AC_BIND_IP: "0.0.0.0"
AC_DATA_DIR: "/azerothcore/data"
AC_SOAP_PORT: "7878"
AC_PROCESS_PRIORITY: "0"
AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}"
AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}"
AC_ELUNA_AUTO_RELOAD: "${AC_ELUNA_AUTO_RELOAD}"
AC_ELUNA_BYTECODE_CACHE: "${AC_ELUNA_BYTECODE_CACHE}"
AC_ELUNA_SCRIPT_PATH: "${AC_ELUNA_SCRIPT_PATH}"
AC_ELUNA_REQUIRE_PATHS: "${AC_ELUNA_REQUIRE_PATHS}"
AC_ELUNA_REQUIRE_CPATHS: "${AC_ELUNA_REQUIRE_CPATHS}"
AC_ELUNA_AUTO_RELOAD_INTERVAL: "${AC_ELUNA_AUTO_RELOAD_INTERVAL}"
PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}"
PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
AC_LOG_LEVEL: "2"
ports:
- "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
- "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
- ${STORAGE_PATH}/logs:/azerothcore/logs
- ${STORAGE_PATH}/modules:/azerothcore/modules
- ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
restart: unless-stopped
logging:
networks:
- azerothcore
cap_add: ["SYS_NICE"]
healthcheck:
test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
interval: ${WORLD_HEALTHCHECK_INTERVAL}
timeout: ${WORLD_HEALTHCHECK_TIMEOUT}
retries: ${WORLD_HEALTHCHECK_RETRIES}
start_period: ${WORLD_HEALTHCHECK_START_PERIOD}
# ===================== # =====================
# Services - Playerbots (services-playerbots) # Services - Playerbots (services-playerbots)
# ===================== # =====================
ac-authserver-playerbots: ac-authserver-playerbots:
<<: *authserver-common
profiles: ["services-playerbots"] profiles: ["services-playerbots"]
image: ${AC_AUTHSERVER_IMAGE_PLAYERBOTS} image: ${AC_AUTHSERVER_IMAGE_PLAYERBOTS}
container_name: ac-authserver container_name: ac-authserver
user: "${CONTAINER_USER}"
depends_on: depends_on:
ac-mysql: ac-mysql:
condition: service_healthy condition: service_healthy
@@ -560,7 +653,7 @@ services:
ac-db-init: ac-db-init:
condition: service_completed_successfully condition: service_completed_successfully
environment: environment:
AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}" <<: *azerothcore-databases
AC_UPDATES_ENABLE_DATABASES: "0" AC_UPDATES_ENABLE_DATABASES: "0"
AC_BIND_IP: "0.0.0.0" AC_BIND_IP: "0.0.0.0"
TZ: "${TZ}" TZ: "${TZ}"
@@ -568,25 +661,6 @@ services:
AC_LOGGER_ROOT_CONFIG: "1,Console" AC_LOGGER_ROOT_CONFIG: "1,Console"
AC_LOGGER_SERVER_CONFIG: "1,Console" AC_LOGGER_SERVER_CONFIG: "1,Console"
AC_APPENDER_CONSOLE_CONFIG: "1,2,0" AC_APPENDER_CONSOLE_CONFIG: "1,2,0"
ports:
- "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
restart: unless-stopped
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"
networks:
- azerothcore
volumes:
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
cap_add: ["SYS_NICE"]
healthcheck:
test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
interval: ${AUTH_HEALTHCHECK_INTERVAL}
timeout: ${AUTH_HEALTHCHECK_TIMEOUT}
retries: ${AUTH_HEALTHCHECK_RETRIES}
start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
ac-authserver-modules: ac-authserver-modules:
profiles: ["services-modules"] profiles: ["services-modules"]
@@ -611,11 +685,11 @@ services:
ports: ports:
- "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}" - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
restart: unless-stopped restart: unless-stopped
logging: logging: *logging-default
networks: networks:
- azerothcore - azerothcore
volumes: volumes:
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
cap_add: ["SYS_NICE"] cap_add: ["SYS_NICE"]
healthcheck: healthcheck:
test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"] test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
@@ -625,12 +699,10 @@ services:
start_period: ${AUTH_HEALTHCHECK_START_PERIOD} start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
ac-worldserver-playerbots: ac-worldserver-playerbots:
<<: *worldserver-common
profiles: ["services-playerbots"] profiles: ["services-playerbots"]
image: ${AC_WORLDSERVER_IMAGE_PLAYERBOTS} image: ${AC_WORLDSERVER_IMAGE_PLAYERBOTS}
container_name: ac-worldserver container_name: ac-worldserver
user: "${CONTAINER_USER}"
stdin_open: true
tty: true
depends_on: depends_on:
ac-authserver-playerbots: ac-authserver-playerbots:
condition: service_healthy condition: service_healthy
@@ -639,13 +711,11 @@ services:
ac-db-guard: ac-db-guard:
condition: service_healthy condition: service_healthy
environment: environment:
AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}" <<: *azerothcore-databases
AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
AC_UPDATES_ENABLE_DATABASES: "7" AC_UPDATES_ENABLE_DATABASES: "7"
AC_BIND_IP: "0.0.0.0" AC_BIND_IP: "0.0.0.0"
AC_DATA_DIR: "/azerothcore/data" AC_DATA_DIR: "/azerothcore/data"
AC_SOAP_PORT: "7878" AC_SOAP_PORT: "${SOAP_PORT}"
AC_PROCESS_PRIORITY: "0" AC_PROCESS_PRIORITY: "0"
AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}" AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}"
AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}" AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}"
@@ -659,26 +729,6 @@ services:
PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}" PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}"
PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}" PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
AC_LOG_LEVEL: "2" AC_LOG_LEVEL: "2"
ports:
- "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
- "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
- ${STORAGE_PATH}/logs:/azerothcore/logs
- ${STORAGE_PATH}/modules:/azerothcore/modules
- ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
restart: unless-stopped
logging:
networks:
- azerothcore
cap_add: ["SYS_NICE"]
healthcheck:
test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
interval: ${WORLD_HEALTHCHECK_INTERVAL}
timeout: ${WORLD_HEALTHCHECK_TIMEOUT}
retries: ${WORLD_HEALTHCHECK_RETRIES}
start_period: ${WORLD_HEALTHCHECK_START_PERIOD}
ac-worldserver-modules: ac-worldserver-modules:
profiles: ["services-modules"] profiles: ["services-modules"]
@@ -701,7 +751,7 @@ services:
AC_UPDATES_ENABLE_DATABASES: "7" AC_UPDATES_ENABLE_DATABASES: "7"
AC_BIND_IP: "0.0.0.0" AC_BIND_IP: "0.0.0.0"
AC_DATA_DIR: "/azerothcore/data" AC_DATA_DIR: "/azerothcore/data"
AC_SOAP_PORT: "7878" AC_SOAP_PORT: "${SOAP_PORT}"
AC_PROCESS_PRIORITY: "0" AC_PROCESS_PRIORITY: "0"
AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}" AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}"
AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}" AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}"
@@ -715,22 +765,19 @@ services:
PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}" PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
AC_LOG_LEVEL: "2" AC_LOG_LEVEL: "2"
volumes: volumes:
- ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
- ${STORAGE_PATH}/logs:/azerothcore/logs - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
- ${STORAGE_PATH}/modules:/azerothcore/modules - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
- ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/azerothcore/modules
- ${STORAGE_LUA_SCRIPTS_PATH:-${STORAGE_PATH}/lua_scripts}:/azerothcore/lua_scripts
networks: networks:
- azerothcore - azerothcore
ports: ports:
- "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}" - "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
- "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}" - "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
restart: unless-stopped restart: unless-stopped
logging: logging: *logging-default
driver: json-file
options:
max-size: "10m"
max-file: "3"
cap_add: ["SYS_NICE"] cap_add: ["SYS_NICE"]
healthcheck: healthcheck:
test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"] test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
@@ -757,8 +804,8 @@ services:
ac-storage-init: ac-storage-init:
condition: service_completed_successfully condition: service_completed_successfully
volumes: volumes:
- ${STORAGE_PATH}/modules:/modules - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/modules
- ${STORAGE_PATH}/config:/azerothcore/env/dist/etc - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
- ./scripts:/tmp/scripts:ro - ./scripts:/tmp/scripts:ro
- ./config:/tmp/config:ro - ./config:/tmp/config:ro
env_file: env_file:
@@ -783,8 +830,8 @@ services:
container_name: ${CONTAINER_POST_INSTALL} container_name: ${CONTAINER_POST_INSTALL}
user: "0:0" user: "0:0"
volumes: volumes:
- ${STORAGE_PATH}/config:/azerothcore/config - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/config
- ${STORAGE_PATH}/install-markers:/install-markers - ${STORAGE_INSTALL_MARKERS_PATH:-${STORAGE_PATH}/install-markers}:/install-markers
- ./scripts:/tmp/scripts:ro - ./scripts:/tmp/scripts:ro
- /var/run/docker.sock:/var/run/docker.sock:rw - /var/run/docker.sock:/var/run/docker.sock:rw
working_dir: /tmp working_dir: /tmp
@@ -819,8 +866,10 @@ services:
- | - |
apk add --no-cache bash curl docker-cli su-exec apk add --no-cache bash curl docker-cli su-exec
chmod +x /tmp/scripts/bash/auto-post-install.sh 2>/dev/null || true chmod +x /tmp/scripts/bash/auto-post-install.sh 2>/dev/null || true
echo "📥 Running post-install as ${CONTAINER_USER}" echo "📥 Running post-install as root (testing mode)"
su-exec ${CONTAINER_USER} bash /tmp/scripts/bash/auto-post-install.sh mkdir -p /install-markers
chown -R ${CONTAINER_USER} /azerothcore/config /install-markers 2>/dev/null || true
bash /tmp/scripts/bash/auto-post-install.sh
restart: "no" restart: "no"
networks: networks:
- azerothcore - azerothcore
@@ -877,7 +926,7 @@ services:
timeout: 10s timeout: 10s
retries: 3 retries: 3
start_period: 40s start_period: 40s
logging: logging: *logging-default
security_opt: security_opt:
- no-new-privileges:true - no-new-privileges:true
networks: networks:

View File

@@ -3,6 +3,8 @@
**Last Updated:** 2025-11-14 **Last Updated:** 2025-11-14
**Status:** ✅ All blocked modules properly disabled **Status:** ✅ All blocked modules properly disabled
**Note:** This summary is historical. The authoritative block list lives in `config/module-manifest.json` (currently 94 modules marked `status: "blocked"`). This file and `docs/DISABLED_MODULES.md` should be reconciled during the next blocklist refresh.
--- ---
## Summary ## Summary

View File

@@ -4,6 +4,8 @@ This document tracks modules that have been disabled due to compilation errors o
**Last Updated:** 2025-11-14 **Last Updated:** 2025-11-14
**Note:** Historical snapshot. The current authoritative status for disabled/blocked modules is `status: "blocked"` in `config/module-manifest.json` (94 entries as of now). Align this file with the manifest during the next maintenance pass.
--- ---
## Disabled Modules ## Disabled Modules
@@ -111,7 +113,7 @@ These modules are blocked in the manifest with known issues:
## Current Working Module Count ## Current Working Module Count
**Total in Manifest:** ~93 modules **Total in Manifest:** ~93 modules (historical; current manifest: 348 total / 221 supported / 94 blocked)
**Enabled:** 89 modules **Enabled:** 89 modules
**Disabled (Build Issues):** 4 modules **Disabled (Build Issues):** 4 modules
**Blocked (Manifest):** 3 modules **Blocked (Manifest):** 3 modules

View File

@@ -9,7 +9,7 @@ This guide provides a complete walkthrough for deploying AzerothCore RealmMaster
Before you begin, ensure you have: Before you begin, ensure you have:
- **Docker** with Docker Compose - **Docker** with Docker Compose
- **16GB+ RAM** and **32GB+ storage** - **16GB+ RAM** and **64GB+ storage**
- **Linux/macOS/WSL2** (Windows with WSL2 recommended) - **Linux/macOS/WSL2** (Windows with WSL2 recommended)
## Quick Overview ## Quick Overview
@@ -40,7 +40,7 @@ cd AzerothCore-RealmMaster
The setup wizard will guide you through: The setup wizard will guide you through:
- **Server Configuration**: IP address, ports, timezone - **Server Configuration**: IP address, ports, timezone
- **Module Selection**: Choose from 30+ available modules or use presets - **Module Selection**: Choose from hundreds of official modules (348 in manifest; 221 currently supported) or use presets
- **Module Definitions**: Customize defaults in `config/module-manifest.json` and optional presets under `config/module-profiles/` - **Module Definitions**: Customize defaults in `config/module-manifest.json` and optional presets under `config/module-profiles/`
- **Storage Paths**: Configure NFS/local storage locations - **Storage Paths**: Configure NFS/local storage locations
- **Playerbot Settings**: Max bots, account limits (if enabled) - **Playerbot Settings**: Max bots, account limits (if enabled)
@@ -170,6 +170,12 @@ Optional flags:
- `--remote-port 2222` - Custom SSH port - `--remote-port 2222` - Custom SSH port
- `--remote-identity ~/.ssh/custom_key` - Specific SSH key - `--remote-identity ~/.ssh/custom_key` - Specific SSH key
- `--remote-skip-storage` - Don't sync storage directory (fresh install on remote) - `--remote-skip-storage` - Don't sync storage directory (fresh install on remote)
- `--remote-clean-containers` - Stop/remove existing `ac-*` containers and project images during migration
- `--remote-skip-env` - Leave the remote `.env` untouched (won't upload local one)
- `--remote-preserve-containers` - Do not stop/remove existing `ac-*` containers/images during migration
- `--remote-storage-path /mnt/acore-storage` - Override STORAGE_PATH on the remote host (local-storage stays per .env)
- `--remote-container-user 1001:1001` - Override CONTAINER_USER on the remote host (uid:gid)
- Note: do not combine `--remote-clean-containers` with `--remote-preserve-containers`; the flags are mutually exclusive.
### Step 3: Deploy on Remote Host ### Step 3: Deploy on Remote Host
```bash ```bash
@@ -197,8 +203,6 @@ The remote deployment process transfers:
### Module Presets ### Module Presets
> **⚠️ Warning:** Module preset support is still in progress. The bundled presets have not been fully tested yet—please share issues or suggestions via Discord (`uprightbass360`).
- Define JSON presets in `config/module-profiles/*.json`. Each file contains: - Define JSON presets in `config/module-profiles/*.json`. Each file contains:
- `modules` (array, required) list of `MODULE_*` identifiers to enable. - `modules` (array, required) list of `MODULE_*` identifiers to enable.
- `label` (string, optional) text shown in the setup menu (emoji welcome). - `label` (string, optional) text shown in the setup menu (emoji welcome).
@@ -216,11 +220,12 @@ The remote deployment process transfers:
``` ```
- `setup.sh` automatically adds these presets to the module menu and enables the listed modules when selected or when `--module-config <name>` is provided. - `setup.sh` automatically adds these presets to the module menu and enables the listed modules when selected or when `--module-config <name>` is provided.
- Built-in presets: - Built-in presets:
- `config/module-profiles/suggested-modules.json` default solo-friendly QoL stack. - `config/module-profiles/RealmMaster.json` 33-module baseline used for testing.
- `config/module-profiles/playerbots-suggested-modules.json` suggested stack plus playerbots. - `config/module-profiles/suggested-modules.json` light AzerothCore QoL stack without playerbots.
- `config/module-profiles/playerbots-only.json` playerbot-focused profile (adjust `--playerbot-max-bots`). - `config/module-profiles/playerbots-suggested-modules.json` suggested QoL stack plus playerbots.
- Custom example: - `config/module-profiles/azerothcore-vanilla.json` pure AzerothCore (no optional modules).
- `config/module-profiles/sam.json` Sam's playerbot-focused profile (set `--playerbot-max-bots 3000` when using this preset). - `config/module-profiles/playerbots-only.json` playerbot prerequisites only (tune bot counts separately).
- `config/module-profiles/all-modules.json` enable everything currently marked supported/active (not recommended).
- Module metadata lives in `config/module-manifest.json`; update that file if you need to add new modules or change repositories/branches. - Module metadata lives in `config/module-manifest.json`; update that file if you need to add new modules or change repositories/branches.
--- ---

View File

@@ -4,7 +4,7 @@ This document provides a comprehensive overview of all available modules in the
## Overview ## Overview
AzerothCore RealmMaster includes **93 modules** that are automatically downloaded, configured, and SQL scripts executed when enabled. All modules are organized into logical categories for easy browsing and selection. AzerothCore RealmMaster currently ships a manifest of **348 modules** (221 marked supported/active). The default RealmMaster preset enables 33 of these for day-to-day testing. All modules are automatically downloaded, configured, and SQL scripts executed when enabled. Modules are organized into logical categories for easy browsing and selection.
## How Modules Work ## How Modules Work
@@ -233,10 +233,13 @@ This will present a menu for selecting individual modules or choosing from prede
Pre-configured module combinations are available in `config/module-profiles/`: Pre-configured module combinations are available in `config/module-profiles/`:
- **Suggested Modules** - Baseline solo-friendly quality of life mix - `RealmMaster` - 33-module baseline used for day-to-day testing
- **Playerbots Suggested** - Suggested stack plus playerbots - `suggested-modules` - Light AzerothCore QoL stack without playerbots
- **Playerbots Only** - Playerbot-focused profile - `playerbots-suggested-modules` - Suggested QoL stack plus playerbots
- **Custom Profiles** - Additional specialized configurations - `azerothcore-vanilla` - Pure AzerothCore with no optional modules
- `playerbots-only` - Playerbot prerequisites only
- `all-modules` - Everything in the manifest (not recommended)
- Custom profiles - Drop new JSON files to add your own combinations
### Manual Configuration ### Manual Configuration

View File

@@ -6,6 +6,8 @@ This document tracks all modules that have been disabled due to compilation fail
**Total Blocked Modules:** 93 **Total Blocked Modules:** 93
**Note:** Historical snapshot from 2025-11-22 validation. The current authoritative count lives in `config/module-manifest.json` (94 modules marked `status: "blocked"`). Update this file when reconciling the manifest.
--- ---
## Compilation Errors ## Compilation Errors

View File

@@ -3,6 +3,8 @@
**Date:** 2025-11-14 **Date:** 2025-11-14
**Status:** ✅ PRE-DEPLOYMENT TESTS PASSED **Status:** ✅ PRE-DEPLOYMENT TESTS PASSED
**Note:** Historical record for the 2025-11-14 run. Counts here reflect that test set (93 modules). The current manifest contains 348 modules, 221 marked supported/active, and the RealmMaster preset exercises 33 modules.
--- ---
## Test Execution Summary ## Test Execution Summary
@@ -31,7 +33,7 @@
**Verified:** **Verified:**
- Environment file present - Environment file present
- Module configuration loaded - Module configuration loaded
- 93 modules enabled for testing - 93 modules enabled for testing in this run (current manifest: 348 total / 221 supported; RealmMaster preset: 33)
### Test 2: Module Manifest Validation ✅ ### Test 2: Module Manifest Validation ✅
```bash ```bash
@@ -139,7 +141,7 @@ MODULES_ENABLED="mod-playerbots mod-aoe-loot ..."
**What Gets Built:** **What Gets Built:**
- AzerothCore with playerbots branch - AzerothCore with playerbots branch
- 93 modules compiled and integrated - 93 modules compiled and integrated in this run (current manifest: 348 total / 221 supported)
- Custom Docker images: `acore-compose:worldserver-modules-latest` etc. - Custom Docker images: `acore-compose:worldserver-modules-latest` etc.
### Deployment Status: READY TO DEPLOY 🚀 ### Deployment Status: READY TO DEPLOY 🚀
@@ -261,7 +263,7 @@ docker exec ac-mysql mysql -uroot -p[password] acore_world \
- **Bash:** 5.0+ - **Bash:** 5.0+
- **Python:** 3.x - **Python:** 3.x
- **Docker:** Available - **Docker:** Available
- **Modules Enabled:** 93 - **Modules Enabled:** 93 (historical run)
- **Test Date:** 2025-11-14 - **Test Date:** 2025-11-14
--- ---

View File

@@ -23,7 +23,7 @@ Interactive `.env` generator with module selection, server configuration, and de
```bash ```bash
./setup.sh # Interactive configuration ./setup.sh # Interactive configuration
./setup.sh --module-config sam # Use predefined module profile, check profiles directory ./setup.sh --module-config RealmMaster # Use predefined module profile (see config/module-profiles)
./setup.sh --playerbot-max-bots 3000 # Set playerbot limits ./setup.sh --playerbot-max-bots 3000 # Set playerbot limits
``` ```

View File

@@ -195,8 +195,10 @@ else
# Step 3: Update realmlist table # Step 3: Update realmlist table
echo "" echo ""
echo "🌐 Step 3: Updating realmlist table..." echo "🌐 Step 3: Updating realmlist table..."
echo " 🔧 Setting realm address to: ${SERVER_ADDRESS}:${REALM_PORT}"
mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify "${DB_AUTH_NAME}" -e " mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify "${DB_AUTH_NAME}" -e "
UPDATE realmlist SET address='${SERVER_ADDRESS}', port=${REALM_PORT} WHERE id=1; UPDATE realmlist SET address='${SERVER_ADDRESS}', port=${REALM_PORT} WHERE id=1;
SELECT CONCAT(' ✓ Realm configured: ', name, ' at ', address, ':', port) AS status FROM realmlist WHERE id=1;
" || echo "⚠️ Could not update realmlist table" " || echo "⚠️ Could not update realmlist table"
echo "✅ Realmlist updated" echo "✅ Realmlist updated"

View File

@@ -4,8 +4,28 @@ set -euo pipefail
INVOCATION_DIR="$PWD" INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$SCRIPT_DIR" cd "$SCRIPT_DIR"
# Source common libraries for standardized functionality
if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
exit 1
fi
# Source utility libraries
source "$SCRIPT_DIR/lib/mysql-utils.sh" 2>/dev/null || warn "MySQL utilities not available"
source "$SCRIPT_DIR/lib/docker-utils.sh" 2>/dev/null || warn "Docker utilities not available"
source "$SCRIPT_DIR/lib/env-utils.sh" 2>/dev/null || warn "Environment utilities not available"
# Load environment defaults if present
if [ -f "$PROJECT_ROOT/.env" ]; then
set -a
# shellcheck disable=SC1091
source "$PROJECT_ROOT/.env"
set +a
fi
SUPPORTED_DBS=(auth characters world) SUPPORTED_DBS=(auth characters world)
declare -A SUPPORTED_SET=() declare -A SUPPORTED_SET=()
for db in "${SUPPORTED_DBS[@]}"; do for db in "${SUPPORTED_DBS[@]}"; do
@@ -16,10 +36,12 @@ declare -A DB_NAMES=([auth]="" [characters]="" [world]="")
declare -a INCLUDE_DBS=() declare -a INCLUDE_DBS=()
declare -a SKIP_DBS=() declare -a SKIP_DBS=()
MYSQL_PW="" MYSQL_PW="${MYSQL_ROOT_PASSWORD:-}"
DEST_PARENT="" DEST_PARENT=""
DEST_PROVIDED=false DEST_PROVIDED=false
EXPLICIT_SELECTION=false EXPLICIT_SELECTION=false
MYSQL_CONTAINER="${CONTAINER_MYSQL:-ac-mysql}"
DEFAULT_BACKUP_DIR="${BACKUP_PATH:-${STORAGE_PATH:-./storage}/backups}"
usage(){ usage(){
cat <<'EOF' cat <<'EOF'
@@ -28,7 +50,7 @@ Usage: ./backup-export.sh [options]
Creates a timestamped backup of one or more ACore databases. Creates a timestamped backup of one or more ACore databases.
Options: Options:
-o, --output DIR Destination directory (default: storage/backups) -o, --output DIR Destination directory (default: BACKUP_PATH from .env, fallback: ./storage/backups)
-p, --password PASS MySQL root password -p, --password PASS MySQL root password
--auth-db NAME Auth database schema name --auth-db NAME Auth database schema name
--characters-db NAME Characters database schema name --characters-db NAME Characters database schema name
@@ -52,7 +74,7 @@ Examples:
EOF EOF
} }
err(){ printf 'Error: %s\n' "$*" >&2; } # Use standardized error function from lib/common.sh
die(){ err "$1"; exit 1; } die(){ err "$1"; exit 1; }
normalize_token(){ normalize_token(){
@@ -93,10 +115,14 @@ remove_from_list(){
arr=("${filtered[@]}") arr=("${filtered[@]}")
} }
# Use env-utils.sh function if available, fallback to local implementation
resolve_relative(){ resolve_relative(){
local base="$1" path="$2" if command -v path_resolve_absolute >/dev/null 2>&1; then
if command -v python3 >/dev/null 2>&1; then path_resolve_absolute "$2" "$1"
python3 - "$base" "$path" <<'PY' else
local base="$1" path="$2"
if command -v python3 >/dev/null 2>&1; then
python3 - "$base" "$path" <<'PY'
import os, sys import os, sys
base, path = sys.argv[1:3] base, path = sys.argv[1:3]
if not path: if not path:
@@ -106,8 +132,9 @@ elif os.path.isabs(path):
else: else:
print(os.path.normpath(os.path.join(base, path))) print(os.path.normpath(os.path.join(base, path)))
PY PY
else else
die "python3 is required but was not found on PATH" die "python3 is required but was not found on PATH"
fi
fi fi
} }
@@ -224,13 +251,9 @@ done
if $DEST_PROVIDED; then if $DEST_PROVIDED; then
DEST_PARENT="$(resolve_relative "$INVOCATION_DIR" "$DEST_PARENT")" DEST_PARENT="$(resolve_relative "$INVOCATION_DIR" "$DEST_PARENT")"
else else
# Use storage/backups as default to align with existing backup structure DEFAULT_BACKUP_DIR="$(resolve_relative "$PROJECT_ROOT" "$DEFAULT_BACKUP_DIR")"
if [ -d "$SCRIPT_DIR/storage" ]; then DEST_PARENT="$DEFAULT_BACKUP_DIR"
DEST_PARENT="$SCRIPT_DIR/storage/backups" mkdir -p "$DEST_PARENT"
mkdir -p "$DEST_PARENT"
else
DEST_PARENT="$SCRIPT_DIR"
fi
fi fi
TIMESTAMP="$(date +%Y%m%d_%H%M%S)" TIMESTAMP="$(date +%Y%m%d_%H%M%S)"
@@ -241,7 +264,13 @@ generated_at="$(date --iso-8601=seconds)"
dump_db(){ dump_db(){
local schema="$1" outfile="$2" local schema="$1" outfile="$2"
echo "Dumping ${schema} -> ${outfile}" echo "Dumping ${schema} -> ${outfile}"
docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" | gzip > "$outfile"
# Use mysql-utils.sh function if available, fallback to direct command
if command -v mysql_backup_database >/dev/null 2>&1; then
mysql_backup_database "$schema" "$outfile" "gzip" "$MYSQL_CONTAINER" "$MYSQL_PW"
else
docker exec "$MYSQL_CONTAINER" mysqldump -uroot -p"$MYSQL_PW" "$schema" | gzip > "$outfile"
fi
} }
for db in "${ACTIVE_DBS[@]}"; do for db in "${ACTIVE_DBS[@]}"; do

View File

@@ -6,15 +6,19 @@ INVOCATION_DIR="$PWD"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR" cd "$SCRIPT_DIR"
COLOR_RED='\033[0;31m' # Source common libraries for standardized functionality
COLOR_GREEN='\033[0;32m' if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
COLOR_YELLOW='\033[1;33m' echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
COLOR_RESET='\033[0m' exit 1
fi
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; } # Source utility libraries
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; } source "$SCRIPT_DIR/lib/mysql-utils.sh" 2>/dev/null || warn "MySQL utilities not available"
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; } source "$SCRIPT_DIR/lib/docker-utils.sh" 2>/dev/null || warn "Docker utilities not available"
fatal(){ err "$*"; exit 1; } source "$SCRIPT_DIR/lib/env-utils.sh" 2>/dev/null || warn "Environment utilities not available"
# Use log() for main output to maintain existing behavior
log() { ok "$*"; }
SUPPORTED_DBS=(auth characters world) SUPPORTED_DBS=(auth characters world)
declare -A SUPPORTED_SET=() declare -A SUPPORTED_SET=()
@@ -102,10 +106,14 @@ remove_from_list(){
arr=("${filtered[@]}") arr=("${filtered[@]}")
} }
# Use env-utils.sh function if available, fallback to local implementation
resolve_relative(){ resolve_relative(){
local base="$1" path="$2" if command -v path_resolve_absolute >/dev/null 2>&1; then
if command -v python3 >/dev/null 2>&1; then path_resolve_absolute "$2" "$1"
python3 - "$base" "$path" <<'PY' else
local base="$1" path="$2"
if command -v python3 >/dev/null 2>&1; then
python3 - "$base" "$path" <<'PY'
import os, sys import os, sys
base, path = sys.argv[1:3] base, path = sys.argv[1:3]
if not path: if not path:
@@ -115,8 +123,9 @@ elif os.path.isabs(path):
else: else:
print(os.path.normpath(os.path.join(base, path))) print(os.path.normpath(os.path.join(base, path)))
PY PY
else else
fatal "python3 is required but was not found on PATH" fatal "python3 is required but was not found on PATH"
fi
fi fi
} }
@@ -280,7 +289,13 @@ backup_db(){
local out="manual-backups/${label}-pre-import-$(timestamp).sql" local out="manual-backups/${label}-pre-import-$(timestamp).sql"
mkdir -p manual-backups mkdir -p manual-backups
log "Backing up current ${schema} to ${out}" log "Backing up current ${schema} to ${out}"
docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" > "$out"
# Use mysql-utils.sh function if available, fallback to direct command
if command -v mysql_backup_database >/dev/null 2>&1; then
mysql_backup_database "$schema" "$out" "none" "ac-mysql" "$MYSQL_PW"
else
docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" > "$out"
fi
} }
restore(){ restore(){
@@ -302,7 +317,22 @@ db_selected(){
} }
count_rows(){ count_rows(){
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$1" # Use mysql-utils.sh function if available, fallback to direct command
if command -v docker_mysql_query >/dev/null 2>&1; then
# Extract database name from query for mysql-utils function
local query="$1"
local db_name
# Simple extraction - assumes "FROM database.table" or "database.table" pattern
if [[ "$query" =~ FROM[[:space:]]+([^.[:space:]]+)\. ]]; then
db_name="${BASH_REMATCH[1]}"
docker_mysql_query "$db_name" "$query" "ac-mysql" "$MYSQL_PW"
else
# Fallback to original method if can't parse database
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$query"
fi
else
docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$1"
fi
} }
case "${1:-}" in case "${1:-}" in

View File

@@ -6,18 +6,14 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR" cd "$SCRIPT_DIR"
COLOR_RED='\033[0;31m' # Source common library for standardized logging
COLOR_GREEN='\033[0;32m' if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
COLOR_YELLOW='\033[1;33m' echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
COLOR_BLUE='\033[0;34m' exit 1
COLOR_CYAN='\033[0;36m' fi
COLOR_RESET='\033[0m'
log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; } # Use log() instead of info() for main output to maintain existing behavior
info(){ printf '%b\n' "${COLOR_CYAN}$*${COLOR_RESET}"; } log() { ok "$*"; }
warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
fatal(){ err "$*"; exit 1; }
MYSQL_PW="" MYSQL_PW=""
BACKUP_DIR="" BACKUP_DIR=""

View File

@@ -4,9 +4,31 @@
# automatically rerun db-import-conditional to hydrate from backups. # automatically rerun db-import-conditional to hydrate from backups.
set -euo pipefail set -euo pipefail
log(){ echo "🛡️ [db-guard] $*"; } # Source common library if available (container environment)
warn(){ echo "⚠️ [db-guard] $*" >&2; } SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
err(){ echo "❌ [db-guard] $*" >&2; } if [ -f "$SCRIPT_DIR/../scripts/bash/lib/common.sh" ]; then
# Running from project root
source "$SCRIPT_DIR/../scripts/bash/lib/common.sh"
db_guard_log() { info "🛡️ [db-guard] $*"; }
db_guard_warn() { warn "[db-guard] $*"; }
db_guard_err() { err "[db-guard] $*"; }
elif [ -f "$SCRIPT_DIR/lib/common.sh" ]; then
# Running from scripts/bash directory
source "$SCRIPT_DIR/lib/common.sh"
db_guard_log() { info "🛡️ [db-guard] $*"; }
db_guard_warn() { warn "[db-guard] $*"; }
db_guard_err() { err "[db-guard] $*"; }
else
# Fallback for container environment where lib/common.sh may not be available
db_guard_log(){ echo "🛡️ [db-guard] $*"; }
db_guard_warn(){ echo "⚠️ [db-guard] $*" >&2; }
db_guard_err(){ echo "❌ [db-guard] $*" >&2; }
fi
# Maintain compatibility with existing function names
log() { db_guard_log "$*"; }
warn() { db_guard_warn "$*"; }
err() { db_guard_err "$*"; }
MYSQL_HOST="${CONTAINER_MYSQL:-ac-mysql}" MYSQL_HOST="${CONTAINER_MYSQL:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}" MYSQL_PORT="${MYSQL_PORT:-3306}"
@@ -24,6 +46,34 @@ STATUS_FILE="${DB_GUARD_STATUS_FILE:-/tmp/db-guard.status}"
ERROR_FILE="${DB_GUARD_ERROR_FILE:-/tmp/db-guard.error}" ERROR_FILE="${DB_GUARD_ERROR_FILE:-/tmp/db-guard.error}"
MODULE_SQL_HOST_PATH="${MODULE_SQL_HOST_PATH:-/modules-sql}" MODULE_SQL_HOST_PATH="${MODULE_SQL_HOST_PATH:-/modules-sql}"
SEED_CONF_SCRIPT="${SEED_DBIMPORT_CONF_SCRIPT:-/tmp/seed-dbimport-conf.sh}"
if [ -f "$SEED_CONF_SCRIPT" ]; then
# shellcheck source=/dev/null
. "$SEED_CONF_SCRIPT"
elif ! command -v seed_dbimport_conf >/dev/null 2>&1; then
seed_dbimport_conf(){
local conf="/azerothcore/env/dist/etc/dbimport.conf"
local dist="${conf}.dist"
mkdir -p "$(dirname "$conf")"
[ -f "$conf" ] && return 0
if [ -f "$dist" ]; then
cp "$dist" "$conf"
else
warn "dbimport.conf missing and no dist available; writing minimal defaults"
cat > "$conf" <<EOF
LoginDatabaseInfo = "localhost;3306;root;root;acore_auth"
WorldDatabaseInfo = "localhost;3306;root;root;acore_world"
CharacterDatabaseInfo = "localhost;3306;root;root;acore_characters"
PlayerbotsDatabaseInfo = "localhost;3306;root;root;acore_playerbots"
EnableDatabases = 15
Updates.AutoSetup = 1
MySQLExecutable = "/usr/bin/mysql"
TempDir = "/azerothcore/env/dist/etc/temp"
EOF
fi
}
fi
declare -a DB_SCHEMAS=() declare -a DB_SCHEMAS=()
for var in DB_AUTH_NAME DB_WORLD_NAME DB_CHARACTERS_NAME DB_PLAYERBOTS_NAME; do for var in DB_AUTH_NAME DB_WORLD_NAME DB_CHARACTERS_NAME DB_PLAYERBOTS_NAME; do
value="${!var:-}" value="${!var:-}"
@@ -85,15 +135,6 @@ rehydrate(){
"$IMPORT_SCRIPT" "$IMPORT_SCRIPT"
} }
ensure_dbimport_conf(){
local conf="/azerothcore/env/dist/etc/dbimport.conf"
local dist="${conf}.dist"
if [ ! -f "$conf" ] && [ -f "$dist" ]; then
cp "$dist" "$conf"
fi
mkdir -p /azerothcore/env/dist/temp
}
sync_host_stage_files(){ sync_host_stage_files(){
local host_root="${MODULE_SQL_HOST_PATH}" local host_root="${MODULE_SQL_HOST_PATH}"
[ -d "$host_root" ] || return 0 [ -d "$host_root" ] || return 0
@@ -110,7 +151,7 @@ sync_host_stage_files(){
dbimport_verify(){ dbimport_verify(){
local bin_dir="/azerothcore/env/dist/bin" local bin_dir="/azerothcore/env/dist/bin"
ensure_dbimport_conf seed_dbimport_conf
sync_host_stage_files sync_host_stage_files
if [ ! -x "${bin_dir}/dbimport" ]; then if [ ! -x "${bin_dir}/dbimport" ]; then
warn "dbimport binary not found at ${bin_dir}/dbimport" warn "dbimport binary not found at ${bin_dir}/dbimport"

View File

@@ -32,6 +32,22 @@ SHOW_PENDING=0
SHOW_MODULES=1 SHOW_MODULES=1
CONTAINER_NAME="ac-mysql" CONTAINER_NAME="ac-mysql"
resolve_path(){
local base="$1" path="$2"
if command -v python3 >/dev/null 2>&1; then
python3 - "$base" "$path" <<'PY'
import os, sys
base, path = sys.argv[1:3]
if os.path.isabs(path):
print(os.path.normpath(path))
else:
print(os.path.normpath(os.path.join(base, path)))
PY
else
(cd "$base" && realpath -m "$path")
fi
}
usage() { usage() {
cat <<'EOF' cat <<'EOF'
Usage: ./db-health-check.sh [options] Usage: ./db-health-check.sh [options]
@@ -73,6 +89,10 @@ if [ -f "$PROJECT_ROOT/.env" ]; then
set +a set +a
fi fi
BACKUP_PATH_RAW="${BACKUP_PATH:-${STORAGE_PATH:-./storage}/backups}"
BACKUP_PATH="$(resolve_path "$PROJECT_ROOT" "$BACKUP_PATH_RAW")"
CONTAINER_NAME="${CONTAINER_MYSQL:-$CONTAINER_NAME}"
MYSQL_HOST="${MYSQL_HOST:-ac-mysql}" MYSQL_HOST="${MYSQL_HOST:-ac-mysql}"
MYSQL_PORT="${MYSQL_PORT:-3306}" MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}" MYSQL_USER="${MYSQL_USER:-root}"
@@ -263,7 +283,7 @@ show_module_updates() {
# Get backup information # Get backup information
get_backup_info() { get_backup_info() {
local backup_dir="$PROJECT_ROOT/storage/backups" local backup_dir="$BACKUP_PATH"
if [ ! -d "$backup_dir" ]; then if [ ! -d "$backup_dir" ]; then
printf " ${ICON_INFO} No backups directory found\n" printf " ${ICON_INFO} No backups directory found\n"

View File

@@ -81,15 +81,6 @@ wait_for_mysql(){
return 1 return 1
} }
ensure_dbimport_conf(){
local conf="/azerothcore/env/dist/etc/dbimport.conf"
local dist="${conf}.dist"
if [ ! -f "$conf" ] && [ -f "$dist" ]; then
cp "$dist" "$conf"
fi
mkdir -p /azerothcore/env/dist/temp
}
case "${1:-}" in case "${1:-}" in
-h|--help) -h|--help)
print_help print_help
@@ -106,6 +97,34 @@ esac
echo "🔧 Conditional AzerothCore Database Import" echo "🔧 Conditional AzerothCore Database Import"
echo "========================================" echo "========================================"
# Prefer the shared seeding helper when it was staged into the container;
# otherwise fall back to a local seed_dbimport_conf definition — unless one
# was already provided by the environment (command -v also finds functions).
SEED_CONF_SCRIPT="${SEED_DBIMPORT_CONF_SCRIPT:-/tmp/seed-dbimport-conf.sh}"
if [ -f "$SEED_CONF_SCRIPT" ]; then
  # shellcheck source=/dev/null
  . "$SEED_CONF_SCRIPT"
elif ! command -v seed_dbimport_conf >/dev/null 2>&1; then
  # Fallback: guarantee /azerothcore/env/dist/etc/dbimport.conf exists —
  # copy the .dist template when present, else write localhost defaults.
  seed_dbimport_conf(){
    local conf="/azerothcore/env/dist/etc/dbimport.conf"
    local dist="${conf}.dist"
    mkdir -p "$(dirname "$conf")"
    # An existing conf is never overwritten.
    [ -f "$conf" ] && return 0
    if [ -f "$dist" ]; then
      cp "$dist" "$conf"
    else
      echo "⚠️ dbimport.conf missing and no dist available; using localhost defaults" >&2
      cat > "$conf" <<EOF
LoginDatabaseInfo = "localhost;3306;root;root;acore_auth"
WorldDatabaseInfo = "localhost;3306;root;root;acore_world"
CharacterDatabaseInfo = "localhost;3306;root;root;acore_characters"
PlayerbotsDatabaseInfo = "localhost;3306;root;root;acore_playerbots"
EnableDatabases = 15
Updates.AutoSetup = 1
MySQLExecutable = "/usr/bin/mysql"
TempDir = "/azerothcore/env/dist/etc/temp"
EOF
    fi
  }
fi
if ! wait_for_mysql; then if ! wait_for_mysql; then
echo "❌ MySQL service is unavailable; aborting database import" echo "❌ MySQL service is unavailable; aborting database import"
exit 1 exit 1
@@ -158,6 +177,8 @@ echo "🔧 Starting database import process..."
echo "🔍 Checking for backups to restore..." echo "🔍 Checking for backups to restore..."
# Allow tolerant scanning; re-enable -e after search.
set +e
# Define backup search paths in priority order # Define backup search paths in priority order
BACKUP_SEARCH_PATHS=( BACKUP_SEARCH_PATHS=(
"/backups" "/backups"
@@ -253,13 +274,16 @@ if [ -z "$backup_path" ]; then
# Check for manual backups (*.sql files) # Check for manual backups (*.sql files)
if [ -z "$backup_path" ]; then if [ -z "$backup_path" ]; then
echo "🔍 Checking for manual backup files..." echo "🔍 Checking for manual backup files..."
latest_manual=$(ls -1t "$BACKUP_DIRS"/*.sql 2>/dev/null | head -n 1) latest_manual=""
if [ -n "$latest_manual" ] && [ -f "$latest_manual" ]; then if ls "$BACKUP_DIRS"/*.sql >/dev/null 2>&1; then
echo "📦 Found manual backup: $(basename "$latest_manual")" latest_manual=$(ls -1t "$BACKUP_DIRS"/*.sql | head -n 1)
if timeout 10 head -20 "$latest_manual" >/dev/null 2>&1; then if [ -n "$latest_manual" ] && [ -f "$latest_manual" ]; then
echo "✅ Valid manual backup file: $(basename "$latest_manual")" echo "📦 Found manual backup: $(basename "$latest_manual")"
backup_path="$latest_manual" if timeout 10 head -20 "$latest_manual" >/dev/null 2>&1; then
break echo "✅ Valid manual backup file: $(basename "$latest_manual")"
backup_path="$latest_manual"
break
fi
fi fi
fi fi
fi fi
@@ -272,6 +296,7 @@ if [ -z "$backup_path" ]; then
done done
fi fi
set -e
echo "🔄 Final backup path result: '$backup_path'" echo "🔄 Final backup path result: '$backup_path'"
if [ -n "$backup_path" ]; then if [ -n "$backup_path" ]; then
echo "📦 Found backup: $(basename "$backup_path")" echo "📦 Found backup: $(basename "$backup_path")"
@@ -357,7 +382,7 @@ if [ -n "$backup_path" ]; then
return 0 return 0
fi fi
ensure_dbimport_conf seed_dbimport_conf
cd /azerothcore/env/dist/bin cd /azerothcore/env/dist/bin
echo "🔄 Running dbimport to apply any missing updates..." echo "🔄 Running dbimport to apply any missing updates..."
@@ -424,23 +449,73 @@ fi
echo "🗄️ Creating fresh AzerothCore databases..." echo "🗄️ Creating fresh AzerothCore databases..."
mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} -e " mysql -h ${CONTAINER_MYSQL} -u${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} -e "
CREATE DATABASE IF NOT EXISTS ${DB_AUTH_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; DROP DATABASE IF EXISTS ${DB_AUTH_NAME};
CREATE DATABASE IF NOT EXISTS ${DB_WORLD_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; DROP DATABASE IF EXISTS ${DB_WORLD_NAME};
CREATE DATABASE IF NOT EXISTS ${DB_CHARACTERS_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; DROP DATABASE IF EXISTS ${DB_CHARACTERS_NAME};
CREATE DATABASE IF NOT EXISTS acore_playerbots DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; DROP DATABASE IF EXISTS ${DB_PLAYERBOTS_NAME:-acore_playerbots};
CREATE DATABASE ${DB_AUTH_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
CREATE DATABASE ${DB_WORLD_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
CREATE DATABASE ${DB_CHARACTERS_NAME} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
CREATE DATABASE ${DB_PLAYERBOTS_NAME:-acore_playerbots} DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; } SHOW DATABASES;" || { echo "❌ Failed to create databases"; exit 1; }
echo "✅ Fresh databases created - proceeding with schema import" echo "✅ Fresh databases created - proceeding with schema import"
ensure_dbimport_conf
echo "🚀 Running database import..." echo "🚀 Running database import..."
cd /azerothcore/env/dist/bin cd /azerothcore/env/dist/bin
seed_dbimport_conf
maybe_run_base_import(){
  # Seed the base schemas directly with the mysql client when a database is
  # empty (or lacks the `updates` bookkeeping table), so the subsequent
  # `dbimport` run only has incremental updates to apply.
  local mysql_host="${CONTAINER_MYSQL:-ac-mysql}"
  local mysql_port="${MYSQL_PORT:-3306}"
  local mysql_user="${MYSQL_USER:-root}"
  local mysql_pass="${MYSQL_ROOT_PASSWORD:-root}"
  # Import every .sql file in $dir into database $db, in C-locale order.
  # Per-file failures are tolerated on purpose (best effort); dbimport
  # performs the authoritative pass afterwards.
  import_dir(){
    local db="$1" dir="$2" f
    [ -d "$dir" ] || return 0
    echo "🔧 Importing base schema for ${db} from $(basename "$dir")..."
    # Iterate the glob instead of parsing `ls` output (SC2045) so paths with
    # whitespace survive; sort keeps the apply order deterministic.
    while IFS= read -r f; do
      [ -f "$f" ] || continue
      MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" "$db" < "$f" >/dev/null 2>&1 || true
    done < <(printf '%s\n' "$dir"/*.sql | LC_ALL=C sort)
  }
  # A database needs the base import when it has no tables at all, or when
  # it is missing the `updates` table that dbimport uses to track revisions.
  needs_import(){
    local db="$1" count updates
    count="$(MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -N -B -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='${db}';" 2>/dev/null || echo 0)"
    [ "${count:-0}" -eq 0 ] && return 0
    updates="$(MYSQL_PWD="$mysql_pass" mysql -h "$mysql_host" -P "$mysql_port" -u "$mysql_user" -N -B -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='${db}' AND table_name='updates';" 2>/dev/null || echo 0)"
    [ "${updates:-0}" -eq 0 ]
  }
  if needs_import "${DB_WORLD_NAME:-acore_world}"; then
    import_dir "${DB_WORLD_NAME:-acore_world}" "/azerothcore/data/sql/base/db_world"
  fi
  if needs_import "${DB_AUTH_NAME:-acore_auth}"; then
    import_dir "${DB_AUTH_NAME:-acore_auth}" "/azerothcore/data/sql/base/db_auth"
  fi
  if needs_import "${DB_CHARACTERS_NAME:-acore_characters}"; then
    import_dir "${DB_CHARACTERS_NAME:-acore_characters}" "/azerothcore/data/sql/base/db_characters"
  fi
}
maybe_run_base_import
if ./dbimport; then if ./dbimport; then
echo "✅ Database import completed successfully!" echo "✅ Database import completed successfully!"
echo "$(date): Database import completed successfully" > "$RESTORE_STATUS_DIR/.import-completed" || echo "$(date): Database import completed successfully" > "$MARKER_STATUS_DIR/.import-completed" import_marker_msg="$(date): Database import completed successfully"
if [ -w "$RESTORE_STATUS_DIR" ]; then
echo "$import_marker_msg" > "$RESTORE_STATUS_DIR/.import-completed"
elif [ -w "$MARKER_STATUS_DIR" ]; then
echo "$import_marker_msg" > "$MARKER_STATUS_DIR/.import-completed" 2>/dev/null || true
fi
else else
echo "❌ Database import failed!" echo "❌ Database import failed!"
echo "$(date): Database import failed" > "$RESTORE_STATUS_DIR/.import-failed" || echo "$(date): Database import failed" > "$MARKER_STATUS_DIR/.import-failed" if [ -w "$RESTORE_STATUS_DIR" ]; then
echo "$(date): Database import failed" > "$RESTORE_STATUS_DIR/.import-failed"
elif [ -w "$MARKER_STATUS_DIR" ]; then
echo "$(date): Database import failed" > "$MARKER_STATUS_DIR/.import-failed" 2>/dev/null || true
fi
exit 1 exit 1
fi fi

View File

@@ -6,6 +6,13 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)" ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Source common library for standardized logging
if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
exit 1
fi
DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml" DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
ENV_FILE="$ROOT_DIR/.env" ENV_FILE="$ROOT_DIR/.env"
TEMPLATE_FILE="$ROOT_DIR/.env.template" TEMPLATE_FILE="$ROOT_DIR/.env.template"
@@ -16,17 +23,6 @@ DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
source "$ROOT_DIR/scripts/bash/compose_overrides.sh" source "$ROOT_DIR/scripts/bash/compose_overrides.sh"
declare -a COMPOSE_FILE_ARGS=() declare -a COMPOSE_FILE_ARGS=()
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
info(){ echo -e "${BLUE} $*${NC}"; }
ok(){ echo -e "${GREEN}$*${NC}"; }
warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
err(){ echo -e "${RED}$*${NC}"; }
read_env(){ read_env(){
local key="$1" default="${2:-}" value="" local key="$1" default="${2:-}" value=""
if [ -f "$ENV_FILE" ]; then if [ -f "$ENV_FILE" ]; then

View File

@@ -50,9 +50,9 @@ log() {
printf '%b\n' "${GREEN}$*${NC}" printf '%b\n' "${GREEN}$*${NC}"
} }
# Log warning messages (yellow with warning icon) # Log warning messages (yellow with warning icon, to stderr for compatibility)
warn() { warn() {
printf '%b\n' "${YELLOW}⚠️ $*${NC}" printf '%b\n' "${YELLOW}⚠️ $*${NC}" >&2
} }
# Log error messages (red with error icon, continues execution) # Log error messages (red with error icon, continues execution)

View File

@@ -0,0 +1,530 @@
#!/bin/bash
#
# Docker utility library for AzerothCore RealmMaster scripts
# This library provides standardized Docker operations, container management,
# and deployment functions.
#
# Usage: source /path/to/scripts/bash/lib/docker-utils.sh
#
# Prevent multiple sourcing
# Re-source guard: a prior load set the sentinel, so a second `source`
# becomes a no-op (plain `return` keeps `set -e` sourcers alive).
if [ -n "${_DOCKER_UTILS_LIB_LOADED:-}" ]; then
  return 0
fi
_DOCKER_UTILS_LIB_LOADED=1
# Source common library for logging functions.
# Resolve this file's own directory so the sibling common.sh is found
# regardless of the caller's working directory.
DOCKER_UTILS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$DOCKER_UTILS_DIR/common.sh" ]; then
  source "$DOCKER_UTILS_DIR/common.sh"
elif command -v info >/dev/null 2>&1; then
  # Common functions already available (provided by an earlier source)
  :
else
  # Fallback logging functions so this library works standalone
  info() { printf '\033[0;34m %s\033[0m\n' "$*"; }
  warn() { printf '\033[1;33m⚠ %s\033[0m\n' "$*" >&2; }
  err() { printf '\033[0;31m❌ %s\033[0m\n' "$*" >&2; }
  fatal() { err "$*"; exit 1; }
fi
# =============================================================================
# DOCKER CONTAINER MANAGEMENT
# =============================================================================
# Get container status
# Returns: running, exited, paused, restarting, removing, dead, created, or "not_found"
#
# Usage:
# status=$(docker_get_container_status "ac-mysql")
# if [ "$status" = "running" ]; then
# echo "Container is running"
# fi
#
docker_get_container_status() {
  # Print the Docker state of container $1 (running, exited, paused, ...)
  # or "not_found" (with status 1) when no container has that exact name.
  local container_name="$1"
  # Exact, fixed-string, whole-line name match: the previous `table` format
  # emitted a header row and the anchored-prefix grep also matched names
  # that merely start with $container_name (e.g. "ac-mysql" matched
  # "ac-mysql-backup").
  if ! docker ps -a --format '{{.Names}}' | grep -Fxq -- "$container_name"; then
    echo "not_found"
    return 1
  fi
  docker inspect --format='{{.State.Status}}' "$container_name" 2>/dev/null || echo "not_found"
}
# Check if container is running
# Returns 0 if running, 1 if not running or not found
#
# Usage:
# if docker_is_container_running "ac-mysql"; then
# echo "MySQL container is running"
# fi
#
docker_is_container_running() {
  # Succeed (status 0) only when container $1 is currently in the
  # "running" state; any other state — or a missing container — fails.
  local name="$1"
  [ "$(docker_get_container_status "$name")" = "running" ]
}
# Wait for container to reach desired state
# Returns 0 if container reaches state within timeout, 1 if timeout
#
# Usage:
# docker_wait_for_container_state "ac-mysql" "running" 30
# docker_wait_for_container_state "ac-mysql" "exited" 10
#
docker_wait_for_container_state() {
  # Poll container $1 until it reports state $2, for up to $3 seconds
  # (default 30), checking every $4 seconds (default 2).
  # Returns 0 when the state is reached, 1 on timeout.
  local container_name="$1"
  local desired_state="$2"
  local timeout="${3:-30}"
  local check_interval="${4:-2}"
  local elapsed=0
  # Declared (and defaulted) outside the loop so the timeout message below
  # is safe under `set -u` even when the loop body never ran (timeout <= 0).
  local current_state="unknown"
  info "Waiting for container '$container_name' to reach state '$desired_state' (timeout: ${timeout}s)"
  while [ "$elapsed" -lt "$timeout" ]; do
    current_state=$(docker_get_container_status "$container_name")
    if [ "$current_state" = "$desired_state" ]; then
      info "Container '$container_name' reached desired state: $desired_state"
      return 0
    fi
    sleep "$check_interval"
    elapsed=$((elapsed + check_interval))
  done
  err "Container '$container_name' did not reach state '$desired_state' within ${timeout}s (current: $current_state)"
  return 1
}
# Execute command in container with retry logic
# Handles container availability and connection issues
#
# Usage:
# docker_exec_with_retry "ac-mysql" "mysql -uroot -ppassword -e 'SELECT 1'"
# echo "SELECT 1" | docker_exec_with_retry "ac-mysql" "mysql -uroot -ppassword"
#
docker_exec_with_retry() {
  # Run command string $2 via `sh -c` inside container $1, retrying up to
  # $3 times (default 3) with $4 seconds between attempts (default 2).
  # Pass "true" as $5 to keep stdin attached (docker exec -i).
  # Returns 0 on the first successful attempt, 1 otherwise.
  local container_name="$1"
  local command="$2"
  local max_attempts="${3:-3}"
  local retry_delay="${4:-2}"
  local interactive="${5:-false}"
  if ! docker_is_container_running "$container_name"; then
    err "Container '$container_name' is not running"
    return 1
  fi
  local attempt
  for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
    if [ "$interactive" = "true" ]; then
      docker exec -i "$container_name" sh -c "$command" && return 0
    else
      docker exec "$container_name" sh -c "$command" && return 0
    fi
    # Only sleep between attempts, never after the final failure.
    if (( attempt < max_attempts )); then
      warn "Docker exec failed in '$container_name' (attempt $attempt/$max_attempts), retrying in ${retry_delay}s..."
      sleep "$retry_delay"
    fi
  done
  err "Docker exec failed in '$container_name' after $max_attempts attempts"
  return 1
}
# =============================================================================
# DOCKER COMPOSE PROJECT MANAGEMENT
# =============================================================================
# Get project name from environment or docker-compose.yml
# Returns the Docker Compose project name
#
# Usage:
# project_name=$(docker_get_project_name)
# echo "Project: $project_name"
#
docker_get_project_name() {
  # Resolve the Compose project name with this precedence:
  #   1. $COMPOSE_PROJECT_NAME from the environment
  #   2. the `name:` key of ./docker-compose.yml (needs python3 + PyYAML)
  #   3. the current directory name, sanitized to Compose's [a-z0-9] charset
  if [ -n "${COMPOSE_PROJECT_NAME:-}" ]; then
    echo "$COMPOSE_PROJECT_NAME"
    return 0
  fi
  if [ -f "docker-compose.yml" ] && command -v python3 >/dev/null 2>&1; then
    local yaml_name
    yaml_name=$(python3 -c "
import yaml
try:
    with open('docker-compose.yml', 'r') as f:
        data = yaml.safe_load(f)
    print(data.get('name', ''))
except:
    print('')
" 2>/dev/null)
    if [ -n "$yaml_name" ]; then
      echo "$yaml_name"
      return 0
    fi
  fi
  basename "$PWD" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]//g'
}
# List containers for current project
# Returns list of container names with optional filtering
#
# Usage:
# containers=$(docker_list_project_containers)
# running_containers=$(docker_list_project_containers "running")
#
docker_list_project_containers() {
  # Print the names of containers belonging to the current Compose project,
  # optionally restricted to a status ($1: running, exited, ...).
  local status_filter="${1:-}"
  local project_name
  project_name=$(docker_get_project_name)
  # Build the optional filter as an array; the previous unquoted string
  # expansion (SC2086) depended on word-splitting and broke under `set -f`
  # or with any future filter value containing spaces.
  local -a filter_args=()
  if [ -n "$status_filter" ]; then
    filter_args+=(--filter "status=$status_filter")
  fi
  # Containers are located via the label Compose stamps on every container.
  docker ps -a "${filter_args[@]}" --filter "label=com.docker.compose.project=$project_name" --format "{{.Names}}" 2>/dev/null
}
# Stop project containers gracefully
# Stops containers with configurable timeout
#
# Usage:
# docker_stop_project_containers 30 # Stop with 30s timeout
# docker_stop_project_containers # Use default 10s timeout
#
docker_stop_project_containers() {
  # Gracefully stop every running project container; $1 is the per-container
  # stop timeout in seconds (default 10). No-op when nothing is running.
  local stop_timeout="${1:-10}"
  local running
  running=$(docker_list_project_containers "running")
  if [ -z "$running" ]; then
    info "No running containers found for project"
    return 0
  fi
  info "Stopping project containers with ${stop_timeout}s timeout: $running"
  echo "$running" | xargs -r docker stop -t "$stop_timeout"
}
# Start project containers
# Starts containers that are stopped but exist
#
# Usage:
# docker_start_project_containers
#
docker_start_project_containers() {
  # Start project containers that exist but are currently exited.
  # No-op when there is nothing to start.
  local stopped
  stopped=$(docker_list_project_containers "exited")
  if [ -z "$stopped" ]; then
    info "No stopped containers found for project"
    return 0
  fi
  info "Starting project containers: $stopped"
  echo "$stopped" | xargs -r docker start
}
# =============================================================================
# DOCKER IMAGE MANAGEMENT
# =============================================================================
# Get image information for container
# Returns image name:tag for specified container
#
# Usage:
# image=$(docker_get_container_image "ac-mysql")
# echo "MySQL container using image: $image"
#
docker_get_container_image() {
  # Print the image (name:tag) that container $1 was created from,
  # or "unknown" when the container does not exist.
  # `docker inspect` works for running and stopped containers alike, so the
  # previous running/not-running branch — whose two arms were identical —
  # was dead code and has been removed.
  local container_name="$1"
  docker inspect --format='{{.Config.Image}}' "$container_name" 2>/dev/null || echo "unknown"
}
# Check if image exists locally
# Returns 0 if image exists, 1 if not found
#
# Usage:
# if docker_image_exists "mysql:8.0"; then
# echo "MySQL image is available"
# fi
#
docker_image_exists() {
  # Return 0 iff image $1 ("repository:tag") exists locally.
  local image_name="$1"
  # Fixed-string whole-line match (-F -x) with `--` guard: under the old
  # regex match, '.' in a tag acted as a wildcard and a leading '-' in the
  # name could be parsed as a grep option.
  docker images --format "{{.Repository}}:{{.Tag}}" | grep -Fxq -- "$image_name"
}
# Pull image with retry logic
# Handles temporary network issues and registry problems
#
# Usage:
# docker_pull_image_with_retry "mysql:8.0"
# docker_pull_image_with_retry "azerothcore/ac-wotlk-worldserver:latest" 5 10
#
docker_pull_image_with_retry() {
  # Pull image $1 unless it is already present locally, retrying up to
  # $2 times (default 3) with $3 seconds between attempts (default 5).
  # Returns 0 on success (or cache hit), 1 after exhausting all attempts.
  local image_name="$1"
  local max_attempts="${2:-3}"
  local retry_delay="${3:-5}"
  if docker_image_exists "$image_name"; then
    info "Image '$image_name' already exists locally"
    return 0
  fi
  local attempt
  for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
    info "Pulling image '$image_name' (attempt $attempt/$max_attempts)"
    if docker pull "$image_name"; then
      info "Successfully pulled image '$image_name'"
      return 0
    fi
    # Back off between attempts only; never sleep after the final failure.
    if (( attempt < max_attempts )); then
      warn "Failed to pull image '$image_name', retrying in ${retry_delay}s..."
      sleep "$retry_delay"
    fi
  done
  err "Failed to pull image '$image_name' after $max_attempts attempts"
  return 1
}
# =============================================================================
# DOCKER COMPOSE OPERATIONS
# =============================================================================
# Validate docker-compose.yml configuration
# Returns 0 if valid, 1 if invalid or errors found
#
# Usage:
# if docker_compose_validate; then
# echo "Docker Compose configuration is valid"
# fi
#
docker_compose_validate() {
  # Validate compose file $1 (default docker-compose.yml) with
  # `docker compose config --quiet`. Returns non-zero when the file is
  # missing or fails validation.
  local compose_file="${1:-docker-compose.yml}"
  if [ ! -f "$compose_file" ]; then
    err "Docker Compose file not found: $compose_file"
    return 1
  fi
  if ! docker compose -f "$compose_file" config --quiet; then
    err "Docker Compose configuration validation failed"
    return 1
  fi
  info "Docker Compose configuration is valid"
  return 0
}
# Get service status from docker-compose
# Returns service status or "not_found" if service doesn't exist
#
# Usage:
# status=$(docker_compose_get_service_status "ac-mysql")
#
docker_compose_get_service_status() {
  # Map compose service $1 to its default container name
  # (<project>-<service>-1) and report that container's status.
  local service_name="$1"
  local project_name
  project_name=$(docker_get_project_name)
  docker_get_container_status "${project_name}-${service_name}-1"
}
# Deploy with profile and options
# Wrapper around docker compose up with standardized options
#
# Usage:
# docker_compose_deploy "services-standard" "--detach"
# docker_compose_deploy "services-modules" "--no-deps ac-worldserver"
#
docker_compose_deploy() {
  # Bring the stack up under compose profile $1 (default services-standard).
  # $2 is appended verbatim to `docker compose up`: it is intentionally
  # word-split so several flags can be passed in one string. When $2 is
  # empty the stack starts detached.
  local profile="${1:-services-standard}"
  local additional_options="${2:-}"
  if ! docker_compose_validate; then
    err "Cannot deploy: Docker Compose configuration is invalid"
    return 1
  fi
  info "Deploying with profile: $profile"
  # NOTE(review): despite what an earlier comment claimed, this does NOT
  # `exec` — compose runs as a child process and this function returns
  # its exit status to the caller.
  if [ -n "$additional_options" ]; then
    # shellcheck disable=SC2086 — word-splitting of $additional_options is the contract
    docker compose --profile "$profile" up $additional_options
  else
    docker compose --profile "$profile" up --detach
  fi
}
# =============================================================================
# DOCKER SYSTEM UTILITIES
# =============================================================================
# Check Docker daemon availability
# Returns 0 if Docker is available, 1 if not
#
# Usage:
# if docker_check_daemon; then
# echo "Docker daemon is available"
# fi
#
docker_check_daemon() {
  # Probe the Docker daemon; succeed silently, or log and return 1
  # when it is unreachable.
  docker info >/dev/null 2>&1 && return 0
  err "Docker daemon is not available or accessible"
  return 1
}
# Get Docker system information
# Returns formatted system info for debugging
#
# Usage:
# docker_print_system_info
#
docker_print_system_info() {
  # Print Docker/Compose versions, project name and running-container count
  # for debugging. Returns 1 when the daemon is unreachable.
  info "Docker System Information:"
  if ! docker_check_daemon; then
    err "Cannot retrieve Docker system information - daemon not available"
    return 1
  fi
  local docker_version compose_version running_containers
  # Any probe failure degrades to the literal "unknown" rather than erroring.
  docker_version=$(docker --version 2>/dev/null | cut -d' ' -f3 | tr -d ',' || echo "unknown")
  compose_version=$(docker compose version --short 2>/dev/null || echo "unknown")
  running_containers=$(docker_list_project_containers "running" | wc -l)
  info " Docker Version: $docker_version"
  info " Compose Version: $compose_version"
  info " Project Name: $(docker_get_project_name)"
  info " Running Containers: $running_containers"
}
# Cleanup unused Docker resources
# Removes stopped containers, unused networks, and dangling images
#
# Usage:
# docker_cleanup_system true # Include unused volumes
# docker_cleanup_system false # Preserve volumes (default)
#
docker_cleanup_system() {
  # Prune stopped containers, unused networks and dangling images.
  # Pass "true" as $1 to ALSO prune unused volumes — that can permanently
  # delete data for any volume not currently attached to a container.
  local include_volumes="${1:-false}"
  info "Cleaning up Docker system resources..."
  # Remove stopped containers (all of them, not just this project's —
  # NOTE(review): confirm that project-wide scope is intended).
  local stopped_containers
  stopped_containers=$(docker ps -aq --filter "status=exited")
  if [ -n "$stopped_containers" ]; then
    info "Removing stopped containers"
    echo "$stopped_containers" | xargs docker rm
  fi
  # Remove unused networks
  info "Removing unused networks"
  docker network prune -f
  # Remove dangling images
  info "Removing dangling images"
  docker image prune -f
  # Remove unused volumes if requested
  if [ "$include_volumes" = "true" ]; then
    warn "Removing unused volumes (this may delete data!)"
    docker volume prune -f
  fi
  info "Docker system cleanup completed"
}
# =============================================================================
# CONTAINER HEALTH AND MONITORING
# =============================================================================
# Get container resource usage
# Returns CPU and memory usage statistics
#
# Usage:
# docker_get_container_stats "ac-mysql"
#
docker_get_container_stats() {
  # Print a one-shot CPU/memory usage snapshot for running container $1;
  # fails with status 1 when the container is not running.
  local target="$1"
  docker_is_container_running "$target" || {
    err "Container '$target' is not running"
    return 1
  }
  docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}" "$target"
}
# Check container logs for errors
# Searches recent logs for error patterns
#
# Usage:
# docker_check_container_errors "ac-mysql" 100
#
docker_check_container_errors() {
  # Scan the last $2 (default 50) log lines of container $1 for common
  # error keywords; matching lines are printed, otherwise a friendly note.
  # Fails only when the container does not exist at all.
  local target="$1"
  local tail_lines="${2:-50}"
  docker ps -a --format "{{.Names}}" | grep -q "^${target}$" || {
    err "Container '$target' not found"
    return 1
  }
  info "Checking last $tail_lines log lines for errors in '$target'"
  # stderr is folded into stdout because daemons often log errors there.
  if ! docker logs --tail "$tail_lines" "$target" 2>&1 | grep -i "error\|exception\|fail\|fatal"; then
    info "No obvious errors found in recent logs"
  fi
  return 0
}
# =============================================================================
# INITIALIZATION
# =============================================================================
# Library loaded successfully
# Scripts can check for $_DOCKER_UTILS_LIB_LOADED to verify library is loaded

View File

@@ -0,0 +1,613 @@
#!/bin/bash
#
# Environment and file utility library for AzerothCore RealmMaster scripts
# This library provides enhanced environment variable handling, file operations,
# and path management functions.
#
# Usage: source /path/to/scripts/bash/lib/env-utils.sh
#
# Prevent multiple sourcing
# Re-source guard: a prior load set the sentinel, so a second `source`
# becomes a no-op (plain `return` keeps `set -e` sourcers alive).
if [ -n "${_ENV_UTILS_LIB_LOADED:-}" ]; then
  return 0
fi
_ENV_UTILS_LIB_LOADED=1
# Source common library for logging functions.
# Resolve this file's own directory so the sibling common.sh is found
# regardless of the caller's working directory.
ENV_UTILS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$ENV_UTILS_DIR/common.sh" ]; then
  source "$ENV_UTILS_DIR/common.sh"
elif command -v info >/dev/null 2>&1; then
  # Common functions already available (provided by an earlier source)
  :
else
  # Fallback logging functions so this library works standalone
  info() { printf '\033[0;34m %s\033[0m\n' "$*"; }
  warn() { printf '\033[1;33m⚠ %s\033[0m\n' "$*" >&2; }
  err() { printf '\033[0;31m❌ %s\033[0m\n' "$*" >&2; }
  fatal() { err "$*"; exit 1; }
fi
# =============================================================================
# ENVIRONMENT VARIABLE MANAGEMENT
# =============================================================================
# Enhanced read_env function with advanced features
# Supports multiple .env files, environment variable precedence, and validation
#
# Usage:
# value=$(env_read_with_fallback "MYSQL_PASSWORD" "default_password")
# value=$(env_read_with_fallback "PORT" "" ".env.local" "validate_port")
#
env_read_with_fallback() {
  # Read configuration value $1 with precedence:
  #   process environment > entry in $env_file > $default.
  # $3 optionally names the .env file (default: $ENV_PATH / $DEFAULT_ENV_PATH
  # / .env); $4 optionally names a validator function that receives the
  # resolved value and must return 0 to accept it.
  local key="$1"
  local default="${2:-}"
  local env_file="${3:-${ENV_PATH:-${DEFAULT_ENV_PATH:-.env}}}"
  local validator_func="${4:-}"
  local value=""
  # 1. Check if variable is already set in environment (highest precedence)
  if [ -n "${!key:-}" ]; then
    value="${!key}"
  else
    # 2. Read from .env file if it exists
    if [ -f "$env_file" ]; then
      # Last matching KEY= line wins; tr strips CRs from Windows-edited files.
      value="$(grep -E "^${key}=" "$env_file" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
      # Remove inline comments.
      # NOTE(review): this sed strips from the first '#' even when it sits
      # inside quotes, so quoted values containing '#' get truncated — the
      # behavior is simpler than "not inside quotes"; confirm before relying.
      value="$(echo "$value" | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"
      # Strip quotes if present
      if [[ "$value" == \"*\" && "$value" == *\" ]]; then
        # Double quotes (negative substring offsets need bash >= 4.2)
        value="${value:1:-1}"
      elif [[ "$value" == \'*\' && "$value" == *\' ]]; then
        # Single quotes
        value="${value:1:-1}"
      fi
    fi
    # 3. Use default if still empty
    if [ -z "${value:-}" ]; then
      value="$default"
    fi
  fi
  # 4. Validate if validator function provided
  if [ -n "$validator_func" ] && command -v "$validator_func" >/dev/null 2>&1; then
    if ! "$validator_func" "$value"; then
      err "Validation failed for $key: $value"
      return 1
    fi
  fi
  printf '%s\n' "${value}"
}
# Read environment variable with type conversion
# Supports string, int, bool, and path types
#
# Usage:
# port=$(env_read_typed "MYSQL_PORT" "int" "3306")
# debug=$(env_read_typed "DEBUG" "bool" "false")
# path=$(env_read_typed "DATA_PATH" "path" "/data")
#
env_read_typed() {
  # Read environment variable $1 and coerce it to type $2
  # (string | int | bool | path), falling back to default $3.
  # Returns non-zero when the value cannot be coerced.
  local key="$1" want="$2" fallback="${3:-}"
  local raw
  raw=$(env_read_with_fallback "$key" "$fallback")
  case "$want" in
    int|integer)
      if [[ "$raw" =~ ^[0-9]+$ ]]; then
        echo "$raw"
      else
        err "Environment variable $key must be an integer: $raw"
        return 1
      fi
      ;;
    bool|boolean)
      # Accept the usual truthy/falsy spellings, case-insensitively.
      case "${raw,,}" in
        true|yes|1|on|enabled) echo "true" ;;
        false|no|0|off|disabled) echo "false" ;;
        *)
          err "Environment variable $key must be boolean: $raw"
          return 1
          ;;
      esac
      ;;
    path)
      # Expand relative paths to absolute; empty values print nothing.
      [ -z "$raw" ] || path_resolve_absolute "$raw"
      ;;
    *)
      # string and any unknown type pass through unchanged.
      echo "$raw"
      ;;
  esac
}
# Update or add environment variable in .env file with backup
# Creates backup and maintains file integrity
#
# Usage:
# env_update_value "MYSQL_PASSWORD" "new_password"
# env_update_value "DEBUG" "true" ".env.local"
# env_update_value "PORT" "8080" ".env" "true" # create backup
#
env_update_value() {
  # Set KEY=VALUE ($1=$2) in env file $3 (default: $ENV_PATH / .env),
  # updating an existing entry in place or appending a new one.
  # Pass "true" as $4 to write a timestamped backup of the file first.
  local key="$1"
  local value="$2"
  local env_file="${3:-${ENV_PATH:-${DEFAULT_ENV_PATH:-.env}}}"
  local create_backup="${4:-false}"
  [ -n "$env_file" ] || return 0
  # Create backup if requested
  if [ "$create_backup" = "true" ] && [ -f "$env_file" ]; then
    file_create_backup "$env_file"
  fi
  # Create file if it doesn't exist
  if [ ! -f "$env_file" ]; then
    file_ensure_writable_dir "$(dirname "$env_file")"
    printf '%s=%s\n' "$key" "$value" >> "$env_file"
    return 0
  fi
  # Update existing or append new
  if grep -q "^${key}=" "$env_file"; then
    # Rewrite through a temp file: portable across GNU/BSD sed and safer
    # than in-place editing. (The old $OSTYPE-specific `sed -i` option
    # string was computed but never used — dead code, now removed.)
    # NOTE(review): sed uses '|' as its delimiter and the key/value are not
    # escaped, so values containing '|' or '&' will corrupt the line —
    # confirm inputs are plain before relying on this.
    local temp_file="${env_file}.tmp.$$"
    sed "s|^${key}=.*|${key}=${value}|" "$env_file" > "$temp_file" && mv "$temp_file" "$env_file"
  else
    printf '\n%s=%s\n' "$key" "$value" >> "$env_file"
  fi
  info "Updated $key in $env_file"
}
# Load multiple environment files with precedence
# Later files override earlier ones
#
# Usage:
# env_load_multiple ".env" ".env.local" ".env.production"
#
env_load_multiple() {
  # Source each existing file, in argument order, exporting everything it
  # defines (set -a); later files override earlier ones. Missing files are
  # skipped silently; returns 1 only when NONE of the files exist.
  local env_file
  local loaded_count=0
  for env_file in "$@"; do
    [ -f "$env_file" ] || continue
    info "Loading environment from: $env_file"
    set -a
    # shellcheck disable=SC1090
    source "$env_file"
    set +a
    loaded_count=$((loaded_count + 1))
  done
  if [ "$loaded_count" -eq 0 ]; then
    warn "No environment files found: $*"
    return 1
  fi
  info "Loaded $loaded_count environment file(s)"
  return 0
}
# =============================================================================
# PATH AND FILE UTILITIES
# =============================================================================
# Resolve path to absolute form with proper error handling
# Handles both existing and non-existing paths
#
# Usage:
# abs_path=$(path_resolve_absolute "./relative/path")
# abs_path=$(path_resolve_absolute "/already/absolute")
#
path_resolve_absolute() {
  # Normalize $1 to an absolute path anchored at $2 (default: $PWD).
  # Works for paths that do not exist yet: python3 is preferred, then
  # `realpath -m`, then a plain string join with no '..' normalization.
  local target="$1"
  local anchor="${2:-$PWD}"
  if command -v python3 >/dev/null 2>&1; then
    python3 - "$anchor" "$target" <<'PY'
import os, sys
base, path = sys.argv[1:3]
if not path:
    print(os.path.abspath(base))
elif os.path.isabs(path):
    print(os.path.normpath(path))
else:
    print(os.path.normpath(os.path.join(base, path)))
PY
    return
  fi
  if command -v realpath >/dev/null 2>&1; then
    case "$target" in
      /*) echo "$target" ;;
      *) realpath -m "$anchor/$target" ;;
    esac
  else
    # Last resort: join without normalizing '.'/'..' segments.
    case "$target" in
      /*) echo "$target" ;;
      *) echo "$anchor/$target" ;;
    esac
  fi
}
# Ensure directory exists and is writable with proper permissions
# Creates parent directories if needed
#
# Usage:
# file_ensure_writable_dir "/path/to/directory"
# file_ensure_writable_dir "/path/to/directory" "0755"
#
file_ensure_writable_dir() {
  # Create $1 (with parents) when missing and guarantee it is writable.
  # $2: permission mode applied on creation (default 0755).
  # Returns 1 when the directory cannot be created or made writable.
  local target="$1"
  local mode="${2:-0755}"
  if [ ! -d "$target" ]; then
    mkdir -p "$target" 2>/dev/null || { err "Failed to create directory: $target"; return 1; }
    info "Created directory: $target"
    chmod "$mode" "$target" 2>/dev/null || warn "Could not set permissions on $target"
  fi
  if [ -w "$target" ]; then
    return 0
  fi
  # Directory exists but is read-only for us; try to grant owner write.
  chmod u+w "$target" 2>/dev/null || { err "Directory not writable and cannot fix permissions: $target"; return 1; }
  info "Made directory writable: $target"
  return 0
}
# Create timestamped backup of file
# Supports custom backup directory and compression
#
# Usage:
# file_create_backup "/path/to/important.conf"
# file_create_backup "/path/to/file" "/backup/dir" "gzip"
#
file_create_backup() {
  # Create a timestamped backup copy of $1.
  # $2: destination directory (default: alongside the original file).
  # $3: compression, "gzip"/"gz" or "none" (default).
  # Returns 0 when the source file is missing (nothing to do) or on success;
  # 1 when the copy/compression fails.
  local file="$1"
  local backup_dir="${2:-$(dirname "$file")}"
  local compression="${3:-none}"
  if [ ! -f "$file" ]; then
    warn "File does not exist, skipping backup: $file"
    return 0
  fi
  file_ensure_writable_dir "$backup_dir"
  local filename basename backup_file
  filename=$(basename "$file")
  basename="${filename%.*}"
  local extension="${filename##*.}"
  # Keep the original extension last so extension-keyed tools still work.
  if [ "$filename" = "$basename" ]; then
    # No extension. BUG FIX: this branch previously invoked the undefined
    # command $(unknown), yielding an empty name plus a stderr error.
    backup_file="${backup_dir}/${filename}.backup.$(date +%Y%m%d_%H%M%S)"
  else
    backup_file="${backup_dir}/${basename}.backup.$(date +%Y%m%d_%H%M%S).${extension}"
  fi
  case "$compression" in
    gzip|gz)
      if gzip -c "$file" > "${backup_file}.gz"; then
        info "Created compressed backup: ${backup_file}.gz"
      else
        err "Failed to create compressed backup: ${backup_file}.gz"
        return 1
      fi
      ;;
    *)
      if cp "$file" "$backup_file"; then
        info "Created backup: $backup_file"
      else
        err "Failed to create backup: $backup_file"
        return 1
      fi
      ;;
  esac
  return 0
}
# Set file permissions safely with validation
# Handles both numeric and symbolic modes
#
# Usage:
# file_set_permissions "/path/to/file" "0644"
# file_set_permissions "/path/to/script" "u+x"
#
file_set_permissions() {
  # Apply chmod mode $2 (numeric like "0644" or symbolic like "u+x") to $1.
  # $3: "true" to recurse into directories (default "false").
  # Returns 1 when the target is missing or chmod fails.
  local target="$1"
  local permissions="$2"
  local recursive="${3:-false}"
  if [ ! -e "$target" ]; then
    err "File or directory does not exist: $target"
    return 1
  fi
  # Collect options in an array instead of an unquoted string (fixes
  # ShellCheck SC2086; an unquoted expansion would word-split/glob).
  local -a chmod_args=()
  if [ "$recursive" = "true" ] && [ -d "$target" ]; then
    chmod_args+=(-R)
  fi
  if chmod "${chmod_args[@]}" "$permissions" "$target" 2>/dev/null; then
    info "Set permissions $permissions on $target"
    return 0
  fi
  err "Failed to set permissions $permissions on $target"
  return 1
}
# =============================================================================
# CONFIGURATION FILE UTILITIES
# =============================================================================
# Read value from template file with variable expansion support
# Enhanced version supporting more template formats
#
# Usage:
# value=$(config_read_template_value "MYSQL_PASSWORD" ".env.template")
# value=$(config_read_template_value "PORT" "config.template.yml" "yaml")
#
config_read_template_value() {
  # Read the value of key $1 from a template file.
  # $2: template path (default: $TEMPLATE_FILE, $TEMPLATE_PATH, or .env.template)
  # $3: format, "env" (default) or "yaml"/"yml" (requires python3 + PyYAML).
  # env format: strips one pair of surrounding double quotes and resolves a
  # ${VAR:-default} placeholder to its literal default.
  local key="$1"
  local template_file="${2:-${TEMPLATE_FILE:-${TEMPLATE_PATH:-.env.template}}}"
  local format="${3:-env}"
  if [ ! -f "$template_file" ]; then
    err "Template file not found: $template_file"
    return 1
  fi
  case "$format" in
    env)
      local raw_line value
      raw_line=$(grep "^${key}=" "$template_file" 2>/dev/null | head -1)
      if [ -z "$raw_line" ]; then
        err "Key '$key' not found in template: $template_file"
        return 1
      fi
      value="${raw_line#*=}"
      # Strip one pair of surrounding double quotes, if present.
      value=$(echo "$value" | sed 's/^"\(.*\)"$/\1/')
      # Handle ${VAR:-default} syntax by extracting the default value.
      if [[ "$value" =~ ^\$\{[^}]*:-([^}]*)\}$ ]]; then
        value="${BASH_REMATCH[1]}"
      fi
      echo "$value"
      ;;
    yaml|yml)
      if ! command -v python3 >/dev/null 2>&1; then
        err "python3 required for YAML template parsing"
        return 1
      fi
      # FIX: pass file and key as argv so arbitrary values cannot break out
      # of (or inject into) the Python source, which previously interpolated
      # "$template_file" and "$key" directly into the code string.
      python3 - "$template_file" "$key" <<'PY' 2>/dev/null
import sys
import yaml
path, key = sys.argv[1:3]
try:
    with open(path, "r") as handle:
        data = yaml.safe_load(handle)
    # Simple top-level lookup - can be enhanced for nested keys
    print((data or {}).get(key, ""))
except Exception:
    sys.exit(1)
PY
      ;;
    *)
      err "Unsupported template format: $format"
      return 1
      ;;
  esac
}
# Validate configuration against schema
# Supports basic validation rules
#
# Usage:
# config_validate_env ".env" "required:MYSQL_PASSWORD,PORT;optional:DEBUG"
#
config_validate_env() {
  # Validate an env file against simple rules shaped like:
  #   "required:VAR1,VAR2;optional:VAR3"
  # Required vars must be present and non-empty; optional vars merely log a
  # note when absent. Returns 1 when any required var fails.
  local env_file="$1"
  local rules="${2:-}"
  if [ ! -f "$env_file" ]; then
    err "Environment file not found: $env_file"
    return 1
  fi
  if [ -z "$rules" ]; then
    info "No validation rules specified"
    return 0
  fi
  local failed=false rule_set rule_type var_csv var
  local -a rule_sets=()
  IFS=';' read -ra rule_sets <<< "$rules"
  for rule_set in "${rule_sets[@]}"; do
    local -a rule_parts=()
    IFS=':' read -ra rule_parts <<< "$rule_set"
    rule_type="${rule_parts[0]}"
    var_csv="${rule_parts[1]:-}"
    case "$rule_type" in
      required)
        local -a req_vars=()
        IFS=',' read -ra req_vars <<< "$var_csv"
        for var in "${req_vars[@]}"; do
          if ! grep -q "^${var}=" "$env_file" || [ -z "$(env_read_with_fallback "$var" "" "$env_file")" ]; then
            err "Required environment variable missing or empty: $var"
            failed=true
          fi
        done
        ;;
      optional)
        local -a opt_vars=()
        IFS=',' read -ra opt_vars <<< "$var_csv"
        for var in "${opt_vars[@]}"; do
          grep -q "^${var}=" "$env_file" || info "Optional environment variable not set: $var"
        done
        ;;
    esac
  done
  if [ "$failed" = "true" ]; then
    err "Environment validation failed"
    return 1
  fi
  info "Environment validation passed"
  return 0
}
# =============================================================================
# SYSTEM UTILITIES
# =============================================================================
# Detect operating system and distribution
# Returns standardized OS identifier
#
# Usage:
# os=$(system_detect_os)
# if [ "$os" = "ubuntu" ]; then
# echo "Running on Ubuntu"
# fi
#
system_detect_os() {
  # Print a normalized OS identifier: a recognized distro id (ubuntu, debian,
  # centos, rhel, fedora, alpine, arch), generic "linux", "macos", "windows",
  # or "unknown" when nothing matches.
  local detected="unknown"
  if [ -f /etc/os-release ]; then
    local distro_id
    distro_id=$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d '"')
    case "$distro_id" in
      ubuntu|debian|centos|rhel|fedora|alpine|arch) detected="$distro_id" ;;
      *) detected="linux" ;;
    esac
  else
    case "$OSTYPE" in
      darwin*) detected="macos" ;;
      cygwin|msys) detected="windows" ;;
    esac
  fi
  echo "$detected"
}
# Check system requirements
# Validates required commands and versions
#
# Usage:
# system_check_requirements "docker:20.0,python3:3.6"
#
system_check_requirements() {
  # Verify each entry of a comma-separated "cmd[:min_version]" list resolves
  # to an installed command. Version comparison is not implemented yet —
  # presence of the command is all that is checked.
  local requirements="${1:-}"
  [ -n "$requirements" ] || return 0
  local failed=false requirement cmd min_version
  local -a req_list=()
  IFS=',' read -ra req_list <<< "$requirements"
  for requirement in "${req_list[@]}"; do
    local -a req_parts=()
    IFS=':' read -ra req_parts <<< "$requirement"
    cmd="${req_parts[0]}"
    min_version="${req_parts[1]:-}"
    if ! command -v "$cmd" >/dev/null 2>&1; then
      err "Required command not found: $cmd"
      failed=true
      continue
    fi
    if [ -n "$min_version" ]; then
      info "Found $cmd (version checking not fully implemented)"
    else
      info "Found required command: $cmd"
    fi
  done
  if [ "$failed" = "true" ]; then
    err "System requirements check failed"
    return 1
  fi
  info "System requirements check passed"
  return 0
}
# =============================================================================
# INITIALIZATION AND VALIDATION
# =============================================================================
# Validate environment utility configuration
# Checks that utilities are working correctly
#
# Usage:
# env_utils_validate
#
env_utils_validate() {
  # Smoke-test the core utilities (path resolution and directory creation)
  # so callers can fail fast if the library is broken in this environment.
  info "Validating environment utilities..."
  local resolved
  resolved=$(path_resolve_absolute "." 2>/dev/null)
  if [ -z "$resolved" ]; then
    err "Path resolution utility not working"
    return 1
  fi
  local probe_dir="/tmp/env-utils-test.$$"
  if ! file_ensure_writable_dir "$probe_dir"; then
    err "Directory utility not working"
    return 1
  fi
  rmdir "$probe_dir" 2>/dev/null || true
  info "Environment utilities validation successful"
  return 0
}
# =============================================================================
# INITIALIZATION
# =============================================================================
# Library loaded successfully
# Scripts can check for $_ENV_UTILS_LIB_LOADED to verify library is loaded

View File

@@ -0,0 +1,376 @@
#!/bin/bash
#
# MySQL utility library for AzerothCore RealmMaster scripts
# This library provides standardized MySQL operations, connection management,
# and database interaction functions.
#
# Usage: source /path/to/scripts/bash/lib/mysql-utils.sh
#
# Prevent multiple sourcing
if [ -n "${_MYSQL_UTILS_LIB_LOADED:-}" ]; then
return 0
fi
_MYSQL_UTILS_LIB_LOADED=1
# Source common library for logging functions
# Resolve this library's own directory so common.sh can be located no matter
# what the caller's working directory is.
MYSQL_UTILS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$MYSQL_UTILS_DIR/common.sh" ]; then
source "$MYSQL_UTILS_DIR/common.sh"
elif command -v info >/dev/null 2>&1; then
# Common functions already available
:
else
# Fallback logging functions
info() { printf '\033[0;34m %s\033[0m\n' "$*"; }
warn() { printf '\033[1;33m⚠ %s\033[0m\n' "$*" >&2; }
err() { printf '\033[0;31m❌ %s\033[0m\n' "$*" >&2; }
fatal() { err "$*"; exit 1; }
fi
# =============================================================================
# MYSQL CONNECTION CONFIGURATION
# =============================================================================
# Default MySQL configuration - can be overridden by environment
# CONTAINER_MYSQL and MYSQL_PW act as legacy aliases for the host/password.
MYSQL_HOST="${MYSQL_HOST:-${CONTAINER_MYSQL:-ac-mysql}}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-${MYSQL_PW:-azerothcore}}"
MYSQL_CONTAINER="${MYSQL_CONTAINER:-ac-mysql}"
# =============================================================================
# MYSQL CONNECTION FUNCTIONS
# =============================================================================
# Test MySQL connection with current configuration
# Returns 0 if connection successful, 1 if failed
#
# Usage:
# if mysql_test_connection; then
# echo "MySQL is available"
# fi
#
mysql_test_connection() {
  # Return 0 when a trivial "SELECT 1" succeeds against the target server.
  # Args (all optional): host, port, user, password — default to the globals.
  local host="${1:-$MYSQL_HOST}" port="${2:-$MYSQL_PORT}"
  local user="${3:-$MYSQL_USER}" password="${4:-$MYSQL_ROOT_PASSWORD}"
  # Password via MYSQL_PWD keeps it out of the process argument list.
  MYSQL_PWD="$password" mysql -h "$host" -P "$port" -u "$user" -e "SELECT 1" >/dev/null 2>&1
}
# Wait for MySQL to be ready with timeout
# Returns 0 if MySQL becomes available within timeout, 1 if timeout reached
#
# Usage:
# mysql_wait_for_connection 60 # Wait up to 60 seconds
# mysql_wait_for_connection # Use default 30 second timeout
#
mysql_wait_for_connection() {
  # Poll mysql_test_connection until it succeeds or the timeout elapses.
  # $1: timeout in seconds (default 30); $2: poll interval (default 2).
  local timeout="${1:-30}" interval="${2:-2}" waited=0
  info "Waiting for MySQL connection (${MYSQL_HOST}:${MYSQL_PORT}) with ${timeout}s timeout..."
  until mysql_test_connection; do
    sleep "$interval"
    waited=$((waited + interval))
    if [ "$waited" -ge "$timeout" ]; then
      err "MySQL connection failed after ${timeout}s timeout"
      return 1
    fi
  done
  info "MySQL connection established"
  return 0
}
# Execute MySQL command with retry logic
# Handles both direct queries and piped input
#
# Usage:
# mysql_exec_with_retry "database_name" "SELECT COUNT(*) FROM table;"
# echo "SELECT 1;" | mysql_exec_with_retry "database_name"
# mysql_exec_with_retry "database_name" < script.sql
#
mysql_exec_with_retry() {
  # Run a query ($2) against database $1, retrying on failure. When $2 is
  # empty, SQL is read from stdin instead (note stdin is consumed on the
  # first attempt, matching prior behavior).
  # $3: max attempts (default 3); $4: delay between attempts (default 2s).
  local database="$1" query="${2:-}"
  local max_attempts="${3:-3}" retry_delay="${4:-2}"
  local attempt
  for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
    if [ -n "$query" ]; then
      MYSQL_PWD="$MYSQL_ROOT_PASSWORD" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" "$database" -e "$query" && return 0
    else
      MYSQL_PWD="$MYSQL_ROOT_PASSWORD" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" "$database" && return 0
    fi
    if [ "$attempt" -lt "$max_attempts" ]; then
      warn "MySQL query failed (attempt $attempt/$max_attempts), retrying in ${retry_delay}s..."
      sleep "$retry_delay"
    fi
  done
  err "MySQL query failed after $max_attempts attempts"
  return 1
}
# Execute MySQL query and return result (no table headers)
# Optimized for single values and parsing
#
# Usage:
# count=$(mysql_query "acore_characters" "SELECT COUNT(*) FROM characters")
# tables=$(mysql_query "information_schema" "SHOW TABLES")
#
mysql_query() {
  # Run a query and emit raw results (-N -B: no headers, tab-separated),
  # suitable for capturing single values. Errors are silenced.
  # Args: database, query, [host], [port], [user], [password].
  local database="$1" query="$2"
  local host="${3:-$MYSQL_HOST}" port="${4:-$MYSQL_PORT}"
  local user="${5:-$MYSQL_USER}" password="${6:-$MYSQL_ROOT_PASSWORD}"
  MYSQL_PWD="$password" mysql -h "$host" -P "$port" -u "$user" -N -B "$database" -e "$query" 2>/dev/null
}
# =============================================================================
# DOCKER MYSQL FUNCTIONS
# =============================================================================
# Execute MySQL command inside Docker container
# Wrapper around docker exec with standardized MySQL connection
#
# Usage:
# docker_mysql_exec "acore_auth" "SELECT COUNT(*) FROM account;"
# echo "SELECT 1;" | docker_mysql_exec "acore_auth"
#
docker_mysql_exec() {
  # Run a query (or stdin when $2 is empty) through the mysql client inside
  # a Docker container. Args: database, [query], [container], [root password].
  local database="$1" query="${2:-}"
  local container="${3:-$MYSQL_CONTAINER}" password="${4:-$MYSQL_ROOT_PASSWORD}"
  if [ -z "$query" ]; then
    # -i keeps stdin open so piped SQL reaches the client.
    docker exec -i "$container" mysql -uroot -p"$password" "$database"
  else
    docker exec "$container" mysql -uroot -p"$password" "$database" -e "$query"
  fi
}
# Execute MySQL query in Docker container (no table headers)
# Optimized for single values and parsing
#
# Usage:
# count=$(docker_mysql_query "acore_characters" "SELECT COUNT(*) FROM characters")
#
docker_mysql_query() {
  # Containerized variant of mysql_query: raw, headerless output for parsing.
  # Args: database, query, [container], [root password].
  local database="$1" query="$2"
  local container="${3:-$MYSQL_CONTAINER}" password="${4:-$MYSQL_ROOT_PASSWORD}"
  docker exec "$container" mysql -uroot -p"$password" -N -B "$database" -e "$query" 2>/dev/null
}
# Check if MySQL container is healthy and accepting connections
#
# Usage:
# if docker_mysql_is_ready; then
# echo "MySQL container is ready"
# fi
#
docker_mysql_is_ready() {
  # True when `mysqladmin ping` succeeds inside the container — i.e. the
  # server is up and accepting connections.
  local container="${1:-$MYSQL_CONTAINER}"
  local password="${2:-$MYSQL_ROOT_PASSWORD}"
  docker exec "$container" mysqladmin ping -uroot -p"$password" >/dev/null 2>&1
}
# =============================================================================
# DATABASE UTILITY FUNCTIONS
# =============================================================================
# Check if database exists
# Returns 0 if database exists, 1 if not found
#
# Usage:
# if mysql_database_exists "acore_world"; then
# echo "World database found"
# fi
#
mysql_database_exists() {
  # True when a schema named $1 exists, per information_schema.SCHEMATA.
  # Any query failure counts as "does not exist".
  local database_name="$1"
  local found
  found=$(mysql_query "information_schema" "SELECT COUNT(*) FROM SCHEMATA WHERE SCHEMA_NAME='$database_name'" 2>/dev/null || echo "0")
  # The 2>/dev/null guards the test against non-numeric output.
  [ "$found" -gt 0 ] 2>/dev/null
}
# Get table count for database(s)
# Supports both single database and multiple database patterns
#
# Usage:
# count=$(mysql_get_table_count "acore_world")
# count=$(mysql_get_table_count "acore_auth,acore_characters")
#
mysql_get_table_count() {
local databases="$1"
local schema_list
# Convert comma-separated list to SQL IN clause format
schema_list=$(echo "$databases" | sed "s/,/','/g" | sed "s/^/'/" | sed "s/$/'/")
mysql_query "information_schema" "SELECT COUNT(*) FROM tables WHERE table_schema IN ($schema_list)"
}
# Get database connection string for applications
# Returns connection string in format: host;port;user;password;database
#
# Usage:
# conn_str=$(mysql_get_connection_string "acore_auth")
#
mysql_get_connection_string() {
  # Emit a "host;port;user;password;database" connection string (the format
  # AzerothCore config files expect). Args: database, [host], [port],
  # [user], [password].
  local database="$1"
  local host="${2:-$MYSQL_HOST}" port="${3:-$MYSQL_PORT}"
  local user="${4:-$MYSQL_USER}" password="${5:-$MYSQL_ROOT_PASSWORD}"
  printf '%s;%s;%s;%s;%s\n' "$host" "$port" "$user" "$password" "$database"
}
# =============================================================================
# BACKUP AND RESTORE UTILITIES
# =============================================================================
# Create database backup using mysqldump
# Supports both compressed and uncompressed output
#
# Usage:
# mysql_backup_database "acore_characters" "/path/to/backup.sql"
# mysql_backup_database "acore_world" "/path/to/backup.sql.gz" "gzip"
#
mysql_backup_database() {
  # Dump database $1 from the MySQL container into file $2.
  # $3: "gzip"/"gz" for compressed output, anything else for plain SQL.
  # Args: database, output_file, [compression], [container], [root password].
  # FIX: returns non-zero when the dump fails. Previously the gzip branch
  # reported gzip's exit status, so a failed mysqldump was silent success.
  local database="$1" output_file="$2"
  local compression="${3:-none}"
  local container="${4:-$MYSQL_CONTAINER}" password="${5:-$MYSQL_ROOT_PASSWORD}"
  info "Creating backup of $database -> $output_file"
  case "$compression" in
    gzip|gz)
      docker exec "$container" mysqldump -uroot -p"$password" "$database" | gzip > "$output_file"
      # Capture PIPESTATUS immediately — both stages must succeed.
      local -a stage_status=("${PIPESTATUS[@]}")
      if [ "${stage_status[0]}" -ne 0 ] || [ "${stage_status[1]}" -ne 0 ]; then
        err "Backup of $database failed"
        return 1
      fi
      ;;
    *)
      if ! docker exec "$container" mysqldump -uroot -p"$password" "$database" > "$output_file"; then
        err "Backup of $database failed"
        return 1
      fi
      ;;
  esac
  return 0
}
# Restore database from backup file
# Handles both compressed and uncompressed files automatically
#
# Usage:
# mysql_restore_database "acore_characters" "/path/to/backup.sql"
# mysql_restore_database "acore_world" "/path/to/backup.sql.gz"
#
mysql_restore_database() {
  # Load a SQL backup ($2) into database $1 through the MySQL container.
  # .gz files are decompressed on the fly; unknown extensions are treated as
  # plain SQL with a warning. Args: database, backup_file, [container],
  # [root password]. Returns 1 when the backup file is missing.
  local database="$1" backup_file="$2"
  local container="${3:-$MYSQL_CONTAINER}" password="${4:-$MYSQL_ROOT_PASSWORD}"
  if [ ! -f "$backup_file" ]; then
    err "Backup file not found: $backup_file"
    return 1
  fi
  info "Restoring $database from $backup_file"
  local -a import_cmd=(docker exec -i "$container" mysql -uroot -p"$password" "$database")
  if [[ "$backup_file" == *.gz ]]; then
    gzip -dc "$backup_file" | "${import_cmd[@]}"
  else
    [[ "$backup_file" == *.sql ]] || warn "Unknown backup file format, treating as uncompressed SQL"
    "${import_cmd[@]}" < "$backup_file"
  fi
}
# =============================================================================
# VALIDATION AND DIAGNOSTICS
# =============================================================================
# Validate MySQL configuration and connectivity
# Comprehensive health check for MySQL setup
#
# Usage:
# mysql_validate_configuration
#
mysql_validate_configuration() {
  # End-to-end sanity check: password configured, server reachable, and —
  # when the named container is in `docker ps` output — the container's
  # mysqladmin ping succeeds too. Returns 1 on the first failure.
  info "Validating MySQL configuration..."
  [ -n "$MYSQL_ROOT_PASSWORD" ] || { err "MYSQL_ROOT_PASSWORD is not set"; return 1; }
  mysql_test_connection || { err "Cannot connect to MySQL at ${MYSQL_HOST}:${MYSQL_PORT}"; return 1; }
  # Container check only applies when the container is actually running.
  if docker ps --format "table {{.Names}}" | grep -q "$MYSQL_CONTAINER"; then
    docker_mysql_is_ready || { err "MySQL container $MYSQL_CONTAINER is not ready"; return 1; }
    info "MySQL container $MYSQL_CONTAINER is healthy"
  fi
  info "MySQL configuration validation successful"
  return 0
}
# Print MySQL configuration summary
# Useful for debugging and verification
#
# Usage:
# mysql_print_configuration
#
mysql_print_configuration() {
  # Log the active connection settings for debugging; the password value is
  # never printed, only whether one is set.
  local pw_state="***NOT SET***"
  if [ -n "$MYSQL_ROOT_PASSWORD" ]; then
    pw_state="***SET***"
  fi
  info "MySQL Configuration Summary:"
  info " Host: $MYSQL_HOST"
  info " Port: $MYSQL_PORT"
  info " User: $MYSQL_USER"
  info " Container: $MYSQL_CONTAINER"
  info " Password: $pw_state"
}
# =============================================================================
# INITIALIZATION
# =============================================================================
# Library loaded successfully
# Scripts can check for $_MYSQL_UTILS_LIB_LOADED to verify library is loaded

View File

@@ -1,7 +1,7 @@
#!/bin/bash #!/bin/bash
# Utility to migrate module images (and optionally storage) to a remote host. # Utility to migrate deployment images (and optionally storage) to a remote host.
# Assumes module images have already been rebuilt locally. # Assumes your runtime images have already been built or pulled locally.
set -euo pipefail set -euo pipefail
@@ -41,6 +41,74 @@ resolve_project_image(){
echo "${project_name}:${tag}" echo "${project_name}:${tag}"
} }
declare -a DEPLOY_IMAGE_REFS=()
declare -a CLEANUP_IMAGE_REFS=()
declare -A DEPLOY_IMAGE_SET=()
declare -A CLEANUP_IMAGE_SET=()
add_deploy_image_ref(){
  # Record image ref $1 in the deploy list (deduplicated through
  # DEPLOY_IMAGE_SET) and mirror it into the cleanup list as well.
  # Blank refs are silently ignored.
  local image="$1"
  [ -n "$image" ] || return 0
  if [[ -z "${DEPLOY_IMAGE_SET[$image]:-}" ]]; then
    DEPLOY_IMAGE_SET["$image"]=1
    DEPLOY_IMAGE_REFS+=("$image")
  fi
  add_cleanup_image_ref "$image"
}
add_cleanup_image_ref(){
  # Track image ref $1 for remote cleanup, skipping blanks and duplicates
  # (dedup state lives in CLEANUP_IMAGE_SET).
  local image="$1"
  [ -n "$image" ] || return 0
  [[ -n "${CLEANUP_IMAGE_SET[$image]:-}" ]] && return 0
  CLEANUP_IMAGE_SET["$image"]=1
  CLEANUP_IMAGE_REFS+=("$image")
}
collect_deploy_image_refs(){
  # Gather every image reference this deployment may ship: env-configured
  # module/playerbots images (falling back to project-tagged defaults),
  # the standard acore images, and — for cleanup only — the default
  # project-tagged names, which covers envs that moved to custom tags.
  local spec env_key tag
  local -a env_image_specs=(
    "AC_AUTHSERVER_IMAGE_MODULES|authserver-modules-latest"
    "AC_WORLDSERVER_IMAGE_MODULES|worldserver-modules-latest"
    "AC_AUTHSERVER_IMAGE_PLAYERBOTS|authserver-playerbots"
    "AC_WORLDSERVER_IMAGE_PLAYERBOTS|worldserver-playerbots"
    "AC_DB_IMPORT_IMAGE|db-import-playerbots"
    "AC_CLIENT_DATA_IMAGE_PLAYERBOTS|client-data-playerbots"
  )
  for spec in "${env_image_specs[@]}"; do
    env_key="${spec%%|*}"
    tag="${spec#*|}"
    add_deploy_image_ref "$(read_env_value "$env_key" "$(resolve_project_image "$tag")")"
  done
  add_deploy_image_ref "$(read_env_value AC_AUTHSERVER_IMAGE "acore/ac-wotlk-authserver:master")"
  add_deploy_image_ref "$(read_env_value AC_WORLDSERVER_IMAGE "acore/ac-wotlk-worldserver:master")"
  add_deploy_image_ref "$(read_env_value AC_CLIENT_DATA_IMAGE "acore/ac-wotlk-client-data:master")"
  # Include default project-tagged images for cleanup even if env moved to custom tags
  for tag in authserver-modules-latest worldserver-modules-latest \
    authserver-playerbots worldserver-playerbots \
    db-import-playerbots client-data-playerbots; do
    add_cleanup_image_ref "$(resolve_project_image "$tag")"
  done
}
ensure_host_writable(){ ensure_host_writable(){
local path="$1" local path="$1"
[ -n "$path" ] || return 0 [ -n "$path" ] || return 0
@@ -76,9 +144,13 @@ Options:
--port PORT SSH port (default: 22) --port PORT SSH port (default: 22)
--identity PATH SSH private key (passed to scp/ssh) --identity PATH SSH private key (passed to scp/ssh)
--project-dir DIR Remote project directory (default: ~/<project-name>) --project-dir DIR Remote project directory (default: ~/<project-name>)
--env-file PATH Use this env file for image lookup and upload (default: ./.env)
--tarball PATH Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar) --tarball PATH Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
--storage PATH Remote storage directory (default: <project-dir>/storage) --storage PATH Remote storage directory (default: <project-dir>/storage)
--skip-storage Do not sync the storage directory --skip-storage Do not sync the storage directory
--skip-env Do not upload .env to the remote host
--preserve-containers Skip stopping/removing existing remote containers and images
--clean-containers Stop/remove existing ac-* containers and project images on remote
--copy-source Copy the full local project directory instead of syncing via git --copy-source Copy the full local project directory instead of syncing via git
--yes, -y Auto-confirm prompts (for existing deployments) --yes, -y Auto-confirm prompts (for existing deployments)
--help Show this help --help Show this help
@@ -95,6 +167,9 @@ REMOTE_STORAGE=""
SKIP_STORAGE=0 SKIP_STORAGE=0
ASSUME_YES=0 ASSUME_YES=0
COPY_SOURCE=0 COPY_SOURCE=0
SKIP_ENV=0
PRESERVE_CONTAINERS=0
CLEAN_CONTAINERS=0
while [[ $# -gt 0 ]]; do while [[ $# -gt 0 ]]; do
case "$1" in case "$1" in
@@ -103,9 +178,13 @@ while [[ $# -gt 0 ]]; do
--port) PORT="$2"; shift 2;; --port) PORT="$2"; shift 2;;
--identity) IDENTITY="$2"; shift 2;; --identity) IDENTITY="$2"; shift 2;;
--project-dir) PROJECT_DIR="$2"; shift 2;; --project-dir) PROJECT_DIR="$2"; shift 2;;
--env-file) ENV_FILE="$2"; shift 2;;
--tarball) TARBALL="$2"; shift 2;; --tarball) TARBALL="$2"; shift 2;;
--storage) REMOTE_STORAGE="$2"; shift 2;; --storage) REMOTE_STORAGE="$2"; shift 2;;
--skip-storage) SKIP_STORAGE=1; shift;; --skip-storage) SKIP_STORAGE=1; shift;;
--skip-env) SKIP_ENV=1; shift;;
--preserve-containers) PRESERVE_CONTAINERS=1; shift;;
--clean-containers) CLEAN_CONTAINERS=1; shift;;
--copy-source) COPY_SOURCE=1; shift;; --copy-source) COPY_SOURCE=1; shift;;
--yes|-y) ASSUME_YES=1; shift;; --yes|-y) ASSUME_YES=1; shift;;
--help|-h) usage; exit 0;; --help|-h) usage; exit 0;;
@@ -119,6 +198,19 @@ if [[ -z "$HOST" || -z "$USER" ]]; then
exit 1 exit 1
fi fi
# Refuse contradictory container-lifecycle flags before doing any remote work.
if [[ "$CLEAN_CONTAINERS" -eq 1 && "$PRESERVE_CONTAINERS" -eq 1 ]]; then
echo "Cannot combine --clean-containers with --preserve-containers." >&2
exit 1
fi
# Normalize env file path if provided and recompute defaults
# An existing --env-file argument is made absolute (so later cd's cannot
# break it); otherwise fall back to the project-root .env.
if [ -n "$ENV_FILE" ] && [ -f "$ENV_FILE" ]; then
ENV_FILE="$(cd "$(dirname "$ENV_FILE")" && pwd)/$(basename "$ENV_FILE")"
else
ENV_FILE="$PROJECT_ROOT/.env"
fi
# The project name drives remote directory and image-tag defaults.
DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
expand_remote_path(){ expand_remote_path(){
local path="$1" local path="$1"
case "$path" in case "$path" in
@@ -145,6 +237,27 @@ ensure_host_writable "$LOCAL_STORAGE_ROOT"
TARBALL="${TARBALL:-${LOCAL_STORAGE_ROOT}/images/acore-modules-images.tar}" TARBALL="${TARBALL:-${LOCAL_STORAGE_ROOT}/images/acore-modules-images.tar}"
ensure_host_writable "$(dirname "$TARBALL")" ensure_host_writable "$(dirname "$TARBALL")"
# Resolve module SQL staging paths (local and remote)
resolve_path_relative_to_project(){
  # Anchor a relative path ($1) under a root directory ($2); absolute paths
  # pass through untouched. A leading "./" on the input and any trailing
  # slash on the result are stripped.
  local candidate="$1" anchor="$2"
  case "$candidate" in
    /*) ;;
    *)
      candidate="${candidate#./}"
      candidate="${anchor%/}/$candidate"
      ;;
  esac
  echo "${candidate%/}"
}
# Resolve the raw module-SQL staging path from the env file, defaulting to
# <local-storage>/module-sql-updates.
STAGE_SQL_PATH_RAW="$(read_env_value STAGE_PATH_MODULE_SQL "${LOCAL_STORAGE_ROOT:-./local-storage}/module-sql-updates")"
# Ensure STORAGE_PATH_LOCAL is defined to avoid set -u failures during expansion
if [ -z "${STORAGE_PATH_LOCAL:-}" ]; then
STORAGE_PATH_LOCAL="$LOCAL_STORAGE_ROOT"
fi
# Expand any env references (e.g., ${STORAGE_PATH_LOCAL})
# NOTE(review): eval also executes any $(...) command substitution embedded
# in the env value — STAGE_PATH_MODULE_SQL must be trusted input; consider a
# targeted substitution instead of eval.
STAGE_SQL_PATH_RAW="$(eval "echo \"$STAGE_SQL_PATH_RAW\"")"
# Compute matching local and remote staging directories from the same raw path.
LOCAL_STAGE_SQL_DIR="$(resolve_path_relative_to_project "$STAGE_SQL_PATH_RAW" "$PROJECT_ROOT")"
REMOTE_STAGE_SQL_DIR="$(resolve_path_relative_to_project "$STAGE_SQL_PATH_RAW" "$PROJECT_DIR")"
SCP_OPTS=(-P "$PORT") SCP_OPTS=(-P "$PORT")
SSH_OPTS=(-p "$PORT") SSH_OPTS=(-p "$PORT")
if [[ -n "$IDENTITY" ]]; then if [[ -n "$IDENTITY" ]]; then
@@ -200,14 +313,35 @@ validate_remote_environment(){
local running_containers local running_containers
running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l") running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l")
if [ "$running_containers" -gt 0 ]; then if [ "$running_containers" -gt 0 ]; then
echo "⚠️ Warning: Found $running_containers running AzerothCore containers" if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
echo " Migration will overwrite existing deployment" echo "⚠️ Found $running_containers running AzerothCore containers; --preserve-containers set, leaving them running."
if [ "$ASSUME_YES" != "1" ]; then if [ "$ASSUME_YES" != "1" ]; then
read -r -p " Continue with migration? [y/N]: " reply read -r -p " Continue without stopping containers? [y/N]: " reply
case "$reply" in case "$reply" in
[Yy]*) echo " Proceeding with migration..." ;; [Yy]*) echo " Proceeding with migration (containers preserved)..." ;;
*) echo " Migration cancelled."; exit 1 ;; *) echo " Migration cancelled."; exit 1 ;;
esac esac
fi
elif [ "$CLEAN_CONTAINERS" -eq 1 ]; then
echo "⚠️ Found $running_containers running AzerothCore containers"
echo " --clean-containers set: they will be stopped/removed during migration."
if [ "$ASSUME_YES" != "1" ]; then
read -r -p " Continue with cleanup? [y/N]: " reply
case "$reply" in
[Yy]*) echo " Proceeding with cleanup..." ;;
*) echo " Migration cancelled."; exit 1 ;;
esac
fi
else
echo "⚠️ Warning: Found $running_containers running AzerothCore containers"
echo " Migration will NOT stop them automatically. Use --clean-containers to stop/remove."
if [ "$ASSUME_YES" != "1" ]; then
read -r -p " Continue with migration? [y/N]: " reply
case "$reply" in
[Yy]*) echo " Proceeding with migration..." ;;
*) echo " Migration cancelled."; exit 1 ;;
esac
fi
fi fi
fi fi
@@ -223,6 +357,25 @@ validate_remote_environment(){
echo "✅ Remote environment validation complete" echo "✅ Remote environment validation complete"
} }
confirm_remote_storage_overwrite(){
# Prompt before syncing over non-empty remote storage. No-op when storage
# sync is already skipped or --yes was passed; when the operator declines,
# demotes this run to SKIP_STORAGE=1 instead of aborting the migration.
if [[ $SKIP_STORAGE -ne 0 ]]; then
return
fi
if [[ "$ASSUME_YES" = "1" ]]; then
return
fi
local has_content
# Probe for any entry directly under $REMOTE_STORAGE; -print -quit stops at
# the first match so this stays cheap on large trees.
has_content=$(run_ssh "if [ -d '$REMOTE_STORAGE' ]; then find '$REMOTE_STORAGE' -mindepth 1 -maxdepth 1 -print -quit; fi")
if [ -n "$has_content" ]; then
echo "⚠️ Remote storage at $REMOTE_STORAGE contains existing data."
read -r -p " Continue and sync local storage over it? [y/N]: " reply
case "${reply,,}" in
y|yes) echo " Proceeding with storage sync..." ;;
*) echo " Skipping storage sync for this run."; SKIP_STORAGE=1 ;;
esac
fi
}
copy_source_tree(){ copy_source_tree(){
echo " • Copying full local project directory..." echo " • Copying full local project directory..."
ensure_remote_temp_dir ensure_remote_temp_dir
@@ -286,27 +439,23 @@ setup_remote_repository(){
} }
cleanup_stale_docker_resources(){ cleanup_stale_docker_resources(){
if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
echo "⋅ Skipping remote container/image cleanup (--preserve-containers)"
return
fi
if [ "$CLEAN_CONTAINERS" -ne 1 ]; then
echo "⋅ Skipping remote runtime cleanup (containers and images preserved)."
return
fi
echo "⋅ Cleaning up stale Docker resources on remote..." echo "⋅ Cleaning up stale Docker resources on remote..."
# Get project name to target our containers/images specifically
local project_name
project_name="$(resolve_project_name)"
# Stop and remove old containers # Stop and remove old containers
echo " • Removing old containers..." echo " • Removing old containers..."
run_ssh "docker ps -a --filter 'name=ac-' --format '{{.Names}}' | xargs -r docker rm -f 2>/dev/null || true" run_ssh "docker ps -a --filter 'name=ac-' --format '{{.Names}}' | xargs -r docker rm -f 2>/dev/null || true"
# Remove old project images to force fresh load # Remove old project images to force fresh load
echo " • Removing old project images..." echo " • Removing old project images..."
local images_to_remove=( for img in "${CLEANUP_IMAGE_REFS[@]}"; do
"${project_name}:authserver-modules-latest"
"${project_name}:worldserver-modules-latest"
"${project_name}:authserver-playerbots"
"${project_name}:worldserver-playerbots"
"${project_name}:db-import-playerbots"
"${project_name}:client-data-playerbots"
)
for img in "${images_to_remove[@]}"; do
run_ssh "docker rmi '$img' 2>/dev/null || true" run_ssh "docker rmi '$img' 2>/dev/null || true"
done done
@@ -320,31 +469,25 @@ cleanup_stale_docker_resources(){
validate_remote_environment validate_remote_environment
echo "⋅ Exporting module images to $TARBALL" collect_deploy_image_refs
echo "⋅ Exporting deployment images to $TARBALL"
# Ensure destination directory exists
ensure_host_writable "$(dirname "$TARBALL")"
# Check which images are available and collect them # Check which images are available and collect them
IMAGES_TO_SAVE=() IMAGES_TO_SAVE=()
MISSING_IMAGES=()
project_auth_modules="$(resolve_project_image "authserver-modules-latest")" for image in "${DEPLOY_IMAGE_REFS[@]}"; do
project_world_modules="$(resolve_project_image "worldserver-modules-latest")"
project_auth_playerbots="$(resolve_project_image "authserver-playerbots")"
project_world_playerbots="$(resolve_project_image "worldserver-playerbots")"
project_db_import="$(resolve_project_image "db-import-playerbots")"
project_client_data="$(resolve_project_image "client-data-playerbots")"
for image in \
"$project_auth_modules" \
"$project_world_modules" \
"$project_auth_playerbots" \
"$project_world_playerbots" \
"$project_db_import" \
"$project_client_data"; do
if docker image inspect "$image" >/dev/null 2>&1; then if docker image inspect "$image" >/dev/null 2>&1; then
IMAGES_TO_SAVE+=("$image") IMAGES_TO_SAVE+=("$image")
else
MISSING_IMAGES+=("$image")
fi fi
done done
if [ ${#IMAGES_TO_SAVE[@]} -eq 0 ]; then if [ ${#IMAGES_TO_SAVE[@]} -eq 0 ]; then
echo "❌ No AzerothCore images found to migrate. Run './build.sh' first or pull standard images." echo "❌ No AzerothCore images found to migrate. Run './build.sh' first or pull the images defined in your .env."
exit 1 exit 1
fi fi
@@ -352,6 +495,13 @@ echo "⋅ Found ${#IMAGES_TO_SAVE[@]} images to migrate:"
printf ' • %s\n' "${IMAGES_TO_SAVE[@]}" printf ' • %s\n' "${IMAGES_TO_SAVE[@]}"
docker image save "${IMAGES_TO_SAVE[@]}" > "$TARBALL" docker image save "${IMAGES_TO_SAVE[@]}" > "$TARBALL"
# Report images that were skipped because they are not present locally; the
# migration still proceeds with whatever was found (remote may pull the rest).
if [ ${#MISSING_IMAGES[@]} -gt 0 ]; then
echo "⚠️ Skipping ${#MISSING_IMAGES[@]} images not present locally (will need to pull on remote if required):"
printf ' • %s\n' "${MISSING_IMAGES[@]}"
fi
confirm_remote_storage_overwrite
if [[ $SKIP_STORAGE -eq 0 ]]; then if [[ $SKIP_STORAGE -eq 0 ]]; then
if [[ -d storage ]]; then if [[ -d storage ]]; then
echo "⋅ Syncing storage to remote" echo "⋅ Syncing storage to remote"
@@ -387,6 +537,18 @@ if [[ $SKIP_STORAGE -eq 0 ]]; then
rm -f "$modules_tar" rm -f "$modules_tar"
run_ssh "tar -xf '$REMOTE_TEMP_DIR/acore-modules.tar' -C '$REMOTE_STORAGE/modules' && rm '$REMOTE_TEMP_DIR/acore-modules.tar'" run_ssh "tar -xf '$REMOTE_TEMP_DIR/acore-modules.tar' -C '$REMOTE_STORAGE/modules' && rm '$REMOTE_TEMP_DIR/acore-modules.tar'"
fi fi
# Sync the module SQL staging directory (STAGE_PATH_MODULE_SQL) to the remote:
# wipe and recreate the remote staging dir, then ship its contents as a single
# tarball over scp (one transfer instead of many small copies) and unpack it.
if [[ -d "$LOCAL_STAGE_SQL_DIR" ]]; then
echo "⋅ Syncing module SQL staging to remote"
run_ssh "rm -rf '$REMOTE_STAGE_SQL_DIR' && mkdir -p '$REMOTE_STAGE_SQL_DIR'"
# Local tarball is deleted as soon as it has been uploaded.
sql_tar=$(mktemp)
tar -cf "$sql_tar" -C "$LOCAL_STAGE_SQL_DIR" .
ensure_remote_temp_dir
run_scp "$sql_tar" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-module-sql.tar"
rm -f "$sql_tar"
run_ssh "tar -xf '$REMOTE_TEMP_DIR/acore-module-sql.tar' -C '$REMOTE_STAGE_SQL_DIR' && rm '$REMOTE_TEMP_DIR/acore-module-sql.tar'"
fi
fi fi
reset_remote_post_install_marker(){ reset_remote_post_install_marker(){
@@ -406,9 +568,35 @@ ensure_remote_temp_dir
run_scp "$TARBALL" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-modules-images.tar" run_scp "$TARBALL" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-modules-images.tar"
run_ssh "docker load < '$REMOTE_TEMP_DIR/acore-modules-images.tar' && rm '$REMOTE_TEMP_DIR/acore-modules-images.tar'" run_ssh "docker load < '$REMOTE_TEMP_DIR/acore-modules-images.tar' && rm '$REMOTE_TEMP_DIR/acore-modules-images.tar'"
if [[ -f .env ]]; then if [[ -f "$ENV_FILE" ]]; then
echo "⋅ Uploading .env" if [[ $SKIP_ENV -eq 1 ]]; then
run_scp .env "$USER@$HOST:$PROJECT_DIR/.env" echo "⋅ Skipping .env upload (--skip-env)"
else
remote_env_path="$PROJECT_DIR/.env"
upload_env=1
if run_ssh "test -f '$remote_env_path'"; then
if [ "$ASSUME_YES" = "1" ]; then
echo "⋅ Overwriting existing remote .env (auto-confirm)"
elif [ -t 0 ]; then
read -r -p "⚠️ Remote .env exists at $remote_env_path. Overwrite? [y/N]: " reply
case "$reply" in
[Yy]*) ;;
*) upload_env=0 ;;
esac
else
echo "⚠️ Remote .env exists at $remote_env_path; skipping upload (no confirmation available)"
upload_env=0
fi
fi
if [[ $upload_env -eq 1 ]]; then
echo "⋅ Uploading .env"
run_scp "$ENV_FILE" "$USER@$HOST:$remote_env_path"
else
echo "⋅ Keeping existing remote .env"
fi
fi
fi fi
echo "⋅ Remote prepares completed" echo "⋅ Remote prepares completed"

View File

@@ -3,8 +3,21 @@
# to re-copy SQL files. # to re-copy SQL files.
set -euo pipefail set -euo pipefail
info(){ echo "🔧 [restore-stage] $*"; } SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
warn(){ echo "⚠️ [restore-stage] $*" >&2; }
# Source common library for standardized logging
if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
exit 1
fi
# Specialized prefixed logging for this restoration context.
# BUGFIX: snapshot the common.sh implementations under new names BEFORE
# redefining info/warn. Bash resolves callees at invocation time, so the
# previous wiring (restore_info -> info, info -> restore_info) recursed
# forever as soon as either logger was called after the redefinition.
eval "common_$(declare -f info)"
eval "common_$(declare -f warn)"
restore_info() { common_info "🔧 [restore-stage] $*"; }
restore_warn() { common_warn "[restore-stage] $*"; }
# Maintain compatibility with existing function calls
info() { restore_info "$*"; }
warn() { restore_warn "$*"; }
MODULES_DIR="${MODULES_DIR:-/modules}" MODULES_DIR="${MODULES_DIR:-/modules}"
MODULES_META_DIR="${MODULES_DIR}/.modules-meta" MODULES_META_DIR="${MODULES_DIR}/.modules-meta"

View File

@@ -0,0 +1,88 @@
#!/bin/bash
# Ensure dbimport.conf exists with usable connection values.
set -euo pipefail 2>/dev/null || set -eu
# Usage: seed_dbimport_conf [conf_dir]
# - conf_dir: target directory (defaults to DBIMPORT_CONF_DIR or /azerothcore/env/dist/etc)
seed_dbimport_conf() {
  # Ensure <conf_dir>/dbimport.conf exists with usable connection values.
  #
  # Usage: seed_dbimport_conf [conf_dir]
  #   conf_dir - target directory (defaults to DBIMPORT_CONF_DIR or
  #              /azerothcore/env/dist/etc)
  #
  # Reads (all optional, each with a safe default): DBIMPORT_SOURCE_ROOT /
  # AC_SOURCE_DIR, DBIMPORT_DIST_PATH, DBIMPORT_TEMP_DIR, CONTAINER_MYSQL /
  # MYSQL_*, DB_*_NAME, and the updater/thread tuning variables below.
  local conf_dir="${1:-${DBIMPORT_CONF_DIR:-/azerothcore/env/dist/etc}}"
  local conf="${conf_dir}/dbimport.conf"
  local dist="${conf}.dist"
  local source_root="${DBIMPORT_SOURCE_ROOT:-${AC_SOURCE_DIR:-/local-storage-root/source/azerothcore-playerbots}}"
  if [ ! -d "$source_root" ]; then
    # Fall back to the standard (non-playerbots) checkout when present.
    local fallback="/local-storage-root/source/azerothcore-wotlk"
    if [ -d "$fallback" ]; then
      source_root="$fallback"
    fi
  fi
  local source_dist="${DBIMPORT_DIST_PATH:-${source_root}/src/tools/dbimport/dbimport.conf.dist}"
  # Put temp dir inside the writable config mount so non-root can create files.
  local temp_dir="${DBIMPORT_TEMP_DIR:-/azerothcore/env/dist/etc/temp}"
  mkdir -p "$conf_dir" "$temp_dir"
  # Prefer a real .dist from the source tree if it exists (-n: keep existing).
  if [ -f "$source_dist" ]; then
    cp -n "$source_dist" "$dist" 2>/dev/null || true
  fi
  if [ ! -f "$conf" ]; then
    if [ -f "$dist" ]; then
      cp "$dist" "$conf"
    else
      echo "⚠️ dbimport.conf.dist not found; generating minimal dbimport.conf" >&2
      cat > "$conf" <<EOF
LoginDatabaseInfo = "localhost;3306;root;root;acore_auth"
WorldDatabaseInfo = "localhost;3306;root;root;acore_world"
CharacterDatabaseInfo = "localhost;3306;root;root;acore_characters"
PlayerbotsDatabaseInfo = "localhost;3306;root;root;acore_playerbots"
EnableDatabases = 15
Updates.AutoSetup = 1
MySQLExecutable = "/usr/bin/mysql"
TempDir = "/azerothcore/env/dist/temp"
EOF
    fi
  fi
  # set_conf KEY VALUE FILE [QUOTED=true] - idempotent assignment: rewrite an
  # existing "KEY = ..." line in place, or append one when absent.
  set_conf() {
    local key="$1" value="$2" file="$3" quoted="${4:-true}"
    local formatted="$value"
    if [ "$quoted" = "true" ]; then
      formatted="\"${value}\""
    fi
    if grep -qE "^[[:space:]]*${key}[[:space:]]*=" "$file"; then
      # BUGFIX: escape the characters that are special in a sed replacement
      # (backslash, '&', and our '|' delimiter) so values such as passwords
      # containing them are written verbatim instead of corrupting or
      # breaking the sed expression.
      local replacement="${key} = ${formatted}"
      replacement="${replacement//\\/\\\\}"
      replacement="${replacement//&/\\&}"
      replacement="${replacement//|/\\|}"
      sed -i "s|^[[:space:]]*${key}[[:space:]]*=.*|${replacement}|" "$file"
    else
      printf '%s = %s\n' "$key" "$formatted" >> "$file"
    fi
  }
  # Connection endpoint and credentials (container-friendly defaults).
  local host="${CONTAINER_MYSQL:-${MYSQL_HOST:-localhost}}"
  local port="${MYSQL_PORT:-3306}"
  local user="${MYSQL_USER:-root}"
  local pass="${MYSQL_ROOT_PASSWORD:-root}"
  local db_auth="${DB_AUTH_NAME:-acore_auth}"
  local db_world="${DB_WORLD_NAME:-acore_world}"
  local db_chars="${DB_CHARACTERS_NAME:-acore_characters}"
  local db_bots="${DB_PLAYERBOTS_NAME:-acore_playerbots}"
  set_conf "LoginDatabaseInfo" "${host};${port};${user};${pass};${db_auth}" "$conf"
  set_conf "WorldDatabaseInfo" "${host};${port};${user};${pass};${db_world}" "$conf"
  set_conf "CharacterDatabaseInfo" "${host};${port};${user};${pass};${db_chars}" "$conf"
  set_conf "PlayerbotsDatabaseInfo" "${host};${port};${user};${pass};${db_bots}" "$conf"
  # Updater behaviour and worker-thread tuning (unquoted numeric values).
  set_conf "EnableDatabases" "${AC_UPDATES_ENABLE_DATABASES:-15}" "$conf" false
  set_conf "Updates.AutoSetup" "${AC_UPDATES_AUTO_SETUP:-1}" "$conf" false
  set_conf "Updates.ExceptionShutdownDelay" "${AC_UPDATES_EXCEPTION_SHUTDOWN_DELAY:-10000}" "$conf" false
  set_conf "Updates.AllowedModules" "${DB_UPDATES_ALLOWED_MODULES:-all}" "$conf"
  set_conf "Updates.Redundancy" "${DB_UPDATES_REDUNDANCY:-1}" "$conf" false
  set_conf "Database.Reconnect.Seconds" "${DB_RECONNECT_SECONDS:-5}" "$conf" false
  set_conf "Database.Reconnect.Attempts" "${DB_RECONNECT_ATTEMPTS:-5}" "$conf" false
  set_conf "LoginDatabase.WorkerThreads" "${DB_LOGIN_WORKER_THREADS:-1}" "$conf" false
  set_conf "WorldDatabase.WorkerThreads" "${DB_WORLD_WORKER_THREADS:-1}" "$conf" false
  set_conf "CharacterDatabase.WorkerThreads" "${DB_CHARACTER_WORKER_THREADS:-1}" "$conf" false
  set_conf "LoginDatabase.SynchThreads" "${DB_LOGIN_SYNCH_THREADS:-1}" "$conf" false
  set_conf "WorldDatabase.SynchThreads" "${DB_WORLD_SYNCH_THREADS:-1}" "$conf" false
  set_conf "CharacterDatabase.SynchThreads" "${DB_CHARACTER_SYNCH_THREADS:-1}" "$conf" false
  set_conf "MySQLExecutable" "/usr/bin/mysql" "$conf"
  set_conf "TempDir" "$temp_dir" "$conf"
}

View File

@@ -259,14 +259,14 @@ SENTINEL_FILE="$LOCAL_STORAGE_PATH/modules/.requires_rebuild"
MODULES_META_DIR="$STORAGE_PATH/modules/.modules-meta" MODULES_META_DIR="$STORAGE_PATH/modules/.modules-meta"
RESTORE_PRESTAGED_FLAG="$MODULES_META_DIR/.restore-prestaged" RESTORE_PRESTAGED_FLAG="$MODULES_META_DIR/.restore-prestaged"
MODULES_ENABLED_FILE="$MODULES_META_DIR/modules-enabled.txt" MODULES_ENABLED_FILE="$MODULES_META_DIR/modules-enabled.txt"
MODULE_SQL_STAGE_PATH="$(read_env MODULE_SQL_STAGE_PATH "$STORAGE_PATH/module-sql-updates")" STAGE_PATH_MODULE_SQL="$(read_env STAGE_PATH_MODULE_SQL "$STORAGE_PATH/module-sql-updates")"
MODULE_SQL_STAGE_PATH="$(eval "echo \"$MODULE_SQL_STAGE_PATH\"")" STAGE_PATH_MODULE_SQL="$(eval "echo \"$STAGE_PATH_MODULE_SQL\"")"
if [[ "$MODULE_SQL_STAGE_PATH" != /* ]]; then if [[ "$STAGE_PATH_MODULE_SQL" != /* ]]; then
MODULE_SQL_STAGE_PATH="$PROJECT_DIR/$MODULE_SQL_STAGE_PATH" STAGE_PATH_MODULE_SQL="$PROJECT_DIR/$STAGE_PATH_MODULE_SQL"
fi fi
MODULE_SQL_STAGE_PATH="$(canonical_path "$MODULE_SQL_STAGE_PATH")" STAGE_PATH_MODULE_SQL="$(canonical_path "$STAGE_PATH_MODULE_SQL")"
mkdir -p "$MODULE_SQL_STAGE_PATH" mkdir -p "$STAGE_PATH_MODULE_SQL"
ensure_host_writable "$MODULE_SQL_STAGE_PATH" ensure_host_writable "$STAGE_PATH_MODULE_SQL"
HOST_STAGE_HELPER_IMAGE="$(read_env ALPINE_IMAGE "alpine:latest")" HOST_STAGE_HELPER_IMAGE="$(read_env ALPINE_IMAGE "alpine:latest")"
declare -A ENABLED_MODULES=() declare -A ENABLED_MODULES=()
@@ -439,7 +439,7 @@ esac
# Stage module SQL to core updates directory (after containers start) # Stage module SQL to core updates directory (after containers start)
host_stage_clear(){ host_stage_clear(){
docker run --rm \ docker run --rm \
-v "$MODULE_SQL_STAGE_PATH":/host-stage \ -v "$STAGE_PATH_MODULE_SQL":/host-stage \
"$HOST_STAGE_HELPER_IMAGE" \ "$HOST_STAGE_HELPER_IMAGE" \
sh -c 'find /host-stage -type f -name "MODULE_*.sql" -delete' >/dev/null 2>&1 || true sh -c 'find /host-stage -type f -name "MODULE_*.sql" -delete' >/dev/null 2>&1 || true
} }
@@ -447,7 +447,7 @@ host_stage_clear(){
host_stage_reset_dir(){ host_stage_reset_dir(){
local dir="$1" local dir="$1"
docker run --rm \ docker run --rm \
-v "$MODULE_SQL_STAGE_PATH":/host-stage \ -v "$STAGE_PATH_MODULE_SQL":/host-stage \
"$HOST_STAGE_HELPER_IMAGE" \ "$HOST_STAGE_HELPER_IMAGE" \
sh -c "mkdir -p /host-stage/$dir && rm -f /host-stage/$dir/MODULE_*.sql" >/dev/null 2>&1 || true sh -c "mkdir -p /host-stage/$dir && rm -f /host-stage/$dir/MODULE_*.sql" >/dev/null 2>&1 || true
} }
@@ -461,7 +461,7 @@ copy_to_host_stage(){
local base_name local base_name
base_name="$(basename "$file_path")" base_name="$(basename "$file_path")"
docker run --rm \ docker run --rm \
-v "$MODULE_SQL_STAGE_PATH":/host-stage \ -v "$STAGE_PATH_MODULE_SQL":/host-stage \
-v "$src_dir":/src \ -v "$src_dir":/src \
"$HOST_STAGE_HELPER_IMAGE" \ "$HOST_STAGE_HELPER_IMAGE" \
sh -c "mkdir -p /host-stage/$core_dir && cp \"/src/$base_name\" \"/host-stage/$core_dir/$target_name\"" >/dev/null 2>&1 sh -c "mkdir -p /host-stage/$core_dir && cp \"/src/$base_name\" \"/host-stage/$core_dir/$target_name\"" >/dev/null 2>&1

View File

@@ -9,6 +9,10 @@ from pathlib import Path
PROJECT_DIR = Path(__file__).resolve().parents[2] PROJECT_DIR = Path(__file__).resolve().parents[2]
ENV_FILE = PROJECT_DIR / ".env" ENV_FILE = PROJECT_DIR / ".env"
# Fallback upstream repositories/branches used when the corresponding
# ACORE_REPO_* / ACORE_BRANCH_* keys are absent from .env
# (consumed by repo_config_for_variant).
DEFAULT_ACORE_STANDARD_REPO = "https://github.com/azerothcore/azerothcore-wotlk.git"
DEFAULT_ACORE_PLAYERBOTS_REPO = "https://github.com/mod-playerbots/azerothcore-wotlk.git"
DEFAULT_ACORE_STANDARD_BRANCH = "master"
DEFAULT_ACORE_PLAYERBOTS_BRANCH = "Playerbot"
def load_env(): def load_env():
env = {} env = {}
@@ -150,6 +154,195 @@ def volume_info(name, fallback=None):
pass pass
return {"name": name, "exists": False, "mountpoint": "-"} return {"name": name, "exists": False, "mountpoint": "-"}
def detect_source_variant(env):
    """Decide which AzerothCore source flavor the stack is built from.

    Returns "playerbots" or "core". Precedence: explicit STACK_SOURCE_VARIANT,
    then STACK_IMAGE_MODE, then the playerbot module/feature toggles.
    """
    explicit = read_env(env, "STACK_SOURCE_VARIANT", "").strip().lower()
    if explicit in ("playerbots", "playerbot"):
        return "playerbots"
    if explicit == "core":
        return "core"
    image_mode = read_env(env, "STACK_IMAGE_MODE", "").strip().lower()
    toggled = (
        read_env(env, "MODULE_PLAYERBOTS", "0") == "1"
        or read_env(env, "PLAYERBOT_ENABLED", "0") == "1"
    )
    return "playerbots" if image_mode == "playerbots" or toggled else "core"
def repo_config_for_variant(env, variant):
    """Resolve the (repo_url, branch) pair configured for a source variant."""
    if variant == "playerbots":
        repo_key, repo_default = "ACORE_REPO_PLAYERBOTS", DEFAULT_ACORE_PLAYERBOTS_REPO
        branch_key, branch_default = "ACORE_BRANCH_PLAYERBOTS", DEFAULT_ACORE_PLAYERBOTS_BRANCH
    else:
        repo_key, repo_default = "ACORE_REPO_STANDARD", DEFAULT_ACORE_STANDARD_REPO
        branch_key, branch_default = "ACORE_BRANCH_STANDARD", DEFAULT_ACORE_STANDARD_BRANCH
    return read_env(env, repo_key, repo_default), read_env(env, branch_key, branch_default)
def image_labels(image):
    """Return a Docker image's config labels as a {name: stripped-value} dict.

    Shells out to `docker image inspect`; any failure (missing docker binary,
    unknown image, timeout, malformed JSON) is swallowed and yields {}.
    """
    try:
        result = subprocess.run(
            ["docker", "image", "inspect", "--format", "{{json .Config.Labels}}", image],
            capture_output=True,
            text=True,
            check=True,
            timeout=3,  # keep status collection snappy even if the daemon stalls
        )
        labels = json.loads(result.stdout or "{}")
        if isinstance(labels, dict):
            # Label values may be None; normalize everything to stripped strings.
            return {k: (v or "").strip() for k, v in labels.items()}
    except Exception:
        pass
    return {}
def first_label(labels, keys):
    """Return the first non-empty value found in ``labels`` for ``keys`` ("" if none)."""
    return next((labels.get(key, "") for key in keys if labels.get(key, "")), "")
def short_commit(commit):
    """Trim whitespace and abbreviate full hex commit hashes to 12 characters.

    Non-hex values (tags, branch names, already-short text) pass through as-is.
    """
    trimmed = commit.strip()
    hex_chars = "0123456789abcdefABCDEF"
    looks_like_full_hash = len(trimmed) >= 12 and all(ch in hex_chars for ch in trimmed)
    return trimmed[:12] if looks_like_full_hash else trimmed
def git_info_from_path(path):
    """Extract commit metadata from a local git checkout.

    Returns a dict (commit, commit_short, date, repo, branch, path) or None
    when ``path`` is not a git working tree or HEAD cannot be resolved.
    """
    repo_path = Path(path)
    if not (repo_path / ".git").exists():
        return None

    def run_git(args):
        # Best-effort: any git failure (missing binary, corrupt repo) yields "".
        try:
            proc = subprocess.run(
                ["git"] + args,
                cwd=repo_path,
                capture_output=True,
                text=True,
                check=True,
            )
        except Exception:
            return ""
        return proc.stdout.strip()

    full_commit = run_git(["rev-parse", "HEAD"])
    if not full_commit:
        return None
    return {
        "commit": full_commit,
        "commit_short": run_git(["rev-parse", "--short", "HEAD"]) or short_commit(full_commit),
        "date": run_git(["log", "-1", "--format=%cd", "--date=iso-strict"]),
        "repo": run_git(["remote", "get-url", "origin"]),
        "branch": run_git(["rev-parse", "--abbrev-ref", "HEAD"]),
        "path": str(repo_path),
    }
def candidate_source_paths(env, variant):
    """Ordered, deduplicated list of directories that may hold the core source tree."""
    raw_candidates = []
    for key in ("MODULES_REBUILD_SOURCE_PATH", "SOURCE_DIR"):
        configured = read_env(env, key, "")
        if configured:
            raw_candidates.append(configured)
    local_root = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
    # Variant-matching checkout first, then the other flavor as a fallback.
    if variant == "playerbots":
        subdirs = ("azerothcore-playerbots", "azerothcore")
    else:
        subdirs = ("azerothcore", "azerothcore-playerbots")
    raw_candidates.extend(os.path.join(local_root, "source", d) for d in subdirs)

    normalized = []
    for candidate in raw_candidates:
        expanded = Path(expand_path(candidate, env)).expanduser()
        try:
            normalized.append(str(expanded.resolve()))
        except Exception:
            normalized.append(str(expanded))
    # dict preserves insertion order (3.7+): order-stable deduplication.
    return list(dict.fromkeys(normalized))
def build_info(service_data, env):
    """Summarize which source/commit the deployed images were built from.

    Discovery strategies, in order of preference:
      1. Build/OCI labels on candidate server images ("image-label").
      2. A local git checkout of the core source tree ("source-tree").
    Falls back to the .env-configured repo/branch with empty commit fields.

    Args:
        service_data: list of dicts with at least "name" and "image" keys.
        env: parsed .env mapping consumed via read_env().
    Returns:
        dict with variant/repo/branch/image/commit/commit_date/commit_source/
        source_path keys (all strings; "" when unknown).
    """
    variant = detect_source_variant(env)
    repo, branch = repo_config_for_variant(env, variant)
    info = {
        "variant": variant,
        "repo": repo,
        "branch": branch,
        "image": "",
        "commit": "",
        "commit_date": "",
        "commit_source": "",
        "source_path": "",
    }
    # Candidate images: whatever the running core services report, then any
    # image references configured in .env.
    image_candidates = []
    for svc in service_data:
        if svc.get("name") in ("ac-worldserver", "ac-authserver", "ac-db-import"):
            image = svc.get("image") or ""
            if image:
                image_candidates.append(image)
    for env_key in (
        "AC_WORLDSERVER_IMAGE_PLAYERBOTS",
        "AC_WORLDSERVER_IMAGE_MODULES",
        "AC_WORLDSERVER_IMAGE",
        "AC_AUTHSERVER_IMAGE_PLAYERBOTS",
        "AC_AUTHSERVER_IMAGE_MODULES",
        "AC_AUTHSERVER_IMAGE",
    ):
        value = read_env(env, env_key, "")
        if value:
            image_candidates.append(value)
    # Drop duplicates while preserving first-seen order.
    seen = set()
    deduped_images = []
    for img in image_candidates:
        if img not in seen:
            seen.add(img)
            deduped_images.append(img)
    commit_label_keys = [
        "build.source_commit",
        "org.opencontainers.image.revision",
        "org.opencontainers.image.version",
    ]
    date_label_keys = [
        "build.source_date",
        "org.opencontainers.image.created",
        "build.timestamp",
    ]
    for image in deduped_images:
        labels = image_labels(image)
        # Remember the first candidate even if it carries no useful labels.
        if not info["image"]:
            info["image"] = image
        if not labels:
            continue
        commit = short_commit(first_label(labels, commit_label_keys))
        date = first_label(labels, date_label_keys)
        if commit or date:
            info["commit"] = commit
            info["commit_date"] = date
            info["commit_source"] = "image-label"
            info["image"] = image
            return info
    # No labels matched; fall back to inspecting local source checkouts.
    for path in candidate_source_paths(env, variant):
        git_meta = git_info_from_path(path)
        if git_meta:
            info["commit"] = git_meta.get("commit_short") or short_commit(git_meta.get("commit", ""))
            info["commit_date"] = git_meta.get("date", "")
            info["commit_source"] = "source-tree"
            info["source_path"] = git_meta.get("path", "")
            # Prefer the checkout's actual remote/branch over .env defaults.
            info["repo"] = git_meta.get("repo") or info["repo"]
            info["branch"] = git_meta.get("branch") or info["branch"]
            return info
    return info
def expand_path(value, env): def expand_path(value, env):
storage = read_env(env, "STORAGE_PATH", "./storage") storage = read_env(env, "STORAGE_PATH", "./storage")
local_storage = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage") local_storage = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
@@ -175,13 +368,61 @@ def mysql_query(env, database, query):
except Exception: except Exception:
return 0 return 0
def escape_like_prefix(prefix):
    """Escape single quotes so the value can sit inside a SQL string literal.

    NOTE(review): LIKE wildcards (% and _) are deliberately left untouched;
    callers embed this as a prefix pattern — confirm that is acceptable.
    """
    return "''".join(prefix.split("'"))
def bot_prefixes(env):
    """Account-name prefixes that identify bot accounts.

    Reads PLAYERBOT_ACCOUNT_PREFIXES / PLAYERBOT_ACCOUNT_PREFIX (comma- and/or
    whitespace-separated); falls back to the built-in defaults when unset.
    """
    configured = []
    for key in ("PLAYERBOT_ACCOUNT_PREFIXES", "PLAYERBOT_ACCOUNT_PREFIX"):
        raw = read_env(env, key, "")
        configured.extend(token for token in raw.replace(",", " ").split() if token)
    # Default fallback if nothing configured.
    return configured if configured else ["playerbot", "rndbot", "bot"]
def user_stats(env): def user_stats(env):
db_auth = read_env(env, "DB_AUTH_NAME", "acore_auth") db_auth = read_env(env, "DB_AUTH_NAME", "acore_auth")
db_characters = read_env(env, "DB_CHARACTERS_NAME", "acore_characters") db_characters = read_env(env, "DB_CHARACTERS_NAME", "acore_characters")
accounts = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account;") prefixes = bot_prefixes(env)
online = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE online = 1;") account_conditions = []
for prefix in prefixes:
prefix = escape_like_prefix(prefix)
upper_prefix = prefix.upper()
account_conditions.append(f"UPPER(username) NOT LIKE '{upper_prefix}%%'")
account_query = "SELECT COUNT(*) FROM account"
if account_conditions:
account_query += " WHERE " + " AND ".join(account_conditions)
accounts = mysql_query(env, db_auth, account_query + ";")
online_conditions = ["c.online = 1"]
for prefix in prefixes:
prefix = escape_like_prefix(prefix)
upper_prefix = prefix.upper()
online_conditions.append(f"UPPER(a.username) NOT LIKE '{upper_prefix}%%'")
online_query = (
f"SELECT COUNT(DISTINCT a.id) FROM `{db_characters}`.characters c "
f"JOIN `{db_auth}`.account a ON a.id = c.account "
f"WHERE {' AND '.join(online_conditions)};"
)
online = mysql_query(env, db_characters, online_query)
active = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);") active = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")
characters = mysql_query(env, db_characters, "SELECT COUNT(*) FROM characters;") character_conditions = []
for prefix in prefixes:
prefix = escape_like_prefix(prefix)
upper_prefix = prefix.upper()
character_conditions.append(f"UPPER(a.username) NOT LIKE '{upper_prefix}%%'")
characters_query = (
f"SELECT COUNT(*) FROM `{db_characters}`.characters c "
f"JOIN `{db_auth}`.account a ON a.id = c.account"
)
if character_conditions:
characters_query += " WHERE " + " AND ".join(character_conditions)
characters = mysql_query(env, db_characters, characters_query + ";")
return { return {
"accounts": accounts, "accounts": accounts,
"online": online, "online": online,
@@ -274,6 +515,8 @@ def main():
"mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"), "mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"),
} }
build = build_info(service_data, env)
data = { data = {
"timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"project": project, "project": project,
@@ -285,6 +528,7 @@ def main():
"volumes": volumes, "volumes": volumes,
"users": user_stats(env), "users": user_stats(env),
"stats": docker_stats(), "stats": docker_stats(),
"build": build,
} }
print(json.dumps(data)) print(json.dumps(data))

View File

@@ -22,6 +22,32 @@ ICON_ERROR="❌"
ICON_INFO="" ICON_INFO=""
ICON_TEST="🧪" ICON_TEST="🧪"
resolve_path(){
  # Join PATH onto BASE unless PATH is already absolute, then normalize.
  # Prefers python3 (portable normpath); falls back to realpath -m.
  local base="$1" path="$2"
  if command -v python3 >/dev/null 2>&1; then
    python3 -c 'import os, sys
base, path = sys.argv[1:3]
print(os.path.normpath(path if os.path.isabs(path) else os.path.join(base, path)))' "$base" "$path"
  else
    (cd "$base" && realpath -m "$path")
  fi
}
# Load the project .env (if present) with auto-export (set -a) so every
# variable it defines is visible to child processes spawned by the tests.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set -a
  # shellcheck disable=SC1091
  source "$PROJECT_ROOT/.env"
  set +a
fi

# Modules dir under STORAGE_PATH_LOCAL, resolved against the project root
# when the configured path is relative.
LOCAL_MODULES_DIR_RAW="${STORAGE_PATH_LOCAL:-./local-storage}/modules"
LOCAL_MODULES_DIR="$(resolve_path "$PROJECT_ROOT" "$LOCAL_MODULES_DIR_RAW")"
# Counters # Counters
TESTS_TOTAL=0 TESTS_TOTAL=0
TESTS_PASSED=0 TESTS_PASSED=0
@@ -117,7 +143,7 @@ info "Running: python3 scripts/python/modules.py generate"
if python3 scripts/python/modules.py \ if python3 scripts/python/modules.py \
--env-path .env \ --env-path .env \
--manifest config/module-manifest.json \ --manifest config/module-manifest.json \
generate --output-dir local-storage/modules > /tmp/phase1-modules-generate.log 2>&1; then generate --output-dir "$LOCAL_MODULES_DIR" > /tmp/phase1-modules-generate.log 2>&1; then
ok "Module state generation successful" ok "Module state generation successful"
else else
# Check if it's just warnings # Check if it's just warnings
@@ -130,11 +156,11 @@ fi
# Test 4: Verify SQL manifest created # Test 4: Verify SQL manifest created
test_header "SQL Manifest Verification" test_header "SQL Manifest Verification"
if [ -f local-storage/modules/.sql-manifest.json ]; then if [ -f "$LOCAL_MODULES_DIR/.sql-manifest.json" ]; then
ok "SQL manifest created: local-storage/modules/.sql-manifest.json" ok "SQL manifest created: $LOCAL_MODULES_DIR/.sql-manifest.json"
# Check manifest structure # Check manifest structure
module_count=$(python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0") module_count=$(python3 -c "import json; data=json.load(open('$LOCAL_MODULES_DIR/.sql-manifest.json')); print(len(data.get('modules', [])))" 2>/dev/null || echo "0")
info "Modules with SQL: $module_count" info "Modules with SQL: $module_count"
if [ "$module_count" -gt 0 ]; then if [ "$module_count" -gt 0 ]; then
@@ -142,7 +168,7 @@ if [ -f local-storage/modules/.sql-manifest.json ]; then
# Show first module # Show first module
info "Sample module SQL info:" info "Sample module SQL info:"
python3 -c "import json; data=json.load(open('local-storage/modules/.sql-manifest.json')); m=data['modules'][0] if data['modules'] else {}; print(f\" Name: {m.get('name', 'N/A')}\n SQL files: {len(m.get('sql_files', {}))}\") " 2>/dev/null || true python3 -c "import json; data=json.load(open('$LOCAL_MODULES_DIR/.sql-manifest.json')); m=data['modules'][0] if data['modules'] else {}; print(f\" Name: {m.get('name', 'N/A')}\n SQL files: {len(m.get('sql_files', {}))}\") " 2>/dev/null || true
else else
warn "No modules with SQL files (expected if modules not yet staged)" warn "No modules with SQL files (expected if modules not yet staged)"
fi fi
@@ -152,19 +178,19 @@ fi
# Test 5: Verify modules.env created # Test 5: Verify modules.env created
test_header "Module Environment File Check" test_header "Module Environment File Check"
if [ -f local-storage/modules/modules.env ]; then if [ -f "$LOCAL_MODULES_DIR/modules.env" ]; then
ok "modules.env created" ok "modules.env created"
# Check for key exports # Check for key exports
if grep -q "MODULES_ENABLED=" local-storage/modules/modules.env; then if grep -q "MODULES_ENABLED=" "$LOCAL_MODULES_DIR/modules.env"; then
ok "MODULES_ENABLED variable present" ok "MODULES_ENABLED variable present"
fi fi
if grep -q "MODULES_REQUIRES_CUSTOM_BUILD=" local-storage/modules/modules.env; then if grep -q "MODULES_REQUIRES_CUSTOM_BUILD=" "$LOCAL_MODULES_DIR/modules.env"; then
ok "Build requirement flags present" ok "Build requirement flags present"
# Check if build required # Check if build required
source local-storage/modules/modules.env source "$LOCAL_MODULES_DIR/modules.env"
if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then if [ "${MODULES_REQUIRES_CUSTOM_BUILD:-0}" = "1" ]; then
info "Custom build required (C++ modules enabled)" info "Custom build required (C++ modules enabled)"
else else
@@ -177,8 +203,8 @@ fi
# Test 6: Check build requirement # Test 6: Check build requirement
test_header "Build Requirement Check" test_header "Build Requirement Check"
if [ -f local-storage/modules/modules.env ]; then if [ -f "$LOCAL_MODULES_DIR/modules.env" ]; then
source local-storage/modules/modules.env source "$LOCAL_MODULES_DIR/modules.env"
info "MODULES_REQUIRES_CUSTOM_BUILD=${MODULES_REQUIRES_CUSTOM_BUILD:-0}" info "MODULES_REQUIRES_CUSTOM_BUILD=${MODULES_REQUIRES_CUSTOM_BUILD:-0}"
info "MODULES_REQUIRES_PLAYERBOT_SOURCE=${MODULES_REQUIRES_PLAYERBOT_SOURCE:-0}" info "MODULES_REQUIRES_PLAYERBOT_SOURCE=${MODULES_REQUIRES_PLAYERBOT_SOURCE:-0}"

121
scripts/bash/update-remote.sh Executable file
View File

@@ -0,0 +1,121 @@
#!/bin/bash
# Helper to push a fresh build to a remote host with minimal downtime and no data touch by default.
set -euo pipefail

# Repository root: two levels up from scripts/bash/.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Default remote project dir, kept as a literal "~/<repo-name>" string.
# NOTE(review): the tilde is presumably expanded by the remote shell — verify
# how deploy.sh consumes --remote-project-dir.
DEFAULT_PROJECT_DIR="~$(printf '/%s' "$(basename "$ROOT_DIR")")"

# CLI state with conservative defaults: nothing is pushed or cleaned unless
# explicitly requested; only the deploy trigger is on by default.
HOST=""
USER=""
PORT=22
IDENTITY=""
PROJECT_DIR="$DEFAULT_PROJECT_DIR"
PUSH_ENV=0
PUSH_STORAGE=0
CLEAN_CONTAINERS=0
AUTO_DEPLOY=1
ASSUME_YES=0
# Print CLI usage to stdout. The heredoc delimiter is quoted ('EOF') so the
# option text is emitted verbatim with no expansion.
usage(){
cat <<'EOF'
Usage: scripts/bash/update-remote.sh --host HOST --user USER [options]
Options:
 --host HOST Remote hostname or IP (required)
 --user USER SSH username on remote host (required)
 --port PORT SSH port (default: 22)
 --identity PATH SSH private key
 --project-dir DIR Remote project directory (default: ~/<repo-name>)
 --remote-path DIR Alias for --project-dir (backward compat)
 --push-env Upload local .env to remote (default: skip)
 --push-storage Sync ./storage to remote (default: skip)
 --clean-containers Stop/remove remote ac-* containers & project images during migration (default: preserve)
 --no-auto-deploy Do not trigger remote deploy after migration
 --yes Auto-confirm prompts
 --help Show this help
EOF
}
# Parse CLI options. Each value-taking flag validates its argument via
# ${2:?...}, which aborts with a clear message instead of the cryptic
# unbound-variable error (set -u) or silent bad shift the bare "$2" gave.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host) HOST="${2:?--host requires a value}"; shift 2;;
    --user) USER="${2:?--user requires a value}"; shift 2;;
    --port) PORT="${2:?--port requires a value}"; shift 2;;
    --identity) IDENTITY="${2:?--identity requires a value}"; shift 2;;
    --project-dir) PROJECT_DIR="${2:?--project-dir requires a value}"; shift 2;;
    --remote-path) PROJECT_DIR="${2:?--remote-path requires a value}"; shift 2;;
    --push-env) PUSH_ENV=1; shift;;
    --push-storage) PUSH_STORAGE=1; shift;;
    --clean-containers) CLEAN_CONTAINERS=1; shift;;
    --no-auto-deploy) AUTO_DEPLOY=0; shift;;
    --yes) ASSUME_YES=1; shift;;
    --help|-h) usage; exit 0;;
    *) echo "Unknown option: $1" >&2; usage; exit 1;;
  esac
done

# Both endpoint coordinates are mandatory.
if [[ -z "$HOST" || -z "$USER" ]]; then
  echo "--host and --user are required" >&2
  usage
  exit 1
fi
# Translate the parsed flags into the argument vector handed to ./deploy.sh.
deploy_args=(--remote --remote-host "$HOST" --remote-user "$USER")
if [[ -n "$PROJECT_DIR" ]]; then deploy_args+=(--remote-project-dir "$PROJECT_DIR"); fi
if [[ -n "$IDENTITY" ]]; then deploy_args+=(--remote-identity "$IDENTITY"); fi
if [[ "$PORT" != "22" ]]; then deploy_args+=(--remote-port "$PORT"); fi
# Storage and .env pushes are opt-in; default is to leave remote copies alone.
if (( PUSH_STORAGE != 1 )); then deploy_args+=(--remote-skip-storage); fi
if (( PUSH_ENV != 1 )); then deploy_args+=(--remote-skip-env); fi
case "$CLEAN_CONTAINERS" in
  1) deploy_args+=(--remote-clean-containers) ;;
  *) deploy_args+=(--remote-preserve-containers) ;;
esac
if (( AUTO_DEPLOY == 1 )); then deploy_args+=(--remote-auto-deploy); fi
deploy_args+=(--no-watch)
if (( ASSUME_YES == 1 )); then deploy_args+=(--yes); fi
# --- Plan summary, confirmation, and hand-off to deploy.sh -------------------
# yn FLAG : render a 0/1 flag as "yes"/"no" for the plan summary.
yn(){ if [ "$1" -eq 1 ]; then echo yes; else echo no; fi; }

echo "Remote update plan:"
echo " Host/User : ${USER}@${HOST}:${PORT}"
echo " Project Dir : ${PROJECT_DIR}"
echo " Push .env : $(yn "$PUSH_ENV")"
echo " Push storage : $(yn "$PUSH_STORAGE")"
echo " Cleanup mode : $(if [ "$CLEAN_CONTAINERS" -eq 1 ]; then echo 'clean containers'; else echo 'preserve containers'; fi)"
echo " Auto deploy : $(yn "$AUTO_DEPLOY")"

# Warn about the one combination that is guaranteed to fail remotely.
if [ "$AUTO_DEPLOY" -eq 1 ] && [ "$PUSH_ENV" -ne 1 ]; then
  echo " ⚠️ Auto-deploy is enabled but push-env is off; remote deploy will fail without a valid .env."
fi

# Interactive confirmation; a positive answer implies --yes downstream so
# deploy.sh does not prompt a second time.
if [ "$ASSUME_YES" -ne 1 ]; then
  read -r -p "Proceed with remote update? [y/N]: " reply
  reply="${reply:-n}"
  case "${reply,,}" in
    y|yes) deploy_args+=(--yes) ;;
    *) echo "Aborted."; exit 1 ;;
  esac
fi

cd "$ROOT_DIR"
./deploy.sh "${deploy_args[@]}"

View File

@@ -4,13 +4,14 @@ set -e
# Simple profile-aware deploy + health check for profiles-verify/docker-compose.yml # Simple profile-aware deploy + health check for profiles-verify/docker-compose.yml
BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
info(){ echo -e "${BLUE} $*${NC}"; }
ok(){ echo -e "${GREEN}$*${NC}"; }
warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
err(){ echo -e "${RED}$*${NC}"; }
PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Source common library for standardized logging
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
exit 1
fi
COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml" COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml"
ENV_FILE="" ENV_FILE=""
TEMPLATE_FILE="$PROJECT_DIR/.env.template" TEMPLATE_FILE="$PROJECT_DIR/.env.template"
@@ -98,12 +99,23 @@ read_env_value(){
if [ -f "$env_path" ]; then if [ -f "$env_path" ]; then
value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')" value="$(grep -E "^${key}=" "$env_path" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi fi
# Fallback to template defaults if not set in the chosen env file
if [ -z "$value" ] && [ -f "$TEMPLATE_FILE" ]; then
value="$(grep -E "^${key}=" "$TEMPLATE_FILE" | tail -n1 | cut -d'=' -f2- | tr -d '\r')"
fi
if [ -z "$value" ]; then if [ -z "$value" ]; then
value="$default" value="$default"
fi fi
echo "$value" echo "$value"
} }
# Externally published ports, resolved via read_env_value (env file, then
# template defaults, then these hard-coded fallbacks).
MYSQL_EXTERNAL_PORT="$(read_env_value MYSQL_EXTERNAL_PORT 64306)"
AUTH_EXTERNAL_PORT="$(read_env_value AUTH_EXTERNAL_PORT 3784)"
WORLD_EXTERNAL_PORT="$(read_env_value WORLD_EXTERNAL_PORT 8215)"
SOAP_EXTERNAL_PORT="$(read_env_value SOAP_EXTERNAL_PORT 7778)"
PMA_EXTERNAL_PORT="$(read_env_value PMA_EXTERNAL_PORT 8081)"
KEIRA3_EXTERNAL_PORT="$(read_env_value KEIRA3_EXTERNAL_PORT 4201)"
handle_auto_rebuild(){ handle_auto_rebuild(){
local storage_path local storage_path
storage_path="$(read_env_value STORAGE_PATH_LOCAL "./local-storage")" storage_path="$(read_env_value STORAGE_PATH_LOCAL "./local-storage")"
@@ -171,7 +183,7 @@ health_checks(){
check_health ac-worldserver || ((failures++)) check_health ac-worldserver || ((failures++))
if [ "$QUICK" = false ]; then if [ "$QUICK" = false ]; then
info "Port checks" info "Port checks"
for port in 64306 3784 8215 7778 8081 4201; do for port in "$MYSQL_EXTERNAL_PORT" "$AUTH_EXTERNAL_PORT" "$WORLD_EXTERNAL_PORT" "$SOAP_EXTERNAL_PORT" "$PMA_EXTERNAL_PORT" "$KEIRA3_EXTERNAL_PORT"; do
if timeout 3 bash -c "</dev/tcp/127.0.0.1/$port" 2>/dev/null; then ok "port $port: open"; else warn "port $port: closed"; fi if timeout 3 bash -c "</dev/tcp/127.0.0.1/$port" 2>/dev/null; then ok "port $port: open"; else warn "port $port: closed"; fi
done done
fi fi
@@ -190,7 +202,7 @@ main(){
fi fi
health_checks health_checks
handle_auto_rebuild handle_auto_rebuild
info "Endpoints: MySQL:64306, Auth:3784, World:8215, SOAP:7778, phpMyAdmin:8081, Keira3:4201" info "Endpoints: MySQL:${MYSQL_EXTERNAL_PORT}, Auth:${AUTH_EXTERNAL_PORT}, World:${WORLD_EXTERNAL_PORT}, SOAP:${SOAP_EXTERNAL_PORT}, phpMyAdmin:${PMA_EXTERNAL_PORT}, Keira3:${KEIRA3_EXTERNAL_PORT}"
} }
main "$@" main "$@"

View File

@@ -1,6 +1,6 @@
module acore-compose/statusdash module acore-compose/statusdash
go 1.22.2 go 1.22
require ( require (
github.com/gizak/termui/v3 v3.1.0 // indirect github.com/gizak/termui/v3 v3.1.0 // indirect

View File

@@ -4,6 +4,8 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"log" "log"
"net"
"os"
"os/exec" "os/exec"
"strings" "strings"
"time" "time"
@@ -61,17 +63,114 @@ type Module struct {
Type string `json:"type"` Type string `json:"type"`
} }
// BuildInfo describes how the running server image was produced; it arrives
// as the "build" object of the status snapshot JSON (see Snapshot.Build).
type BuildInfo struct {
Variant string `json:"variant"`
Repo string `json:"repo"` // remote URL; shortened for display via simplifyRepo
Branch string `json:"branch"`
Image string `json:"image"`
Commit string `json:"commit"`
CommitDate string `json:"commit_date"`
CommitSource string `json:"commit_source"` // "image-label" or "source-tree" (rendered by buildInfoParagraph)
SourcePath string `json:"source_path"`
}
type Snapshot struct { type Snapshot struct {
Timestamp string `json:"timestamp"` Timestamp string `json:"timestamp"`
Project string `json:"project"` Project string `json:"project"`
Network string `json:"network"` Network string `json:"network"`
Services []Service `json:"services"` Services []Service `json:"services"`
Ports []Port `json:"ports"` Ports []Port `json:"ports"`
Modules []Module `json:"modules"` Modules []Module `json:"modules"`
Storage map[string]DirInfo `json:"storage"` Storage map[string]DirInfo `json:"storage"`
Volumes map[string]VolumeInfo `json:"volumes"` Volumes map[string]VolumeInfo `json:"volumes"`
Users UserStats `json:"users"` Users UserStats `json:"users"`
Stats map[string]ContainerStats `json:"stats"` Stats map[string]ContainerStats `json:"stats"`
Build BuildInfo `json:"build"`
}
// persistentServiceOrder is the preferred display order for long-running
// services in the services table; partitionServices lists these first and
// appends any remaining containers (presumably one-shot setup jobs) after
// them in snapshot order.
var persistentServiceOrder = []string{
"ac-mysql",
"ac-db-guard",
"ac-authserver",
"ac-worldserver",
"ac-phpmyadmin",
"ac-keira3",
"ac-backup",
}
func humanDuration(d time.Duration) string {
if d < time.Minute {
return "<1m"
}
days := d / (24 * time.Hour)
d -= days * 24 * time.Hour
hours := d / time.Hour
d -= hours * time.Hour
mins := d / time.Minute
switch {
case days > 0:
return fmt.Sprintf("%dd %dh", days, hours)
case hours > 0:
return fmt.Sprintf("%dh %dm", hours, mins)
default:
return fmt.Sprintf("%dm", mins)
}
}
// formatUptime converts a container start timestamp (RFC3339, with or without
// fractional seconds) into a humanDuration string relative to now; it returns
// "-" when the timestamp is empty, unparseable, or the zero time.
func formatUptime(startedAt string) string {
	if startedAt == "" {
		return "-"
	}
	started, err := time.Parse(time.RFC3339Nano, startedAt)
	if err != nil {
		if started, err = time.Parse(time.RFC3339, startedAt); err != nil {
			return "-"
		}
	}
	if started.IsZero() {
		return "-"
	}
	elapsed := time.Since(started)
	// Clock skew between host and container can make StartedAt appear to be
	// in the future; clamp to zero rather than showing a negative uptime.
	if elapsed < 0 {
		elapsed = 0
	}
	return humanDuration(elapsed)
}
// primaryIPv4 returns the first non-loopback IPv4 address found on an
// interface that is up, or "" if none can be determined.
func primaryIPv4() string {
	ifaces, err := net.Interfaces()
	if err != nil {
		return ""
	}
	for _, iface := range ifaces {
		isUp := iface.Flags&net.FlagUp != 0
		isLoopback := iface.Flags&net.FlagLoopback != 0
		if !isUp || isLoopback {
			continue
		}
		addrs, addrErr := iface.Addrs()
		if addrErr != nil {
			continue
		}
		for _, addr := range addrs {
			var candidate net.IP
			switch a := addr.(type) {
			case *net.IPNet:
				candidate = a.IP
			case *net.IPAddr:
				candidate = a.IP
			}
			if candidate == nil || candidate.IsLoopback() {
				continue
			}
			if v4 := candidate.To4(); v4 != nil {
				return v4.String()
			}
		}
	}
	return ""
}
func runSnapshot() (*Snapshot, error) { func runSnapshot() (*Snapshot, error) {
@@ -87,27 +186,76 @@ func runSnapshot() (*Snapshot, error) {
return snap, nil return snap, nil
} }
func buildServicesTable(s *Snapshot) *TableNoCol { func partitionServices(all []Service) ([]Service, []Service) {
table := NewTableNoCol() byName := make(map[string]Service)
rows := [][]string{{"Service", "Status", "Health", "CPU%", "Memory"}} for _, svc := range all {
for _, svc := range s.Services { byName[svc.Name] = svc
cpu := "-"
mem := "-"
if stats, ok := s.Stats[svc.Name]; ok {
cpu = fmt.Sprintf("%.1f", stats.CPU)
mem = strings.Split(stats.Memory, " / ")[0] // Just show used, not total
}
// Combine health with exit code for stopped containers
health := svc.Health
if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
}
rows = append(rows, []string{svc.Label, svc.Status, health, cpu, mem})
} }
seen := make(map[string]bool)
persistent := make([]Service, 0, len(persistentServiceOrder))
for _, name := range persistentServiceOrder {
if svc, ok := byName[name]; ok {
persistent = append(persistent, svc)
seen[name] = true
}
}
setups := make([]Service, 0, len(all))
for _, svc := range all {
if seen[svc.Name] {
continue
}
setups = append(setups, svc)
}
return persistent, setups
}
func buildServicesTable(s *Snapshot) *TableNoCol {
runningServices, setupServices := partitionServices(s.Services)
table := NewTableNoCol()
rows := [][]string{{"Service", "Status", "Health", "Uptime", "CPU%", "Memory"}}
appendRows := func(services []Service) {
for _, svc := range services {
cpu := "-"
mem := "-"
if svcStats, ok := s.Stats[svc.Name]; ok {
cpu = fmt.Sprintf("%.1f", svcStats.CPU)
mem = strings.Split(svcStats.Memory, " / ")[0] // Just show used, not total
}
health := svc.Health
if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
}
rows = append(rows, []string{svc.Label, svc.Status, health, formatUptime(svc.StartedAt), cpu, mem})
}
}
appendRows(runningServices)
appendRows(setupServices)
table.Rows = rows table.Rows = rows
table.RowSeparator = false table.RowSeparator = false
table.Border = true table.Border = true
table.Title = "Services" table.Title = "Services"
for i := 1; i < len(table.Rows); i++ {
if table.RowStyles == nil {
table.RowStyles = make(map[int]ui.Style)
}
state := strings.ToLower(table.Rows[i][2])
switch state {
case "running", "healthy":
table.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
case "restarting", "unhealthy":
table.RowStyles[i] = ui.NewStyle(ui.ColorRed)
case "exited":
table.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
default:
table.RowStyles[i] = ui.NewStyle(ui.ColorWhite)
}
}
return table return table
} }
@@ -115,9 +263,9 @@ func buildPortsTable(s *Snapshot) *TableNoCol {
table := NewTableNoCol() table := NewTableNoCol()
rows := [][]string{{"Port", "Number", "Reachable"}} rows := [][]string{{"Port", "Number", "Reachable"}}
for _, p := range s.Ports { for _, p := range s.Ports {
state := "down" state := "Closed"
if p.Reachable { if p.Reachable {
state = "up" state = "Open"
} }
rows = append(rows, []string{p.Name, p.Port, state}) rows = append(rows, []string{p.Name, p.Port, state})
} }
@@ -145,7 +293,6 @@ func buildModulesList(s *Snapshot) *widgets.List {
func buildStorageParagraph(s *Snapshot) *widgets.Paragraph { func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
var b strings.Builder var b strings.Builder
fmt.Fprintf(&b, "STORAGE:\n")
entries := []struct { entries := []struct {
Key string Key string
Label string Label string
@@ -161,23 +308,20 @@ func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
if !ok { if !ok {
continue continue
} }
mark := "○" fmt.Fprintf(&b, " %-15s %s (%s)\n", item.Label, info.Path, info.Size)
if info.Exists {
mark = "●"
}
fmt.Fprintf(&b, " %-15s %s %s (%s)\n", item.Label, mark, info.Path, info.Size)
} }
par := widgets.NewParagraph() par := widgets.NewParagraph()
par.Title = "Storage" par.Title = "Storage"
par.Text = b.String() par.Text = strings.TrimRight(b.String(), "\n")
par.Border = true par.Border = true
par.BorderStyle = ui.NewStyle(ui.ColorYellow) par.BorderStyle = ui.NewStyle(ui.ColorYellow)
par.PaddingLeft = 0
par.PaddingRight = 0
return par return par
} }
func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph { func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
var b strings.Builder var b strings.Builder
fmt.Fprintf(&b, "VOLUMES:\n")
entries := []struct { entries := []struct {
Key string Key string
Label string Label string
@@ -190,47 +334,89 @@ func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
if !ok { if !ok {
continue continue
} }
mark := "○" fmt.Fprintf(&b, " %-13s %s\n", item.Label, info.Mountpoint)
if info.Exists {
mark = "●"
}
fmt.Fprintf(&b, " %-13s %s %s\n", item.Label, mark, info.Mountpoint)
} }
par := widgets.NewParagraph() par := widgets.NewParagraph()
par.Title = "Volumes" par.Title = "Volumes"
par.Text = b.String() par.Text = strings.TrimRight(b.String(), "\n")
par.Border = true
par.BorderStyle = ui.NewStyle(ui.ColorYellow)
par.PaddingLeft = 0
par.PaddingRight = 0
return par
}
// simplifyRepo reduces a git remote URL to a short "owner/name" style path by
// stripping whitespace, the ".git" suffix, the URL scheme, and common host
// shorthands. Each prefix is removed at most once, in order.
func simplifyRepo(repo string) string {
	trimmed := strings.TrimSuffix(strings.TrimSpace(repo), ".git")
	for _, prefix := range []string{
		"https://",
		"http://",
		"git@",
		"github.com:",
		"gitlab.com:",
		"github.com/",
		"gitlab.com/",
	} {
		trimmed = strings.TrimPrefix(trimmed, prefix)
	}
	return trimmed
}
func buildInfoParagraph(s *Snapshot) *widgets.Paragraph {
build := s.Build
var lines []string
if build.Branch != "" {
lines = append(lines, fmt.Sprintf("Branch: %s", build.Branch))
}
if repo := simplifyRepo(build.Repo); repo != "" {
lines = append(lines, fmt.Sprintf("Repo: %s", repo))
}
commitLine := "Git: unknown"
if build.Commit != "" {
commitLine = fmt.Sprintf("Git: %s", build.Commit)
switch build.CommitSource {
case "image-label":
commitLine += " [image]"
case "source-tree":
commitLine += " [source]"
}
}
lines = append(lines, commitLine)
if build.Image != "" {
// Skip image line to keep header compact
}
lines = append(lines, fmt.Sprintf("Updated: %s", s.Timestamp))
par := widgets.NewParagraph()
par.Title = "Build"
par.Text = strings.Join(lines, "\n")
par.Border = true par.Border = true
par.BorderStyle = ui.NewStyle(ui.ColorYellow) par.BorderStyle = ui.NewStyle(ui.ColorYellow)
return par return par
} }
func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) { func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
servicesTable := buildServicesTable(s) hostname, err := os.Hostname()
for i := 1; i < len(servicesTable.Rows); i++ { if err != nil || hostname == "" {
if servicesTable.RowStyles == nil { hostname = "unknown"
servicesTable.RowStyles = make(map[int]ui.Style)
}
state := strings.ToLower(servicesTable.Rows[i][1])
switch state {
case "running", "healthy":
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
case "restarting", "unhealthy":
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
case "exited":
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
default:
servicesTable.RowStyles[i] = ui.NewStyle(ui.ColorWhite)
}
} }
ip := primaryIPv4()
if ip == "" {
ip = "unknown"
}
servicesTable := buildServicesTable(s)
portsTable := buildPortsTable(s) portsTable := buildPortsTable(s)
for i := 1; i < len(portsTable.Rows); i++ { for i := 1; i < len(portsTable.Rows); i++ {
if portsTable.RowStyles == nil { if portsTable.RowStyles == nil {
portsTable.RowStyles = make(map[int]ui.Style) portsTable.RowStyles = make(map[int]ui.Style)
} }
if portsTable.Rows[i][2] == "up" { if portsTable.Rows[i][2] == "Open" {
portsTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen) portsTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
} else { } else {
portsTable.RowStyles[i] = ui.NewStyle(ui.ColorRed) portsTable.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
} }
} }
modulesList := buildModulesList(s) modulesList := buildModulesList(s)
@@ -247,50 +433,88 @@ func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
moduleInfoPar.Title = "Module Info" moduleInfoPar.Title = "Module Info"
if selectedModule >= 0 && selectedModule < len(s.Modules) { if selectedModule >= 0 && selectedModule < len(s.Modules) {
mod := s.Modules[selectedModule] mod := s.Modules[selectedModule]
moduleInfoPar.Text = fmt.Sprintf("%s\n\nCategory: %s\nType: %s", mod.Description, mod.Category, mod.Type) moduleInfoPar.Text = fmt.Sprintf("%s\nCategory: %s\nType: %s", mod.Description, mod.Category, mod.Type)
} else { } else {
moduleInfoPar.Text = "Select a module to view info" moduleInfoPar.Text = "Select a module to view info"
} }
moduleInfoPar.Border = true moduleInfoPar.Border = true
moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta) moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
storagePar := buildStorageParagraph(s) storagePar := buildStorageParagraph(s)
storagePar.Border = true
storagePar.BorderStyle = ui.NewStyle(ui.ColorYellow)
storagePar.PaddingLeft = 1
storagePar.PaddingRight = 1
volumesPar := buildVolumesParagraph(s) volumesPar := buildVolumesParagraph(s)
header := widgets.NewParagraph() header := widgets.NewParagraph()
header.Text = fmt.Sprintf("Project: %s\nNetwork: %s\nUpdated: %s", s.Project, s.Network, s.Timestamp) header.Text = fmt.Sprintf("Host: %s\nIP: %s\nProject: %s\nNetwork: %s", hostname, ip, s.Project, s.Network)
header.Border = true header.Border = true
buildPar := buildInfoParagraph(s)
usersPar := widgets.NewParagraph() usersPar := widgets.NewParagraph()
usersPar.Text = fmt.Sprintf("USERS:\n Accounts: %d\n Online: %d\n Characters: %d\n Active 7d: %d", s.Users.Accounts, s.Users.Online, s.Users.Characters, s.Users.Active7d) usersPar.Title = "Users"
usersPar.Text = fmt.Sprintf(" Online: %d\n Accounts: %d\n Characters: %d\n Active 7d: %d", s.Users.Online, s.Users.Accounts, s.Users.Characters, s.Users.Active7d)
usersPar.Border = true usersPar.Border = true
const headerRowFrac = 0.18
const middleRowFrac = 0.43
const bottomRowFrac = 0.39
// Derive inner row ratios from the computed bottom row height so that
// internal containers tile their parent with the same spacing behavior
// as top-level rows.
grid := ui.NewGrid() grid := ui.NewGrid()
termWidth, termHeight := ui.TerminalDimensions() termWidth, termHeight := ui.TerminalDimensions()
headerHeight := int(float64(termHeight) * headerRowFrac)
middleHeight := int(float64(termHeight) * middleRowFrac)
bottomHeight := termHeight - headerHeight - middleHeight
if bottomHeight <= 0 {
bottomHeight = int(float64(termHeight) * bottomRowFrac)
}
helpHeight := int(float64(bottomHeight) * 0.32)
if helpHeight < 1 {
helpHeight = 1
}
moduleInfoHeight := bottomHeight - helpHeight
if moduleInfoHeight < 1 {
moduleInfoHeight = 1
}
storageHeight := int(float64(bottomHeight) * 0.513)
if storageHeight < 1 {
storageHeight = 1
}
volumesHeight := bottomHeight - storageHeight
if volumesHeight < 1 {
volumesHeight = 1
}
helpRatio := float64(helpHeight) / float64(bottomHeight)
moduleInfoRatio := float64(moduleInfoHeight) / float64(bottomHeight)
storageRatio := float64(storageHeight) / float64(bottomHeight)
volumesRatio := float64(volumesHeight) / float64(bottomHeight)
grid.SetRect(0, 0, termWidth, termHeight) grid.SetRect(0, 0, termWidth, termHeight)
grid.Set( grid.Set(
ui.NewRow(0.18, ui.NewRow(headerRowFrac,
ui.NewCol(0.6, header), ui.NewCol(0.34, header),
ui.NewCol(0.4, usersPar), ui.NewCol(0.33, buildPar),
ui.NewCol(0.33, usersPar),
), ),
ui.NewRow(0.42, ui.NewRow(middleRowFrac,
ui.NewCol(0.6, servicesTable), ui.NewCol(0.6, servicesTable),
ui.NewCol(0.4, portsTable), ui.NewCol(0.4, portsTable),
), ),
ui.NewRow(0.40, ui.NewRow(bottomRowFrac,
ui.NewCol(0.25, modulesList), ui.NewCol(0.25, modulesList),
ui.NewCol(0.15, ui.NewCol(0.15,
ui.NewRow(0.30, helpPar), ui.NewRow(helpRatio, helpPar),
ui.NewRow(0.70, moduleInfoPar), ui.NewRow(moduleInfoRatio, moduleInfoPar),
), ),
ui.NewCol(0.6, ui.NewCol(0.6,
ui.NewRow(0.55, ui.NewRow(storageRatio,
ui.NewCol(1.0, storagePar), ui.NewCol(1.0, storagePar),
), ),
ui.NewRow(0.45, ui.NewRow(volumesRatio,
ui.NewCol(1.0, volumesPar), ui.NewCol(1.0, volumesPar),
), ),
), ),

View File

@@ -588,14 +588,16 @@ def handle_generate(args: argparse.Namespace) -> int:
write_outputs(state, output_dir) write_outputs(state, output_dir)
if state.warnings: if state.warnings:
warning_block = "\n".join(f"- {warning}" for warning in state.warnings) module_keys_with_warnings = sorted(
{warning.split()[0].strip(":,") for warning in state.warnings if warning.startswith("MODULE_")}
)
warning_lines = []
if module_keys_with_warnings:
warning_lines.append(f"- Modules with warnings: {', '.join(module_keys_with_warnings)}")
warning_lines.extend(f"- {warning}" for warning in state.warnings)
warning_block = textwrap.indent("\n".join(warning_lines), " ")
print( print(
textwrap.dedent( f"⚠️ Module manifest warnings detected:\n{warning_block}\n",
f"""\
⚠️ Module manifest warnings detected:
{warning_block}
"""
),
file=sys.stderr, file=sys.stderr,
) )
if state.errors: if state.errors:

162
setup.sh
View File

@@ -578,8 +578,6 @@ main(){
local CLI_PLAYERBOT_ENABLED="" local CLI_PLAYERBOT_ENABLED=""
local CLI_PLAYERBOT_MIN="" local CLI_PLAYERBOT_MIN=""
local CLI_PLAYERBOT_MAX="" local CLI_PLAYERBOT_MAX=""
local CLI_AUTO_REBUILD=0
local CLI_MODULES_SOURCE=""
local FORCE_OVERWRITE=0 local FORCE_OVERWRITE=0
local CLI_ENABLE_MODULES_RAW=() local CLI_ENABLE_MODULES_RAW=()
@@ -622,9 +620,6 @@ Options:
--playerbot-enabled 0|1 Override PLAYERBOT_ENABLED flag --playerbot-enabled 0|1 Override PLAYERBOT_ENABLED flag
--playerbot-min-bots N Override PLAYERBOT_MIN_BOTS value --playerbot-min-bots N Override PLAYERBOT_MIN_BOTS value
--playerbot-max-bots N Override PLAYERBOT_MAX_BOTS value --playerbot-max-bots N Override PLAYERBOT_MAX_BOTS value
--auto-rebuild-on-deploy Enable automatic rebuild during deploys
--modules-rebuild-source PATH Source checkout used for module rebuilds
--deploy-after Run ./deploy.sh automatically after setup completes
--force Overwrite existing .env without prompting --force Overwrite existing .env without prompting
EOF EOF
exit 0 exit 0
@@ -779,25 +774,10 @@ EOF
--playerbot-max-bots=*) --playerbot-max-bots=*)
CLI_PLAYERBOT_MAX="${1#*=}"; shift CLI_PLAYERBOT_MAX="${1#*=}"; shift
;; ;;
--auto-rebuild-on-deploy)
CLI_AUTO_REBUILD=1
shift
;;
--modules-rebuild-source)
[[ $# -ge 2 ]] || { say ERROR "--modules-rebuild-source requires a value"; exit 1; }
CLI_MODULES_SOURCE="$2"; shift 2
;;
--modules-rebuild-source=*)
CLI_MODULES_SOURCE="${1#*=}"; shift
;;
--force) --force)
FORCE_OVERWRITE=1 FORCE_OVERWRITE=1
shift shift
;; ;;
--deploy-after)
CLI_DEPLOY_AFTER=1
shift
;;
*) *)
echo "Unknown argument: $1" >&2 echo "Unknown argument: $1" >&2
echo "Use --help for usage" >&2 echo "Use --help for usage" >&2
@@ -1210,8 +1190,6 @@ fi
local PLAYERBOT_MIN_BOTS="${DEFAULT_PLAYERBOT_MIN:-40}" local PLAYERBOT_MIN_BOTS="${DEFAULT_PLAYERBOT_MIN:-40}"
local PLAYERBOT_MAX_BOTS="${DEFAULT_PLAYERBOT_MAX:-40}" local PLAYERBOT_MAX_BOTS="${DEFAULT_PLAYERBOT_MAX:-40}"
local AUTO_REBUILD_ON_DEPLOY=$CLI_AUTO_REBUILD
local MODULES_REBUILD_SOURCE_PATH_VALUE="${CLI_MODULES_SOURCE}"
local NEEDS_CXX_REBUILD=0 local NEEDS_CXX_REBUILD=0
local module_mode_label="" local module_mode_label=""
@@ -1241,7 +1219,7 @@ fi
"automation" "quality-of-life" "gameplay-enhancement" "npc-service" "automation" "quality-of-life" "gameplay-enhancement" "npc-service"
"pvp" "progression" "economy" "social" "account-wide" "pvp" "progression" "economy" "social" "account-wide"
"customization" "scripting" "admin" "premium" "minigame" "customization" "scripting" "admin" "premium" "minigame"
"content" "rewards" "developer" "content" "rewards" "developer" "database" "tooling" "uncategorized"
) )
declare -A category_titles=( declare -A category_titles=(
["automation"]="🤖 Automation" ["automation"]="🤖 Automation"
@@ -1261,30 +1239,18 @@ fi
["content"]="🏰 Content" ["content"]="🏰 Content"
["rewards"]="🎁 Rewards" ["rewards"]="🎁 Rewards"
["developer"]="🛠️ Developer Tools" ["developer"]="🛠️ Developer Tools"
["database"]="🗄️ Database"
["tooling"]="🔨 Tooling"
["uncategorized"]="📦 Miscellaneous"
) )
declare -A processed_categories=()
# Group modules by category using arrays render_category() {
declare -A modules_by_category local cat="$1"
local key
for key in "${selection_keys[@]}"; do
[ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue
local category="${MODULE_CATEGORY_MAP[$key]:-uncategorized}"
if [ -z "${modules_by_category[$category]:-}" ]; then
modules_by_category[$category]="$key"
else
modules_by_category[$category]="${modules_by_category[$category]} $key"
fi
done
# Process modules by category
local cat
for cat in "${category_order[@]}"; do
local module_list="${modules_by_category[$cat]:-}" local module_list="${modules_by_category[$cat]:-}"
[ -n "$module_list" ] || continue [ -n "$module_list" ] || return 0
# Check if this category has any valid modules before showing header
local has_valid_modules=0 local has_valid_modules=0
# Split the space-separated string properly
local -a module_array local -a module_array
IFS=' ' read -ra module_array <<< "$module_list" IFS=' ' read -ra module_array <<< "$module_list"
for key in "${module_array[@]}"; do for key in "${module_array[@]}"; do
@@ -1296,14 +1262,12 @@ fi
fi fi
done done
# Skip category if no valid modules [ "$has_valid_modules" = "1" ] || return 0
[ "$has_valid_modules" = "1" ] || continue
# Display category header only when we have valid modules
local cat_title="${category_titles[$cat]:-$cat}" local cat_title="${category_titles[$cat]:-$cat}"
printf '\n%b\n' "${BOLD}${CYAN}═══ ${cat_title} ═══${NC}" printf '\n%b\n' "${BOLD}${CYAN}═══ ${cat_title} ═══${NC}"
# Process modules in this category local first_in_cat=1
for key in "${module_array[@]}"; do for key in "${module_array[@]}"; do
[ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue [ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue
local status_lc="${MODULE_STATUS_MAP[$key],,}" local status_lc="${MODULE_STATUS_MAP[$key],,}"
@@ -1313,6 +1277,10 @@ fi
printf -v "$key" '%s' "0" printf -v "$key" '%s' "0"
continue continue
fi fi
if [ "$first_in_cat" -ne 1 ]; then
printf '\n'
fi
first_in_cat=0
local prompt_label local prompt_label
prompt_label="$(module_display_name "$key")" prompt_label="$(module_display_name "$key")"
if [ "${MODULE_NEEDS_BUILD_MAP[$key]}" = "1" ]; then if [ "${MODULE_NEEDS_BUILD_MAP[$key]}" = "1" ]; then
@@ -1340,6 +1308,30 @@ fi
printf -v "$key" '%s' "0" printf -v "$key" '%s' "0"
fi fi
done done
processed_categories["$cat"]=1
}
# Group modules by category using arrays
declare -A modules_by_category
local key
for key in "${selection_keys[@]}"; do
[ -n "${KNOWN_MODULE_LOOKUP[$key]:-}" ] || continue
local category="${MODULE_CATEGORY_MAP[$key]:-uncategorized}"
if [ -z "${modules_by_category[$category]:-}" ]; then
modules_by_category[$category]="$key"
else
modules_by_category[$category]="${modules_by_category[$category]} $key"
fi
done
# Process modules by category (ordered, then any new categories)
local cat
for cat in "${category_order[@]}"; do
render_category "$cat"
done
for cat in "${!modules_by_category[@]}"; do
[ -n "${processed_categories[$cat]:-}" ] && continue
render_category "$cat"
done done
module_mode_label="preset 3 (Manual)" module_mode_label="preset 3 (Manual)"
elif [ "$MODE_SELECTION" = "4" ]; then elif [ "$MODE_SELECTION" = "4" ]; then
@@ -1459,7 +1451,6 @@ fi
printf " %-18s %s\n" "Storage Path:" "$STORAGE_PATH" printf " %-18s %s\n" "Storage Path:" "$STORAGE_PATH"
printf " %-18s %s\n" "Container User:" "$CONTAINER_USER" printf " %-18s %s\n" "Container User:" "$CONTAINER_USER"
printf " %-18s Daily %s:00 UTC, keep %sd/%sh\n" "Backups:" "$BACKUP_DAILY_TIME" "$BACKUP_RETENTION_DAYS" "$BACKUP_RETENTION_HOURS" printf " %-18s Daily %s:00 UTC, keep %sd/%sh\n" "Backups:" "$BACKUP_DAILY_TIME" "$BACKUP_RETENTION_DAYS" "$BACKUP_RETENTION_HOURS"
printf " %-18s %s\n" "Source checkout:" "$default_source_rel"
printf " %-18s %s\n" "Modules images:" "$AC_AUTHSERVER_IMAGE_MODULES_VALUE | $AC_WORLDSERVER_IMAGE_MODULES_VALUE" printf " %-18s %s\n" "Modules images:" "$AC_AUTHSERVER_IMAGE_MODULES_VALUE | $AC_WORLDSERVER_IMAGE_MODULES_VALUE"
printf " %-18s %s\n" "Modules preset:" "$SUMMARY_MODE_TEXT" printf " %-18s %s\n" "Modules preset:" "$SUMMARY_MODE_TEXT"
@@ -1506,17 +1497,28 @@ fi
echo "" echo ""
say WARNING "These modules require compiling AzerothCore from source." say WARNING "These modules require compiling AzerothCore from source."
say INFO "Run './build.sh' to compile your custom modules before deployment." say INFO "Run './build.sh' to compile your custom modules before deployment."
if [ "$CLI_AUTO_REBUILD" = "1" ]; then
AUTO_REBUILD_ON_DEPLOY=1
else
AUTO_REBUILD_ON_DEPLOY=$(ask_yn "Enable automatic rebuild during future deploys?" "$( [ "$AUTO_REBUILD_ON_DEPLOY" = "1" ] && echo y || echo n )")
fi
# Set build sentinel to indicate rebuild is needed # Set build sentinel to indicate rebuild is needed
local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild" local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild"
mkdir -p "$(dirname "$sentinel")" mkdir -p "$(dirname "$sentinel")"
touch "$sentinel" if touch "$sentinel" 2>/dev/null; then
say INFO "Build sentinel created at $sentinel" say INFO "Build sentinel created at $sentinel"
else
say WARNING "Could not create build sentinel at $sentinel (permissions/ownership); forcing with sudo..."
if command -v sudo >/dev/null 2>&1; then
if sudo mkdir -p "$(dirname "$sentinel")" \
&& sudo chown -R "$(id -u):$(id -g)" "$(dirname "$sentinel")" \
&& sudo touch "$sentinel"; then
say INFO "Build sentinel created at $sentinel (after fixing ownership)"
else
say ERROR "Failed to force build sentinel creation at $sentinel. Fix permissions and rerun setup."
exit 1
fi
else
say ERROR "Cannot force build sentinel creation (sudo unavailable). Fix permissions on $(dirname "$sentinel") and rerun setup."
exit 1
fi
fi
fi fi
local default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore" local default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore"
@@ -1524,23 +1526,8 @@ fi
default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots" default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots"
fi fi
if [ -n "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then # Persist rebuild source path for downstream build scripts
local storage_abs="$STORAGE_PATH" MODULES_REBUILD_SOURCE_PATH="$default_source_rel"
if [[ "$storage_abs" != /* ]]; then
storage_abs="$(pwd)/${storage_abs#./}"
fi
local candidate_path="$MODULES_REBUILD_SOURCE_PATH_VALUE"
if [[ "$candidate_path" != /* ]]; then
candidate_path="$(pwd)/${candidate_path#./}"
fi
if [[ "$candidate_path" == "$storage_abs"* ]]; then
say WARNING "MODULES_REBUILD_SOURCE_PATH is inside shared storage (${candidate_path}). Using local workspace ${default_source_rel} instead."
MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
fi
fi
# Module staging will be handled directly in the rebuild section below
# Confirm write # Confirm write
@@ -1556,10 +1543,6 @@ fi
[ "$cont" = "1" ] || { say ERROR "Aborted"; exit 1; } [ "$cont" = "1" ] || { say ERROR "Aborted"; exit 1; }
fi fi
if [ -z "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
fi
DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME} DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME}
HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH} HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH}
MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY} MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY}
@@ -1726,11 +1709,12 @@ BACKUP_HEALTHCHECK_GRACE_SECONDS=$BACKUP_HEALTHCHECK_GRACE_SECONDS
EOF EOF
echo echo
echo "# Modules" echo "# Modules"
for module_key in "${MODULE_KEYS[@]}"; do for module_key in "${MODULE_KEYS[@]}"; do
printf "%s=%s\n" "$module_key" "${!module_key:-0}" printf "%s=%s\n" "$module_key" "${!module_key:-0}"
done done
cat <<EOF cat <<EOF
MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH
# Client data # Client data
CLIENT_DATA_VERSION=${CLIENT_DATA_VERSION:-$DEFAULT_CLIENT_DATA_VERSION} CLIENT_DATA_VERSION=${CLIENT_DATA_VERSION:-$DEFAULT_CLIENT_DATA_VERSION}
@@ -1749,12 +1733,8 @@ MODULES_CPP_LIST=$MODULES_CPP_LIST
MODULES_REQUIRES_CUSTOM_BUILD=$MODULES_REQUIRES_CUSTOM_BUILD MODULES_REQUIRES_CUSTOM_BUILD=$MODULES_REQUIRES_CUSTOM_BUILD
MODULES_REQUIRES_PLAYERBOT_SOURCE=$MODULES_REQUIRES_PLAYERBOT_SOURCE MODULES_REQUIRES_PLAYERBOT_SOURCE=$MODULES_REQUIRES_PLAYERBOT_SOURCE
# Rebuild automation # Eluna
AUTO_REBUILD_ON_DEPLOY=$AUTO_REBUILD_ON_DEPLOY AC_ELUNA_ENABLED=$DEFAULT_ELUNA_ENABLED
MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH_VALUE
# Eluna
AC_ELUNA_ENABLED=$DEFAULT_ELUNA_ENABLED
AC_ELUNA_TRACE_BACK=$DEFAULT_ELUNA_TRACE_BACK AC_ELUNA_TRACE_BACK=$DEFAULT_ELUNA_TRACE_BACK
AC_ELUNA_AUTO_RELOAD=$DEFAULT_ELUNA_AUTO_RELOAD AC_ELUNA_AUTO_RELOAD=$DEFAULT_ELUNA_AUTO_RELOAD
AC_ELUNA_BYTECODE_CACHE=$DEFAULT_ELUNA_BYTECODE_CACHE AC_ELUNA_BYTECODE_CACHE=$DEFAULT_ELUNA_BYTECODE_CACHE
@@ -1823,16 +1803,6 @@ EOF
printf ' 🚀 Quick deploy: ./deploy.sh\n' printf ' 🚀 Quick deploy: ./deploy.sh\n'
fi fi
if [ "${CLI_DEPLOY_AFTER:-0}" = "1" ]; then
local deploy_args=(bash "./deploy.sh" --yes)
if [ "$MODULE_PLAYERBOTS" != "1" ]; then
deploy_args+=(--profile standard)
fi
say INFO "Launching deploy after setup (--deploy-after enabled)"
if ! "${deploy_args[@]}"; then
say WARNING "Automatic deploy failed; please run ./deploy.sh manually."
fi
fi
} }
main "$@" main "$@"

117
update-latest.sh Executable file
View File

@@ -0,0 +1,117 @@
#!/bin/bash
#
# Safe wrapper to update to the latest commit on the current branch and run deploy.
#
# Flow: fast-forward the current branch from origin, run ./build.sh, then
# optionally run ./deploy.sh (via --deploy or an interactive prompt below).
set -euo pipefail

# Resolve the repository root from this script's own location and cd there so
# relative invocations (./build.sh, ./deploy.sh) work regardless of caller cwd.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$ROOT_DIR"

# Source common library for standardized logging (provides info/warn/err/ok).
# stderr is suppressed so a missing library yields only the single FATAL line.
if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
  echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
  exit 1
fi

# CLI state, populated by the argument loop further down.
FORCE_DIRTY=0   # --force: skip the dirty-working-tree safety check
DEPLOY_ARGS=()  # unrecognized arguments are forwarded verbatim to ./deploy.sh
SKIP_BUILD=0    # --skip-build: do not run ./build.sh after updating
AUTO_DEPLOY=0   # --deploy: run ./deploy.sh without prompting
# Print CLI help for this script to stdout.
# The here-doc delimiter is quoted ('EOF') so nothing inside is expanded.
# Fix: the synopsis line previously omitted --skip-build and --deploy even
# though both are documented under Options.
usage(){
  cat <<'EOF'
Usage: ./update-latest.sh [--force] [--skip-build] [--deploy] [--help] [deploy args...]
Updates the current git branch with a fast-forward pull, runs a fresh build,
and optionally runs ./deploy.sh with any additional arguments you provide
(e.g., --yes --no-watch).
Options:
  --force       Skip the dirty-tree check (not recommended; you may lose changes)
  --skip-build  Do not run ./build.sh after updating
  --deploy      Auto-run ./deploy.sh after build (non-interactive)
  --help        Show this help
Examples:
  ./update-latest.sh --yes --no-watch
  ./update-latest.sh --deploy --yes --no-watch
  ./update-latest.sh --force --skip-build
  ./update-latest.sh --force --deploy --remote --remote-host my.host --remote-user sam --yes
EOF
}
# Consume this script's own flags; anything unrecognized is collected and
# forwarded verbatim to deploy.sh later.
while [[ $# -gt 0 ]]; do
  arg="$1"
  shift
  case "$arg" in
    --force)
      FORCE_DIRTY=1
      ;;
    --skip-build)
      SKIP_BUILD=1
      ;;
    --deploy)
      AUTO_DEPLOY=1
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      DEPLOY_ARGS+=("$arg")
      ;;
  esac
done
# Preflight: git must be installed.
command -v git >/dev/null 2>&1 || { err "git is required"; exit 1; }

# Refuse to update over uncommitted changes unless --force was given;
# `git status --porcelain` prints nothing when the tree is clean.
if [ "$FORCE_DIRTY" -ne 1 ]; then
  if [ -n "$(git status --porcelain)" ]; then
    err "Working tree is dirty. Commit/stash or re-run with --force."
    exit 1
  fi
fi

# Determine the checked-out branch; rev-parse prints "HEAD" when detached,
# and the `|| true` keeps set -e from aborting so we can report it ourselves.
current_branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null || true)"
if [ -z "$current_branch" ] || [ "$current_branch" = "HEAD" ]; then
  err "Cannot update: detached HEAD or unknown branch."
  exit 1
fi

# The branch must exist on origin, otherwise there is nothing to pull.
if ! git ls-remote --exit-code --heads origin "$current_branch" >/dev/null 2>&1; then
  err "Remote branch origin/$current_branch not found."
  exit 1
fi

info "Fetching latest changes from origin/$current_branch"
git fetch --prune origin

# Fast-forward only: never create a merge commit from local divergence.
info "Fast-forwarding to origin/$current_branch"
if ! git merge --ff-only "origin/$current_branch"; then
  err "Fast-forward failed. Resolve manually or rebase, then rerun."
  exit 1
fi
ok "Repository updated to $(git rev-parse --short HEAD)"
# Rebuild after updating unless the caller opted out with --skip-build.
if [ "$SKIP_BUILD" -eq 1 ]; then
  warn "Skipping build (--skip-build set)"
else
  info "Running build.sh --yes"
  "$ROOT_DIR/build.sh" --yes || { err "Build failed. Resolve issues and re-run."; exit 1; }
  ok "Build completed"
fi
# Offer to run deploy.
# NOTE(fix): DEPLOY_ARGS may be empty, and on bash < 4.4 (e.g. macOS
# /bin/bash 3.2) expanding an empty array under `set -u` aborts with
# "unbound variable". Expansions are therefore guarded with
# ${arr[@]+"${arr[@]}"} / ${arr[*]:-}; output is unchanged when args exist.
if [ "$AUTO_DEPLOY" -eq 1 ]; then
  info "Auto-deploy enabled; running deploy.sh ${DEPLOY_ARGS[*]:-(no extra args)}"
  exec "$ROOT_DIR/deploy.sh" ${DEPLOY_ARGS[@]+"${DEPLOY_ARGS[@]}"}
fi
if [ -t 0 ]; then
  # Interactive session: ask before deploying; default answer is "no".
  read -r -p "Run deploy.sh now? [y/N]: " reply
  reply="${reply:-n}"
  case "$reply" in
    [Yy]*)
      info "Running deploy.sh ${DEPLOY_ARGS[*]:-(no extra args)}"
      exec "$ROOT_DIR/deploy.sh" ${DEPLOY_ARGS[@]+"${DEPLOY_ARGS[@]}"}
      ;;
    *)
      ok "Update (and build) complete. Run ./deploy.sh ${DEPLOY_ARGS[*]:-} when ready."
      exit 0
      ;;
  esac
else
  # Non-interactive (stdin is not a TTY) and --deploy was not requested.
  warn "Non-interactive mode and --deploy not set; skipping deploy."
  ok "Update (and build) complete. Run ./deploy.sh ${DEPLOY_ARGS[*]:-} when ready."
fi