Mirror of https://github.com/uprightbass360/AzerothCore-RealmMaster.git
Synced 2026-01-13 09:07:20 +00:00

Compare commits: b62e33bb03...feat/refac (10 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 3b11e23546 |  |
|  | 4596320856 |  |
|  | d11b9f4089 |  |
|  | 82a5104e87 |  |
|  | 251b5d8f9f |  |
|  | 5620fbae91 |  |
|  | 319da1a553 |  |
|  | 681da2767b |  |
|  | d38c7557e0 |  |
|  | df7689f26a |  |
@@ -21,6 +21,15 @@ COMPOSE_PROJECT_NAME=azerothcore-stack
 # =====================
 STORAGE_PATH=./storage
 STORAGE_PATH_LOCAL=./local-storage
+STORAGE_CONFIG_PATH=${STORAGE_PATH}/config
+STORAGE_LOGS_PATH=${STORAGE_PATH}/logs
+STORAGE_MODULES_PATH=${STORAGE_PATH}/modules
+STORAGE_LUA_SCRIPTS_PATH=${STORAGE_PATH}/lua_scripts
+STORAGE_MODULES_META_PATH=${STORAGE_MODULES_PATH}/.modules-meta
+STORAGE_MODULE_SQL_PATH=${STORAGE_PATH}/module-sql-updates
+STORAGE_INSTALL_MARKERS_PATH=${STORAGE_PATH}/install-markers
+STORAGE_CLIENT_DATA_PATH=${STORAGE_PATH}/client-data
+STORAGE_LOCAL_SOURCE_PATH=${STORAGE_PATH_LOCAL}/source
 BACKUP_PATH=${STORAGE_PATH}/backups
 HOST_ZONEINFO_PATH=/usr/share/zoneinfo
 TZ=UTC
@@ -65,12 +74,12 @@ DB_GUARD_VERIFY_INTERVAL_SECONDS=86400
 # =====================
 # Module SQL staging
 # =====================
-STAGE_PATH_MODULE_SQL=${STORAGE_PATH_LOCAL}/module-sql-updates
+STAGE_PATH_MODULE_SQL=${STORAGE_MODULE_SQL_PATH}
 
 # =====================
 # SQL Source Overlay
 # =====================
-AC_SQL_SOURCE_PATH=${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql
+AC_SQL_SOURCE_PATH=${STORAGE_LOCAL_SOURCE_PATH}/azerothcore-playerbots/data/sql
 
 # =====================
 # Images
@@ -141,7 +150,7 @@ MYSQL_INNODB_LOG_FILE_SIZE=64M
 MYSQL_INNODB_REDO_LOG_CAPACITY=512M
 MYSQL_RUNTIME_TMPFS_SIZE=8G
 MYSQL_DISABLE_BINLOG=1
-MYSQL_CONFIG_DIR=${STORAGE_PATH}/config/mysql/conf.d
+MYSQL_CONFIG_DIR=${STORAGE_CONFIG_PATH}/mysql/conf.d
 DB_WAIT_RETRIES=60
 DB_WAIT_SLEEP=10
 
@@ -218,7 +227,6 @@ CLIENT_DATA_VERSION=
 # Available: none, blizzlike, fast-leveling, hardcore-pvp, casual-pve
 SERVER_CONFIG_PRESET=none
 CLIENT_DATA_CACHE_PATH=${STORAGE_PATH_LOCAL}/client-data-cache
-CLIENT_DATA_PATH=${STORAGE_PATH}/client-data
 
 # =====================
 # Module toggles (0/1)
CLEANUP_TODO.md (new file, 372 lines)
@@ -0,0 +1,372 @@
# AzerothCore RealmMaster - Cleanup TODO

## Overview

This document outlines systematic cleanup opportunities using the proven methodology from our successful consolidation. Each phase must be validated and tested incrementally without breaking existing functionality.

## Methodology

1. **Analyze** - Map dependencies and usage patterns
2. **Consolidate** - Create shared libraries/templates
3. **Replace** - Update scripts to use centralized versions
4. **Test** - Validate each change incrementally
5. **Document** - Track changes and dependencies

---
## Phase 1: Complete Script Function Consolidation

**Priority: HIGH** | **Risk: LOW** | **Impact: HIGH**

### Status

✅ **Completed**: Master scripts (deploy.sh, build.sh, cleanup.sh) + 4 critical scripts
🔄 **Remaining**: 10+ scripts with duplicate logging functions

### Remaining Scripts to Consolidate

```bash
# Root level scripts
./changelog.sh                                 # Has: info(), warn(), err()
./update-latest.sh                             # Has: info(), ok(), warn(), err()

# Backup system scripts
./scripts/bash/backup-export.sh                # Has: info(), ok(), warn(), err()
./scripts/bash/backup-import.sh                # Has: info(), ok(), warn(), err()

# Database scripts
./scripts/bash/db-guard.sh                     # Has: info(), warn(), err()
./scripts/bash/db-health-check.sh              # Has: info(), ok(), warn(), err()

# Module & verification scripts
./scripts/bash/verify-sql-updates.sh           # Has: info(), warn(), err()
./scripts/bash/manage-modules.sh               # Has: info(), ok(), warn(), err()
./scripts/bash/repair-storage-permissions.sh   # Has: info(), warn(), err()
./scripts/bash/test-phase1-integration.sh      # Has: info(), ok(), warn(), err()
```

### Implementation Plan

**Step 1.1**: Consolidate Root Level Scripts (changelog.sh, update-latest.sh)
- Add lib/common.sh sourcing with error handling (see the sketch after this list)
- Remove duplicate function definitions
- Test functionality with `--help` flags

**Step 1.2**: Consolidate Backup System Scripts
- Update backup-export.sh and backup-import.sh
- Ensure backup operations still work correctly
- Test with dry-run flags where available

**Step 1.3**: Consolidate Database Scripts
- Update db-guard.sh and db-health-check.sh
- **CRITICAL**: These run inside containers - verify mount paths work
- Test with existing database connections

**Step 1.4**: Consolidate Module & Verification Scripts
- Update manage-modules.sh, verify-sql-updates.sh, repair-storage-permissions.sh
- Test module staging and SQL verification workflows
- Verify test-phase1-integration.sh still functions
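
A minimal sketch of the per-script change, mirroring the pattern the master scripts already use (the exact log functions live in lib/common.sh):

```bash
#!/bin/bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Replace the script's local info()/ok()/warn()/err() copies with the shared
# library; fail fast if it is missing, matching the master-script pattern.
if ! source "$SCRIPT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
  echo "❌ FATAL: Cannot load $SCRIPT_DIR/scripts/bash/lib/common.sh" >&2
  exit 1
fi

info "logging now comes from lib/common.sh"
```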

### Validation Tests

```bash
# Test each script category after consolidation
./changelog.sh --help
./update-latest.sh --help
./scripts/bash/backup-export.sh --dry-run
./scripts/bash/manage-modules.sh --list
```

---
## Phase 2: Docker Compose YAML Anchor Completion

**Priority: HIGH** | **Risk: MEDIUM** | **Impact: HIGH**

### Status

✅ **Completed**: Basic YAML anchors, 2 authserver services consolidated
🔄 **Remaining**: 4 services (3 worldserver variants plus ac-authserver-modules), database services, volume patterns

### Current Docker Compose Analysis

```yaml
# Services needing consolidation:
- ac-worldserver-standard     # ~45 lines → can reduce to ~10
- ac-worldserver-playerbots   # ~45 lines → can reduce to ~10
- ac-worldserver-modules      # ~45 lines → can reduce to ~10
- ac-authserver-modules       # ~30 lines → can reduce to ~8

# Database services with repeated patterns:
- ac-db-import   # Repeated volume mounts
- ac-db-guard    # Similar environment variables
- ac-db-init     # Similar MySQL connection patterns

# Volume mount patterns repeated 15+ times:
- ${STORAGE_CONFIG_PATH}:/azerothcore/env/dist/etc
- ${STORAGE_LOGS_PATH}:/azerothcore/logs
- ${BACKUP_PATH}:/backups
```

### Implementation Plan

**Step 2.1**: Complete Worldserver Service Consolidation
- Extend the x-worldserver-common anchor to cover all variants
- Consolidate ac-worldserver-standard, ac-worldserver-playerbots, ac-worldserver-modules
- Test each Docker profile: `docker compose --profile services-standard config` (a behavior-preserving check is sketched after this list)

**Step 2.2**: Database Services Consolidation
- Create an x-database-common anchor for shared database configuration
- Create an x-database-volumes anchor for repeated volume patterns
- Update the ac-db-import, ac-db-guard, ac-db-init services

**Step 2.3**: Complete Authserver Consolidation
- Consolidate the remaining ac-authserver-modules service
- Verify all three profiles work: standard, playerbots, modules
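
One way to prove the anchor refactor changes nothing (a sketch; it assumes a clean working tree so `git stash` can flip between the two versions):

```bash
# Render the fully-expanded config before and after the anchor refactor and
# diff the results; YAML anchors are resolved by `config`, so the output
# should be identical if the consolidation is behavior-preserving.
docker compose --profile services-standard config > /tmp/after.yml
git stash
docker compose --profile services-standard config > /tmp/before.yml
git stash pop
diff /tmp/before.yml /tmp/after.yml && echo "✅ refactor is behavior-preserving"
```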

### Validation Tests

```bash
# Test that all profiles generate valid configurations
docker compose --profile services-standard config --quiet
docker compose --profile services-playerbots config --quiet
docker compose --profile services-modules config --quiet

# Test actual deployment (non-destructive)
docker compose --profile services-standard up --dry-run
```

---
## Phase 3: Utility Function Libraries

**Priority: MEDIUM** | **Risk: MEDIUM** | **Impact: MEDIUM**

### Status

✅ **Completed**: All three utility libraries created and tested
✅ **Completed**: Integration with backup-import.sh as proof of concept
🔄 **Remaining**: Update the remaining 14+ scripts to use the new libraries

### Created Libraries

**✅ scripts/bash/lib/mysql-utils.sh** - COMPLETED
- MySQL connection management: `mysql_test_connection()`, `mysql_wait_for_connection()`
- Query execution: `mysql_exec_with_retry()`, `mysql_query()`, `docker_mysql_query()`
- Database utilities: `mysql_database_exists()`, `mysql_get_table_count()`
- Backup/restore: `mysql_backup_database()`, `mysql_restore_database()`
- Configuration: `mysql_validate_configuration()`, `mysql_print_configuration()`

**✅ scripts/bash/lib/docker-utils.sh** - COMPLETED
- Container management: `docker_get_container_status()`, `docker_wait_for_container_state()`
- Execution: `docker_exec_with_retry()`, `docker_is_container_running()`
- Project management: `docker_get_project_name()`, `docker_list_project_containers()`
- Image operations: `docker_get_container_image()`, `docker_pull_image_with_retry()`
- Compose integration: `docker_compose_validate()`, `docker_compose_deploy()`
- System utilities: `docker_check_daemon()`, `docker_cleanup_system()`

**✅ scripts/bash/lib/env-utils.sh** - COMPLETED
- Environment management: `env_read_with_fallback()`, `env_read_typed()`, `env_update_value()`
- Path utilities: `path_resolve_absolute()`, `file_ensure_writable_dir()`
- File operations: `file_create_backup()`, `file_set_permissions()`
- Configuration: `config_read_template_value()`, `config_validate_env()`
- System detection: `system_detect_os()`, `system_check_requirements()`
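
A usage sketch combining the three libraries (argument shapes are assumptions; check each library's header comments for the exact signatures):

```bash
#!/bin/bash
set -euo pipefail
LIB_DIR="scripts/bash/lib"
source "$LIB_DIR/mysql-utils.sh"
source "$LIB_DIR/docker-utils.sh"
source "$LIB_DIR/env-utils.sh"

docker_check_daemon || exit 1    # bail out early if Docker is down
mysql_wait_for_connection        # block until MySQL accepts connections
if mysql_database_exists "$DB_WORLD_NAME"; then
  echo "world database present: $(mysql_get_table_count "$DB_WORLD_NAME") tables"
fi
```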

### Integration Status

**✅ Proof of Concept**: backup-import.sh updated with fallback compatibility
- Uses the new utility functions when available
- Maintains backward compatibility with graceful fallbacks
- Tested and functional
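
The fallback pattern, sketched (the wrapper name and the raw-mysql fallback body are illustrative, not the exact backup-import.sh code):

```bash
# Prefer the shared library; remember whether it loaded.
if source "scripts/bash/lib/mysql-utils.sh" 2>/dev/null; then
  HAVE_MYSQL_UTILS=1
else
  HAVE_MYSQL_UTILS=0
fi

run_query() {
  if [ "$HAVE_MYSQL_UTILS" -eq 1 ]; then
    mysql_query "$1"   # centralized implementation with retries
  else
    # Graceful fallback: plain mysql client call using existing env settings
    mysql -h"$CONTAINER_MYSQL" -P"$MYSQL_PORT" -u"$MYSQL_USER" \
      -p"$MYSQL_ROOT_PASSWORD" -e "$1"
  fi
}
```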

### Remaining Implementation

**Step 3.4**: Update High-Priority Scripts
- backup-export.sh: Use mysql-utils and env-utils functions
- db-guard.sh: Use mysql-utils for database operations
- deploy-tools.sh: Use docker-utils for container management
- verify-deployment.sh: Use docker-utils for status checking

**Step 3.5**: Update Database Scripts
- db-health-check.sh: Use mysql-utils for health validation
- db-import-conditional.sh: Use mysql-utils and env-utils
- manual-backup.sh: Use mysql-utils backup functions

**Step 3.6**: Update Deployment Scripts
- migrate-stack.sh: Use docker-utils for remote operations
- stage-modules.sh: Use env-utils for path management
- rebuild-with-modules.sh: Use docker-utils for build operations

### Validation Tests - COMPLETED ✅

```bash
# Test MySQL utilities
source scripts/bash/lib/mysql-utils.sh
mysql_print_configuration        # ✅ PASSED

# Test Docker utilities
source scripts/bash/lib/docker-utils.sh
docker_print_system_info         # ✅ PASSED

# Test Environment utilities
source scripts/bash/lib/env-utils.sh
env_utils_validate               # ✅ PASSED

# Test integrated script
./backup-import.sh --help        # ✅ PASSED with new libraries
```

### Next Steps

- Continue with Step 3.4: update backup-export.sh, db-guard.sh, deploy-tools.sh
- Implement a progressive rollout with testing after each script update
- Complete the remaining 11 scripts in dependency order

---
## Phase 4: Error Handling Standardization

**Priority: MEDIUM** | **Risk: LOW** | **Impact: MEDIUM**

### Analysis

**Current State**: Mixed error handling patterns across scripts

```bash
# Found patterns:
set -e                # 45 scripts
set -euo pipefail     # 23 scripts
set -eu               #  8 scripts
(no error handling)   # 12 scripts
```
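
Why `pipefail` is part of the target (a quick illustration, not project code):

```bash
set -e
false | tee /tmp/out.log   # pipeline exit status is tee's 0, so the failure
                           # is masked and `set -e` alone does not abort here
set -o pipefail
false | tee /tmp/out.log   # pipeline now reports the failing producer,
                           # and `set -e` aborts the script
```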

### Implementation Plan

**Step 4.1**: Standardize Error Handling
- Add `set -euo pipefail` to all scripts where safe
- Add error traps for cleanup in critical scripts
- Implement consistent exit codes

**Step 4.2**: Add Script Validation Framework
- Create validation helper functions
- Add dependency checking to critical scripts
- Implement graceful degradation where possible

### Target Pattern

```bash
#!/bin/bash
set -euo pipefail

# Error handling setup
trap 'echo "❌ Error on line $LINENO" >&2' ERR
trap 'cleanup_on_exit' EXIT   # each script defines its own cleanup_on_exit()

# Source libraries with validation
source_lib_or_exit() {
  local lib_path="$1"
  if ! source "$lib_path" 2>/dev/null; then
    echo "❌ FATAL: Cannot load $lib_path" >&2
    exit 1
  fi
}
```

---
## Phase 5: Configuration Template Consolidation

**Priority: LOW** | **Risk: LOW** | **Impact: LOW**

### Analysis

**Found**: 71 instances of duplicate color definitions across scripts
**Found**: Multiple .env template patterns that could be standardized

### Implementation Plan

**Step 5.1**: Color Definition Consolidation
- Ensure all scripts use lib/common.sh colors exclusively
- Remove remaining duplicate color definitions
- Add color theme support (optional)

**Step 5.2**: Configuration Template Cleanup
- Consolidate environment variable patterns
- Create shared configuration validation (one possible shape is sketched below)
- Standardize default value patterns
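
One possible shape for the shared validation helper (the function name and the chosen variables are illustrative):

```bash
# Fail with a clear message when required .env settings are absent or empty;
# err() comes from lib/common.sh.
config_require_vars() {
  local missing=0 var
  for var in "$@"; do
    if [ -z "${!var:-}" ]; then
      err "Missing required setting: $var"
      missing=1
    fi
  done
  return "$missing"
}

config_require_vars STORAGE_PATH STORAGE_PATH_LOCAL MYSQL_ROOT_PASSWORD
```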

---

## Implementation Priority Order

### **Week 1: High Impact, Low Risk**
- [ ] Phase 1.1-1.2: Consolidate remaining root and backup scripts
- [ ] Phase 2.1: Complete worldserver YAML anchor consolidation
- [ ] Validate: All major scripts and Docker profiles work

### **Week 2: Complete Core Consolidation**
- [ ] Phase 1.3-1.4: Consolidate database and module scripts
- [ ] Phase 2.2-2.3: Complete database service and authserver consolidation
- [ ] Validate: Full deployment pipeline works end-to-end

### **Week 3: Utility Libraries**
- [ ] Phase 3.1: Create and implement MySQL utility library
- [ ] Phase 3.2: Create and implement Docker utility library
- [ ] Validate: Scripts using new libraries function correctly

### **Week 4: Polish and Standardization**
- [ ] Phase 3.3: Complete environment utility library
- [ ] Phase 4.1-4.2: Standardize error handling
- [ ] Phase 5.1-5.2: Final cleanup of colors and configs
- [ ] Validate: Complete system testing

---

## Validation Framework

### **Incremental Testing**
Each phase must pass these tests before proceeding:

**Script Functionality Tests:**
```bash
# Master scripts
./deploy.sh --help && ./build.sh --help && ./cleanup.sh --help

# Docker compose validation
docker compose config --quiet

# Profile validation
for profile in services-standard services-playerbots services-modules; do
  docker compose --profile $profile config --quiet
done
```

**Integration Tests:**
```bash
# End-to-end validation (non-destructive)
./deploy.sh --profile services-standard --dry-run --no-watch
./scripts/bash/verify-deployment.sh --profile services-standard
```

**Regression Prevention:**
- Git commit after each completed phase
- Tag successful consolidations
- Maintain rollback procedures (see the sketch below)
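
A sketch of the commit/tag/rollback workflow (the tag naming scheme is illustrative):

```bash
# After a phase passes validation, record a named restore point.
git commit -am "Phase 1.2: consolidate backup scripts onto lib/common.sh"
git tag cleanup/phase-1.2

# Rollback procedures if a later phase regresses:
git revert <bad-commit>               # undo one change, keep history
git reset --hard cleanup/phase-1.2    # or return to the last good tag
```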

---

## Risk Mitigation

### **Container Script Dependencies**
- **High Risk**: Scripts mounted into containers (db-guard.sh, backup-scheduler.sh)
- **Mitigation**: Test container mounting before consolidating
- **Validation**: Verify scripts work inside the container environment

### **Remote Deployment Impact**
- **Medium Risk**: SSH deployment scripts (migrate-stack.sh)
- **Mitigation**: Test remote deployment on a non-production host
- **Validation**: Verify remote script sourcing works correctly

### **Docker Compose Version Compatibility**
- **Medium Risk**: Advanced YAML anchors may not work on older versions
- **Mitigation**: Add version detection and warnings (sketched below)
- **Validation**: Test on the minimum supported Docker Compose version
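
A possible shape for the version check (the required-version threshold is an assumption):

```bash
# Warn when the Compose v2 plugin is missing or too old for the anchor-based
# compose file; `docker compose version --short` prints e.g. "2.24.5".
compose_version="$(docker compose version --short 2>/dev/null || true)"
if [ -z "$compose_version" ]; then
  warn "Docker Compose v2 plugin not found; YAML anchors may not resolve"
elif [ "${compose_version%%.*}" -lt 2 ]; then
  warn "Docker Compose $compose_version detected; v2.x is required"
fi
```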

---

## Success Metrics

### **Quantitative Goals**
- Reduce duplicate logging functions from 14 scripts → 0
- Reduce the Docker compose file from ~1000 → ~600 lines
- Reduce color definitions from 71 copies → 1 centralized location
- Consolidate MySQL connection patterns from 22 call sites → 1 library

### **Qualitative Goals**
- Single source of truth for common functionality
- Consistent user experience across all scripts
- Maintainable and extensible architecture
- Clear dependency relationships
- Robust error handling and validation

### **Completion Criteria**
- [ ] All scripts source centralized libraries exclusively
- [ ] No duplicate function definitions remain
- [ ] Docker compose uses YAML anchors for all repeated patterns
- [ ] Comprehensive test suite validates all functionality
- [ ] Documentation updated to reflect new architecture
@@ -79,7 +79,7 @@ For complete local and remote deployment guides, see **[docs/GETTING_STARTED.md]
 
 Choose from **hundreds of enhanced modules** spanning automation, quality-of-life improvements, gameplay enhancements, PvP features, and more. The manifest contains 348 modules (221 marked supported/active); the default RealmMaster preset enables 33 that are exercised in testing. All modules are automatically downloaded, configured, and integrated during deployment when selected.
 
-Want a shortcut? Use a preset (RealmMaster, suggested QoL, playerbot variants, all-modules) from `config/module-profiles/`—see [docs/GETTING_STARTED.md#module-presets](docs/GETTING_STARTED.md#module-presets).
+Want a shortcut? Use a preset (`RealmMaster`, `suggested-modules`, `playerbots-suggested-modules`, `azerothcore-vanilla`, `playerbots-only`, `all-modules`) from `config/module-profiles/`—see [docs/GETTING_STARTED.md#module-presets](docs/GETTING_STARTED.md#module-presets).
 
 **Popular Categories:**
 - **Automation** - Playerbots, AI chat, level management
@@ -143,4 +143,4 @@ This project builds upon:
 
 - **Create admin account** - Attach to worldserver and create a GM user (commands in **[docs/GETTING_STARTED.md#post-installation-steps](docs/GETTING_STARTED.md#post-installation-steps)**).
 - **Point your client** - Update `realmlist.wtf` to your host/ports (defaults in the same section above).
 - **Open services** - phpMyAdmin and Keira3 URLs/ports are listed in **[docs/GETTING_STARTED.md#post-installation-steps](docs/GETTING_STARTED.md#post-installation-steps)**.
build.sh (13 changed lines)
@@ -9,6 +9,13 @@ set -euo pipefail
 ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 ENV_PATH="$ROOT_DIR/.env"
 TEMPLATE_PATH="$ROOT_DIR/.env.template"
+# Source common library with proper error handling
+if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
+  echo "This library is required for build.sh to function." >&2
+  exit 1
+fi
+
 source "$ROOT_DIR/scripts/bash/project_name.sh"
 
 # Default project name (read from .env or template)
@@ -17,11 +24,7 @@ ASSUME_YES=0
 FORCE_REBUILD=0
 SKIP_SOURCE_SETUP=0
 CUSTOM_SOURCE_PATH=""
-BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
-info(){ printf '%b\n' "${BLUE}ℹ️ $*${NC}"; }
-ok(){ printf '%b\n' "${GREEN}✅ $*${NC}"; }
-warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
-err(){ printf '%b\n' "${RED}❌ $*${NC}"; }
+# Color definitions and logging functions now provided by lib/common.sh
 
 show_build_header(){
   printf '\n%b\n' "${BLUE}🔨 AZEROTHCORE BUILD SYSTEM 🔨${NC}"
changelog.sh (15 changed lines)
@@ -7,6 +7,12 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 PROJECT_ROOT="$SCRIPT_DIR"
 cd "$PROJECT_ROOT"
 
+# Source common library for standardized logging
+if ! source "$SCRIPT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $SCRIPT_DIR/scripts/bash/lib/common.sh" >&2
+  exit 1
+fi
+
 # Load environment configuration (available on deployed servers)
 if [ -f ".env" ]; then
   set -a
@@ -20,11 +26,10 @@ OUTPUT_DIR="${CHANGELOG_OUTPUT_DIR:-./changelogs}"
 DAYS_BACK="${CHANGELOG_DAYS_BACK:-7}"
 FORMAT="${CHANGELOG_FORMAT:-markdown}"
 
-# Colors for output
-GREEN='\033[0;32m'; BLUE='\033[0;34m'; YELLOW='\033[1;33m'; NC='\033[0m'
-log() { echo -e "${BLUE}[$(date '+%H:%M:%S')]${NC} $*" >&2; }
-success() { echo -e "${GREEN}✅${NC} $*" >&2; }
-warn() { echo -e "${YELLOW}⚠️${NC} $*" >&2; }
+# Specialized logging with timestamp for changelog context
+log() { info "[$(date '+%H:%M:%S')] $*"; }
+success() { ok "$*"; }
+# warn() function already provided by lib/common.sh
 
 usage() {
   cat <<EOF
cleanup.sh (24 changed lines)
@@ -14,6 +14,13 @@ PROJECT_DIR="${SCRIPT_DIR}"
 DEFAULT_COMPOSE_FILE="${PROJECT_DIR}/docker-compose.yml"
 ENV_FILE="${PROJECT_DIR}/.env"
 TEMPLATE_FILE="${PROJECT_DIR}/.env.template"
+# Source common library with proper error handling
+if ! source "${PROJECT_DIR}/scripts/bash/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load ${PROJECT_DIR}/scripts/bash/lib/common.sh" >&2
+  echo "This library is required for cleanup.sh to function." >&2
+  exit 1
+fi
+
 source "${PROJECT_DIR}/scripts/bash/project_name.sh"
 
 # Default project name (read from .env or template)
@@ -21,17 +28,16 @@ DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
 source "${PROJECT_DIR}/scripts/bash/compose_overrides.sh"
 declare -a COMPOSE_FILE_ARGS=()
 
-# Colors
-RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; BLUE='\033[0;34m'; MAGENTA='\033[0;35m'; NC='\033[0m'
-
+# Color definitions now provided by lib/common.sh
+# Legacy print_status function for cleanup.sh compatibility
 print_status() {
   case "$1" in
-    INFO) echo -e "${BLUE}ℹ️ ${2}${NC}";;
-    SUCCESS) echo -e "${GREEN}✅ ${2}${NC}";;
-    WARNING) echo -e "${YELLOW}⚠️ ${2}${NC}";;
-    ERROR) echo -e "${RED}❌ ${2}${NC}";;
-    DANGER) echo -e "${RED}💀 ${2}${NC}";;
-    HEADER) echo -e "\n${MAGENTA}=== ${2} ===${NC}";;
+    INFO) info "${2}";;
+    SUCCESS) ok "${2}";;
+    WARNING) warn "${2}";;
+    ERROR) err "${2}";;
+    DANGER) printf '%b\n' "${RED}💀 ${2}${NC}";;
+    HEADER) printf '\n%b\n' "${CYAN}=== ${2} ===${NC}";;
   esac
 }
 
@@ -12,7 +12,6 @@
     "MODULE_ACCOUNT_ACHIEVEMENTS",
     "MODULE_AUTO_REVIVE",
     "MODULE_GAIN_HONOR_GUARD",
-    "MODULE_ELUNA",
     "MODULE_TIME_IS_TIME",
     "MODULE_RANDOM_ENCHANTS",
     "MODULE_SOLOCRAFT",
@@ -24,6 +23,7 @@
     "MODULE_REAGENT_BANK",
     "MODULE_BLACK_MARKET_AUCTION_HOUSE",
     "MODULE_ELUNA_TS",
+    "MODULE_ELUNA",
     "MODULE_AIO",
     "MODULE_ELUNA_SCRIPTS",
     "MODULE_EVENT_SCRIPTS",
@@ -34,7 +34,7 @@
     "MODULE_ITEM_LEVEL_UP",
     "MODULE_GLOBAL_CHAT"
   ],
-  "label": "\ud83e\udde9 Sam",
-  "description": "Sam's playerbot-centric preset (use high bot counts)",
-  "order": 7
+  "label": "\ud83e\udde9 RealmMaster",
+  "description": "RealmMaster suggested build (33 enabled modules)",
+  "order": 0
 }

@@ -342,6 +342,6 @@
     "MODULE_CLASSIC_MODE"
   ],
   "label": "\ud83e\udde9 All Modules",
-  "description": "Enable every optional module in the repository",
-  "order": 5
+  "description": "Enable every optional module in the repository - NOT RECOMMENDED",
+  "order": 7
 }
config/module-profiles/azerothcore-vanilla.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "modules": [

  ],
  "label": "\u2b50 AzerothCore Main - Mod Free",
  "description": "Pure AzerothCore with no optional modules enabled",
  "order": 3
}
@@ -6,5 +6,5 @@
   ],
   "label": "\ud83e\udde9 Playerbots Only",
   "description": "Minimal preset that only enables playerbot prerequisites",
-  "order": 6
+  "order": 4
 }
@@ -7,9 +7,12 @@
     "MODULE_TRANSMOG",
     "MODULE_NPC_BUFFER",
     "MODULE_LEARN_SPELLS",
-    "MODULE_FIREWORKS"
+    "MODULE_FIREWORKS",
+    "MODULE_ELUNA_TS",
+    "MODULE_ELUNA",
+    "MODULE_AIO"
   ],
   "label": "\ud83e\udd16 Playerbots + Suggested modules",
   "description": "Suggested stack plus playerbots enabled",
-  "order": 2
+  "order": 1
 }
@@ -1,6 +1,8 @@
 {
   "modules": [
+    "MODULE_ELUNA_TS",
     "MODULE_ELUNA",
+    "MODULE_AIO",
     "MODULE_SOLO_LFG",
     "MODULE_SOLOCRAFT",
     "MODULE_AUTOBALANCE",
@@ -10,6 +12,6 @@
     "MODULE_FIREWORKS"
   ],
   "label": "\u2b50 Suggested Modules",
-  "description": "Baseline solo-friendly quality of life mix",
-  "order": 1
+  "description": "Baseline solo-friendly quality of life mix (no playerbots)",
+  "order": 2
 }
deploy.sh (78 changed lines)
@@ -12,6 +12,13 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
 ENV_PATH="$ROOT_DIR/.env"
 TEMPLATE_PATH="$ROOT_DIR/.env.template"
+# Source common library with proper error handling
+if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
+  echo "This library is required for deploy.sh to function." >&2
+  exit 1
+fi
+
 source "$ROOT_DIR/scripts/bash/project_name.sh"
 
 # Default project name (read from .env or template)
@@ -34,22 +41,19 @@ REMOTE_SKIP_STORAGE=0
 REMOTE_COPY_SOURCE=0
 REMOTE_ARGS_PROVIDED=0
 REMOTE_AUTO_DEPLOY=0
-REMOTE_AUTO_DEPLOY=0
-REMOTE_CLEAN_RUNTIME=0
+REMOTE_CLEAN_CONTAINERS=0
 REMOTE_STORAGE_OVERRIDE=""
 REMOTE_CONTAINER_USER_OVERRIDE=""
 REMOTE_ENV_FILE=""
+REMOTE_SKIP_ENV=0
+REMOTE_PRESERVE_CONTAINERS=0
 
 MODULE_HELPER="$ROOT_DIR/scripts/python/modules.py"
 MODULE_STATE_INITIALIZED=0
 declare -a MODULES_COMPILE_LIST=()
 declare -a COMPOSE_FILE_ARGS=()
 
-BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
-info(){ printf '%b\n' "${BLUE}ℹ️ $*${NC}"; }
-ok(){ printf '%b\n' "${GREEN}✅ $*${NC}"; }
-warn(){ printf '%b\n' "${YELLOW}⚠️ $*${NC}"; }
-err(){ printf '%b\n' "${RED}❌ $*${NC}"; }
+# Color definitions and logging functions now provided by lib/common.sh
 
 show_deployment_header(){
   printf '\n%b\n' "${BLUE}⚔️ AZEROTHCORE REALM DEPLOYMENT ⚔️${NC}"
@@ -174,8 +178,18 @@ collect_remote_details(){
     read -rp "Stop/remove remote containers & project images during migration? [y/N]: " cleanup_answer
     cleanup_answer="${cleanup_answer:-n}"
     case "${cleanup_answer,,}" in
-      y|yes) REMOTE_CLEAN_RUNTIME=1 ;;
-      *) REMOTE_CLEAN_RUNTIME=0 ;;
+      y|yes) REMOTE_CLEAN_CONTAINERS=1 ;;
+      *)
+        REMOTE_CLEAN_CONTAINERS=0
+        # Offer explicit preservation when declining cleanup
+        local preserve_answer
+        read -rp "Preserve remote containers/images (skip cleanup)? [Y/n]: " preserve_answer
+        preserve_answer="${preserve_answer:-Y}"
+        case "${preserve_answer,,}" in
+          n|no) REMOTE_PRESERVE_CONTAINERS=0 ;;
+          *) REMOTE_PRESERVE_CONTAINERS=1 ;;
+        esac
+        ;;
     esac
   fi
 
@@ -251,9 +265,11 @@ Options:
   --remote-skip-storage          Skip syncing the storage directory during migration
   --remote-copy-source           Copy the local project directory to remote instead of relying on git
   --remote-auto-deploy           Run './deploy.sh --yes --no-watch' on the remote host after migration
-  --remote-clean-runtime         Stop/remove remote containers & project images during migration
+  --remote-clean-containers      Stop/remove remote containers & project images during migration
   --remote-storage-path PATH     Override STORAGE_PATH/STORAGE_PATH_LOCAL in the remote .env
   --remote-container-user USER[:GROUP]  Override CONTAINER_USER in the remote .env
+  --remote-skip-env              Do not upload .env to the remote host
+  --remote-preserve-containers   Skip stopping/removing remote containers during migration
   --skip-config                  Skip applying server configuration preset
   -h, --help                     Show this help
 
@@ -282,15 +298,22 @@ while [[ $# -gt 0 ]]; do
     --remote-skip-storage) REMOTE_SKIP_STORAGE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-copy-source) REMOTE_COPY_SOURCE=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-auto-deploy) REMOTE_AUTO_DEPLOY=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
-    --remote-clean-runtime) REMOTE_CLEAN_RUNTIME=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
+    --remote-clean-containers) REMOTE_CLEAN_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --remote-storage-path) REMOTE_STORAGE_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
     --remote-container-user) REMOTE_CONTAINER_USER_OVERRIDE="$2"; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift 2;;
+    --remote-skip-env) REMOTE_SKIP_ENV=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
+    --remote-preserve-containers) REMOTE_PRESERVE_CONTAINERS=1; REMOTE_MODE=1; REMOTE_ARGS_PROVIDED=1; shift;;
     --skip-config) SKIP_CONFIG=1; shift;;
     -h|--help) usage; exit 0;;
     *) err "Unknown option: $1"; usage; exit 1;;
   esac
 done
 
+if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ] && [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
+  err "Cannot combine --remote-clean-containers with --remote-preserve-containers."
+  exit 1
+fi
+
 require_cmd(){
   command -v "$1" >/dev/null 2>&1 || { err "Missing required command: $1"; exit 1; }
 }
@@ -552,6 +575,27 @@ prompt_build_if_needed(){
   local build_reasons_output
   build_reasons_output=$(detect_build_needed)
 
+  if [ -z "$build_reasons_output" ]; then
+    # Belt-and-suspenders: if C++ modules are enabled but module images missing, warn
+    ensure_module_state
+    if [ "${#MODULES_COMPILE_LIST[@]}" -gt 0 ]; then
+      local authserver_modules_image
+      local worldserver_modules_image
+      authserver_modules_image="$(read_env AC_AUTHSERVER_IMAGE_MODULES "$(resolve_project_image "authserver-modules-latest")")"
+      worldserver_modules_image="$(read_env AC_WORLDSERVER_IMAGE_MODULES "$(resolve_project_image "worldserver-modules-latest")")"
+      local missing_images=()
+      if ! docker image inspect "$authserver_modules_image" >/dev/null 2>&1; then
+        missing_images+=("$authserver_modules_image")
+      fi
+      if ! docker image inspect "$worldserver_modules_image" >/dev/null 2>&1; then
+        missing_images+=("$worldserver_modules_image")
+      fi
+      if [ ${#missing_images[@]} -gt 0 ]; then
+        build_reasons_output=$(printf "C++ modules enabled but module images missing: %s\n" "${missing_images[*]}")
+      fi
+    fi
+  fi
+
   if [ -z "$build_reasons_output" ]; then
     return 0  # No build needed
   fi
@@ -693,14 +737,22 @@ run_remote_migration(){
     args+=(--copy-source)
   fi
 
-  if [ "$REMOTE_CLEAN_RUNTIME" -eq 1 ]; then
-    args+=(--cleanup-runtime)
+  if [ "$REMOTE_CLEAN_CONTAINERS" -eq 1 ]; then
+    args+=(--clean-containers)
   fi
 
   if [ "$ASSUME_YES" -eq 1 ]; then
     args+=(--yes)
   fi
 
+  if [ "$REMOTE_SKIP_ENV" -eq 1 ]; then
+    args+=(--skip-env)
+  fi
+
+  if [ "$REMOTE_PRESERVE_CONTAINERS" -eq 1 ]; then
+    args+=(--preserve-containers)
+  fi
+
   if [ -n "$REMOTE_ENV_FILE" ]; then
     args+=(--env-file "$REMOTE_ENV_FILE")
   fi
@@ -1,11 +1,110 @@
 name: ${COMPOSE_PROJECT_NAME}
+
+# =============================================================================
+# YAML ANCHORS - Shared Configuration Templates
+# =============================================================================
 
 x-logging: &logging-default
   driver: json-file
   options:
     max-size: "10m"
     max-file: "3"
 
+# Common database connection environment variables
+x-database-config: &database-config
+  CONTAINER_MYSQL: ${CONTAINER_MYSQL}
+  MYSQL_PORT: ${MYSQL_PORT}
+  MYSQL_USER: ${MYSQL_USER}
+  MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
+  DB_AUTH_NAME: ${DB_AUTH_NAME}
+  DB_WORLD_NAME: ${DB_WORLD_NAME}
+  DB_CHARACTERS_NAME: ${DB_CHARACTERS_NAME}
+  DB_RECONNECT_SECONDS: ${DB_RECONNECT_SECONDS}
+  DB_RECONNECT_ATTEMPTS: ${DB_RECONNECT_ATTEMPTS}
+
+# AzerothCore database connection strings
+x-azerothcore-databases: &azerothcore-databases
+  AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
+  AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
+  AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
+
+# Common storage volume mounts
+x-storage-volumes: &storage-volumes
+  - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
+  - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
+  - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
+
+# Authserver common configuration
+x-authserver-common: &authserver-common
+  user: "${CONTAINER_USER}"
+  environment:
+    <<: *azerothcore-databases
+    AC_UPDATES_ENABLE_DATABASES: "0"
+    AC_BIND_IP: "0.0.0.0"
+    AC_LOG_LEVEL: "1"
+    AC_LOGGER_ROOT_CONFIG: "1,Console"
+    AC_LOGGER_SERVER_CONFIG: "1,Console"
+    AC_APPENDER_CONSOLE_CONFIG: "1,2,0"
+  volumes: *storage-volumes
+  ports:
+    - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
+  restart: unless-stopped
+  logging: *logging-default
+  networks:
+    - azerothcore
+  cap_add: ["SYS_NICE"]
+  healthcheck: &auth-healthcheck
+    test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
+    interval: ${AUTH_HEALTHCHECK_INTERVAL}
+    timeout: ${AUTH_HEALTHCHECK_TIMEOUT}
+    retries: ${AUTH_HEALTHCHECK_RETRIES}
+    start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
+
+# Worldserver common configuration
+x-worldserver-common: &worldserver-common
+  user: "${CONTAINER_USER}"
+  stdin_open: true
+  tty: true
+  environment:
+    <<: *azerothcore-databases
+    AC_UPDATES_ENABLE_DATABASES: "7"
+    AC_BIND_IP: "0.0.0.0"
+    AC_DATA_DIR: "/azerothcore/data"
+    AC_SOAP_PORT: "${SOAP_PORT}"
+    AC_PROCESS_PRIORITY: "0"
+    AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}"
+    AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}"
+    AC_ELUNA_AUTO_RELOAD: "${AC_ELUNA_AUTO_RELOAD}"
+    AC_ELUNA_BYTECODE_CACHE: "${AC_ELUNA_BYTECODE_CACHE}"
+    AC_ELUNA_SCRIPT_PATH: "${AC_ELUNA_SCRIPT_PATH}"
+    AC_ELUNA_REQUIRE_PATHS: "${AC_ELUNA_REQUIRE_PATHS}"
+    AC_ELUNA_REQUIRE_CPATHS: "${AC_ELUNA_REQUIRE_CPATHS}"
+    AC_ELUNA_AUTO_RELOAD_INTERVAL: "${AC_ELUNA_AUTO_RELOAD_INTERVAL}"
+    PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}"
+    PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
+    AC_LOG_LEVEL: "2"
+  ports:
+    - "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
+    - "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
+  volumes:
+    - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
+    - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
+    - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
+    - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
+    - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/azerothcore/modules
+    - ${STORAGE_LUA_SCRIPTS_PATH:-${STORAGE_PATH}/lua_scripts}:/azerothcore/lua_scripts
+  restart: unless-stopped
+  logging: *logging-default
+  networks:
+    - azerothcore
+  cap_add: ["SYS_NICE"]
+  healthcheck: &world-healthcheck
+    test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
+    interval: ${WORLD_HEALTHCHECK_INTERVAL}
+    timeout: ${WORLD_HEALTHCHECK_TIMEOUT}
+    retries: ${WORLD_HEALTHCHECK_RETRIES}
+    start_period: ${WORLD_HEALTHCHECK_START_PERIOD}
+
 services:
   # =====================
   # Database Layer (db)
@@ -33,7 +132,7 @@ services:
       - mysql-data:/var/lib/mysql-persistent
       - ${BACKUP_PATH}:/backups
       - ${HOST_ZONEINFO_PATH}:/usr/share/zoneinfo:ro
-      - ${MYSQL_CONFIG_DIR:-${STORAGE_PATH}/config/mysql/conf.d}:/etc/mysql/conf.d
+      - ${MYSQL_CONFIG_DIR:-${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}/mysql/conf.d}:/etc/mysql/conf.d
     tmpfs:
       - /var/lib/mysql-runtime:size=${MYSQL_RUNTIME_TMPFS_SIZE}
     command:
@@ -71,12 +170,13 @@ services:
     networks:
       - azerothcore
     volumes:
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-      - ${STORAGE_PATH}/logs:/azerothcore/logs
-      - ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
-      - ${STAGE_PATH_MODULE_SQL:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
+      - ${AC_SQL_SOURCE_PATH:-${STORAGE_LOCAL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source}/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
+      - ${STAGE_PATH_MODULE_SQL:-${STORAGE_MODULE_SQL_PATH:-${STORAGE_PATH}/module-sql-updates}}:/modules-sql
       - mysql-data:/var/lib/mysql-persistent
-      - ${STORAGE_PATH}/modules:/modules
+      - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/modules
       - ${BACKUP_PATH}:/backups
       - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
       - ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
@@ -136,12 +236,13 @@ services:
     networks:
       - azerothcore
     volumes:
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-      - ${STORAGE_PATH}/logs:/azerothcore/logs
-      - ${AC_SQL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
-      - ${STAGE_PATH_MODULE_SQL:-${STORAGE_PATH}/module-sql-updates}:/modules-sql
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
+      - ${AC_SQL_SOURCE_PATH:-${STORAGE_LOCAL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source}/azerothcore-playerbots/data/sql}:/azerothcore/data/sql:ro
+      - ${STAGE_PATH_MODULE_SQL:-${STORAGE_MODULE_SQL_PATH:-${STORAGE_PATH}/module-sql-updates}}:/modules-sql
      - mysql-data:/var/lib/mysql-persistent
-      - ${STORAGE_PATH}/modules:/modules
+      - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/modules
       - ${BACKUP_PATH}:/backups
       - ./scripts/bash/db-import-conditional.sh:/tmp/db-import-conditional.sh:ro
       - ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
@@ -267,7 +368,7 @@ services:
       CONTAINER_USER: ${CONTAINER_USER}
     volumes:
       - ${BACKUP_PATH}:/backups
-      - ${STORAGE_PATH}/modules/.modules-meta:/modules-meta:ro
+      - ${STORAGE_MODULES_META_PATH:-${STORAGE_PATH}/modules/.modules-meta}:/modules-meta:ro
       - ./scripts:/tmp/scripts:ro
     working_dir: /tmp
     command:
@@ -336,7 +437,7 @@ services:
     container_name: ac-volume-init
     user: "0:0"
     volumes:
-      - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
+      - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
       - client-data-cache:/cache
     command:
       - sh
@@ -362,8 +463,17 @@ services:
     container_name: ac-storage-init
     user: "0:0"
     volumes:
-      - ${STORAGE_PATH}:/storage-root
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/storage-root/config
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/storage-root/logs
+      - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/storage-root/modules
+      - ${STORAGE_LUA_SCRIPTS_PATH:-${STORAGE_PATH}/lua_scripts}:/storage-root/lua_scripts
+      - ${STORAGE_INSTALL_MARKERS_PATH:-${STORAGE_PATH}/install-markers}:/storage-root/install-markers
+      - ${STORAGE_MODULE_SQL_PATH:-${STORAGE_PATH}/module-sql-updates}:/storage-root/module-sql-updates
+      - ${STORAGE_MODULES_META_PATH:-${STORAGE_PATH}/modules/.modules-meta}:/storage-root/modules/.modules-meta
+      - ${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/storage-root/client-data
+      - ${BACKUP_PATH}:/storage-root/backups
       - ${STORAGE_PATH_LOCAL}:/local-storage-root
+      - ${STORAGE_LOCAL_SOURCE_PATH:-${STORAGE_PATH_LOCAL}/source}:/local-storage-root/source
       - ./scripts/bash/seed-dbimport-conf.sh:/tmp/seed-dbimport-conf.sh:ro
     command:
       - sh
@@ -372,6 +482,7 @@ services:
           echo "🔧 Initializing storage directories with proper permissions..."
           mkdir -p /storage-root/config /storage-root/logs /storage-root/modules /storage-root/lua_scripts /storage-root/install-markers
           mkdir -p /storage-root/config/mysql/conf.d
+          mkdir -p /storage-root/module-sql-updates /storage-root/modules/.modules-meta
           mkdir -p /storage-root/client-data
           mkdir -p /storage-root/backups
 
@@ -440,7 +551,7 @@ services:
       ac-volume-init:
         condition: service_completed_successfully
     volumes:
-      - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
+      - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
       - client-data-cache:/cache
       - ./scripts:/tmp/scripts:ro
     working_dir: /tmp
@@ -471,7 +582,7 @@ services:
       ac-volume-init:
         condition: service_completed_successfully
     volumes:
-      - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
+      - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
       - client-data-cache:/cache
       - ./scripts:/tmp/scripts:ro
     working_dir: /tmp
@@ -503,10 +614,10 @@ services:
   # Services - Standard (services-standard)
   # =====================
   ac-authserver-standard:
+    <<: *authserver-common
     profiles: ["services-standard"]
     image: ${AC_AUTHSERVER_IMAGE}
     container_name: ac-authserver
-    user: "${CONTAINER_USER}"
     depends_on:
       ac-mysql:
         condition: service_healthy
@@ -514,91 +625,26 @@ services:
         condition: service_completed_successfully
       ac-db-init:
         condition: service_completed_successfully
-    environment:
-      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
-      AC_UPDATES_ENABLE_DATABASES: "0"
-      AC_BIND_IP: "0.0.0.0"
-      AC_LOG_LEVEL: "1"
-      AC_LOGGER_ROOT_CONFIG: "1,Console"
-      AC_LOGGER_SERVER_CONFIG: "1,Console"
-      AC_APPENDER_CONSOLE_CONFIG: "1,2,0"
-    ports:
-      - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
-    restart: unless-stopped
-    logging: *logging-default
-    networks:
-      - azerothcore
-    volumes:
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-    cap_add: ["SYS_NICE"]
-    healthcheck:
-      test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
-      interval: ${AUTH_HEALTHCHECK_INTERVAL}
-      timeout: ${AUTH_HEALTHCHECK_TIMEOUT}
-      retries: ${AUTH_HEALTHCHECK_RETRIES}
-      start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
 
   ac-worldserver-standard:
+    <<: *worldserver-common
     profiles: ["services-standard"]
     image: ${AC_WORLDSERVER_IMAGE}
     container_name: ac-worldserver
-    user: "${CONTAINER_USER}"
-    stdin_open: true
-    tty: true
     depends_on:
       ac-authserver-standard:
         condition: service_healthy
       ac-client-data-standard:
         condition: service_completed_successfully
-    environment:
-      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
-      AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
-      AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
-      AC_UPDATES_ENABLE_DATABASES: "7"
-      AC_BIND_IP: "0.0.0.0"
-      AC_DATA_DIR: "/azerothcore/data"
-      AC_SOAP_PORT: "${SOAP_PORT}"
-      AC_PROCESS_PRIORITY: "0"
-      AC_ELUNA_ENABLED: "${AC_ELUNA_ENABLED}"
-      AC_ELUNA_TRACE_BACK: "${AC_ELUNA_TRACE_BACK}"
-      AC_ELUNA_AUTO_RELOAD: "${AC_ELUNA_AUTO_RELOAD}"
-      AC_ELUNA_BYTECODE_CACHE: "${AC_ELUNA_BYTECODE_CACHE}"
-      AC_ELUNA_SCRIPT_PATH: "${AC_ELUNA_SCRIPT_PATH}"
-      AC_ELUNA_REQUIRE_PATHS: "${AC_ELUNA_REQUIRE_PATHS}"
-      AC_ELUNA_REQUIRE_CPATHS: "${AC_ELUNA_REQUIRE_CPATHS}"
-      AC_ELUNA_AUTO_RELOAD_INTERVAL: "${AC_ELUNA_AUTO_RELOAD_INTERVAL}"
-      PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}"
-      PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
-      AC_LOG_LEVEL: "2"
-    ports:
-      - "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
-      - "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
-    volumes:
-      - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-      - ${STORAGE_PATH}/logs:/azerothcore/logs
-      - ${STORAGE_PATH}/modules:/azerothcore/modules
-      - ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
-    restart: unless-stopped
-    logging: *logging-default
-    networks:
-      - azerothcore
-    cap_add: ["SYS_NICE"]
-    healthcheck:
-      test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
-      interval: ${WORLD_HEALTHCHECK_INTERVAL}
-      timeout: ${WORLD_HEALTHCHECK_TIMEOUT}
-      retries: ${WORLD_HEALTHCHECK_RETRIES}
-      start_period: ${WORLD_HEALTHCHECK_START_PERIOD}
 
   # =====================
   # Services - Playerbots (services-playerbots)
   # =====================
   ac-authserver-playerbots:
+    <<: *authserver-common
     profiles: ["services-playerbots"]
     image: ${AC_AUTHSERVER_IMAGE_PLAYERBOTS}
     container_name: ac-authserver
-    user: "${CONTAINER_USER}"
     depends_on:
       ac-mysql:
         condition: service_healthy
@@ -607,7 +653,7 @@ services:
       ac-db-init:
         condition: service_completed_successfully
     environment:
-      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
+      <<: *azerothcore-databases
       AC_UPDATES_ENABLE_DATABASES: "0"
       AC_BIND_IP: "0.0.0.0"
       TZ: "${TZ}"
@@ -615,21 +661,6 @@ services:
       AC_LOGGER_ROOT_CONFIG: "1,Console"
       AC_LOGGER_SERVER_CONFIG: "1,Console"
       AC_APPENDER_CONSOLE_CONFIG: "1,2,0"
-    ports:
-      - "${AUTH_EXTERNAL_PORT}:${AUTH_PORT}"
-    restart: unless-stopped
-    logging: *logging-default
-    networks:
-      - azerothcore
-    volumes:
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-    cap_add: ["SYS_NICE"]
-    healthcheck:
-      test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
-      interval: ${AUTH_HEALTHCHECK_INTERVAL}
-      timeout: ${AUTH_HEALTHCHECK_TIMEOUT}
-      retries: ${AUTH_HEALTHCHECK_RETRIES}
-      start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
 
   ac-authserver-modules:
     profiles: ["services-modules"]
@@ -658,7 +689,7 @@ services:
     networks:
       - azerothcore
     volumes:
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
     cap_add: ["SYS_NICE"]
     healthcheck:
       test: ["CMD", "sh", "-c", "ps aux | grep '[a]uthserver' | grep -v grep || exit 1"]
@@ -668,12 +699,10 @@ services:
       start_period: ${AUTH_HEALTHCHECK_START_PERIOD}
 
   ac-worldserver-playerbots:
+    <<: *worldserver-common
     profiles: ["services-playerbots"]
     image: ${AC_WORLDSERVER_IMAGE_PLAYERBOTS}
     container_name: ac-worldserver
-    user: "${CONTAINER_USER}"
-    stdin_open: true
-    tty: true
     depends_on:
       ac-authserver-playerbots:
         condition: service_healthy
@@ -682,9 +711,7 @@ services:
       ac-db-guard:
         condition: service_healthy
     environment:
-      AC_LOGIN_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_AUTH_NAME}"
-      AC_WORLD_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_WORLD_NAME}"
-      AC_CHARACTER_DATABASE_INFO: "${CONTAINER_MYSQL};${MYSQL_PORT};${MYSQL_USER};${MYSQL_ROOT_PASSWORD};${DB_CHARACTERS_NAME}"
+      <<: *azerothcore-databases
       AC_UPDATES_ENABLE_DATABASES: "7"
       AC_BIND_IP: "0.0.0.0"
       AC_DATA_DIR: "/azerothcore/data"
@@ -702,26 +729,6 @@ services:
       PLAYERBOT_ENABLED: "${PLAYERBOT_ENABLED}"
       PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
      AC_LOG_LEVEL: "2"
-    ports:
-      - "${WORLD_EXTERNAL_PORT}:${WORLD_PORT}"
-      - "${SOAP_EXTERNAL_PORT}:${SOAP_PORT}"
-    volumes:
-      - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-      - ${STORAGE_PATH}/logs:/azerothcore/logs
-      - ${STORAGE_PATH}/modules:/azerothcore/modules
-      - ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
-    restart: unless-stopped
-    logging: *logging-default
-    networks:
-      - azerothcore
-    cap_add: ["SYS_NICE"]
-    healthcheck:
-      test: ["CMD", "sh", "-c", "ps aux | grep '[w]orldserver' | grep -v grep || exit 1"]
-      interval: ${WORLD_HEALTHCHECK_INTERVAL}
-      timeout: ${WORLD_HEALTHCHECK_TIMEOUT}
-      retries: ${WORLD_HEALTHCHECK_RETRIES}
-      start_period: ${WORLD_HEALTHCHECK_START_PERIOD}
 
   ac-worldserver-modules:
     profiles: ["services-modules"]
@@ -758,11 +765,12 @@ services:
       PLAYERBOT_MAX_BOTS: "${PLAYERBOT_MAX_BOTS}"
       AC_LOG_LEVEL: "2"
     volumes:
-      - ${CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}:/azerothcore/data
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
-      - ${STORAGE_PATH}/logs:/azerothcore/logs
-      - ${STORAGE_PATH}/modules:/azerothcore/modules
-      - ${STORAGE_PATH}/lua_scripts:/azerothcore/lua_scripts
+      - ${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}:/azerothcore/data
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/logs
+      - ${STORAGE_LOGS_PATH:-${STORAGE_PATH}/logs}:/azerothcore/env/dist/logs
+      - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/azerothcore/modules
+      - ${STORAGE_LUA_SCRIPTS_PATH:-${STORAGE_PATH}/lua_scripts}:/azerothcore/lua_scripts
     networks:
       - azerothcore
     ports:
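
The nested `${VAR:-fallback}` expressions introduced above resolve innermost-first, so the legacy `CLIENT_DATA_PATH` still wins when set. A minimal shell sketch of the precedence (Docker Compose v2 interpolation follows the same rule; the values below are illustrative, not from the repo):

```bash
# Precedence sketch for the nested defaults above (illustrative values).
unset CLIENT_DATA_PATH STORAGE_CLIENT_DATA_PATH
STORAGE_PATH=./storage
echo "${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}"
# -> ./storage/client-data          (final fallback)

STORAGE_CLIENT_DATA_PATH=/nfs/azerothcore/client-data
echo "${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}"
# -> /nfs/azerothcore/client-data   (new dedicated variable)

CLIENT_DATA_PATH=/mnt/override
echo "${CLIENT_DATA_PATH:-${STORAGE_CLIENT_DATA_PATH:-${STORAGE_PATH}/client-data}}"
# -> /mnt/override                  (legacy override still wins)
```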
@@ -796,8 +804,8 @@ services:
       ac-storage-init:
         condition: service_completed_successfully
     volumes:
-      - ${STORAGE_PATH}/modules:/modules
-      - ${STORAGE_PATH}/config:/azerothcore/env/dist/etc
+      - ${STORAGE_MODULES_PATH:-${STORAGE_PATH}/modules}:/modules
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/env/dist/etc
       - ./scripts:/tmp/scripts:ro
       - ./config:/tmp/config:ro
     env_file:
@@ -822,8 +830,8 @@ services:
     container_name: ${CONTAINER_POST_INSTALL}
     user: "0:0"
     volumes:
-      - ${STORAGE_PATH}/config:/azerothcore/config
-      - ${STORAGE_PATH}/install-markers:/install-markers
+      - ${STORAGE_CONFIG_PATH:-${STORAGE_PATH}/config}:/azerothcore/config
+      - ${STORAGE_INSTALL_MARKERS_PATH:-${STORAGE_PATH}/install-markers}:/install-markers
       - ./scripts:/tmp/scripts:ro
       - /var/run/docker.sock:/var/run/docker.sock:rw
     working_dir: /tmp

@@ -1,261 +0,0 @@
-# Generated by azerothcore-rm/setup.sh
-
-# Compose overrides (set to 1 to include matching file under compose-overrides/)
-# mysql-expose.yml -> exposes MySQL externally via COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED
-# worldserver-debug-logging.yml -> raises log verbosity via COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED
-COMPOSE_OVERRIDE_MYSQL_EXPOSE_ENABLED=0
-COMPOSE_OVERRIDE_WORLDSERVER_DEBUG_LOGGING_ENABLED=0
-
-COMPOSE_PROJECT_NAME=azerothcore-stack
-
-STORAGE_PATH=/nfs/azerothcore
-STORAGE_PATH_LOCAL=./local-storage
-BACKUP_PATH=${STORAGE_PATH}/backups
-TZ=America/New_York
-
-# Database
-MYSQL_IMAGE=mysql:8.0
-MYSQL_ROOT_PASSWORD=azerothcore123
-MYSQL_ROOT_HOST=%
-MYSQL_USER=root
-MYSQL_PORT=3306
-MYSQL_EXTERNAL_PORT=64306
-MYSQL_DISABLE_BINLOG=1
-MYSQL_CONFIG_DIR=${STORAGE_PATH}/config/mysql/conf.d
-MYSQL_CHARACTER_SET=utf8mb4
-MYSQL_COLLATION=utf8mb4_unicode_ci
-MYSQL_MAX_CONNECTIONS=1000
-MYSQL_INNODB_BUFFER_POOL_SIZE=256M
-MYSQL_INNODB_LOG_FILE_SIZE=64M
-MYSQL_INNODB_REDO_LOG_CAPACITY=512M
-MYSQL_RUNTIME_TMPFS_SIZE=8G
-MYSQL_HOST=ac-mysql
-DB_WAIT_RETRIES=60
-DB_WAIT_SLEEP=10
-DB_AUTH_NAME=acore_auth
-DB_WORLD_NAME=acore_world
-DB_CHARACTERS_NAME=acore_characters
-DB_PLAYERBOTS_NAME=acore_playerbots
-AC_DB_IMPORT_IMAGE=azerothcore-stack:db-import-playerbots
-
-# Services (images)
-AC_AUTHSERVER_IMAGE=acore/ac-wotlk-authserver:master
-AC_WORLDSERVER_IMAGE=acore/ac-wotlk-worldserver:master
-AC_AUTHSERVER_IMAGE_PLAYERBOTS=azerothcore-stack:authserver-playerbots
-AC_WORLDSERVER_IMAGE_PLAYERBOTS=azerothcore-stack:worldserver-playerbots
-AC_AUTHSERVER_IMAGE_MODULES=azerothcore-stack:authserver-modules-latest
-AC_WORLDSERVER_IMAGE_MODULES=azerothcore-stack:worldserver-modules-latest
-
-# Client data images
-AC_CLIENT_DATA_IMAGE=acore/ac-wotlk-client-data:master
-AC_CLIENT_DATA_IMAGE_PLAYERBOTS=azerothcore-stack:client-data-playerbots
-CLIENT_DATA_CACHE_PATH=${STORAGE_PATH_LOCAL}/client-data-cache
-CLIENT_DATA_PATH=${STORAGE_PATH}/client-data
-
-# Build artifacts
-DOCKER_IMAGE_TAG=master
-AC_AUTHSERVER_IMAGE_BASE=acore/ac-wotlk-authserver
-AC_WORLDSERVER_IMAGE_BASE=acore/ac-wotlk-worldserver
-AC_DB_IMPORT_IMAGE_BASE=acore/ac-wotlk-db-import
-AC_CLIENT_DATA_IMAGE_BASE=acore/ac-wotlk-client-data
-
-# Container user
-CONTAINER_USER=1001:1000
-
-# Containers
-CONTAINER_MYSQL=ac-mysql
-CONTAINER_DB_IMPORT=ac-db-import
-CONTAINER_DB_INIT=ac-db-init
-CONTAINER_BACKUP=ac-backup
-CONTAINER_MODULES=ac-modules
-CONTAINER_POST_INSTALL=ac-post-install
-
-# Ports
-AUTH_EXTERNAL_PORT=3784
-AUTH_PORT=3724
-WORLD_EXTERNAL_PORT=8215
-WORLD_PORT=8085
-SOAP_EXTERNAL_PORT=7778
-SOAP_PORT=7878
-
-# Realm
-SERVER_ADDRESS=192.168.0.179
-REALM_PORT=8215
-
-# Backups
-BACKUP_RETENTION_DAYS=3
-BACKUP_RETENTION_HOURS=6
-BACKUP_DAILY_TIME=09
-BACKUP_HEALTHCHECK_MAX_MINUTES=1440
-BACKUP_HEALTHCHECK_GRACE_SECONDS=4500
-
-
-# Modules
-MODULE_PLAYERBOTS=1
-MODULE_AOE_LOOT=0
-MODULE_LEARN_SPELLS=1
-MODULE_FIREWORKS=1
-MODULE_INDIVIDUAL_PROGRESSION=0
-MODULE_AHBOT=0
-MODULE_AUTOBALANCE=0
-MODULE_TRANSMOG=1
-MODULE_NPC_BUFFER=1
-MODULE_DYNAMIC_XP=0
-MODULE_SOLO_LFG=1
-MODULE_1V1_ARENA=1
-MODULE_PHASED_DUELS=0
-MODULE_BREAKING_NEWS=1
-MODULE_BOSS_ANNOUNCER=1
-MODULE_ACCOUNT_ACHIEVEMENTS=1
-MODULE_AUTO_REVIVE=1
-MODULE_GAIN_HONOR_GUARD=1
-MODULE_ELUNA=1
-MODULE_TIME_IS_TIME=1
-MODULE_POCKET_PORTAL=0
-MODULE_RANDOM_ENCHANTS=1
-MODULE_SOLOCRAFT=1
-MODULE_PVP_TITLES=0
-MODULE_NPC_BEASTMASTER=1
-MODULE_NPC_ENCHANTER=1
-MODULE_INSTANCE_RESET=1
-MODULE_LEVEL_GRANT=0
-MODULE_ARAC=1
-MODULE_ASSISTANT=1
-MODULE_REAGENT_BANK=1
-MODULE_BLACK_MARKET_AUCTION_HOUSE=1
-MODULE_CHALLENGE_MODES=0
-MODULE_OLLAMA_CHAT=0
-MODULE_PLAYER_BOT_LEVEL_BRACKETS=0
-MODULE_STATBOOSTER=0
-MODULE_DUNGEON_RESPAWN=0
-MODULE_SKELETON_MODULE=0
-MODULE_BG_SLAVERYVALLEY=0
-MODULE_AZEROTHSHARD=0
-MODULE_WORGOBLIN=0
-MODULE_ELUNA_TS=1
-MODULE_AIO=1
-MODULE_ELUNA_SCRIPTS=1
-MODULE_TRANSMOG_AIO=0
-MODULE_EVENT_SCRIPTS=1
-MODULE_LEVEL_UP_REWARD=0
-MODULE_ACCOUNTWIDE_SYSTEMS=0
-MODULE_EXCHANGE_NPC=0
-MODULE_RECRUIT_A_FRIEND=0
-MODULE_PRESTIGE_DRAFT_MODE=0
-MODULE_LUA_AH_BOT=0
-MODULE_HARDCORE_MODE=0
-MODULE_NPCBOT_EXTENDED_COMMANDS=0
-MODULE_MULTIVENDOR=0
-MODULE_TREASURE_CHEST_SYSTEM=0
-MODULE_ACTIVE_CHAT=1
-MODULE_ULTIMATE_FULL_LOOT_PVP=0
-MODULE_HORADRIC_CUBE=0
-MODULE_CARBON_COPY=0
-MODULE_TEMP_ANNOUNCEMENTS=0
-MODULE_ZONE_CHECK=0
-MODULE_AIO_BLACKJACK=0
-MODULE_SEND_AND_BIND=0
-MODULE_DYNAMIC_TRADER=0
-MODULE_LOTTERY_LUA=0
-MODULE_DISCORD_NOTIFIER=0
-MODULE_GLOBAL_MAIL_BANKING_AUCTIONS=0
-MODULE_GUILDHOUSE=1
-MODULE_PROGRESSION_SYSTEM=0
-MODULE_NPC_FREE_PROFESSIONS=1
-MODULE_DUEL_RESET=0
-MODULE_ZONE_DIFFICULTY=0
-MODULE_MORPHSUMMON=1
-MODULE_SPELL_REGULATOR=0
-MODULE_WEEKEND_XP=0
-MODULE_REWARD_PLAYED_TIME=0
-MODULE_RESURRECTION_SCROLL=0
-MODULE_ITEM_LEVEL_UP=1
-MODULE_NPC_TALENT_TEMPLATE=0
-MODULE_GLOBAL_CHAT=1
-MODULE_PREMIUM=0
-MODULE_SYSTEM_VIP=0
-MODULE_ACORE_SUBSCRIPTIONS=0
-MODULE_KEEP_OUT=0
-MODULE_SERVER_AUTO_SHUTDOWN=0
-MODULE_WHO_LOGGED=0
-MODULE_ACCOUNT_MOUNTS=0
-MODULE_ANTIFARMING=0
-MODULE_ARENA_REPLAY=0
-MODULE_TIC_TAC_TOE=0
-MODULE_WAR_EFFORT=0
-MODULE_PROMOTION_AZEROTHCORE=0
-
-# Client data
-CLIENT_DATA_VERSION=
-
-# Server configuration
-SERVER_CONFIG_PRESET=none
-
-# Playerbot runtime
-PLAYERBOT_ENABLED=1
-PLAYERBOT_MIN_BOTS=2000
-PLAYERBOT_MAX_BOTS=4000
-STACK_IMAGE_MODE=playerbots
-STACK_SOURCE_VARIANT=playerbots
-MODULES_ENABLED_LIST=MODULE_PLAYERBOTS,MODULE_LEARN_SPELLS,MODULE_FIREWORKS,MODULE_TRANSMOG,MODULE_NPC_BUFFER,MODULE_SOLO_LFG,MODULE_1V1_ARENA,MODULE_BREAKING_NEWS,MODULE_BOSS_ANNOUNCER,MODULE_ACCOUNT_ACHIEVEMENTS,MODULE_AUTO_REVIVE,MODULE_GAIN_HONOR_GUARD,MODULE_ELUNA,MODULE_TIME_IS_TIME,MODULE_RANDOM_ENCHANTS,MODULE_SOLOCRAFT,MODULE_NPC_BEASTMASTER,MODULE_NPC_ENCHANTER,MODULE_INSTANCE_RESET,MODULE_ARAC,MODULE_ASSISTANT,MODULE_REAGENT_BANK,MODULE_BLACK_MARKET_AUCTION_HOUSE,MODULE_STATBOOSTER,MODULE_ELUNA_TS,MODULE_AIO,MODULE_ELUNA_SCRIPTS,MODULE_EVENT_SCRIPTS,MODULE_ACTIVE_CHAT,MODULE_GUILDHOUSE,MODULE_NPC_FREE_PROFESSIONS,MODULE_MORPHSUMMON,MODULE_ITEM_LEVEL_UP,MODULE_GLOBAL_CHAT
-MODULES_CPP_LIST=MODULE_LEARN_SPELLS,MODULE_FIREWORKS,MODULE_TRANSMOG,MODULE_NPC_BUFFER,MODULE_SOLO_LFG,MODULE_1V1_ARENA,MODULE_BREAKING_NEWS,MODULE_BOSS_ANNOUNCER,MODULE_ACCOUNT_ACHIEVEMENTS,MODULE_AUTO_REVIVE,MODULE_GAIN_HONOR_GUARD,MODULE_ELUNA,MODULE_TIME_IS_TIME,MODULE_RANDOM_ENCHANTS,MODULE_SOLOCRAFT,MODULE_NPC_BEASTMASTER,MODULE_NPC_ENCHANTER,MODULE_INSTANCE_RESET,MODULE_ARAC,MODULE_ASSISTANT,MODULE_REAGENT_BANK,MODULE_STATBOOSTER,MODULE_AIO,MODULE_GUILDHOUSE,MODULE_NPC_FREE_PROFESSIONS,MODULE_MORPHSUMMON,MODULE_ITEM_LEVEL_UP,MODULE_GLOBAL_CHAT
-MODULES_REQUIRES_CUSTOM_BUILD=1
-MODULES_REQUIRES_PLAYERBOT_SOURCE=1
-
-# Rebuild automation
-AUTO_REBUILD_ON_DEPLOY=0
-MODULES_REBUILD_SOURCE_PATH=./local-storage/source/azerothcore-playerbots
-
-# Eluna
-AC_ELUNA_ENABLED=1 # Power users may set to 0 to turn off bundled Eluna runtime
-AC_ELUNA_TRACE_BACK=1
-AC_ELUNA_AUTO_RELOAD=1
-AC_ELUNA_BYTECODE_CACHE=1
-AC_ELUNA_SCRIPT_PATH=lua_scripts
-AC_ELUNA_REQUIRE_PATHS=
-AC_ELUNA_REQUIRE_CPATHS=
-AC_ELUNA_AUTO_RELOAD_INTERVAL=1
-
-# Tools
-PMA_HOST=ac-mysql
-PMA_PORT=3306
-PMA_USER=root
-PMA_EXTERNAL_PORT=8081
-PMA_ARBITRARY=1
-PMA_ABSOLUTE_URI=
-PMA_UPLOAD_LIMIT=300M
-PMA_MEMORY_LIMIT=512M
-PMA_MAX_EXECUTION_TIME=600
-KEIRA3_EXTERNAL_PORT=4201
-KEIRA_DATABASE_HOST=ac-mysql
-KEIRA_DATABASE_PORT=3306
-
-# Health checks
-MYSQL_HEALTHCHECK_INTERVAL=20s
-MYSQL_HEALTHCHECK_TIMEOUT=15s
-MYSQL_HEALTHCHECK_RETRIES=25
-MYSQL_HEALTHCHECK_START_PERIOD=120s
-AUTH_HEALTHCHECK_INTERVAL=30s
-AUTH_HEALTHCHECK_TIMEOUT=10s
-AUTH_HEALTHCHECK_RETRIES=3
-AUTH_HEALTHCHECK_START_PERIOD=60s
-WORLD_HEALTHCHECK_INTERVAL=30s
-WORLD_HEALTHCHECK_TIMEOUT=10s
-WORLD_HEALTHCHECK_RETRIES=3
-WORLD_HEALTHCHECK_START_PERIOD=120s
-BACKUP_HEALTHCHECK_INTERVAL=60s
-BACKUP_HEALTHCHECK_TIMEOUT=30s
-BACKUP_HEALTHCHECK_RETRIES=3
-BACKUP_HEALTHCHECK_START_PERIOD=120s
-
-# Networking
-NETWORK_NAME=azerothcore
-NETWORK_SUBNET=172.20.0.0/16
-NETWORK_GATEWAY=172.20.0.1
-
-# Storage helpers
-HOST_ZONEINFO_PATH=/usr/share/zoneinfo
-
-# Helper images
-ALPINE_GIT_IMAGE=alpine/git:latest
-ALPINE_IMAGE=alpine:latest

@@ -170,8 +170,12 @@ Optional flags:
 - `--remote-port 2222` - Custom SSH port
 - `--remote-identity ~/.ssh/custom_key` - Specific SSH key
 - `--remote-skip-storage` - Don't sync storage directory (fresh install on remote)
+- `--remote-clean-containers` - Stop/remove existing `ac-*` containers and project images during migration
+- `--remote-skip-env` - Leave the remote `.env` untouched (won't upload local one)
+- `--remote-preserve-containers` - Do not stop/remove existing `ac-*` containers/images during migration
 - `--remote-storage-path /mnt/acore-storage` - Override STORAGE_PATH on the remote host (local-storage stays per .env)
 - `--remote-container-user 1001:1001` - Override CONTAINER_USER on the remote host (uid:gid)
+- Note: do not combine `--remote-clean-containers` with `--remote-preserve-containers`; the flags are mutually exclusive.
 
 ### Step 3: Deploy on Remote Host
 ```bash
@@ -216,11 +220,12 @@ The remote deployment process transfers:
 ```
 - `setup.sh` automatically adds these presets to the module menu and enables the listed modules when selected or when `--module-config <name>` is provided.
 - Built-in presets:
-  - `config/module-profiles/RealmMaster.json` – 33-module baseline used for testing.
-  - `config/module-profiles/suggested-modules.json` – default solo-friendly QoL stack.
-  - `config/module-profiles/playerbots-suggested-modules.json` – suggested stack plus playerbots.
-  - `config/module-profiles/playerbots-only.json` – playerbot-focused profile (adjust `--playerbot-max-bots`).
-  - `config/module-profiles/all-modules.json` – enable everything currently marked supported/active.
+  - `config/module-profiles/RealmMaster.json` – 33-module baseline used for testing.
+  - `config/module-profiles/suggested-modules.json` – light AzerothCore QoL stack without playerbots.
+  - `config/module-profiles/playerbots-suggested-modules.json` – suggested QoL stack plus playerbots.
+  - `config/module-profiles/azerothcore-vanilla.json` – pure AzerothCore (no optional modules).
+  - `config/module-profiles/playerbots-only.json` – playerbot prerequisites only (tune bot counts separately).
+  - `config/module-profiles/all-modules.json` – enable everything currently marked supported/active (not recommended).
 - Module metadata lives in `config/module-manifest.json`; update that file if you need to add new modules or change repositories/branches.
 
 ---

@@ -233,10 +233,13 @@ This will present a menu for selecting individual modules or choosing from prede
 
 Pre-configured module combinations are available in `config/module-profiles/`:
 
-- **Suggested Modules** - Baseline solo-friendly quality of life mix
-- **Playerbots Suggested** - Suggested stack plus playerbots
-- **Playerbots Only** - Playerbot-focused profile
-- **Custom Profiles** - Additional specialized configurations
+- `RealmMaster` - 33-module baseline used for day-to-day testing
+- `suggested-modules` - Light AzerothCore QoL stack without playerbots
+- `playerbots-suggested-modules` - Suggested QoL stack plus playerbots
+- `azerothcore-vanilla` - Pure AzerothCore with no optional modules
+- `playerbots-only` - Playerbot prerequisites only
+- `all-modules` - Everything in the manifest (not recommended)
+- Custom profiles - Drop new JSON files to add your own combinations
 
 ### Manual Configuration
 

@@ -23,7 +23,7 @@ Interactive `.env` generator with module selection, server configuration, and de
 
 ```bash
 ./setup.sh                                # Interactive configuration
-./setup.sh --module-config RealmMaster    # Use predefined module profile, check profiles directory
+./setup.sh --module-config RealmMaster    # Use predefined module profile (see config/module-profiles)
 ./setup.sh --playerbot-max-bots 3000      # Set playerbot limits
 ```
 

@@ -195,8 +195,10 @@ else
   # Step 3: Update realmlist table
   echo ""
   echo "🌐 Step 3: Updating realmlist table..."
+  echo " 🔧 Setting realm address to: ${SERVER_ADDRESS}:${REALM_PORT}"
   mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_ROOT_PASSWORD}" --skip-ssl-verify "${DB_AUTH_NAME}" -e "
     UPDATE realmlist SET address='${SERVER_ADDRESS}', port=${REALM_PORT} WHERE id=1;
+    SELECT CONCAT(' ✓ Realm configured: ', name, ' at ', address, ':', port) AS status FROM realmlist WHERE id=1;
   " || echo "⚠️ Could not update realmlist table"
 
   echo "✅ Realmlist updated"

@@ -7,6 +7,17 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
 cd "$SCRIPT_DIR"
 
+# Source common libraries for standardized functionality
+if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
+  exit 1
+fi
+
+# Source utility libraries
+source "$SCRIPT_DIR/lib/mysql-utils.sh" 2>/dev/null || warn "MySQL utilities not available"
+source "$SCRIPT_DIR/lib/docker-utils.sh" 2>/dev/null || warn "Docker utilities not available"
+source "$SCRIPT_DIR/lib/env-utils.sh" 2>/dev/null || warn "Environment utilities not available"
+
 # Load environment defaults if present
 if [ -f "$PROJECT_ROOT/.env" ]; then
   set -a
@@ -63,7 +74,7 @@ Examples:
 EOF
 }
 
-err(){ printf 'Error: %s\n' "$*" >&2; }
+# Use standardized error function from lib/common.sh
 die(){ err "$1"; exit 1; }
 
 normalize_token(){
@@ -104,10 +115,14 @@ remove_from_list(){
   arr=("${filtered[@]}")
 }
 
+# Use env-utils.sh function if available, fallback to local implementation
 resolve_relative(){
-  local base="$1" path="$2"
-  if command -v python3 >/dev/null 2>&1; then
-    python3 - "$base" "$path" <<'PY'
+  if command -v path_resolve_absolute >/dev/null 2>&1; then
+    path_resolve_absolute "$2" "$1"
+  else
+    local base="$1" path="$2"
+    if command -v python3 >/dev/null 2>&1; then
+      python3 - "$base" "$path" <<'PY'
 import os, sys
 base, path = sys.argv[1:3]
 if not path:
@@ -117,8 +132,9 @@ elif os.path.isabs(path):
 else:
     print(os.path.normpath(os.path.join(base, path)))
 PY
   else
     die "python3 is required but was not found on PATH"
+    fi
   fi
 }
 
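
A self-contained sketch of the python3 fallback that `resolve_relative` keeps in the hunks above; only the two branches visible in the diff are reproduced (the empty-path branch is elided there, so it is omitted here too), and the sample paths are hypothetical:

```bash
#!/bin/bash
# Mirrors the visible python3 fallback branches of resolve_relative.
resolve_py() {
  python3 - "$1" "$2" <<'PY'
import os, sys
base, path = sys.argv[1:3]
if os.path.isabs(path):
    print(path)  # absolute paths pass through unchanged
else:
    print(os.path.normpath(os.path.join(base, path)))  # relative paths join onto base
PY
}

resolve_py "/opt/stack" "./storage"     # -> /opt/stack/storage
resolve_py "/opt/stack" "/nfs/azeroth"  # -> /nfs/azeroth
```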
@@ -248,7 +264,13 @@ generated_at="$(date --iso-8601=seconds)"
 dump_db(){
   local schema="$1" outfile="$2"
   echo "Dumping ${schema} -> ${outfile}"
-  docker exec "$MYSQL_CONTAINER" mysqldump -uroot -p"$MYSQL_PW" "$schema" | gzip > "$outfile"
+  # Use mysql-utils.sh function if available, fallback to direct command
+  if command -v mysql_backup_database >/dev/null 2>&1; then
+    mysql_backup_database "$schema" "$outfile" "gzip" "$MYSQL_CONTAINER" "$MYSQL_PW"
+  else
+    docker exec "$MYSQL_CONTAINER" mysqldump -uroot -p"$MYSQL_PW" "$schema" | gzip > "$outfile"
+  fi
 }
 
 for db in "${ACTIVE_DBS[@]}"; do

@@ -6,15 +6,19 @@ INVOCATION_DIR="$PWD"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 cd "$SCRIPT_DIR"
 
-COLOR_RED='\033[0;31m'
-COLOR_GREEN='\033[0;32m'
-COLOR_YELLOW='\033[1;33m'
-COLOR_RESET='\033[0m'
-
-log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
-warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
-err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
-fatal(){ err "$*"; exit 1; }
+# Source common libraries for standardized functionality
+if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
+  exit 1
+fi
+
+# Source utility libraries
+source "$SCRIPT_DIR/lib/mysql-utils.sh" 2>/dev/null || warn "MySQL utilities not available"
+source "$SCRIPT_DIR/lib/docker-utils.sh" 2>/dev/null || warn "Docker utilities not available"
+source "$SCRIPT_DIR/lib/env-utils.sh" 2>/dev/null || warn "Environment utilities not available"
+
+# Use log() for main output to maintain existing behavior
+log() { ok "$*"; }
 
 SUPPORTED_DBS=(auth characters world)
 declare -A SUPPORTED_SET=()
@@ -102,10 +106,14 @@ remove_from_list(){
   arr=("${filtered[@]}")
 }
 
+# Use env-utils.sh function if available, fallback to local implementation
 resolve_relative(){
-  local base="$1" path="$2"
-  if command -v python3 >/dev/null 2>&1; then
-    python3 - "$base" "$path" <<'PY'
+  if command -v path_resolve_absolute >/dev/null 2>&1; then
+    path_resolve_absolute "$2" "$1"
+  else
+    local base="$1" path="$2"
+    if command -v python3 >/dev/null 2>&1; then
+      python3 - "$base" "$path" <<'PY'
 import os, sys
 base, path = sys.argv[1:3]
 if not path:
@@ -115,8 +123,9 @@ elif os.path.isabs(path):
 else:
     print(os.path.normpath(os.path.join(base, path)))
 PY
   else
     fatal "python3 is required but was not found on PATH"
+    fi
   fi
 }
 
@@ -280,7 +289,13 @@ backup_db(){
   local out="manual-backups/${label}-pre-import-$(timestamp).sql"
   mkdir -p manual-backups
   log "Backing up current ${schema} to ${out}"
-  docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" > "$out"
+  # Use mysql-utils.sh function if available, fallback to direct command
+  if command -v mysql_backup_database >/dev/null 2>&1; then
+    mysql_backup_database "$schema" "$out" "none" "ac-mysql" "$MYSQL_PW"
+  else
+    docker exec ac-mysql mysqldump -uroot -p"$MYSQL_PW" "$schema" > "$out"
+  fi
 }
 
 restore(){
@@ -302,7 +317,22 @@ db_selected(){
 }
 
 count_rows(){
-  docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$1"
+  # Use mysql-utils.sh function if available, fallback to direct command
+  if command -v docker_mysql_query >/dev/null 2>&1; then
+    # Extract database name from query for mysql-utils function
+    local query="$1"
+    local db_name
+    # Simple extraction - assumes "FROM database.table" or "database.table" pattern
+    if [[ "$query" =~ FROM[[:space:]]+([^.[:space:]]+)\. ]]; then
+      db_name="${BASH_REMATCH[1]}"
+      docker_mysql_query "$db_name" "$query" "ac-mysql" "$MYSQL_PW"
+    else
+      # Fallback to original method if can't parse database
+      docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$query"
+    fi
+  else
+    docker exec ac-mysql mysql -uroot -p"$MYSQL_PW" -N -B -e "$1"
+  fi
 }
 
 case "${1:-}" in
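
The schema-name extraction that `count_rows` now relies on is a plain bash regex match; a standalone sketch with a hypothetical query:

```bash
#!/bin/bash
# Pulls the schema out of a fully qualified "FROM db.table" reference.
query="SELECT COUNT(*) FROM acore_characters.characters"
if [[ "$query" =~ FROM[[:space:]]+([^.[:space:]]+)\. ]]; then
  echo "schema: ${BASH_REMATCH[1]}"   # -> schema: acore_characters
else
  echo "no qualified table reference found"
fi
```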

@@ -6,18 +6,14 @@ set -euo pipefail
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 cd "$SCRIPT_DIR"
 
-COLOR_RED='\033[0;31m'
-COLOR_GREEN='\033[0;32m'
-COLOR_YELLOW='\033[1;33m'
-COLOR_BLUE='\033[0;34m'
-COLOR_CYAN='\033[0;36m'
-COLOR_RESET='\033[0m'
-
-log(){ printf '%b\n' "${COLOR_GREEN}$*${COLOR_RESET}"; }
-info(){ printf '%b\n' "${COLOR_CYAN}$*${COLOR_RESET}"; }
-warn(){ printf '%b\n' "${COLOR_YELLOW}$*${COLOR_RESET}"; }
-err(){ printf '%b\n' "${COLOR_RED}$*${COLOR_RESET}"; }
-fatal(){ err "$*"; exit 1; }
+# Source common library for standardized logging
+if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
+  exit 1
+fi
+
+# Use log() instead of info() for main output to maintain existing behavior
+log() { ok "$*"; }
 
 MYSQL_PW=""
 BACKUP_DIR=""

@@ -4,9 +4,31 @@
 # automatically rerun db-import-conditional to hydrate from backups.
 set -euo pipefail
 
-log(){ echo "🛡️ [db-guard] $*"; }
-warn(){ echo "⚠️ [db-guard] $*" >&2; }
-err(){ echo "❌ [db-guard] $*" >&2; }
+# Source common library if available (container environment)
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if [ -f "$SCRIPT_DIR/../scripts/bash/lib/common.sh" ]; then
+  # Running from project root
+  source "$SCRIPT_DIR/../scripts/bash/lib/common.sh"
+  db_guard_log() { info "🛡️ [db-guard] $*"; }
+  db_guard_warn() { warn "[db-guard] $*"; }
+  db_guard_err() { err "[db-guard] $*"; }
+elif [ -f "$SCRIPT_DIR/lib/common.sh" ]; then
+  # Running from scripts/bash directory
+  source "$SCRIPT_DIR/lib/common.sh"
+  db_guard_log() { info "🛡️ [db-guard] $*"; }
+  db_guard_warn() { warn "[db-guard] $*"; }
+  db_guard_err() { err "[db-guard] $*"; }
+else
+  # Fallback for container environment where lib/common.sh may not be available
+  db_guard_log(){ echo "🛡️ [db-guard] $*"; }
+  db_guard_warn(){ echo "⚠️ [db-guard] $*" >&2; }
+  db_guard_err(){ echo "❌ [db-guard] $*" >&2; }
+fi
+
+# Maintain compatibility with existing function names
+log() { db_guard_log "$*"; }
+warn() { db_guard_warn "$*"; }
+err() { db_guard_err "$*"; }
 
 MYSQL_HOST="${CONTAINER_MYSQL:-ac-mysql}"
 MYSQL_PORT="${MYSQL_PORT:-3306}"

@@ -6,6 +6,13 @@ set -euo pipefail
 
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)"
 ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
+
+# Source common library for standardized logging
+if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
+  exit 1
+fi
+
 DEFAULT_COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
 ENV_FILE="$ROOT_DIR/.env"
 TEMPLATE_FILE="$ROOT_DIR/.env.template"
@@ -16,17 +23,6 @@ DEFAULT_PROJECT_NAME="$(project_name::resolve "$ENV_FILE" "$TEMPLATE_FILE")"
 source "$ROOT_DIR/scripts/bash/compose_overrides.sh"
 declare -a COMPOSE_FILE_ARGS=()
 
-BLUE='\033[0;34m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-RED='\033[0;31m'
-NC='\033[0m'
-
-info(){ echo -e "${BLUE}ℹ️ $*${NC}"; }
-ok(){ echo -e "${GREEN}✅ $*${NC}"; }
-warn(){ echo -e "${YELLOW}⚠️ $*${NC}"; }
-err(){ echo -e "${RED}❌ $*${NC}"; }
-
 read_env(){
   local key="$1" default="${2:-}" value=""
   if [ -f "$ENV_FILE" ]; then

@@ -50,9 +50,9 @@ log() {
   printf '%b\n' "${GREEN}$*${NC}"
 }
 
-# Log warning messages (yellow with warning icon)
+# Log warning messages (yellow with warning icon, to stderr for compatibility)
 warn() {
-  printf '%b\n' "${YELLOW}⚠️ $*${NC}"
+  printf '%b\n' "${YELLOW}⚠️ $*${NC}" >&2
 }
 
 # Log error messages (red with error icon, continues execution)
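
Sending `warn()` to stderr matters for callers that capture stdout; a minimal sketch (the caller functions here are illustrative, not from the repo):

```bash
#!/bin/bash
YELLOW='\033[1;33m'; NC='\033[0m'
warn() { printf '%b\n' "${YELLOW}⚠️ $*${NC}" >&2; }   # stderr, as patched above

get_value() {
  warn "falling back to default"   # not captured by $(...)
  echo "42"                        # the only stdout output
}

result=$(get_value)
echo "result=$result"              # -> result=42, warning shown separately
```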

scripts/bash/lib/docker-utils.sh (new file, 530 lines)
@@ -0,0 +1,530 @@
#!/bin/bash
#
# Docker utility library for AzerothCore RealmMaster scripts
# This library provides standardized Docker operations, container management,
# and deployment functions.
#
# Usage: source /path/to/scripts/bash/lib/docker-utils.sh
#

# Prevent multiple sourcing
if [ -n "${_DOCKER_UTILS_LIB_LOADED:-}" ]; then
  return 0
fi
_DOCKER_UTILS_LIB_LOADED=1

# Source common library for logging functions
DOCKER_UTILS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$DOCKER_UTILS_DIR/common.sh" ]; then
  source "$DOCKER_UTILS_DIR/common.sh"
elif command -v info >/dev/null 2>&1; then
  # Common functions already available
  :
else
  # Fallback logging functions
  info() { printf '\033[0;34mℹ️ %s\033[0m\n' "$*"; }
  warn() { printf '\033[1;33m⚠️ %s\033[0m\n' "$*" >&2; }
  err() { printf '\033[0;31m❌ %s\033[0m\n' "$*" >&2; }
  fatal() { err "$*"; exit 1; }
fi

# =============================================================================
# DOCKER CONTAINER MANAGEMENT
# =============================================================================

# Get container status
# Returns: running, exited, paused, restarting, removing, dead, created, or "not_found"
#
# Usage:
#   status=$(docker_get_container_status "ac-mysql")
#   if [ "$status" = "running" ]; then
#     echo "Container is running"
#   fi
#
docker_get_container_status() {
  local container_name="$1"

  if ! docker ps -a --format "table {{.Names}}\t{{.Status}}" | grep -q "^$container_name"; then
    echo "not_found"
    return 1
  fi

  docker inspect --format='{{.State.Status}}' "$container_name" 2>/dev/null || echo "not_found"
}

# Check if container is running
# Returns 0 if running, 1 if not running or not found
#
# Usage:
#   if docker_is_container_running "ac-mysql"; then
#     echo "MySQL container is running"
#   fi
#
docker_is_container_running() {
  local container_name="$1"
  local status

  status=$(docker_get_container_status "$container_name")
  [ "$status" = "running" ]
}

# Wait for container to reach desired state
# Returns 0 if container reaches state within timeout, 1 if timeout
#
# Usage:
#   docker_wait_for_container_state "ac-mysql" "running" 30
#   docker_wait_for_container_state "ac-mysql" "exited" 10
#
docker_wait_for_container_state() {
  local container_name="$1"
  local desired_state="$2"
  local timeout="${3:-30}"
  local check_interval="${4:-2}"
  local elapsed=0

  info "Waiting for container '$container_name' to reach state '$desired_state' (timeout: ${timeout}s)"

  while [ $elapsed -lt $timeout ]; do
    local current_state
    current_state=$(docker_get_container_status "$container_name")

    if [ "$current_state" = "$desired_state" ]; then
      info "Container '$container_name' reached desired state: $desired_state"
      return 0
    fi

    sleep "$check_interval"
    elapsed=$((elapsed + check_interval))
  done

  err "Container '$container_name' did not reach state '$desired_state' within ${timeout}s (current: $current_state)"
  return 1
}

# Execute command in container with retry logic
# Handles container availability and connection issues
#
# Usage:
#   docker_exec_with_retry "ac-mysql" "mysql -uroot -ppassword -e 'SELECT 1'"
#   echo "SELECT 1" | docker_exec_with_retry "ac-mysql" "mysql -uroot -ppassword"
#
docker_exec_with_retry() {
  local container_name="$1"
  local command="$2"
  local max_attempts="${3:-3}"
  local retry_delay="${4:-2}"
  local interactive="${5:-false}"

  if ! docker_is_container_running "$container_name"; then
    err "Container '$container_name' is not running"
    return 1
  fi

  local attempt=1
  while [ $attempt -le $max_attempts ]; do
    if [ "$interactive" = "true" ]; then
      if docker exec -i "$container_name" sh -c "$command"; then
        return 0
      fi
    else
      if docker exec "$container_name" sh -c "$command"; then
        return 0
      fi
    fi

    if [ $attempt -lt $max_attempts ]; then
      warn "Docker exec failed in '$container_name' (attempt $attempt/$max_attempts), retrying in ${retry_delay}s..."
      sleep "$retry_delay"
    fi

    attempt=$((attempt + 1))
  done

  err "Docker exec failed in '$container_name' after $max_attempts attempts"
  return 1
}

# =============================================================================
# DOCKER COMPOSE PROJECT MANAGEMENT
# =============================================================================

# Get project name from environment or docker-compose.yml
# Returns the Docker Compose project name
#
# Usage:
#   project_name=$(docker_get_project_name)
#   echo "Project: $project_name"
#
docker_get_project_name() {
  # Check environment variable first
  if [ -n "${COMPOSE_PROJECT_NAME:-}" ]; then
    echo "$COMPOSE_PROJECT_NAME"
    return 0
  fi

  # Check for docker-compose.yml name directive
  if [ -f "docker-compose.yml" ] && command -v python3 >/dev/null 2>&1; then
    local project_name
    project_name=$(python3 -c "
import yaml
try:
    with open('docker-compose.yml', 'r') as f:
        data = yaml.safe_load(f)
    print(data.get('name', ''))
except:
    print('')
" 2>/dev/null)

    if [ -n "$project_name" ]; then
      echo "$project_name"
      return 0
    fi
  fi

  # Fallback to directory name
  basename "$PWD" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]//g'
}

# List containers for current project
# Returns list of container names with optional filtering
#
# Usage:
#   containers=$(docker_list_project_containers)
#   running_containers=$(docker_list_project_containers "running")
#
docker_list_project_containers() {
  local status_filter="${1:-}"
  local project_name
  project_name=$(docker_get_project_name)

  local filter_arg=""
  if [ -n "$status_filter" ]; then
    filter_arg="--filter status=$status_filter"
  fi

  # Use project label to find containers
  docker ps -a $filter_arg --filter "label=com.docker.compose.project=$project_name" --format "{{.Names}}" 2>/dev/null
}

# Stop project containers gracefully
# Stops containers with configurable timeout
#
# Usage:
#   docker_stop_project_containers 30   # Stop with 30s timeout
#   docker_stop_project_containers      # Use default 10s timeout
#
docker_stop_project_containers() {
  local timeout="${1:-10}"
  local containers

  containers=$(docker_list_project_containers "running")
  if [ -z "$containers" ]; then
    info "No running containers found for project"
    return 0
  fi

  info "Stopping project containers with ${timeout}s timeout: $containers"
  echo "$containers" | xargs -r docker stop -t "$timeout"
}

# Start project containers
# Starts containers that are stopped but exist
#
# Usage:
#   docker_start_project_containers
#
docker_start_project_containers() {
  local containers

  containers=$(docker_list_project_containers "exited")
  if [ -z "$containers" ]; then
    info "No stopped containers found for project"
    return 0
  fi

  info "Starting project containers: $containers"
  echo "$containers" | xargs -r docker start
}

# =============================================================================
# DOCKER IMAGE MANAGEMENT
# =============================================================================

# Get image information for container
# Returns image name:tag for specified container
#
# Usage:
#   image=$(docker_get_container_image "ac-mysql")
#   echo "MySQL container using image: $image"
#
docker_get_container_image() {
  local container_name="$1"

  if ! docker_is_container_running "$container_name"; then
    # Try to get from stopped container
    docker inspect --format='{{.Config.Image}}' "$container_name" 2>/dev/null || echo "unknown"
  else
    docker inspect --format='{{.Config.Image}}' "$container_name" 2>/dev/null || echo "unknown"
  fi
}

# Check if image exists locally
# Returns 0 if image exists, 1 if not found
#
# Usage:
#   if docker_image_exists "mysql:8.0"; then
#     echo "MySQL image is available"
#   fi
#
docker_image_exists() {
  local image_name="$1"

  docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "^${image_name}$"
}

# Pull image with retry logic
# Handles temporary network issues and registry problems
#
# Usage:
#   docker_pull_image_with_retry "mysql:8.0"
#   docker_pull_image_with_retry "azerothcore/ac-wotlk-worldserver:latest" 5 10
#
docker_pull_image_with_retry() {
  local image_name="$1"
  local max_attempts="${2:-3}"
  local retry_delay="${3:-5}"

  if docker_image_exists "$image_name"; then
    info "Image '$image_name' already exists locally"
    return 0
  fi

  local attempt=1
  while [ $attempt -le $max_attempts ]; do
    info "Pulling image '$image_name' (attempt $attempt/$max_attempts)"

    if docker pull "$image_name"; then
      info "Successfully pulled image '$image_name'"
      return 0
    fi

    if [ $attempt -lt $max_attempts ]; then
      warn "Failed to pull image '$image_name', retrying in ${retry_delay}s..."
      sleep "$retry_delay"
    fi

    attempt=$((attempt + 1))
  done

  err "Failed to pull image '$image_name' after $max_attempts attempts"
  return 1
}

# =============================================================================
# DOCKER COMPOSE OPERATIONS
# =============================================================================

# Validate docker-compose.yml configuration
# Returns 0 if valid, 1 if invalid or errors found
#
# Usage:
#   if docker_compose_validate; then
#     echo "Docker Compose configuration is valid"
#   fi
#
docker_compose_validate() {
  local compose_file="${1:-docker-compose.yml}"

  if [ ! -f "$compose_file" ]; then
    err "Docker Compose file not found: $compose_file"
    return 1
  fi

  if docker compose -f "$compose_file" config --quiet; then
    info "Docker Compose configuration is valid"
    return 0
  else
    err "Docker Compose configuration validation failed"
    return 1
  fi
}

# Get service status from docker-compose
# Returns service status or "not_found" if service doesn't exist
#
# Usage:
#   status=$(docker_compose_get_service_status "ac-mysql")
#
docker_compose_get_service_status() {
  local service_name="$1"
  local project_name
  project_name=$(docker_get_project_name)

  # Get container name for the service
  local container_name="${project_name}-${service_name}-1"

  docker_get_container_status "$container_name"
}

# Deploy with profile and options
# Wrapper around docker compose up with standardized options
#
# Usage:
#   docker_compose_deploy "services-standard" "--detach"
#   docker_compose_deploy "services-modules" "--no-deps ac-worldserver"
#
docker_compose_deploy() {
  local profile="${1:-services-standard}"
  local additional_options="${2:-}"

  if ! docker_compose_validate; then
    err "Cannot deploy: Docker Compose configuration is invalid"
    return 1
  fi

  info "Deploying with profile: $profile"

  # Use exec to replace current shell for proper signal handling
  if [ -n "$additional_options" ]; then
    docker compose --profile "$profile" up $additional_options
  else
    docker compose --profile "$profile" up --detach
  fi
}

# =============================================================================
# DOCKER SYSTEM UTILITIES
# =============================================================================

# Check Docker daemon availability
# Returns 0 if Docker is available, 1 if not
#
# Usage:
#   if docker_check_daemon; then
#     echo "Docker daemon is available"
#   fi
#
docker_check_daemon() {
  if docker info >/dev/null 2>&1; then
    return 0
  else
    err "Docker daemon is not available or accessible"
    return 1
  fi
}

# Get Docker system information
# Returns formatted system info for debugging
#
# Usage:
#   docker_print_system_info
#
docker_print_system_info() {
  info "Docker System Information:"

  if ! docker_check_daemon; then
    err "Cannot retrieve Docker system information - daemon not available"
    return 1
  fi

  local docker_version compose_version
  docker_version=$(docker --version 2>/dev/null | cut -d' ' -f3 | tr -d ',' || echo "unknown")
  compose_version=$(docker compose version --short 2>/dev/null || echo "unknown")

  info "  Docker Version: $docker_version"
  info "  Compose Version: $compose_version"
  info "  Project Name: $(docker_get_project_name)"

  local running_containers
  running_containers=$(docker_list_project_containers "running" | wc -l)
  info "  Running Containers: $running_containers"
}

# Cleanup unused Docker resources
# Removes stopped containers, unused networks, and dangling images
#
# Usage:
#   docker_cleanup_system true    # Include unused volumes
#   docker_cleanup_system false   # Preserve volumes (default)
#
docker_cleanup_system() {
  local include_volumes="${1:-false}"

  info "Cleaning up Docker system resources..."

  # Remove stopped containers
  local stopped_containers
  stopped_containers=$(docker ps -aq --filter "status=exited")
  if [ -n "$stopped_containers" ]; then
    info "Removing stopped containers"
    echo "$stopped_containers" | xargs docker rm
  fi

  # Remove unused networks
  info "Removing unused networks"
  docker network prune -f

  # Remove dangling images
  info "Removing dangling images"
  docker image prune -f

  # Remove unused volumes if requested
  if [ "$include_volumes" = "true" ]; then
    warn "Removing unused volumes (this may delete data!)"
    docker volume prune -f
  fi

  info "Docker system cleanup completed"
}

# =============================================================================
# CONTAINER HEALTH AND MONITORING
# =============================================================================

# Get container resource usage
# Returns CPU and memory usage statistics
#
# Usage:
#   docker_get_container_stats "ac-mysql"
#
docker_get_container_stats() {
  local container_name="$1"

  if ! docker_is_container_running "$container_name"; then
    err "Container '$container_name' is not running"
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}" "$container_name"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check container logs for errors
|
||||||
|
# Searches recent logs for error patterns
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# docker_check_container_errors "ac-mysql" 100
|
||||||
|
#
|
||||||
|
docker_check_container_errors() {
|
||||||
|
local container_name="$1"
|
||||||
|
local lines="${2:-50}"
|
||||||
|
|
||||||
|
if ! docker ps -a --format "{{.Names}}" | grep -q "^${container_name}$"; then
|
||||||
|
err "Container '$container_name' not found"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
info "Checking last $lines log lines for errors in '$container_name'"
|
||||||
|
|
||||||
|
# Look for common error patterns
|
||||||
|
docker logs --tail "$lines" "$container_name" 2>&1 | grep -i "error\|exception\|fail\|fatal" || {
|
||||||
|
info "No obvious errors found in recent logs"
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# INITIALIZATION
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Library loaded successfully
|
||||||
|
# Scripts can check for $_DOCKER_UTILS_LIB_LOADED to verify library is loaded
|
||||||
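The retry loop above and the compose wrappers are designed to be combined by deployment scripts. A minimal caller sketch follows; it is not part of the diff, the pull helper's name is truncated out of this excerpt (docker_pull_image_with_retry and its argument order are assumptions), and the library path is illustrative:

    #!/bin/bash
    source scripts/bash/lib/docker-utils.sh

    docker_check_daemon || exit 1
    docker_print_system_info

    # Assumed signature: image name, max attempts, retry delay in seconds.
    if docker_pull_image_with_retry "mysql:8.0" 5 10; then
        docker_compose_deploy "services-standard" "--detach"
    fi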
613 scripts/bash/lib/env-utils.sh (Normal file)
@@ -0,0 +1,613 @@
#!/bin/bash
#
# Environment and file utility library for AzerothCore RealmMaster scripts
# This library provides enhanced environment variable handling, file operations,
# and path management functions.
#
# Usage: source /path/to/scripts/bash/lib/env-utils.sh
#

# Prevent multiple sourcing
if [ -n "${_ENV_UTILS_LIB_LOADED:-}" ]; then
  return 0
fi
_ENV_UTILS_LIB_LOADED=1

# Source common library for logging functions
ENV_UTILS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$ENV_UTILS_DIR/common.sh" ]; then
  source "$ENV_UTILS_DIR/common.sh"
elif command -v info >/dev/null 2>&1; then
  # Common functions already available
  :
else
  # Fallback logging functions
  info() { printf '\033[0;34mℹ️  %s\033[0m\n' "$*"; }
  warn() { printf '\033[1;33m⚠️  %s\033[0m\n' "$*" >&2; }
  err() { printf '\033[0;31m❌ %s\033[0m\n' "$*" >&2; }
  fatal() { err "$*"; exit 1; }
fi

# =============================================================================
# ENVIRONMENT VARIABLE MANAGEMENT
# =============================================================================

# Enhanced read_env function with advanced features
# Supports multiple .env files, environment variable precedence, and validation
#
# Usage:
#   value=$(env_read_with_fallback "MYSQL_PASSWORD" "default_password")
#   value=$(env_read_with_fallback "PORT" "" ".env.local" "validate_port")
#
env_read_with_fallback() {
  local key="$1"
  local default="${2:-}"
  local env_file="${3:-${ENV_PATH:-${DEFAULT_ENV_PATH:-.env}}}"
  local validator_func="${4:-}"
  local value=""

  # 1. Check if variable is already set in environment (highest precedence)
  if [ -n "${!key:-}" ]; then
    value="${!key}"
  else
    # 2. Read from .env file if it exists
    if [ -f "$env_file" ]; then
      # Extract value using grep and cut, handling various formats
      value="$(grep -E "^${key}=" "$env_file" 2>/dev/null | tail -n1 | cut -d'=' -f2- | tr -d '\r')"

      # Remove inline comments (everything after # that's not inside quotes)
      value="$(echo "$value" | sed 's/[[:space:]]*#.*//' | sed 's/[[:space:]]*$//')"

      # Strip quotes if present
      if [[ "$value" == \"*\" && "$value" == *\" ]]; then
        # Double quotes
        value="${value:1:-1}"
      elif [[ "$value" == \'*\' && "$value" == *\' ]]; then
        # Single quotes
        value="${value:1:-1}"
      fi
    fi

    # 3. Use default if still empty
    if [ -z "${value:-}" ]; then
      value="$default"
    fi
  fi

  # 4. Validate if validator function provided
  if [ -n "$validator_func" ] && command -v "$validator_func" >/dev/null 2>&1; then
    if ! "$validator_func" "$value"; then
      err "Validation failed for $key: $value"
      return 1
    fi
  fi

  printf '%s\n' "${value}"
}

# Read environment variable with type conversion
# Supports string, int, bool, and path types
#
# Usage:
#   port=$(env_read_typed "MYSQL_PORT" "int" "3306")
#   debug=$(env_read_typed "DEBUG" "bool" "false")
#   path=$(env_read_typed "DATA_PATH" "path" "/data")
#
env_read_typed() {
  local key="$1"
  local type="$2"
  local default="${3:-}"
  local value

  value=$(env_read_with_fallback "$key" "$default")

  case "$type" in
    int|integer)
      if ! [[ "$value" =~ ^[0-9]+$ ]]; then
        err "Environment variable $key must be an integer: $value"
        return 1
      fi
      echo "$value"
      ;;
    bool|boolean)
      case "${value,,}" in
        true|yes|1|on|enabled) echo "true" ;;
        false|no|0|off|disabled) echo "false" ;;
        *) err "Environment variable $key must be boolean: $value"; return 1 ;;
      esac
      ;;
    path)
      # Expand relative paths to absolute
      if [ -n "$value" ]; then
        path_resolve_absolute "$value"
      fi
      ;;
    string|*)
      echo "$value"
      ;;
  esac
}

# Update or add environment variable in .env file with backup
# Creates backup and maintains file integrity
#
# Usage:
#   env_update_value "MYSQL_PASSWORD" "new_password"
#   env_update_value "DEBUG" "true" ".env.local"
#   env_update_value "PORT" "8080" ".env" "true"  # create backup
#
env_update_value() {
  local key="$1"
  local value="$2"
  local env_file="${3:-${ENV_PATH:-${DEFAULT_ENV_PATH:-.env}}}"
  local create_backup="${4:-false}"

  [ -n "$env_file" ] || return 0

  # Create backup if requested
  if [ "$create_backup" = "true" ] && [ -f "$env_file" ]; then
    file_create_backup "$env_file"
  fi

  # Create file if it doesn't exist
  if [ ! -f "$env_file" ]; then
    file_ensure_writable_dir "$(dirname "$env_file")"
    printf '%s=%s\n' "$key" "$value" >> "$env_file"
    return 0
  fi

  # Update existing or append new
  if grep -q "^${key}=" "$env_file"; then
    # Use platform-appropriate sed in-place editing
    local sed_opts=""
    if [[ "$OSTYPE" == "darwin"* ]]; then
      sed_opts="-i ''"
    else
      sed_opts="-i"
    fi

    # Use a temporary file for safer editing
    local temp_file="${env_file}.tmp.$$"
    sed "s|^${key}=.*|${key}=${value}|" "$env_file" > "$temp_file" && mv "$temp_file" "$env_file"
  else
    printf '\n%s=%s\n' "$key" "$value" >> "$env_file"
  fi

  info "Updated $key in $env_file"
}

# Load multiple environment files with precedence
# Later files override earlier ones
#
# Usage:
#   env_load_multiple ".env" ".env.local" ".env.production"
#
env_load_multiple() {
  local files=("$@")
  local loaded_count=0

  for env_file in "${files[@]}"; do
    if [ -f "$env_file" ]; then
      info "Loading environment from: $env_file"
      set -a
      # shellcheck disable=SC1090
      source "$env_file"
      set +a
      loaded_count=$((loaded_count + 1))
    fi
  done

  if [ $loaded_count -eq 0 ]; then
    warn "No environment files found: ${files[*]}"
    return 1
  fi

  info "Loaded $loaded_count environment file(s)"
  return 0
}

# =============================================================================
# PATH AND FILE UTILITIES
# =============================================================================

# Resolve path to absolute form with proper error handling
# Handles both existing and non-existing paths
#
# Usage:
#   abs_path=$(path_resolve_absolute "./relative/path")
#   abs_path=$(path_resolve_absolute "/already/absolute")
#
path_resolve_absolute() {
  local path="$1"
  local base_dir="${2:-$PWD}"

  if command -v python3 >/dev/null 2>&1; then
    python3 - "$base_dir" "$path" <<'PY'
import os, sys
base, path = sys.argv[1:3]
if not path:
    print(os.path.abspath(base))
elif os.path.isabs(path):
    print(os.path.normpath(path))
else:
    print(os.path.normpath(os.path.join(base, path)))
PY
  elif command -v realpath >/dev/null 2>&1; then
    if [ "${path:0:1}" = "/" ]; then
      echo "$path"
    else
      realpath -m "$base_dir/$path"
    fi
  else
    # Fallback manual resolution
    if [ "${path:0:1}" = "/" ]; then
      echo "$path"
    else
      echo "$base_dir/$path"
    fi
  fi
}

# Ensure directory exists and is writable with proper permissions
# Creates parent directories if needed
#
# Usage:
#   file_ensure_writable_dir "/path/to/directory"
#   file_ensure_writable_dir "/path/to/directory" "0755"
#
file_ensure_writable_dir() {
  local dir="$1"
  local permissions="${2:-0755}"

  if [ ! -d "$dir" ]; then
    if mkdir -p "$dir" 2>/dev/null; then
      info "Created directory: $dir"
      chmod "$permissions" "$dir" 2>/dev/null || warn "Could not set permissions on $dir"
    else
      err "Failed to create directory: $dir"
      return 1
    fi
  fi

  if [ ! -w "$dir" ]; then
    if chmod u+w "$dir" 2>/dev/null; then
      info "Made directory writable: $dir"
    else
      err "Directory not writable and cannot fix permissions: $dir"
      return 1
    fi
  fi

  return 0
}

# Create timestamped backup of file
# Supports custom backup directory and compression
#
# Usage:
#   file_create_backup "/path/to/important.conf"
#   file_create_backup "/path/to/file" "/backup/dir" "gzip"
#
file_create_backup() {
  local file="$1"
  local backup_dir="${2:-$(dirname "$file")}"
  local compression="${3:-none}"

  if [ ! -f "$file" ]; then
    warn "File does not exist, skipping backup: $file"
    return 0
  fi

  file_ensure_writable_dir "$backup_dir"

  local filename basename backup_file
  filename=$(basename "$file")
  basename="${filename%.*}"
  local extension="${filename##*.}"

  # Create backup filename with timestamp
  if [ "$filename" = "$basename" ]; then
    # No extension
    backup_file="${backup_dir}/${filename}.backup.$(date +%Y%m%d_%H%M%S)"
  else
    # Has extension
    backup_file="${backup_dir}/${basename}.backup.$(date +%Y%m%d_%H%M%S).${extension}"
  fi

  case "$compression" in
    gzip|gz)
      if gzip -c "$file" > "${backup_file}.gz"; then
        info "Created compressed backup: ${backup_file}.gz"
      else
        err "Failed to create compressed backup: ${backup_file}.gz"
        return 1
      fi
      ;;
    none|*)
      if cp "$file" "$backup_file"; then
        info "Created backup: $backup_file"
      else
        err "Failed to create backup: $backup_file"
        return 1
      fi
      ;;
  esac

  return 0
}

# Set file permissions safely with validation
# Handles both numeric and symbolic modes
#
# Usage:
#   file_set_permissions "/path/to/file" "0644"
#   file_set_permissions "/path/to/script" "u+x"
#
file_set_permissions() {
  local file="$1"
  local permissions="$2"
  local recursive="${3:-false}"

  if [ ! -e "$file" ]; then
    err "File or directory does not exist: $file"
    return 1
  fi

  local chmod_opts=""
  if [ "$recursive" = "true" ] && [ -d "$file" ]; then
    chmod_opts="-R"
  fi

  if chmod $chmod_opts "$permissions" "$file" 2>/dev/null; then
    info "Set permissions $permissions on $file"
    return 0
  else
    err "Failed to set permissions $permissions on $file"
    return 1
  fi
}

# =============================================================================
# CONFIGURATION FILE UTILITIES
# =============================================================================

# Read value from template file with variable expansion support
# Enhanced version supporting more template formats
#
# Usage:
#   value=$(config_read_template_value "MYSQL_PASSWORD" ".env.template")
#   value=$(config_read_template_value "PORT" "config.template.yml" "yaml")
#
config_read_template_value() {
  local key="$1"
  local template_file="${2:-${TEMPLATE_FILE:-${TEMPLATE_PATH:-.env.template}}}"
  local format="${3:-env}"

  if [ ! -f "$template_file" ]; then
    err "Template file not found: $template_file"
    return 1
  fi

  case "$format" in
    env)
      local raw_line value
      raw_line=$(grep "^${key}=" "$template_file" 2>/dev/null | head -1)

      if [ -z "$raw_line" ]; then
        err "Key '$key' not found in template: $template_file"
        return 1
      fi

      value="${raw_line#*=}"
      value=$(echo "$value" | sed 's/^"\(.*\)"$/\1/')

      # Handle ${VAR:-default} syntax by extracting the default value
      if [[ "$value" =~ ^\$\{[^}]*:-([^}]*)\}$ ]]; then
        value="${BASH_REMATCH[1]}"
      fi

      echo "$value"
      ;;
    yaml|yml)
      if command -v python3 >/dev/null 2>&1; then
        python3 -c "
import yaml, sys
try:
    with open('$template_file', 'r') as f:
        data = yaml.safe_load(f)
    # Simple key lookup - can be enhanced for nested keys
    print(data.get('$key', ''))
except:
    sys.exit(1)
" 2>/dev/null
      else
        err "python3 required for YAML template parsing"
        return 1
      fi
      ;;
    *)
      err "Unsupported template format: $format"
      return 1
      ;;
  esac
}

# Validate configuration against schema
# Supports basic validation rules
#
# Usage:
#   config_validate_env ".env" "required:MYSQL_PASSWORD,PORT;optional:DEBUG"
#
config_validate_env() {
  local env_file="$1"
  local rules="${2:-}"

  if [ ! -f "$env_file" ]; then
    err "Environment file not found: $env_file"
    return 1
  fi

  if [ -z "$rules" ]; then
    info "No validation rules specified"
    return 0
  fi

  local validation_failed=false

  # Parse validation rules
  IFS=';' read -ra rule_sets <<< "$rules"
  for rule_set in "${rule_sets[@]}"; do
    IFS=':' read -ra rule_parts <<< "$rule_set"
    local rule_type="${rule_parts[0]}"
    local variables="${rule_parts[1]}"

    case "$rule_type" in
      required)
        IFS=',' read -ra req_vars <<< "$variables"
        for var in "${req_vars[@]}"; do
          if ! grep -q "^${var}=" "$env_file" || [ -z "$(env_read_with_fallback "$var" "" "$env_file")" ]; then
            err "Required environment variable missing or empty: $var"
            validation_failed=true
          fi
        done
        ;;
      optional)
        # Optional variables - just log if missing
        IFS=',' read -ra opt_vars <<< "$variables"
        for var in "${opt_vars[@]}"; do
          if ! grep -q "^${var}=" "$env_file"; then
            info "Optional environment variable not set: $var"
          fi
        done
        ;;
    esac
  done

  if [ "$validation_failed" = "true" ]; then
    err "Environment validation failed"
    return 1
  fi

  info "Environment validation passed"
  return 0
}

# =============================================================================
# SYSTEM UTILITIES
# =============================================================================

# Detect operating system and distribution
# Returns standardized OS identifier
#
# Usage:
#   os=$(system_detect_os)
#   if [ "$os" = "ubuntu" ]; then
#     echo "Running on Ubuntu"
#   fi
#
system_detect_os() {
  local os="unknown"

  if [ -f /etc/os-release ]; then
    # Source os-release for distribution info
    local id
    id=$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d '"')
    case "$id" in
      ubuntu|debian|centos|rhel|fedora|alpine|arch)
        os="$id"
        ;;
      *)
        os="linux"
        ;;
    esac
  elif [[ "$OSTYPE" == "darwin"* ]]; then
    os="macos"
  elif [[ "$OSTYPE" == "cygwin" || "$OSTYPE" == "msys" ]]; then
    os="windows"
  fi

  echo "$os"
}

# Check system requirements
# Validates required commands and versions
#
# Usage:
#   system_check_requirements "docker:20.0,python3:3.6"
#
system_check_requirements() {
  local requirements="${1:-}"

  if [ -z "$requirements" ]; then
    return 0
  fi

  local check_failed=false

  IFS=',' read -ra req_list <<< "$requirements"
  for requirement in "${req_list[@]}"; do
    IFS=':' read -ra req_parts <<< "$requirement"
    local command="${req_parts[0]}"
    local min_version="${req_parts[1]:-}"

    if ! command -v "$command" >/dev/null 2>&1; then
      err "Required command not found: $command"
      check_failed=true
      continue
    fi

    if [ -n "$min_version" ]; then
      # Basic version checking - can be enhanced
      info "Found $command (version checking not fully implemented)"
    else
      info "Found required command: $command"
    fi
  done

  if [ "$check_failed" = "true" ]; then
    err "System requirements check failed"
    return 1
  fi

  info "System requirements check passed"
  return 0
}

# =============================================================================
# INITIALIZATION AND VALIDATION
# =============================================================================

# Validate environment utility configuration
# Checks that utilities are working correctly
#
# Usage:
#   env_utils_validate
#
env_utils_validate() {
  info "Validating environment utilities..."

  # Test path resolution
  local test_path
  test_path=$(path_resolve_absolute "." 2>/dev/null)
  if [ -z "$test_path" ]; then
    err "Path resolution utility not working"
    return 1
  fi

  # Test directory operations
  if ! file_ensure_writable_dir "/tmp/env-utils-test.$$"; then
    err "Directory utility not working"
    return 1
  fi
  rmdir "/tmp/env-utils-test.$$" 2>/dev/null || true

  info "Environment utilities validation successful"
  return 0
}

# =============================================================================
# INITIALIZATION
# =============================================================================

# Library loaded successfully
# Scripts can check for $_ENV_UTILS_LIB_LOADED to verify library is loaded
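A hedged usage sketch for the library above; the calling script is illustrative and LAST_VALIDATED is a made-up key for the example, but every function shown is defined in the file:

    #!/bin/bash
    source scripts/bash/lib/env-utils.sh

    # Typed reads fail with a clear error instead of propagating junk.
    port=$(env_read_typed "MYSQL_PORT" "int" "3306") || exit 1
    debug=$(env_read_typed "DEBUG" "bool" "false") || exit 1

    # Validate required keys before doing anything destructive.
    config_validate_env ".env" "required:MYSQL_ROOT_PASSWORD;optional:DEBUG" || exit 1

    # Persist a value, taking a timestamped backup of .env first.
    env_update_value "LAST_VALIDATED" "$(date +%s)" ".env" "true"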
376 scripts/bash/lib/mysql-utils.sh (Normal file)
@@ -0,0 +1,376 @@
#!/bin/bash
#
# MySQL utility library for AzerothCore RealmMaster scripts
# This library provides standardized MySQL operations, connection management,
# and database interaction functions.
#
# Usage: source /path/to/scripts/bash/lib/mysql-utils.sh
#

# Prevent multiple sourcing
if [ -n "${_MYSQL_UTILS_LIB_LOADED:-}" ]; then
  return 0
fi
_MYSQL_UTILS_LIB_LOADED=1

# Source common library for logging functions
MYSQL_UTILS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -f "$MYSQL_UTILS_DIR/common.sh" ]; then
  source "$MYSQL_UTILS_DIR/common.sh"
elif command -v info >/dev/null 2>&1; then
  # Common functions already available
  :
else
  # Fallback logging functions
  info() { printf '\033[0;34mℹ️  %s\033[0m\n' "$*"; }
  warn() { printf '\033[1;33m⚠️  %s\033[0m\n' "$*" >&2; }
  err() { printf '\033[0;31m❌ %s\033[0m\n' "$*" >&2; }
  fatal() { err "$*"; exit 1; }
fi

# =============================================================================
# MYSQL CONNECTION CONFIGURATION
# =============================================================================

# Default MySQL configuration - can be overridden by environment
MYSQL_HOST="${MYSQL_HOST:-${CONTAINER_MYSQL:-ac-mysql}}"
MYSQL_PORT="${MYSQL_PORT:-3306}"
MYSQL_USER="${MYSQL_USER:-root}"
MYSQL_ROOT_PASSWORD="${MYSQL_ROOT_PASSWORD:-${MYSQL_PW:-azerothcore}}"
MYSQL_CONTAINER="${MYSQL_CONTAINER:-ac-mysql}"

# =============================================================================
# MYSQL CONNECTION FUNCTIONS
# =============================================================================

# Test MySQL connection with current configuration
# Returns 0 if connection successful, 1 if failed
#
# Usage:
#   if mysql_test_connection; then
#     echo "MySQL is available"
#   fi
#
mysql_test_connection() {
  local host="${1:-$MYSQL_HOST}"
  local port="${2:-$MYSQL_PORT}"
  local user="${3:-$MYSQL_USER}"
  local password="${4:-$MYSQL_ROOT_PASSWORD}"

  MYSQL_PWD="$password" mysql -h "$host" -P "$port" -u "$user" -e "SELECT 1" >/dev/null 2>&1
}

# Wait for MySQL to be ready with timeout
# Returns 0 if MySQL becomes available within timeout, 1 if timeout reached
#
# Usage:
#   mysql_wait_for_connection 60   # Wait up to 60 seconds
#   mysql_wait_for_connection      # Use default 30 second timeout
#
mysql_wait_for_connection() {
  local timeout="${1:-30}"
  local retry_interval="${2:-2}"
  local elapsed=0

  info "Waiting for MySQL connection (${MYSQL_HOST}:${MYSQL_PORT}) with ${timeout}s timeout..."

  while [ $elapsed -lt $timeout ]; do
    if mysql_test_connection; then
      info "MySQL connection established"
      return 0
    fi
    sleep "$retry_interval"
    elapsed=$((elapsed + retry_interval))
  done

  err "MySQL connection failed after ${timeout}s timeout"
  return 1
}

# Execute MySQL command with retry logic
# Handles both direct queries and piped input
#
# Usage:
#   mysql_exec_with_retry "database_name" "SELECT COUNT(*) FROM table;"
#   echo "SELECT 1;" | mysql_exec_with_retry "database_name"
#   mysql_exec_with_retry "database_name" < script.sql
#
mysql_exec_with_retry() {
  local database="$1"
  local query="${2:-}"
  local max_attempts="${3:-3}"
  local retry_delay="${4:-2}"

  local attempt=1
  while [ $attempt -le $max_attempts ]; do
    if [ -n "$query" ]; then
      # Direct query execution
      if MYSQL_PWD="$MYSQL_ROOT_PASSWORD" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" "$database" -e "$query"; then
        return 0
      fi
    else
      # Input from pipe/stdin
      if MYSQL_PWD="$MYSQL_ROOT_PASSWORD" mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" "$database"; then
        return 0
      fi
    fi

    if [ $attempt -lt $max_attempts ]; then
      warn "MySQL query failed (attempt $attempt/$max_attempts), retrying in ${retry_delay}s..."
      sleep "$retry_delay"
    fi

    attempt=$((attempt + 1))
  done

  err "MySQL query failed after $max_attempts attempts"
  return 1
}

# Execute MySQL query and return result (no table headers)
# Optimized for single values and parsing
#
# Usage:
#   count=$(mysql_query "acore_characters" "SELECT COUNT(*) FROM characters")
#   tables=$(mysql_query "information_schema" "SHOW TABLES")
#
mysql_query() {
  local database="$1"
  local query="$2"
  local host="${3:-$MYSQL_HOST}"
  local port="${4:-$MYSQL_PORT}"
  local user="${5:-$MYSQL_USER}"
  local password="${6:-$MYSQL_ROOT_PASSWORD}"

  MYSQL_PWD="$password" mysql -h "$host" -P "$port" -u "$user" -N -B "$database" -e "$query" 2>/dev/null
}

# =============================================================================
# DOCKER MYSQL FUNCTIONS
# =============================================================================

# Execute MySQL command inside Docker container
# Wrapper around docker exec with standardized MySQL connection
#
# Usage:
#   docker_mysql_exec "acore_auth" "SELECT COUNT(*) FROM account;"
#   echo "SELECT 1;" | docker_mysql_exec "acore_auth"
#
docker_mysql_exec() {
  local database="$1"
  local query="${2:-}"
  local container="${3:-$MYSQL_CONTAINER}"
  local password="${4:-$MYSQL_ROOT_PASSWORD}"

  if [ -n "$query" ]; then
    docker exec "$container" mysql -uroot -p"$password" "$database" -e "$query"
  else
    docker exec -i "$container" mysql -uroot -p"$password" "$database"
  fi
}

# Execute MySQL query in Docker container (no table headers)
# Optimized for single values and parsing
#
# Usage:
#   count=$(docker_mysql_query "acore_characters" "SELECT COUNT(*) FROM characters")
#
docker_mysql_query() {
  local database="$1"
  local query="$2"
  local container="${3:-$MYSQL_CONTAINER}"
  local password="${4:-$MYSQL_ROOT_PASSWORD}"

  docker exec "$container" mysql -uroot -p"$password" -N -B "$database" -e "$query" 2>/dev/null
}

# Check if MySQL container is healthy and accepting connections
#
# Usage:
#   if docker_mysql_is_ready; then
#     echo "MySQL container is ready"
#   fi
#
docker_mysql_is_ready() {
  local container="${1:-$MYSQL_CONTAINER}"
  local password="${2:-$MYSQL_ROOT_PASSWORD}"

  docker exec "$container" mysqladmin ping -uroot -p"$password" >/dev/null 2>&1
}

# =============================================================================
# DATABASE UTILITY FUNCTIONS
# =============================================================================

# Check if database exists
# Returns 0 if database exists, 1 if not found
#
# Usage:
#   if mysql_database_exists "acore_world"; then
#     echo "World database found"
#   fi
#
mysql_database_exists() {
  local database_name="$1"
  local result

  result=$(mysql_query "information_schema" "SELECT COUNT(*) FROM SCHEMATA WHERE SCHEMA_NAME='$database_name'" 2>/dev/null || echo "0")
  [ "$result" -gt 0 ] 2>/dev/null
}

# Get table count for database(s)
# Supports both single database and multiple database patterns
#
# Usage:
#   count=$(mysql_get_table_count "acore_world")
#   count=$(mysql_get_table_count "acore_auth,acore_characters")
#
mysql_get_table_count() {
  local databases="$1"
  local schema_list

  # Convert comma-separated list to SQL IN clause format
  schema_list=$(echo "$databases" | sed "s/,/','/g" | sed "s/^/'/" | sed "s/$/'/")

  mysql_query "information_schema" "SELECT COUNT(*) FROM tables WHERE table_schema IN ($schema_list)"
}

# Get database connection string for applications
# Returns connection string in format: host;port;user;password;database
#
# Usage:
#   conn_str=$(mysql_get_connection_string "acore_auth")
#
mysql_get_connection_string() {
  local database="$1"
  local host="${2:-$MYSQL_HOST}"
  local port="${3:-$MYSQL_PORT}"
  local user="${4:-$MYSQL_USER}"
  local password="${5:-$MYSQL_ROOT_PASSWORD}"

  printf '%s;%s;%s;%s;%s\n' "$host" "$port" "$user" "$password" "$database"
}

# =============================================================================
# BACKUP AND RESTORE UTILITIES
# =============================================================================

# Create database backup using mysqldump
# Supports both compressed and uncompressed output
#
# Usage:
#   mysql_backup_database "acore_characters" "/path/to/backup.sql"
#   mysql_backup_database "acore_world" "/path/to/backup.sql.gz" "gzip"
#
mysql_backup_database() {
  local database="$1"
  local output_file="$2"
  local compression="${3:-none}"
  local container="${4:-$MYSQL_CONTAINER}"
  local password="${5:-$MYSQL_ROOT_PASSWORD}"

  info "Creating backup of $database -> $output_file"

  case "$compression" in
    gzip|gz)
      docker exec "$container" mysqldump -uroot -p"$password" "$database" | gzip > "$output_file"
      ;;
    none|*)
      docker exec "$container" mysqldump -uroot -p"$password" "$database" > "$output_file"
      ;;
  esac
}

# Restore database from backup file
# Handles both compressed and uncompressed files automatically
#
# Usage:
#   mysql_restore_database "acore_characters" "/path/to/backup.sql"
#   mysql_restore_database "acore_world" "/path/to/backup.sql.gz"
#
mysql_restore_database() {
  local database="$1"
  local backup_file="$2"
  local container="${3:-$MYSQL_CONTAINER}"
  local password="${4:-$MYSQL_ROOT_PASSWORD}"

  if [ ! -f "$backup_file" ]; then
    err "Backup file not found: $backup_file"
    return 1
  fi

  info "Restoring $database from $backup_file"

  case "$backup_file" in
    *.gz)
      gzip -dc "$backup_file" | docker exec -i "$container" mysql -uroot -p"$password" "$database"
      ;;
    *.sql)
      docker exec -i "$container" mysql -uroot -p"$password" "$database" < "$backup_file"
      ;;
    *)
      warn "Unknown backup file format, treating as uncompressed SQL"
      docker exec -i "$container" mysql -uroot -p"$password" "$database" < "$backup_file"
      ;;
  esac
}

# =============================================================================
# VALIDATION AND DIAGNOSTICS
# =============================================================================

# Validate MySQL configuration and connectivity
# Comprehensive health check for MySQL setup
#
# Usage:
#   mysql_validate_configuration
#
mysql_validate_configuration() {
  info "Validating MySQL configuration..."

  # Check required environment variables
  if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
    err "MYSQL_ROOT_PASSWORD is not set"
    return 1
  fi

  # Test basic connectivity
  if ! mysql_test_connection; then
    err "Cannot connect to MySQL at ${MYSQL_HOST}:${MYSQL_PORT}"
    return 1
  fi

  # Check Docker container if using container setup
  if docker ps --format "table {{.Names}}" | grep -q "$MYSQL_CONTAINER"; then
    if ! docker_mysql_is_ready; then
      err "MySQL container $MYSQL_CONTAINER is not ready"
      return 1
    fi
    info "MySQL container $MYSQL_CONTAINER is healthy"
  fi

  info "MySQL configuration validation successful"
  return 0
}

# Print MySQL configuration summary
# Useful for debugging and verification
#
# Usage:
#   mysql_print_configuration
#
mysql_print_configuration() {
  info "MySQL Configuration Summary:"
  info "  Host: $MYSQL_HOST"
  info "  Port: $MYSQL_PORT"
  info "  User: $MYSQL_USER"
  info "  Container: $MYSQL_CONTAINER"
  info "  Password: $([ -n "$MYSQL_ROOT_PASSWORD" ] && echo "***SET***" || echo "***NOT SET***")"
}

# =============================================================================
# INITIALIZATION
# =============================================================================

# Library loaded successfully
# Scripts can check for $_MYSQL_UTILS_LIB_LOADED to verify library is loaded
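For orientation, a sketch of a nightly backup loop built from the helpers above; the database names match the stack's defaults, while the /backups path is illustrative:

    #!/bin/bash
    source scripts/bash/lib/mysql-utils.sh

    mysql_validate_configuration || exit 1
    mysql_wait_for_connection 60 || exit 1

    # Back up each core database as compressed SQL, skipping missing ones.
    for db in acore_auth acore_characters acore_world; do
        if mysql_database_exists "$db"; then
            mysql_backup_database "$db" "/backups/${db}.sql.gz" "gzip"
        fi
    done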
@@ -148,8 +148,10 @@ Options:
   --tarball PATH           Output path for the image tar (default: ./local-storage/images/acore-modules-images.tar)
   --storage PATH           Remote storage directory (default: <project-dir>/storage)
   --skip-storage           Do not sync the storage directory
+  --skip-env               Do not upload .env to the remote host
+  --preserve-containers    Skip stopping/removing existing remote containers and images
+  --clean-containers       Stop/remove existing ac-* containers and project images on remote
   --copy-source            Copy the full local project directory instead of syncing via git
-  --cleanup-runtime        Stop/remove existing ac-* containers and project images on remote
   --yes, -y                Auto-confirm prompts (for existing deployments)
   --help                   Show this help
 EOF_HELP
@@ -165,7 +167,9 @@ REMOTE_STORAGE=""
 SKIP_STORAGE=0
 ASSUME_YES=0
 COPY_SOURCE=0
-CLEANUP_RUNTIME=0
+SKIP_ENV=0
+PRESERVE_CONTAINERS=0
+CLEAN_CONTAINERS=0

 while [[ $# -gt 0 ]]; do
   case "$1" in
@@ -178,8 +182,10 @@ while [[ $# -gt 0 ]]; do
     --tarball) TARBALL="$2"; shift 2;;
     --storage) REMOTE_STORAGE="$2"; shift 2;;
     --skip-storage) SKIP_STORAGE=1; shift;;
+    --skip-env) SKIP_ENV=1; shift;;
+    --preserve-containers) PRESERVE_CONTAINERS=1; shift;;
+    --clean-containers) CLEAN_CONTAINERS=1; shift;;
     --copy-source) COPY_SOURCE=1; shift;;
-    --cleanup-runtime) CLEANUP_RUNTIME=1; shift;;
     --yes|-y) ASSUME_YES=1; shift;;
     --help|-h) usage; exit 0;;
     *) echo "Unknown option: $1" >&2; usage; exit 1;;
@@ -192,6 +198,11 @@ if [[ -z "$HOST" || -z "$USER" ]]; then
   exit 1
 fi

+if [[ "$CLEAN_CONTAINERS" -eq 1 && "$PRESERVE_CONTAINERS" -eq 1 ]]; then
+  echo "Cannot combine --clean-containers with --preserve-containers." >&2
+  exit 1
+fi
+
 # Normalize env file path if provided and recompute defaults
 if [ -n "$ENV_FILE" ] && [ -f "$ENV_FILE" ]; then
   ENV_FILE="$(cd "$(dirname "$ENV_FILE")" && pwd)/$(basename "$ENV_FILE")"
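Taken together, the new flags give three mutually exclusive container policies. A sketch of typical invocations; the script name and its required host/user arguments are omitted here because the diff does not show them:

    # Leave remote containers running and keep the remote .env untouched:
    ./<migrate-script> ... --preserve-containers --skip-env

    # Stop/remove remote ac-* containers and project images, no prompts:
    ./<migrate-script> ... --clean-containers --yes

    # Rejected up front by the guard added above:
    ./<migrate-script> ... --clean-containers --preserve-containers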
@@ -302,14 +313,35 @@ validate_remote_environment(){
   local running_containers
   running_containers=$(run_ssh "docker ps --filter 'name=ac-' --format '{{.Names}}' 2>/dev/null | wc -l")
   if [ "$running_containers" -gt 0 ]; then
-    echo "⚠️  Warning: Found $running_containers running AzerothCore containers"
-    echo "   Migration will overwrite existing deployment"
+    if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
+      echo "⚠️  Found $running_containers running AzerothCore containers; --preserve-containers set, leaving them running."
     if [ "$ASSUME_YES" != "1" ]; then
-      read -r -p "   Continue with migration? [y/N]: " reply
+      read -r -p "   Continue without stopping containers? [y/N]: " reply
       case "$reply" in
-        [Yy]*) echo "   Proceeding with migration..." ;;
+        [Yy]*) echo "   Proceeding with migration (containers preserved)..." ;;
         *) echo "   Migration cancelled."; exit 1 ;;
       esac
+      fi
+    elif [ "$CLEAN_CONTAINERS" -eq 1 ]; then
+      echo "⚠️  Found $running_containers running AzerothCore containers"
+      echo "   --clean-containers set: they will be stopped/removed during migration."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue with cleanup? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with cleanup..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
+    else
+      echo "⚠️  Warning: Found $running_containers running AzerothCore containers"
+      echo "   Migration will NOT stop them automatically. Use --clean-containers to stop/remove."
+      if [ "$ASSUME_YES" != "1" ]; then
+        read -r -p "   Continue with migration? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) echo "   Proceeding with migration..." ;;
+          *) echo "   Migration cancelled."; exit 1 ;;
+        esac
+      fi
     fi
   fi

@@ -325,6 +357,25 @@ validate_remote_environment(){
   echo "✅ Remote environment validation complete"
 }

+confirm_remote_storage_overwrite(){
+  if [[ $SKIP_STORAGE -ne 0 ]]; then
+    return
+  fi
+  if [[ "$ASSUME_YES" = "1" ]]; then
+    return
+  fi
+  local has_content
+  has_content=$(run_ssh "if [ -d '$REMOTE_STORAGE' ]; then find '$REMOTE_STORAGE' -mindepth 1 -maxdepth 1 -print -quit; fi")
+  if [ -n "$has_content" ]; then
+    echo "⚠️  Remote storage at $REMOTE_STORAGE contains existing data."
+    read -r -p "   Continue and sync local storage over it? [y/N]: " reply
+    case "${reply,,}" in
+      y|yes) echo "   Proceeding with storage sync..." ;;
+      *) echo "   Skipping storage sync for this run."; SKIP_STORAGE=1 ;;
+    esac
+  fi
+}
+
 copy_source_tree(){
   echo " • Copying full local project directory..."
   ensure_remote_temp_dir
@@ -388,11 +439,14 @@ setup_remote_repository(){
 }

 cleanup_stale_docker_resources(){
-  if [ "$CLEANUP_RUNTIME" -ne 1 ]; then
+  if [ "$PRESERVE_CONTAINERS" -eq 1 ]; then
+    echo "⋅ Skipping remote container/image cleanup (--preserve-containers)"
+    return
+  fi
+  if [ "$CLEAN_CONTAINERS" -ne 1 ]; then
     echo "⋅ Skipping remote runtime cleanup (containers and images preserved)."
     return
   fi

   echo "⋅ Cleaning up stale Docker resources on remote..."

   # Stop and remove old containers
@@ -446,6 +500,8 @@ if [ ${#MISSING_IMAGES[@]} -gt 0 ]; then
   printf '   • %s\n' "${MISSING_IMAGES[@]}"
 fi

+confirm_remote_storage_overwrite
+
 if [[ $SKIP_STORAGE -eq 0 ]]; then
   if [[ -d storage ]]; then
     echo "⋅ Syncing storage to remote"
@@ -513,8 +569,34 @@ run_scp "$TARBALL" "$USER@$HOST:$REMOTE_TEMP_DIR/acore-modules-images.tar"
 run_ssh "docker load < '$REMOTE_TEMP_DIR/acore-modules-images.tar' && rm '$REMOTE_TEMP_DIR/acore-modules-images.tar'"

 if [[ -f "$ENV_FILE" ]]; then
-  echo "⋅ Uploading .env"
-  run_scp "$ENV_FILE" "$USER@$HOST:$PROJECT_DIR/.env"
+  if [[ $SKIP_ENV -eq 1 ]]; then
+    echo "⋅ Skipping .env upload (--skip-env)"
+  else
+    remote_env_path="$PROJECT_DIR/.env"
+    upload_env=1
+
+    if run_ssh "test -f '$remote_env_path'"; then
+      if [ "$ASSUME_YES" = "1" ]; then
+        echo "⋅ Overwriting existing remote .env (auto-confirm)"
+      elif [ -t 0 ]; then
+        read -r -p "⚠️  Remote .env exists at $remote_env_path. Overwrite? [y/N]: " reply
+        case "$reply" in
+          [Yy]*) ;;
+          *) upload_env=0 ;;
+        esac
+      else
+        echo "⚠️  Remote .env exists at $remote_env_path; skipping upload (no confirmation available)"
+        upload_env=0
+      fi
+    fi
+
+    if [[ $upload_env -eq 1 ]]; then
+      echo "⋅ Uploading .env"
+      run_scp "$ENV_FILE" "$USER@$HOST:$remote_env_path"
+    else
+      echo "⋅ Keeping existing remote .env"
+    fi
+  fi
 fi

 echo "⋅ Remote prepares completed"
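The .env guard above folds three signals into one decision: explicit auto-confirm, an interactive TTY, and whether a remote file already exists. Distilled outside the migration context, as a sketch rather than code from the repo:

    # ASSUME_YES and the prompt shape mirror the hunk above.
    if [ "$ASSUME_YES" = "1" ]; then
        upload_env=1                # auto-confirm wins
    elif [ -t 0 ]; then
        read -r -p "Overwrite remote .env? [y/N]: " reply
        case "$reply" in [Yy]*) upload_env=1 ;; *) upload_env=0 ;; esac
    else
        upload_env=0                # no TTY: fail safe, keep the existing file
    fi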
@@ -3,8 +3,21 @@
 # to re-copy SQL files.
 set -euo pipefail

-info(){ echo "🔧 [restore-stage] $*"; }
-warn(){ echo "⚠️  [restore-stage] $*" >&2; }
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Source common library for standardized logging
+if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
+  exit 1
+fi
+
+# Specialized prefixed logging for this restoration context
+restore_info() { info "🔧 [restore-stage] $*"; }
+restore_warn() { warn "[restore-stage] $*"; }
+
+# Maintain compatibility with existing function calls
+info() { restore_info "$*"; }
+warn() { restore_warn "$*"; }

 MODULES_DIR="${MODULES_DIR:-/modules}"
 MODULES_META_DIR="${MODULES_DIR}/.modules-meta"
@@ -9,6 +9,10 @@ from pathlib import Path

 PROJECT_DIR = Path(__file__).resolve().parents[2]
 ENV_FILE = PROJECT_DIR / ".env"
+DEFAULT_ACORE_STANDARD_REPO = "https://github.com/azerothcore/azerothcore-wotlk.git"
+DEFAULT_ACORE_PLAYERBOTS_REPO = "https://github.com/mod-playerbots/azerothcore-wotlk.git"
+DEFAULT_ACORE_STANDARD_BRANCH = "master"
+DEFAULT_ACORE_PLAYERBOTS_BRANCH = "Playerbot"

 def load_env():
     env = {}
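These defaults only apply when the corresponding keys are absent from .env; setting any of the four keys overrides them. For example, pointing the playerbots variant at a fork (the URL below is a hypothetical example, not a real repository):

    ACORE_REPO_PLAYERBOTS=https://github.com/example/azerothcore-wotlk.git
    ACORE_BRANCH_PLAYERBOTS=Playerbot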
@@ -150,6 +154,195 @@ def volume_info(name, fallback=None):
|
|||||||
pass
|
pass
|
||||||
return {"name": name, "exists": False, "mountpoint": "-"}
|
return {"name": name, "exists": False, "mountpoint": "-"}
|
||||||
|
|
||||||
|
def detect_source_variant(env):
|
||||||
|
variant = read_env(env, "STACK_SOURCE_VARIANT", "").strip().lower()
|
||||||
|
if variant in ("playerbots", "playerbot"):
|
||||||
|
return "playerbots"
|
||||||
|
if variant == "core":
|
||||||
|
return "core"
|
||||||
|
if read_env(env, "STACK_IMAGE_MODE", "").strip().lower() == "playerbots":
|
||||||
|
return "playerbots"
|
||||||
|
if read_env(env, "MODULE_PLAYERBOTS", "0") == "1" or read_env(env, "PLAYERBOT_ENABLED", "0") == "1":
|
||||||
|
return "playerbots"
|
||||||
|
return "core"
|
||||||
|
|
||||||
|
def repo_config_for_variant(env, variant):
|
||||||
|
if variant == "playerbots":
|
||||||
|
repo = read_env(env, "ACORE_REPO_PLAYERBOTS", DEFAULT_ACORE_PLAYERBOTS_REPO)
|
||||||
|
branch = read_env(env, "ACORE_BRANCH_PLAYERBOTS", DEFAULT_ACORE_PLAYERBOTS_BRANCH)
|
||||||
|
else:
|
||||||
|
repo = read_env(env, "ACORE_REPO_STANDARD", DEFAULT_ACORE_STANDARD_REPO)
|
||||||
|
branch = read_env(env, "ACORE_BRANCH_STANDARD", DEFAULT_ACORE_STANDARD_BRANCH)
|
||||||
|
return repo, branch
|
||||||
|
|
||||||
|
def image_labels(image):
|
||||||
|
try:
|
||||||
|
result = subprocess.run(
|
||||||
|
["docker", "image", "inspect", "--format", "{{json .Config.Labels}}", image],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
check=True,
|
||||||
|
timeout=3,
|
||||||
|
)
|
||||||
|
labels = json.loads(result.stdout or "{}")
|
||||||
|
if isinstance(labels, dict):
|
||||||
|
return {k: (v or "").strip() for k, v in labels.items()}
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def first_label(labels, keys):
|
||||||
|
for key in keys:
|
||||||
|
value = labels.get(key, "")
|
||||||
|
if value:
|
||||||
|
return value
|
||||||
|
return ""
|
||||||
|
|
||||||
|
def short_commit(commit):
|
||||||
|
commit = commit.strip()
|
||||||
|
if re.fullmatch(r"[0-9a-fA-F]{12,}", commit):
|
||||||
|
return commit[:12]
|
||||||
|
return commit
|
||||||
|
|
||||||
|
+def git_info_from_path(path):
+    repo_path = Path(path)
+    if not (repo_path / ".git").exists():
+        return None
+
+    def run_git(args):
+        try:
+            result = subprocess.run(
+                ["git"] + args,
+                cwd=repo_path,
+                capture_output=True,
+                text=True,
+                check=True,
+            )
+            return result.stdout.strip()
+        except Exception:
+            return ""
+
+    commit = run_git(["rev-parse", "HEAD"])
+    if not commit:
+        return None
+
+    return {
+        "commit": commit,
+        "commit_short": run_git(["rev-parse", "--short", "HEAD"]) or short_commit(commit),
+        "date": run_git(["log", "-1", "--format=%cd", "--date=iso-strict"]),
+        "repo": run_git(["remote", "get-url", "origin"]),
+        "branch": run_git(["rev-parse", "--abbrev-ref", "HEAD"]),
+        "path": str(repo_path),
+    }
+
+def candidate_source_paths(env, variant):
+    paths = []
+    for key in ("MODULES_REBUILD_SOURCE_PATH", "SOURCE_DIR"):
+        value = read_env(env, key, "")
+        if value:
+            paths.append(value)
+
+    local_root = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
+    primary_dir = "azerothcore-playerbots" if variant == "playerbots" else "azerothcore"
+    fallback_dir = "azerothcore" if variant == "playerbots" else "azerothcore-playerbots"
+    paths.append(os.path.join(local_root, "source", primary_dir))
+    paths.append(os.path.join(local_root, "source", fallback_dir))
+
+    normalized = []
+    for p in paths:
+        expanded = expand_path(p, env)
+        try:
+            normalized.append(str(Path(expanded).expanduser().resolve()))
+        except Exception:
+            normalized.append(str(Path(expanded).expanduser()))
+    # Deduplicate while preserving order
+    seen = set()
+    unique_paths = []
+    for p in normalized:
+        if p not in seen:
+            seen.add(p)
+            unique_paths.append(p)
+    return unique_paths
+
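Note: the candidate list can contain the same checkout twice (an env override plus the derived default), so the function dedupes while keeping priority order: first occurrence wins. The seen-set idiom in isolation, with made-up paths:

def dedupe_keep_order(items):
    # First occurrence wins; later duplicates are dropped.
    seen = set()
    unique = []
    for item in items:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique

paths = [
    "/srv/acore/local-storage/source/azerothcore-playerbots",  # from MODULES_REBUILD_SOURCE_PATH
    "/srv/acore/local-storage/source/azerothcore-playerbots",  # derived default, duplicate
    "/srv/acore/local-storage/source/azerothcore",             # fallback variant
]
print(dedupe_keep_order(paths))  # override path first, fallback second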
+def build_info(service_data, env):
+    variant = detect_source_variant(env)
+    repo, branch = repo_config_for_variant(env, variant)
+    info = {
+        "variant": variant,
+        "repo": repo,
+        "branch": branch,
+        "image": "",
+        "commit": "",
+        "commit_date": "",
+        "commit_source": "",
+        "source_path": "",
+    }
+
+    image_candidates = []
+    for svc in service_data:
+        if svc.get("name") in ("ac-worldserver", "ac-authserver", "ac-db-import"):
+            image = svc.get("image") or ""
+            if image:
+                image_candidates.append(image)
+
+    for env_key in (
+        "AC_WORLDSERVER_IMAGE_PLAYERBOTS",
+        "AC_WORLDSERVER_IMAGE_MODULES",
+        "AC_WORLDSERVER_IMAGE",
+        "AC_AUTHSERVER_IMAGE_PLAYERBOTS",
+        "AC_AUTHSERVER_IMAGE_MODULES",
+        "AC_AUTHSERVER_IMAGE",
+    ):
+        value = read_env(env, env_key, "")
+        if value:
+            image_candidates.append(value)
+
+    seen = set()
+    deduped_images = []
+    for img in image_candidates:
+        if img not in seen:
+            seen.add(img)
+            deduped_images.append(img)
+
+    commit_label_keys = [
+        "build.source_commit",
+        "org.opencontainers.image.revision",
+        "org.opencontainers.image.version",
+    ]
+    date_label_keys = [
+        "build.source_date",
+        "org.opencontainers.image.created",
+        "build.timestamp",
+    ]
+
+    for image in deduped_images:
+        labels = image_labels(image)
+        if not info["image"]:
+            info["image"] = image
+        if not labels:
+            continue
+        commit = short_commit(first_label(labels, commit_label_keys))
+        date = first_label(labels, date_label_keys)
+        if commit or date:
+            info["commit"] = commit
+            info["commit_date"] = date
+            info["commit_source"] = "image-label"
+            info["image"] = image
+            return info
+
+    for path in candidate_source_paths(env, variant):
+        git_meta = git_info_from_path(path)
+        if git_meta:
+            info["commit"] = git_meta.get("commit_short") or short_commit(git_meta.get("commit", ""))
+            info["commit_date"] = git_meta.get("date", "")
+            info["commit_source"] = "source-tree"
+            info["source_path"] = git_meta.get("path", "")
+            info["repo"] = git_meta.get("repo") or info["repo"]
+            info["branch"] = git_meta.get("branch") or info["branch"]
+            return info
+
+    return info
+
 def expand_path(value, env):
     storage = read_env(env, "STORAGE_PATH", "./storage")
     local_storage = read_env(env, "STORAGE_PATH_LOCAL", "./local-storage")
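Note: build_info tries image labels first and falls back to a local git checkout, so commit_source records which path answered. For orientation, the build block it contributes to the snapshot ends up shaped roughly like this; every value below is invented for illustration:

# Hypothetical example of the dict build_info returns when an image label wins:
example_build = {
    "variant": "playerbots",
    "repo": "https://github.com/example/azerothcore-playerbots.git",  # assumed URL
    "branch": "Playerbot",                                            # assumed branch
    "image": "ac-worldserver:modules",                                # assumed tag
    "commit": "3b11e23546a1",
    "commit_date": "2025-01-10T12:00:00+00:00",
    "commit_source": "image-label",  # or "source-tree" when a git checkout answered
    "source_path": "",               # filled only for the source-tree fallback
}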
@@ -175,13 +368,61 @@ def mysql_query(env, database, query):
     except Exception:
         return 0
+
+def escape_like_prefix(prefix):
+    # Basic escape for single quotes in SQL literals
+    return prefix.replace("'", "''")
+
+def bot_prefixes(env):
+    prefixes = []
+    for key in ("PLAYERBOT_ACCOUNT_PREFIXES", "PLAYERBOT_ACCOUNT_PREFIX"):
+        raw = read_env(env, key, "")
+        for part in raw.replace(",", " ").split():
+            part = part.strip()
+            if part:
+                prefixes.append(part)
+    # Default fallback if nothing configured
+    if not prefixes:
+        prefixes.extend(["playerbot", "rndbot", "bot"])
+    return prefixes
 
 def user_stats(env):
     db_auth = read_env(env, "DB_AUTH_NAME", "acore_auth")
     db_characters = read_env(env, "DB_CHARACTERS_NAME", "acore_characters")
-    accounts = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account;")
-    online = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE online = 1;")
+    prefixes = bot_prefixes(env)
+    account_conditions = []
+    for prefix in prefixes:
+        prefix = escape_like_prefix(prefix)
+        upper_prefix = prefix.upper()
+        account_conditions.append(f"UPPER(username) NOT LIKE '{upper_prefix}%%'")
+    account_query = "SELECT COUNT(*) FROM account"
+    if account_conditions:
+        account_query += " WHERE " + " AND ".join(account_conditions)
+    accounts = mysql_query(env, db_auth, account_query + ";")
+
+    online_conditions = ["c.online = 1"]
+    for prefix in prefixes:
+        prefix = escape_like_prefix(prefix)
+        upper_prefix = prefix.upper()
+        online_conditions.append(f"UPPER(a.username) NOT LIKE '{upper_prefix}%%'")
+    online_query = (
+        f"SELECT COUNT(DISTINCT a.id) FROM `{db_characters}`.characters c "
+        f"JOIN `{db_auth}`.account a ON a.id = c.account "
+        f"WHERE {' AND '.join(online_conditions)};"
+    )
+    online = mysql_query(env, db_characters, online_query)
     active = mysql_query(env, db_auth, "SELECT COUNT(*) FROM account WHERE last_login >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 7 DAY);")
-    characters = mysql_query(env, db_characters, "SELECT COUNT(*) FROM characters;")
+    character_conditions = []
+    for prefix in prefixes:
+        prefix = escape_like_prefix(prefix)
+        upper_prefix = prefix.upper()
+        character_conditions.append(f"UPPER(a.username) NOT LIKE '{upper_prefix}%%'")
+    characters_query = (
+        f"SELECT COUNT(*) FROM `{db_characters}`.characters c "
+        f"JOIN `{db_auth}`.account a ON a.id = c.account"
+    )
+    if character_conditions:
+        characters_query += " WHERE " + " AND ".join(character_conditions)
+    characters = mysql_query(env, db_characters, characters_query + ";")
     return {
         "accounts": accounts,
        "online": online,
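Note: user_stats now excludes bot-owned accounts by filtering the configured playerbot prefixes. With the hard-coded fallback list, the loop assembles a query like the sketch below; each % wildcard in a MySQL LIKE matches zero or more characters, so the doubled %% matches the same rows as a single % while also surviving any later %-style formatting of the query string.

def escape_like_prefix(prefix):
    return prefix.replace("'", "''")

prefixes = ["playerbot", "rndbot", "bot"]  # the fallback list from bot_prefixes
conditions = [f"UPPER(username) NOT LIKE '{escape_like_prefix(p).upper()}%%'" for p in prefixes]
query = "SELECT COUNT(*) FROM account WHERE " + " AND ".join(conditions) + ";"
print(query)
# SELECT COUNT(*) FROM account WHERE UPPER(username) NOT LIKE 'PLAYERBOT%%'
#   AND UPPER(username) NOT LIKE 'RNDBOT%%' AND UPPER(username) NOT LIKE 'BOT%%';
# (printed on one line; wrapped here for readability)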
@@ -274,6 +515,8 @@ def main():
         "mysql_data": volume_info(f"{project}_mysql-data", "mysql-data"),
     }
 
+    build = build_info(service_data, env)
+
     data = {
         "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
         "project": project,
@@ -285,6 +528,7 @@ def main():
         "volumes": volumes,
         "users": user_stats(env),
         "stats": docker_stats(),
+        "build": build,
     }
 
     print(json.dumps(data))

scripts/bash/update-remote.sh (new file, executable, 121 lines)
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Helper to push a fresh build to a remote host with minimal downtime and no data touch by default.
+
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+DEFAULT_PROJECT_DIR="~$(printf '/%s' "$(basename "$ROOT_DIR")")"
+
+HOST=""
+USER=""
+PORT=22
+IDENTITY=""
+PROJECT_DIR="$DEFAULT_PROJECT_DIR"
+PUSH_ENV=0
+PUSH_STORAGE=0
+CLEAN_CONTAINERS=0
+AUTO_DEPLOY=1
+ASSUME_YES=0
+
+usage(){
+  cat <<'EOF'
+Usage: scripts/bash/update-remote.sh --host HOST --user USER [options]
+
+Options:
+  --host HOST          Remote hostname or IP (required)
+  --user USER          SSH username on remote host (required)
+  --port PORT          SSH port (default: 22)
+  --identity PATH      SSH private key
+  --project-dir DIR    Remote project directory (default: ~/<repo-name>)
+  --remote-path DIR    Alias for --project-dir (backward compat)
+  --push-env           Upload local .env to remote (default: skip)
+  --push-storage       Sync ./storage to remote (default: skip)
+  --clean-containers   Stop/remove remote ac-* containers & project images during migration (default: preserve)
+  --no-auto-deploy     Do not trigger remote deploy after migration
+  --yes                Auto-confirm prompts
+  --help               Show this help
+EOF
+}
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --host) HOST="$2"; shift 2;;
+    --user) USER="$2"; shift 2;;
+    --port) PORT="$2"; shift 2;;
+    --identity) IDENTITY="$2"; shift 2;;
+    --project-dir) PROJECT_DIR="$2"; shift 2;;
+    --remote-path) PROJECT_DIR="$2"; shift 2;;
+    --push-env) PUSH_ENV=1; shift;;
+    --push-storage) PUSH_STORAGE=1; shift;;
+    --clean-containers) CLEAN_CONTAINERS=1; shift;;
+    --no-auto-deploy) AUTO_DEPLOY=0; shift;;
+    --yes) ASSUME_YES=1; shift;;
+    --help|-h) usage; exit 0;;
+    *) echo "Unknown option: $1" >&2; usage; exit 1;;
+  esac
+done
+
+if [[ -z "$HOST" || -z "$USER" ]]; then
+  echo "--host and --user are required" >&2
+  usage
+  exit 1
+fi
+
+deploy_args=(--remote --remote-host "$HOST" --remote-user "$USER")
+
+if [ -n "$PROJECT_DIR" ]; then
+  deploy_args+=(--remote-project-dir "$PROJECT_DIR")
+fi
+if [ -n "$IDENTITY" ]; then
+  deploy_args+=(--remote-identity "$IDENTITY")
+fi
+if [ "$PORT" != "22" ]; then
+  deploy_args+=(--remote-port "$PORT")
+fi
+
+if [ "$PUSH_STORAGE" -ne 1 ]; then
+  deploy_args+=(--remote-skip-storage)
+fi
+if [ "$PUSH_ENV" -ne 1 ]; then
+  deploy_args+=(--remote-skip-env)
+fi
+
+if [ "$CLEAN_CONTAINERS" -eq 1 ]; then
+  deploy_args+=(--remote-clean-containers)
+else
+  deploy_args+=(--remote-preserve-containers)
+fi
+
+if [ "$AUTO_DEPLOY" -eq 1 ]; then
+  deploy_args+=(--remote-auto-deploy)
+fi
+
+deploy_args+=(--no-watch)
+
+if [ "$ASSUME_YES" -eq 1 ]; then
+  deploy_args+=(--yes)
+fi
+
+echo "Remote update plan:"
+echo "  Host/User    : ${USER}@${HOST}:${PORT}"
+echo "  Project Dir  : ${PROJECT_DIR}"
+echo "  Push .env    : $([ "$PUSH_ENV" -eq 1 ] && echo yes || echo no)"
+echo "  Push storage : $([ "$PUSH_STORAGE" -eq 1 ] && echo yes || echo no)"
+echo "  Cleanup mode : $([ "$CLEAN_CONTAINERS" -eq 1 ] && echo 'clean containers' || echo 'preserve containers')"
+echo "  Auto deploy  : $([ "$AUTO_DEPLOY" -eq 1 ] && echo yes || echo no)"
+if [ "$AUTO_DEPLOY" -eq 1 ] && [ "$PUSH_ENV" -ne 1 ]; then
+  echo "  ⚠️  Auto-deploy is enabled but push-env is off; remote deploy will fail without a valid .env."
+fi
+
+if [ "$ASSUME_YES" -ne 1 ]; then
+  read -r -p "Proceed with remote update? [y/N]: " reply
+  reply="${reply:-n}"
+  case "${reply,,}" in
+    y|yes) ;;
+    *) echo "Aborted."; exit 1 ;;
+  esac
+  deploy_args+=(--yes)
+fi
+
+cd "$ROOT_DIR"
+./deploy.sh "${deploy_args[@]}"
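Note: since --yes skips the confirmation prompt, the script can be driven unattended, for example from CI. A hypothetical Python wrapper, sketched only to show the flag surface; the host, user, and key path are placeholders, and it assumes it is run from the repo root:

import os
import subprocess

cmd = [
    "scripts/bash/update-remote.sh",
    "--host", "realm.example.net",   # placeholder host
    "--user", "acore",               # placeholder SSH user
    "--identity", os.path.expanduser("~/.ssh/id_ed25519"),
    "--push-env",  # without this, auto-deploy on the remote may fail for lack of a .env
    "--yes",       # accept the printed plan non-interactively
]
subprocess.run(cmd, check=True)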
@@ -4,13 +4,14 @@ set -e
 
 # Simple profile-aware deploy + health check for profiles-verify/docker-compose.yml
 
-BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
-info(){ echo -e "${BLUE}ℹ️  $*${NC}"; }
-ok(){ echo -e "${GREEN}✅ $*${NC}"; }
-warn(){ echo -e "${YELLOW}⚠️  $*${NC}"; }
-err(){ echo -e "${RED}❌ $*${NC}"; }
-
 PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+# Source common library for standardized logging
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if ! source "$SCRIPT_DIR/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $SCRIPT_DIR/lib/common.sh" >&2
+  exit 1
+fi
 COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml"
 ENV_FILE=""
 TEMPLATE_FILE="$PROJECT_DIR/.env.template"
@@ -4,6 +4,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"net"
+	"os"
 	"os/exec"
 	"strings"
 	"time"
@@ -61,6 +63,17 @@ type Module struct {
 	Type string `json:"type"`
 }
+
+type BuildInfo struct {
+	Variant      string `json:"variant"`
+	Repo         string `json:"repo"`
+	Branch       string `json:"branch"`
+	Image        string `json:"image"`
+	Commit       string `json:"commit"`
+	CommitDate   string `json:"commit_date"`
+	CommitSource string `json:"commit_source"`
+	SourcePath   string `json:"source_path"`
+}
+
 type Snapshot struct {
 	Timestamp string `json:"timestamp"`
 	Project   string `json:"project"`
@@ -72,6 +85,7 @@ type Snapshot struct {
 	Volumes map[string]VolumeInfo `json:"volumes"`
 	Users   UserStats             `json:"users"`
 	Stats   map[string]ContainerStats `json:"stats"`
+	Build   BuildInfo             `json:"build"`
 }
 
 var persistentServiceOrder = []string{
@@ -84,6 +98,81 @@ var persistentServiceOrder = []string{
 	"ac-backup",
 }
+
+func humanDuration(d time.Duration) string {
+	if d < time.Minute {
+		return "<1m"
+	}
+	days := d / (24 * time.Hour)
+	d -= days * 24 * time.Hour
+	hours := d / time.Hour
+	d -= hours * time.Hour
+	mins := d / time.Minute
+
+	switch {
+	case days > 0:
+		return fmt.Sprintf("%dd %dh", days, hours)
+	case hours > 0:
+		return fmt.Sprintf("%dh %dm", hours, mins)
+	default:
+		return fmt.Sprintf("%dm", mins)
+	}
+}
+
+func formatUptime(startedAt string) string {
+	if startedAt == "" {
+		return "-"
+	}
+	parsed, err := time.Parse(time.RFC3339Nano, startedAt)
+	if err != nil {
+		parsed, err = time.Parse(time.RFC3339, startedAt)
+		if err != nil {
+			return "-"
+		}
+	}
+	if parsed.IsZero() {
+		return "-"
+	}
+	uptime := time.Since(parsed)
+	if uptime < 0 {
+		uptime = 0
+	}
+	return humanDuration(uptime)
+}
+
+func primaryIPv4() string {
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		return ""
+	}
+	for _, iface := range ifaces {
+		if iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 {
+			continue
+		}
+		addrs, err := iface.Addrs()
+		if err != nil {
+			continue
+		}
+		for _, addr := range addrs {
+			var ip net.IP
+			switch v := addr.(type) {
+			case *net.IPNet:
+				ip = v.IP
+			case *net.IPAddr:
+				ip = v.IP
+			}
+			if ip == nil || ip.IsLoopback() {
+				continue
+			}
+			ip = ip.To4()
+			if ip == nil {
+				continue
+			}
+			return ip.String()
+		}
+	}
+	return ""
+}
+
 func runSnapshot() (*Snapshot, error) {
 	cmd := exec.Command("./scripts/bash/statusjson.sh")
 	output, err := cmd.Output()
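Note: humanDuration buckets an uptime into days+hours, else hours+minutes, else minutes, with a "<1m" floor. The same logic in Python, for illustration only; the Go version above is authoritative:

def human_duration(seconds):
    # Mirrors the Go bucketing: <1m, then m, then h+m, then d+h.
    if seconds < 60:
        return "<1m"
    days, rem = divmod(seconds, 86400)
    hours, rem = divmod(rem, 3600)
    mins = rem // 60
    if days > 0:
        return f"{days}d {hours}h"
    if hours > 0:
        return f"{hours}h {mins}m"
    return f"{mins}m"

print(human_duration(45))      # <1m
print(human_duration(754))     # 12m
print(human_duration(11040))   # 3h 4m
print(human_duration(190800))  # 2d 5h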
@@ -126,8 +215,8 @@ func buildServicesTable(s *Snapshot) *TableNoCol {
 	runningServices, setupServices := partitionServices(s.Services)
 
 	table := NewTableNoCol()
-	rows := [][]string{{"Group", "Service", "Status", "Health", "CPU%", "Memory"}}
-	appendRows := func(groupLabel string, services []Service) {
+	rows := [][]string{{"Service", "Status", "Health", "Uptime", "CPU%", "Memory"}}
+	appendRows := func(services []Service) {
 		for _, svc := range services {
 			cpu := "-"
 			mem := "-"
@@ -139,12 +228,12 @@ func buildServicesTable(s *Snapshot) *TableNoCol {
 			if svc.Status != "running" && svc.ExitCode != "0" && svc.ExitCode != "" {
 				health = fmt.Sprintf("%s (%s)", svc.Health, svc.ExitCode)
 			}
-			rows = append(rows, []string{groupLabel, svc.Label, svc.Status, health, cpu, mem})
+			rows = append(rows, []string{svc.Label, svc.Status, health, formatUptime(svc.StartedAt), cpu, mem})
 		}
 	}
 
-	appendRows("Persistent", runningServices)
-	appendRows("Setup", setupServices)
+	appendRows(runningServices)
+	appendRows(setupServices)
 
 	table.Rows = rows
 	table.RowSeparator = false
@@ -174,9 +263,9 @@ func buildPortsTable(s *Snapshot) *TableNoCol {
 	table := NewTableNoCol()
 	rows := [][]string{{"Port", "Number", "Reachable"}}
 	for _, p := range s.Ports {
-		state := "down"
+		state := "Closed"
 		if p.Reachable {
-			state = "up"
+			state = "Open"
 		}
 		rows = append(rows, []string{p.Name, p.Port, state})
 	}
@@ -223,9 +312,11 @@ func buildStorageParagraph(s *Snapshot) *widgets.Paragraph {
 	}
 	par := widgets.NewParagraph()
 	par.Title = "Storage"
-	par.Text = b.String()
+	par.Text = strings.TrimRight(b.String(), "\n")
 	par.Border = true
 	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
+	par.PaddingLeft = 0
+	par.PaddingRight = 0
 	return par
 }
@@ -247,23 +338,85 @@ func buildVolumesParagraph(s *Snapshot) *widgets.Paragraph {
 	}
 	par := widgets.NewParagraph()
 	par.Title = "Volumes"
-	par.Text = b.String()
+	par.Text = strings.TrimRight(b.String(), "\n")
+	par.Border = true
+	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
+	par.PaddingLeft = 0
+	par.PaddingRight = 0
+	return par
+}
+
+func simplifyRepo(repo string) string {
+	repo = strings.TrimSpace(repo)
+	repo = strings.TrimSuffix(repo, ".git")
+	repo = strings.TrimPrefix(repo, "https://")
+	repo = strings.TrimPrefix(repo, "http://")
+	repo = strings.TrimPrefix(repo, "git@")
+	repo = strings.TrimPrefix(repo, "github.com:")
+	repo = strings.TrimPrefix(repo, "gitlab.com:")
+	repo = strings.TrimPrefix(repo, "github.com/")
+	repo = strings.TrimPrefix(repo, "gitlab.com/")
+	return repo
+}
+
+func buildInfoParagraph(s *Snapshot) *widgets.Paragraph {
+	build := s.Build
+	var lines []string
+
+	if build.Branch != "" {
+		lines = append(lines, fmt.Sprintf("Branch: %s", build.Branch))
+	}
+
+	if repo := simplifyRepo(build.Repo); repo != "" {
+		lines = append(lines, fmt.Sprintf("Repo: %s", repo))
+	}
+
+	commitLine := "Git: unknown"
+	if build.Commit != "" {
+		commitLine = fmt.Sprintf("Git: %s", build.Commit)
+		switch build.CommitSource {
+		case "image-label":
+			commitLine += " [image]"
+		case "source-tree":
+			commitLine += " [source]"
+		}
+	}
+	lines = append(lines, commitLine)
+
+	if build.Image != "" {
+		// Skip image line to keep header compact
+	}
+
+	lines = append(lines, fmt.Sprintf("Updated: %s", s.Timestamp))
+
+	par := widgets.NewParagraph()
+	par.Title = "Build"
+	par.Text = strings.Join(lines, "\n")
 	par.Border = true
 	par.BorderStyle = ui.NewStyle(ui.ColorYellow)
 	return par
 }
 
 func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
+	hostname, err := os.Hostname()
+	if err != nil || hostname == "" {
+		hostname = "unknown"
+	}
+	ip := primaryIPv4()
+	if ip == "" {
+		ip = "unknown"
+	}
+
 	servicesTable := buildServicesTable(s)
 	portsTable := buildPortsTable(s)
 	for i := 1; i < len(portsTable.Rows); i++ {
 		if portsTable.RowStyles == nil {
 			portsTable.RowStyles = make(map[int]ui.Style)
 		}
-		if portsTable.Rows[i][2] == "up" {
+		if portsTable.Rows[i][2] == "Open" {
 			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorGreen)
 		} else {
-			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorRed)
+			portsTable.RowStyles[i] = ui.NewStyle(ui.ColorYellow)
 		}
 	}
 	modulesList := buildModulesList(s)
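Note: simplifyRepo strips scheme, host prefix, and the .git suffix so the Build panel can show a compact owner/name. The same trimming in Python (3.9+ for removeprefix/removesuffix), for illustration:

def simplify_repo(repo: str) -> str:
    repo = repo.strip()
    repo = repo.removesuffix(".git")
    # Apply each prefix once, in order, like the Go TrimPrefix chain.
    for prefix in ("https://", "http://", "git@",
                   "github.com:", "gitlab.com:", "github.com/", "gitlab.com/"):
        repo = repo.removeprefix(prefix)
    return repo

print(simplify_repo("https://github.com/uprightbass360/AzerothCore-RealmMaster.git"))
# uprightbass360/AzerothCore-RealmMaster
print(simplify_repo("git@github.com:example/azerothcore-playerbots.git"))
# example/azerothcore-playerbots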
@@ -287,43 +440,81 @@ func renderSnapshot(s *Snapshot, selectedModule int) (*widgets.List, *ui.Grid) {
 	moduleInfoPar.Border = true
 	moduleInfoPar.BorderStyle = ui.NewStyle(ui.ColorMagenta)
 	storagePar := buildStorageParagraph(s)
-	storagePar.Border = true
-	storagePar.BorderStyle = ui.NewStyle(ui.ColorYellow)
-	storagePar.PaddingLeft = 1
-	storagePar.PaddingRight = 1
 	volumesPar := buildVolumesParagraph(s)
 
 	header := widgets.NewParagraph()
-	header.Text = fmt.Sprintf("Project: %s\nNetwork: %s\nUpdated: %s", s.Project, s.Network, s.Timestamp)
+	header.Text = fmt.Sprintf("Host: %s\nIP: %s\nProject: %s\nNetwork: %s", hostname, ip, s.Project, s.Network)
 	header.Border = true
 
+	buildPar := buildInfoParagraph(s)
+
 	usersPar := widgets.NewParagraph()
-	usersPar.Text = fmt.Sprintf("USERS:\n Accounts: %d\n Online: %d\n Characters: %d\n Active 7d: %d", s.Users.Accounts, s.Users.Online, s.Users.Characters, s.Users.Active7d)
+	usersPar.Title = "Users"
+	usersPar.Text = fmt.Sprintf(" Online: %d\n Accounts: %d\n Characters: %d\n Active 7d: %d", s.Users.Online, s.Users.Accounts, s.Users.Characters, s.Users.Active7d)
 	usersPar.Border = true
 
+	const headerRowFrac = 0.18
+	const middleRowFrac = 0.43
+	const bottomRowFrac = 0.39
+
+	// Derive inner row ratios from the computed bottom row height so that
+	// internal containers tile their parent with the same spacing behavior
+	// as top-level rows.
 	grid := ui.NewGrid()
 	termWidth, termHeight := ui.TerminalDimensions()
+
+	headerHeight := int(float64(termHeight) * headerRowFrac)
+	middleHeight := int(float64(termHeight) * middleRowFrac)
+	bottomHeight := termHeight - headerHeight - middleHeight
+	if bottomHeight <= 0 {
+		bottomHeight = int(float64(termHeight) * bottomRowFrac)
+	}
+
+	helpHeight := int(float64(bottomHeight) * 0.32)
+	if helpHeight < 1 {
+		helpHeight = 1
+	}
+	moduleInfoHeight := bottomHeight - helpHeight
+	if moduleInfoHeight < 1 {
+		moduleInfoHeight = 1
+	}
+
+	storageHeight := int(float64(bottomHeight) * 0.513)
+	if storageHeight < 1 {
+		storageHeight = 1
+	}
+	volumesHeight := bottomHeight - storageHeight
+	if volumesHeight < 1 {
+		volumesHeight = 1
+	}
+
+	helpRatio := float64(helpHeight) / float64(bottomHeight)
+	moduleInfoRatio := float64(moduleInfoHeight) / float64(bottomHeight)
+	storageRatio := float64(storageHeight) / float64(bottomHeight)
+	volumesRatio := float64(volumesHeight) / float64(bottomHeight)
+
 	grid.SetRect(0, 0, termWidth, termHeight)
 	grid.Set(
-		ui.NewRow(0.15,
-			ui.NewCol(0.6, header),
-			ui.NewCol(0.4, usersPar),
+		ui.NewRow(headerRowFrac,
+			ui.NewCol(0.34, header),
+			ui.NewCol(0.33, buildPar),
+			ui.NewCol(0.33, usersPar),
 		),
-		ui.NewRow(0.46,
+		ui.NewRow(middleRowFrac,
 			ui.NewCol(0.6, servicesTable),
 			ui.NewCol(0.4, portsTable),
 		),
-		ui.NewRow(0.39,
+		ui.NewRow(bottomRowFrac,
 			ui.NewCol(0.25, modulesList),
 			ui.NewCol(0.15,
-				ui.NewRow(0.30, helpPar),
-				ui.NewRow(0.70, moduleInfoPar),
+				ui.NewRow(helpRatio, helpPar),
+				ui.NewRow(moduleInfoRatio, moduleInfoPar),
 			),
 			ui.NewCol(0.6,
-				ui.NewRow(0.55,
+				ui.NewRow(storageRatio,
 					ui.NewCol(1.0, storagePar),
 				),
-				ui.NewRow(0.45,
+				ui.NewRow(volumesRatio,
 					ui.NewCol(1.0, volumesPar),
 				),
 			),
 		),
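Note: termui rows take fractions of their parent, so the layout first computes whole-cell heights (with a floor of 1 to avoid zero-height panels) and then re-expresses them as exact fractions of the bottom row, keeping inner panels flush. A quick numeric check, assuming a 50-row terminal:

term_height = 50                        # assumed terminal height in cells
header = int(term_height * 0.18)        # 9
middle = int(term_height * 0.43)        # 21
bottom = term_height - header - middle  # 20, absorbing the rounding remainder

help_h = max(1, int(bottom * 0.32))      # 6
module_info_h = max(1, bottom - help_h)  # 14
print(help_h / bottom, module_info_h / bottom)  # 0.3 and 0.7, summing to exactly 1.0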
setup.sh (80 lines changed)
@@ -578,8 +578,6 @@ main(){
   local CLI_PLAYERBOT_ENABLED=""
   local CLI_PLAYERBOT_MIN=""
   local CLI_PLAYERBOT_MAX=""
-  local CLI_AUTO_REBUILD=0
-  local CLI_MODULES_SOURCE=""
   local FORCE_OVERWRITE=0
   local CLI_ENABLE_MODULES_RAW=()
@@ -622,9 +620,6 @@ Options:
   --playerbot-enabled 0|1        Override PLAYERBOT_ENABLED flag
   --playerbot-min-bots N         Override PLAYERBOT_MIN_BOTS value
   --playerbot-max-bots N         Override PLAYERBOT_MAX_BOTS value
-  --auto-rebuild-on-deploy       Enable automatic rebuild during deploys
-  --modules-rebuild-source PATH  Source checkout used for module rebuilds
-  --deploy-after                 Run ./deploy.sh automatically after setup completes
   --force                        Overwrite existing .env without prompting
 EOF
 exit 0
@@ -779,25 +774,10 @@ EOF
     --playerbot-max-bots=*)
       CLI_PLAYERBOT_MAX="${1#*=}"; shift
      ;;
-    --auto-rebuild-on-deploy)
-      CLI_AUTO_REBUILD=1
-      shift
-      ;;
-    --modules-rebuild-source)
-      [[ $# -ge 2 ]] || { say ERROR "--modules-rebuild-source requires a value"; exit 1; }
-      CLI_MODULES_SOURCE="$2"; shift 2
-      ;;
-    --modules-rebuild-source=*)
-      CLI_MODULES_SOURCE="${1#*=}"; shift
-      ;;
     --force)
      FORCE_OVERWRITE=1
      shift
      ;;
-    --deploy-after)
-      CLI_DEPLOY_AFTER=1
-      shift
-      ;;
     *)
      echo "Unknown argument: $1" >&2
      echo "Use --help for usage" >&2
@@ -1210,8 +1190,6 @@ fi
 local PLAYERBOT_MIN_BOTS="${DEFAULT_PLAYERBOT_MIN:-40}"
 local PLAYERBOT_MAX_BOTS="${DEFAULT_PLAYERBOT_MAX:-40}"
 
-local AUTO_REBUILD_ON_DEPLOY=$CLI_AUTO_REBUILD
-local MODULES_REBUILD_SOURCE_PATH_VALUE="${CLI_MODULES_SOURCE}"
 local NEEDS_CXX_REBUILD=0
 
 local module_mode_label=""
@@ -1473,7 +1451,6 @@ fi
 printf " %-18s %s\n" "Storage Path:" "$STORAGE_PATH"
 printf " %-18s %s\n" "Container User:" "$CONTAINER_USER"
 printf " %-18s Daily %s:00 UTC, keep %sd/%sh\n" "Backups:" "$BACKUP_DAILY_TIME" "$BACKUP_RETENTION_DAYS" "$BACKUP_RETENTION_HOURS"
-printf " %-18s %s\n" "Source checkout:" "$default_source_rel"
 printf " %-18s %s\n" "Modules images:" "$AC_AUTHSERVER_IMAGE_MODULES_VALUE | $AC_WORLDSERVER_IMAGE_MODULES_VALUE"
 
 printf " %-18s %s\n" "Modules preset:" "$SUMMARY_MODE_TEXT"
@@ -1520,11 +1497,6 @@ fi
 echo ""
 say WARNING "These modules require compiling AzerothCore from source."
 say INFO "Run './build.sh' to compile your custom modules before deployment."
-if [ "$CLI_AUTO_REBUILD" = "1" ]; then
-  AUTO_REBUILD_ON_DEPLOY=1
-else
-  AUTO_REBUILD_ON_DEPLOY=$(ask_yn "Enable automatic rebuild during future deploys?" "$( [ "$AUTO_REBUILD_ON_DEPLOY" = "1" ] && echo y || echo n )")
-fi
 
 # Set build sentinel to indicate rebuild is needed
 local sentinel="$LOCAL_STORAGE_ROOT_ABS/modules/.requires_rebuild"
@@ -1554,23 +1526,8 @@ fi
   default_source_rel="${LOCAL_STORAGE_ROOT}/source/azerothcore-playerbots"
 fi
 
-if [ -n "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
-  local storage_abs="$STORAGE_PATH"
-  if [[ "$storage_abs" != /* ]]; then
-    storage_abs="$(pwd)/${storage_abs#./}"
-  fi
-  local candidate_path="$MODULES_REBUILD_SOURCE_PATH_VALUE"
-  if [[ "$candidate_path" != /* ]]; then
-    candidate_path="$(pwd)/${candidate_path#./}"
-  fi
-  if [[ "$candidate_path" == "$storage_abs"* ]]; then
-    say WARNING "MODULES_REBUILD_SOURCE_PATH is inside shared storage (${candidate_path}). Using local workspace ${default_source_rel} instead."
-    MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
-  fi
-fi
-
-# Module staging will be handled directly in the rebuild section below
-
+# Persist rebuild source path for downstream build scripts
+MODULES_REBUILD_SOURCE_PATH="$default_source_rel"
 
 # Confirm write
@@ -1586,10 +1543,6 @@ fi
   [ "$cont" = "1" ] || { say ERROR "Aborted"; exit 1; }
 fi
 
-if [ -z "$MODULES_REBUILD_SOURCE_PATH_VALUE" ]; then
-  MODULES_REBUILD_SOURCE_PATH_VALUE="$default_source_rel"
-fi
-
 DB_PLAYERBOTS_NAME=${DB_PLAYERBOTS_NAME:-$DEFAULT_DB_PLAYERBOTS_NAME}
 HOST_ZONEINFO_PATH=${HOST_ZONEINFO_PATH:-$DEFAULT_HOST_ZONEINFO_PATH}
 MYSQL_INNODB_REDO_LOG_CAPACITY=${MYSQL_INNODB_REDO_LOG_CAPACITY:-$DEFAULT_MYSQL_INNODB_REDO_LOG_CAPACITY}
@@ -1756,11 +1709,12 @@ BACKUP_HEALTHCHECK_GRACE_SECONDS=$BACKUP_HEALTHCHECK_GRACE_SECONDS
 
 EOF
 echo
 echo "# Modules"
 for module_key in "${MODULE_KEYS[@]}"; do
   printf "%s=%s\n" "$module_key" "${!module_key:-0}"
 done
 cat <<EOF
+MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH
 
 # Client data
 CLIENT_DATA_VERSION=${CLIENT_DATA_VERSION:-$DEFAULT_CLIENT_DATA_VERSION}
@@ -1779,12 +1733,8 @@ MODULES_CPP_LIST=$MODULES_CPP_LIST
 MODULES_REQUIRES_CUSTOM_BUILD=$MODULES_REQUIRES_CUSTOM_BUILD
 MODULES_REQUIRES_PLAYERBOT_SOURCE=$MODULES_REQUIRES_PLAYERBOT_SOURCE
 
-# Rebuild automation
-AUTO_REBUILD_ON_DEPLOY=$AUTO_REBUILD_ON_DEPLOY
-MODULES_REBUILD_SOURCE_PATH=$MODULES_REBUILD_SOURCE_PATH_VALUE
-
 # Eluna
 AC_ELUNA_ENABLED=$DEFAULT_ELUNA_ENABLED
 AC_ELUNA_TRACE_BACK=$DEFAULT_ELUNA_TRACE_BACK
 AC_ELUNA_AUTO_RELOAD=$DEFAULT_ELUNA_AUTO_RELOAD
 AC_ELUNA_BYTECODE_CACHE=$DEFAULT_ELUNA_BYTECODE_CACHE
@@ -1853,16 +1803,6 @@ EOF
   printf '  🚀 Quick deploy: ./deploy.sh\n'
 fi
 
-if [ "${CLI_DEPLOY_AFTER:-0}" = "1" ]; then
-  local deploy_args=(bash "./deploy.sh" --yes)
-  if [ "$MODULE_PLAYERBOTS" != "1" ]; then
-    deploy_args+=(--profile standard)
-  fi
-  say INFO "Launching deploy after setup (--deploy-after enabled)"
-  if ! "${deploy_args[@]}"; then
-    say WARNING "Automatic deploy failed; please run ./deploy.sh manually."
-  fi
-fi
 }
 
 main "$@"
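Note: the net effect of the setup.sh changes is that the --auto-rebuild-on-deploy, --modules-rebuild-source, and --deploy-after flags are gone, and MODULES_REBUILD_SOURCE_PATH is now always persisted to .env as the derived local workspace path. A downstream build script can then resolve it with a plain key lookup; a minimal sketch, assuming simple KEY=VALUE lines without quoting:

def read_env_value(path, key, default=""):
    # Naive .env reader: last KEY=VALUE assignment wins; no quoting rules.
    value = default
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if line.startswith(f"{key}="):
                value = line.split("=", 1)[1]
    return value

source = read_env_value(".env", "MODULES_REBUILD_SOURCE_PATH",
                        "./local-storage/source/azerothcore")
print(f"Rebuilding modules from: {source}")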
@@ -7,11 +7,11 @@ set -euo pipefail
 ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 cd "$ROOT_DIR"
 
-BLUE='\033[0;34m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; RED='\033[0;31m'; NC='\033[0m'
-info(){ printf '%b\n' "${BLUE}ℹ️  $*${NC}"; }
-ok(){ printf '%b\n' "${GREEN}✅ $*${NC}"; }
-warn(){ printf '%b\n' "${YELLOW}⚠️  $*${NC}"; }
-err(){ printf '%b\n' "${RED}❌ $*${NC}"; }
+# Source common library for standardized logging
+if ! source "$ROOT_DIR/scripts/bash/lib/common.sh" 2>/dev/null; then
+  echo "❌ FATAL: Cannot load $ROOT_DIR/scripts/bash/lib/common.sh" >&2
+  exit 1
+fi
 
 FORCE_DIRTY=0
 DEPLOY_ARGS=()