enhance backup process and remove dashboards

This commit is contained in:
uprightbass360
2025-10-03 02:13:59 -04:00
parent 4a5b7c80b5
commit fe1efc1c16
8 changed files with 426 additions and 172 deletions

.mcp.json Normal file

@@ -0,0 +1,15 @@
{
"mcpServers": {
"mysql": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-mysql"],
"env": {
"MYSQL_HOST": "127.0.0.1",
"MYSQL_PORT": "64306",
"MYSQL_USER": "root",
"MYSQL_PASSWORD": "azerothcore123",
"MYSQL_DATABASE": "acore_world"
}
}
}
}


@@ -97,10 +97,12 @@ NETWORK_GATEWAY=172.20.0.1
# ==============================================
# Host volume paths for backup
HOST_BACKUP_PATH=${STORAGE_PATH}/backups
HOST_BACKUP_SCRIPTS_PATH=${STORAGE_PATH}/scripts
# HOST_BACKUP_SCRIPTS_PATH - No longer needed, scripts downloaded from GitHub
# ==============================================
# Backup settings
# ==============================================
BACKUP_CRON_SCHEDULE="0 3 * * *"
BACKUP_RETENTION_DAYS=7
BACKUP_CRON_SCHEDULE="0 9 * * *"
BACKUP_RETENTION_DAYS=3
BACKUP_RETENTION_HOURS=6
BACKUP_DIR=/backups


@@ -19,6 +19,7 @@ services:
- "${MYSQL_EXTERNAL_PORT}:${MYSQL_PORT}"
volumes:
- ${STORAGE_PATH}/mysql-data:/var/lib/mysql-persistent
- ${HOST_BACKUP_PATH}:/backups
- type: tmpfs
target: /var/lib/mysql-runtime
tmpfs:
@@ -26,26 +27,107 @@ services:
entrypoint: ["/bin/bash", "-c"]
command:
- |
echo "🔧 Starting MySQL with NFS-compatible setup..."
echo "🔧 Starting MySQL with NFS-compatible setup and auto-restore..."
mkdir -p /var/lib/mysql-runtime
chown -R mysql:mysql /var/lib/mysql-runtime
chmod 755 /var/lib/mysql-runtime
if [ -f "/var/lib/mysql-persistent/backup.sql" ]; then
echo "📦 SQL backup found, will restore after MySQL starts..."
# Check if MySQL data directory is empty (fresh start)
if [ ! -d "/var/lib/mysql-runtime/mysql" ]; then
echo "🆕 Fresh MySQL installation detected..."
# Check for available backups (prefer daily, fallback to hourly, then legacy)
if [ -d "/backups" ] && [ "$(ls -A /backups)" ]; then
# Try daily backups first
if [ -d "/backups/daily" ] && [ "$(ls -A /backups/daily)" ]; then
LATEST_BACKUP=$(ls -1t /backups/daily | head -n 1)
if [ -n "$LATEST_BACKUP" ] && [ -d "/backups/daily/$LATEST_BACKUP" ]; then
echo "📦 Latest daily backup found: $LATEST_BACKUP"
echo "🔄 Will restore after MySQL initializes..."
export RESTORE_BACKUP="/backups/daily/$LATEST_BACKUP"
fi
# Try hourly backups second
elif [ -d "/backups/hourly" ] && [ "$(ls -A /backups/hourly)" ]; then
LATEST_BACKUP=$(ls -1t /backups/hourly | head -n 1)
if [ -n "$LATEST_BACKUP" ] && [ -d "/backups/hourly/$LATEST_BACKUP" ]; then
echo "📦 Latest hourly backup found: $LATEST_BACKUP"
echo "🔄 Will restore after MySQL initializes..."
export RESTORE_BACKUP="/backups/hourly/$LATEST_BACKUP"
fi
# Try legacy backup structure last
else
LATEST_BACKUP=$(ls -1t /backups | head -n 1)
if [ -n "$LATEST_BACKUP" ] && [ -d "/backups/$LATEST_BACKUP" ]; then
echo "📦 Latest legacy backup found: $LATEST_BACKUP"
echo "🔄 Will restore after MySQL initializes..."
export RESTORE_BACKUP="/backups/$LATEST_BACKUP"
else
echo "🆕 No valid backups found, will initialize fresh..."
fi
fi
else
echo "🆕 No backup directory found, will initialize fresh..."
fi
else
echo "🆕 No backup found, will initialize fresh MySQL..."
echo "📁 Existing MySQL data found, skipping restore..."
fi
echo "🚀 Starting MySQL server with custom datadir..."
exec docker-entrypoint.sh mysqld \
--datadir=/var/lib/mysql-runtime \
--default-authentication-plugin=mysql_native_password \
--character-set-server=${MYSQL_CHARACTER_SET} \
--collation-server=${MYSQL_COLLATION} \
--max_connections=${MYSQL_MAX_CONNECTIONS} \
--innodb-buffer-pool-size=${MYSQL_INNODB_BUFFER_POOL_SIZE} \
--innodb-log-file-size=${MYSQL_INNODB_LOG_FILE_SIZE}
# Start MySQL in background for potential restore
if [ -n "$RESTORE_BACKUP" ]; then
echo "⚡ Starting MySQL in background for restore operation..."
docker-entrypoint.sh mysqld \
--datadir=/var/lib/mysql-runtime \
--default-authentication-plugin=mysql_native_password \
--character-set-server=${MYSQL_CHARACTER_SET} \
--collation-server=${MYSQL_COLLATION} \
--max_connections=${MYSQL_MAX_CONNECTIONS} \
--innodb-buffer-pool-size=${MYSQL_INNODB_BUFFER_POOL_SIZE} \
--innodb-log-file-size=${MYSQL_INNODB_LOG_FILE_SIZE} &
MYSQL_PID=$!
# Wait for MySQL to be ready
echo "⏳ Waiting for MySQL to become ready for restore..."
while ! mysqladmin ping -h localhost -u root --silent; do
sleep 2
done
echo "🔄 MySQL ready, starting restore from $RESTORE_BACKUP..."
# Install curl for downloading restore script
apt-get update && apt-get install -y curl
# Download restore script from GitHub
curl -fsSL https://raw.githubusercontent.com/uprightbass360/acore-compose/main/scripts/restore.sh -o /tmp/restore.sh
chmod +x /tmp/restore.sh
# Modify restore script to skip confirmation and use correct backup path
sed -i 's/sleep 10/echo "Auto-restore mode, skipping confirmation..."/' /tmp/restore.sh
sed -i 's/BACKUP_DIR=\${BACKUP_DIR:-\/backups}/BACKUP_DIR=\/backups/' /tmp/restore.sh
sed -i 's/MYSQL_PASSWORD=\${MYSQL_PASSWORD:-password}/MYSQL_PASSWORD=${MYSQL_ROOT_PASSWORD}/' /tmp/restore.sh
# Extract timestamp from backup path and run restore
BACKUP_TIMESTAMP=$(basename "$RESTORE_BACKUP")
echo "🗄️ Restoring databases from backup: $BACKUP_TIMESTAMP"
/tmp/restore.sh "$BACKUP_TIMESTAMP"
echo "✅ Database restore completed successfully!"
# Keep MySQL running in foreground
wait $MYSQL_PID
else
# Normal startup without restore
exec docker-entrypoint.sh mysqld \
--datadir=/var/lib/mysql-runtime \
--default-authentication-plugin=mysql_native_password \
--character-set-server=${MYSQL_CHARACTER_SET} \
--collation-server=${MYSQL_COLLATION} \
--max_connections=${MYSQL_MAX_CONNECTIONS} \
--innodb-buffer-pool-size=${MYSQL_INNODB_BUFFER_POOL_SIZE} \
--innodb-log-file-size=${MYSQL_INNODB_LOG_FILE_SIZE}
fi
restart: unless-stopped
healthcheck:
test: ["CMD", "sh", "-c", "mysqladmin ping -h localhost -u ${MYSQL_USER} -p${MYSQL_ROOT_PASSWORD} --silent || exit 1"]
@@ -67,41 +149,66 @@ services:
MYSQL_USER: ${MYSQL_USER}
MYSQL_PASSWORD: ${MYSQL_ROOT_PASSWORD}
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS}
BACKUP_RETENTION_HOURS: ${BACKUP_RETENTION_HOURS}
BACKUP_CRON_SCHEDULE: ${BACKUP_CRON_SCHEDULE}
BACKUP_DIR: ${BACKUP_DIR}
DB_AUTH_NAME: ${DB_AUTH_NAME}
DB_WORLD_NAME: ${DB_WORLD_NAME}
DB_CHARACTERS_NAME: ${DB_CHARACTERS_NAME}
TZ: ${TZ}
volumes:
- ${HOST_BACKUP_PATH}:/backups
- ${HOST_BACKUP_SCRIPTS_PATH}:/scripts
working_dir: /scripts
working_dir: /tmp
command:
- /bin/bash
- -c
- |
echo "🔧 Starting backup service using external script..."
echo "🔧 Starting enhanced backup service with hourly and daily schedules..."
# Make sure backup script is executable
chmod +x /scripts/backup.sh
# Install curl if not available
apt-get update && apt-get install -y curl
# Download backup scripts from GitHub
echo "📥 Downloading backup scripts from GitHub..."
curl -fsSL https://raw.githubusercontent.com/uprightbass360/acore-compose/main/scripts/backup.sh -o /tmp/backup.sh
curl -fsSL https://raw.githubusercontent.com/uprightbass360/acore-compose/main/scripts/backup-hourly.sh -o /tmp/backup-hourly.sh
curl -fsSL https://raw.githubusercontent.com/uprightbass360/acore-compose/main/scripts/backup-daily.sh -o /tmp/backup-daily.sh
chmod +x /tmp/backup.sh /tmp/backup-hourly.sh /tmp/backup-daily.sh
# Wait for MySQL to be ready before starting backup service
echo "⏳ Waiting for MySQL to be ready..."
sleep 30
# Run initial backup
echo "🚀 Running initial backup..."
/scripts/backup.sh
# Run initial daily backup
echo "🚀 Running initial daily backup..."
/tmp/backup-daily.sh
# Simple cron-like scheduler (runs backup at 3 AM daily)
echo "⏰ Starting backup scheduler with schedule: ${BACKUP_CRON_SCHEDULE}"
echo "📅 Backup retention: ${BACKUP_RETENTION_DAYS} days"
# Enhanced scheduler with hourly and daily backups
echo "⏰ Starting enhanced backup scheduler:"
echo " 📅 Daily backups: ${BACKUP_CRON_SCHEDULE} (retention: ${BACKUP_RETENTION_DAYS} days)"
echo " ⏰ Hourly backups: every hour (retention: ${BACKUP_RETENTION_HOURS} hours)"
# Track last backup times to avoid duplicates
last_daily_hour=""
last_hourly_minute=""
while true; do
current_hour=$(date +%H)
current_minute=$(date +%M)
current_time="$current_hour:$current_minute"
# Check if it's 3:00 AM (matching default cron schedule)
if [ "$$current_hour" = "03" ] && [ "$$current_minute" = "00" ]; then
echo " [$(date)] Scheduled backup time reached, running backup..."
/scripts/backup.sh
# Daily backup check (9:00 AM)
if [ "$$current_hour" = "09" ] && [ "$$current_minute" = "00" ] && [ "$$last_daily_hour" != "$$current_hour" ]; then
echo "📅 [$(date)] Daily backup time reached, running daily backup..."
/tmp/backup-daily.sh
last_daily_hour="$$current_hour"
# Sleep for 2 minutes to avoid running multiple times
sleep 120
# Hourly backup check (every hour at minute 0, except during daily backup)
elif [ "$$current_minute" = "00" ] && [ "$$current_hour" != "09" ] && [ "$$last_hourly_minute" != "$$current_minute" ]; then
echo "⏰ [$(date)] Hourly backup time reached, running hourly backup..."
/tmp/backup-hourly.sh
last_hourly_minute="$$current_minute"
# Sleep for 2 minutes to avoid running multiple times
sleep 120
else


@@ -49,34 +49,6 @@ KEIRA3_EXTERNAL_PORT=4201
KEIRA_DATABASE_HOST=ac-mysql
KEIRA_DATABASE_PORT=3306
# Grafana settings
GF_EXTERNAL_PORT=3001
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=acore123
GF_SERVER_ROOT_URL=http://localhost:3001
GF_PLUGINS_PREINSTALL=grafana-piechart-panel
# Security settings
GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION=false
GF_SECURITY_SECRET_KEY=
GF_USERS_ALLOW_SIGN_UP=false
GF_USERS_ALLOW_ORG_CREATE=false
GF_AUTH_ANONYMOUS_ENABLED=false
GF_SERVER_ENABLE_GZIP=true
GF_SECURITY_COOKIE_SECURE=false
GF_SECURITY_COOKIE_SAMESITE=lax
# InfluxDB settings
INFLUXDB_EXTERNAL_PORT=8087
INFLUXDB_INIT_MODE=setup
INFLUXDB_ADMIN_USER=acore
INFLUXDB_ADMIN_PASSWORD=acore123
INFLUXDB_ORG=azerothcore
INFLUXDB_BUCKET=metrics
INFLUXDB_TOKEN=acore-monitoring-token-12345
# Security settings
INFLUXDB_HTTP_AUTH_ENABLED=true
INFLUXDB_HTTP_HTTPS_ENABLED=false
# ==============================================
# DEPLOYMENT CONFIGURATION
# ==============================================


@@ -66,67 +66,6 @@ services:
networks:
- azerothcore
# InfluxDB for Monitoring
ac-influxdb:
image: influxdb:2.7-alpine
container_name: ac-influxdb
environment:
DOCKER_INFLUXDB_INIT_MODE: ${INFLUXDB_INIT_MODE:-setup}
DOCKER_INFLUXDB_INIT_USERNAME: ${INFLUXDB_ADMIN_USER:-acore}
DOCKER_INFLUXDB_INIT_PASSWORD: ${INFLUXDB_ADMIN_PASSWORD:-acore123}
DOCKER_INFLUXDB_INIT_ORG: ${INFLUXDB_ORG:-azerothcore}
DOCKER_INFLUXDB_INIT_BUCKET: ${INFLUXDB_BUCKET:-metrics}
DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: ${INFLUXDB_TOKEN:-acore-monitoring-token-12345}
INFLUXDB_HTTP_AUTH_ENABLED: ${INFLUXDB_HTTP_AUTH_ENABLED:-true}
INFLUXDB_HTTP_HTTPS_ENABLED: ${INFLUXDB_HTTP_HTTPS_ENABLED:-false}
ports:
- "${INFLUXDB_EXTERNAL_PORT:-8087}:8086"
volumes:
- ${STORAGE_PATH:-./storage/azerothcore}/influxdb:/var/lib/influxdb2
restart: unless-stopped
networks:
- azerothcore
# Grafana Monitoring Dashboard
ac-grafana:
image: grafana/grafana:latest
container_name: ac-grafana
user: "0:0" # Run as root to handle NFS permissions
depends_on:
- ac-influxdb
environment:
GF_SECURITY_ADMIN_USER: ${GF_SECURITY_ADMIN_USER:-admin}
GF_SECURITY_ADMIN_PASSWORD: ${GF_SECURITY_ADMIN_PASSWORD:-acore123}
GF_INSTALL_PLUGINS: ${GF_PLUGINS_PREINSTALL:-grafana-piechart-panel}
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL:-http://localhost:3001}
GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION: ${GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION:-false}
GF_SECURITY_SECRET_KEY: ${GF_SECURITY_SECRET_KEY:-}
GF_USERS_ALLOW_SIGN_UP: ${GF_USERS_ALLOW_SIGN_UP:-false}
GF_USERS_ALLOW_ORG_CREATE: ${GF_USERS_ALLOW_ORG_CREATE:-false}
GF_AUTH_ANONYMOUS_ENABLED: ${GF_AUTH_ANONYMOUS_ENABLED:-false}
GF_SERVER_ENABLE_GZIP: ${GF_SERVER_ENABLE_GZIP:-true}
GF_SECURITY_COOKIE_SECURE: ${GF_SECURITY_COOKIE_SECURE:-false}
GF_SECURITY_COOKIE_SAMESITE: ${GF_SECURITY_COOKIE_SAMESITE:-lax}
ports:
- "${GF_EXTERNAL_PORT:-3001}:3000"
volumes:
- ${STORAGE_PATH:-./storage/azerothcore}/grafana:/var/lib/grafana
entrypoint: ["/bin/bash", "-c"]
command:
- |
echo "🔧 Setting up Grafana permissions..."
# Create directories with proper ownership
mkdir -p /var/lib/grafana
chown -R 472:472 /var/lib/grafana
chmod -R 755 /var/lib/grafana
echo "🚀 Starting Grafana server..."
exec su -s /bin/bash grafana -c "/run.sh"
restart: unless-stopped
networks:
- azerothcore
networks:
azerothcore:

readme.md

@@ -12,7 +12,7 @@ This project is a Docker/Podman implementation based on:
- **Logger Issue Resolution**: Fixed worldserver startup issues with proper logger configuration
- **Dynamic URL Generation**: Web interfaces automatically detect external URLs for deployment flexibility
- **Port Collision Prevention**: All external ports optimized to avoid common development tool conflicts
- **Enhanced Security**: Comprehensive security settings for all web interfaces (Grafana, InfluxDB, PHPMyAdmin)
- **Enhanced Security**: Comprehensive security settings for all web interfaces (PHPMyAdmin)
- **Full Environment Variable Configuration**: No hardcoded values, everything configurable via .env
- **External Domain Support**: Configurable base URLs for custom domain deployment
- **Multi-Runtime Support**: Works with both Docker and Podman
@@ -315,8 +315,7 @@ acore-compose/
| `ac-worldserver` | acore/ac-wotlk-worldserver:14.0.0-dev | Game world server | 8215:8085, 7778:7878 |
| `ac-eluna` | acore/eluna-ts:master | Lua scripting engine | - |
| `ac-phpmyadmin` | phpmyadmin/phpmyadmin:latest | Database management web UI | 8081:80 |
| `ac-grafana` | grafana/grafana:latest | Monitoring dashboard | 3001:3000 |
| `ac-influxdb` | influxdb:2.7-alpine | Metrics database | 8087:8086 |
| `ac-keira3` | uprightbass360/keira3:latest | Production database editor with API | 4201:8080 |
| `ac-backup` | mysql:8.0 | Automated backup service | - |
| `ac-modules` | alpine/git:latest | Module management | - |
@@ -472,8 +471,6 @@ Configuration is managed through separate `.env` files for each layer:
#### Tools Layer (`docker-compose-azerothcore-tools.env`)
- `PMA_EXTERNAL_PORT`: PHPMyAdmin port (8081)
- `KEIRA3_EXTERNAL_PORT`: Database editor port (4201)
- `GF_EXTERNAL_PORT`: Grafana monitoring port (3001)
- `INFLUXDB_EXTERNAL_PORT`: InfluxDB metrics port (8087)
- `STORAGE_ROOT`: Root storage path (default: ./storage)
- `STORAGE_PATH`: Derived storage path (${STORAGE_ROOT}/azerothcore)
@@ -505,8 +502,6 @@ The deployment uses a unified storage approach controlled by the `STORAGE_ROOT`
| **Game Data** | `./storage/azerothcore/data` | `${STORAGE_ROOT}/azerothcore/data` | Maps, vmaps, mmaps, DBC files |
| **Configuration** | `./storage/azerothcore/config` | `${STORAGE_ROOT}/azerothcore/config` | Server configuration files |
| **Application Logs** | `./storage/azerothcore/logs` | `${STORAGE_ROOT}/azerothcore/logs` | Server and service logs |
| **Tools Data** | `./storage/azerothcore/azerothcore/grafana` | `${STORAGE_ROOT}/azerothcore/azerothcore/grafana` | Grafana dashboards |
| **Metrics Data** | `./storage/azerothcore/azerothcore/influxdb` | `${STORAGE_ROOT}/azerothcore/azerothcore/influxdb` | InfluxDB time series data |
| **Backups** | `./backups` | `./backups` | Database backup files |
### Storage Configuration Examples
@@ -693,98 +688,172 @@ EOF
## Backup System
The deployment includes a comprehensive automated backup system with individual database backups, compression, and retention management.
The deployment includes a comprehensive automated backup system with tiered backup schedules, individual database backups, compression, and intelligent retention management.
### Enhanced Backup Strategy
The system provides **dual backup schedules** for comprehensive data protection:
- **🕘 Hourly Backups**: Every hour with 6-hour retention (for recent recovery)
- **📅 Daily Backups**: Every day at 9:00 AM UTC with 3-day retention (for longer-term recovery)
- **🔄 Auto-Restore**: Automatic backup restoration on fresh MySQL installations
### Backup Configuration
Configure via environment variables in `docker-compose-azerothcore-database.env`:
- `STORAGE_ROOT`: Root storage path (default: ./storage)
- `BACKUP_CRON_SCHEDULE`: Cron expression (default: "0 3 * * *" - 3 AM daily)
- `BACKUP_RETENTION_DAYS`: Days to keep backups (default: 7)
- `HOST_BACKUP_PATH`: Local backup storage path (default: ./backups)
- `HOST_BACKUP_SCRIPTS_PATH`: Backup scripts path (default: ./scripts)
- `BACKUP_CRON_SCHEDULE`: Daily backup time (default: "0 9 * * *" - 9 AM UTC)
- `BACKUP_RETENTION_DAYS`: Days to keep daily backups (default: 3)
- `BACKUP_RETENTION_HOURS`: Hours to keep hourly backups (default: 6)
- `BACKUP_DIR`: Container backup directory (default: /backups)
- `HOST_BACKUP_PATH`: Host backup storage path (default: ${STORAGE_PATH}/backups)
- `DB_AUTH_NAME`, `DB_WORLD_NAME`, `DB_CHARACTERS_NAME`: Database names (configurable)
**Note**: The backup service operates independently of `STORAGE_ROOT` and uses dedicated backup paths for database exports.
**Note**: All backup settings are now fully parameterized via environment variables for maximum flexibility.
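For reference, a minimal sketch of the corresponding excerpt in `docker-compose-azerothcore-database.env`, using the documented defaults (values are illustrative and can be adjusted per deployment):
```bash
# Backup settings (defaults documented above)
HOST_BACKUP_PATH=${STORAGE_PATH}/backups   # host-side backup location
BACKUP_CRON_SCHEDULE="0 9 * * *"           # daily backup at 09:00 UTC
BACKUP_RETENTION_DAYS=3                    # keep daily backups for 3 days
BACKUP_RETENTION_HOURS=6                   # keep hourly backups for 6 hours
BACKUP_DIR=/backups                        # backup directory inside the container
```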
### Backup Features
✅ **Tiered Backup Strategy**: Hourly + Daily schedules with different retention policies
✅ **Individual Database Backups**: Separate compressed files for each database
✅ **Backup Manifests**: JSON metadata with timestamps and backup information
✅ **Automated Compression**: Gzip compression for space efficiency
✅ **Retention Management**: Automatic cleanup of old backups
✅ **External Scripts**: Uses external backup/restore scripts for flexibility
✅ **Intelligent Retention**: Different policies for hourly vs daily backups
✅ **Auto-Restore**: Automatic restoration from latest backup on fresh installations
✅ **Environment-Based Config**: All settings configurable via environment variables
✅ **Shared Storage**: Backups persist in the host filesystem independently of the container lifecycle
### Backup Operations
#### Automatic Backups
The `ac-backup` container runs continuously and performs scheduled backups:
- **Schedule**: Daily at 3:00 AM by default (configurable via `BACKUP_CRON_SCHEDULE`)
- **Databases**: All AzerothCore databases (auth, world, characters)
- **Format**: Individual compressed SQL files per database
- **Retention**: Automatic cleanup after configured days
The `ac-backup` container runs continuously with dual scheduling:
**Hourly Backups**:
- **Schedule**: Every hour at minute 0 (except during daily backup)
- **Retention**: 6 hours (keeps last 6 hourly backups)
- **Location**: `${HOST_BACKUP_PATH}/hourly/`
- **Purpose**: Recent recovery and frequent data protection
**Daily Backups**:
- **Schedule**: Daily at 9:00 AM UTC (configurable via `BACKUP_CRON_SCHEDULE`)
- **Retention**: 3 days (keeps last 3 daily backups)
- **Location**: `${HOST_BACKUP_PATH}/daily/`
- **Features**: Enhanced with database statistics and comprehensive metadata
- **Purpose**: Longer-term recovery and compliance
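Expressed as cron entries for comparison (illustrative only; the `ac-backup` container implements this schedule with a shell loop rather than cron):
```bash
# Illustrative cron equivalents of the ac-backup scheduler loop
0 * * * *  /tmp/backup-hourly.sh   # every hour on the hour (skipped at 09:00)
0 9 * * *  /tmp/backup-daily.sh    # daily at 09:00 UTC (BACKUP_CRON_SCHEDULE)
```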
#### Manual Backups
```bash
# Execute backup immediately using container
docker exec ac-backup /scripts/backup.sh
# Execute hourly backup immediately
docker exec ac-backup /tmp/backup-hourly.sh
# Or run backup script directly (if scripts are accessible)
cd scripts
./backup.sh
# Execute daily backup immediately
docker exec ac-backup /tmp/backup-daily.sh
# Check backup status and logs
docker logs ac-backup --tail 20
# List available backups
ls -la backups/
# List available backups by type
ls -la ${HOST_BACKUP_PATH}/hourly/
ls -la ${HOST_BACKUP_PATH}/daily/
# Check backup storage usage
du -sh ${HOST_BACKUP_PATH}/*/
```
### Backup Structure
### Tiered Backup Structure
```
backups/
├── 20250930_181843/              # Timestamp-based backup directory
│   ├── acore_auth.sql.gz         # Compressed auth database (8KB)
│   ├── acore_world.sql.gz        # Compressed world database (77MB)
│   ├── acore_characters.sql.gz   # Compressed characters database (16KB)
│   └── manifest.json             # Backup metadata
├── 20250930_120000/              # Previous backup
└── ...                           # Additional backups (retention managed)
storage/azerothcore/backups/
├── hourly/                           # Hourly backups (6-hour retention)
│   ├── 20251003_140000/              # Recent hourly backup
│   │   ├── acore_auth.sql.gz         # Compressed auth database (8KB)
│   │   ├── acore_world.sql.gz        # Compressed world database (77MB)
│   │   ├── acore_characters.sql.gz   # Compressed characters database (16KB)
│   │   └── manifest.json             # Backup metadata
│   └── 20251003_150000/              # Next hourly backup
├── daily/                            # Daily backups (3-day retention)
│   ├── 20251003_090000/              # Daily backup with enhanced features
│   │   ├── acore_auth.sql.gz         # Compressed auth database (8KB)
│   │   ├── acore_world.sql.gz        # Compressed world database (77MB)
│   │   ├── acore_characters.sql.gz   # Compressed characters database (16KB)
│   │   ├── manifest.json             # Enhanced backup metadata
│   │   └── database_stats.txt        # Database statistics and sizing
│   └── 20251004_090000/              # Next daily backup
└── [legacy backups]                  # Previous single-schedule backups
```
### Backup Metadata
Each backup includes a `manifest.json` file with backup information:
### Enhanced Backup Metadata
**Hourly Backup Manifest** (`hourly/*/manifest.json`):
```json
{
"timestamp": "20250930_181843",
"databases": ["acore_auth acore_world acore_characters"],
"timestamp": "20251003_140000",
"type": "hourly",
"databases": ["acore_auth", "acore_world", "acore_characters"],
"backup_size": "77M",
"retention_days": 7,
"retention_hours": 6,
"mysql_version": "8.0.43"
}
```
### Backup Restoration
#### Using Restore Script
```bash
cd scripts
./restore.sh /path/to/backup/directory/20250930_181843
**Daily Backup Manifest** (`daily/*/manifest.json`):
```json
{
"timestamp": "20251003_090000",
"type": "daily",
"databases": ["acore_auth", "acore_world", "acore_characters"],
"backup_size": "77M",
"retention_days": 3,
"mysql_version": "8.0.43",
"backup_method": "mysqldump with master-data and flush-logs",
"created_by": "acore-compose2 backup system"
}
```
#### Manual Restoration
**Daily Database Statistics** (`daily/*/database_stats.txt`):
```
Database: acore_auth, Tables: 15, Size: 1.2MB
Database: acore_world, Tables: 422, Size: 75.8MB
Database: acore_characters, Tables: 25, Size: 0.8MB
```
### Auto-Restore Functionality
The system includes intelligent auto-restore capabilities:
**Restoration Priority**:
1. **Daily backups** (preferred for consistency)
2. **Hourly backups** (fallback for recent data)
3. **Legacy backups** (compatibility with older backups)
**Auto-Restore Process**:
- Detects fresh MySQL installations automatically
- Finds latest available backup using priority order
- Downloads restoration script from GitHub
- Performs automated database restoration
- Continues normal MySQL startup after restoration
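A minimal, hypothetical helper sketch of the same selection order (not part of this commit; it only reports which backup the entrypoint would pick, assuming the directory layout shown above):
```bash
#!/bin/bash
# Hypothetical helper: report which backup the auto-restore logic would choose.
# Mirrors the priority used in the ac-mysql entrypoint: daily > hourly > legacy.
BACKUP_DIR=${BACKUP_DIR:-/backups}

latest_in() {
  # Newest entry in directory $1 (empty output if the directory is missing or empty).
  ls -1t "$1" 2>/dev/null | head -n 1
}

daily=$(latest_in "$BACKUP_DIR/daily")
hourly=$(latest_in "$BACKUP_DIR/hourly")
legacy=$(ls -1t "$BACKUP_DIR" 2>/dev/null | grep -vE '^(daily|hourly)$' | head -n 1)

if [ -n "$daily" ]; then
  echo "Would restore daily backup:  $BACKUP_DIR/daily/$daily"
elif [ -n "$hourly" ]; then
  echo "Would restore hourly backup: $BACKUP_DIR/hourly/$hourly"
elif [ -n "$legacy" ]; then
  echo "Would restore legacy backup: $BACKUP_DIR/$legacy"
else
  echo "No backups found; MySQL would initialize fresh."
fi
```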
**Manual Restoration**:
```bash
# Restore from specific daily backup
cd scripts
./restore.sh daily/20251003_090000
# Restore from specific hourly backup
cd scripts
./restore.sh hourly/20251003_140000
```
#### Legacy Manual Restoration
```bash
# Restore individual database from compressed backup
gunzip -c backups/20250930_181843/acore_world.sql.gz | \
gunzip -c backups/daily/20251003_090000/acore_world.sql.gz | \
docker exec -i ac-mysql mysql -uroot -p${MYSQL_ROOT_PASSWORD} acore_world
# Restore all databases from a backup directory
for db in auth world characters; do
gunzip -c backups/20250930_181843/acore_${db}.sql.gz | \
gunzip -c backups/daily/20251003_090000/acore_${db}.sql.gz | \
docker exec -i ac-mysql mysql -uroot -p${MYSQL_ROOT_PASSWORD} acore_${db}
done
```
@@ -1055,8 +1124,6 @@ docker stats --no-stream
| **SOAP API** | `localhost:7778` | 7778 | Server administration API |
| **PHPMyAdmin** | `http://localhost:8081` | 8081 | Database management interface |
| **Keira3** | `http://localhost:4201` | 4201 | Database editor web UI with API backend |
| **Grafana** | `http://localhost:3001` | 3001 | Monitoring dashboard |
| **InfluxDB** | `localhost:8087` | 8087 | Metrics database |
| **MySQL** | `localhost:64306` | 64306 | Direct database access |
### Database Credentials

scripts/backup-daily.sh Executable file

@@ -0,0 +1,87 @@
#!/bin/bash
# Abort on any error, including failures inside the mysqldump | gzip pipelines
set -eo pipefail
# Configuration from environment variables
MYSQL_HOST=${MYSQL_HOST:-ac-mysql}
MYSQL_PORT=${MYSQL_PORT:-3306}
MYSQL_USER=${MYSQL_USER:-root}
MYSQL_PASSWORD=${MYSQL_PASSWORD:-password}
BACKUP_DIR=${BACKUP_DIR:-/backups}
RETENTION_DAYS=${BACKUP_RETENTION_DAYS:-3}
DATE_FORMAT="%Y%m%d_%H%M%S"
# Database names from environment variables
DATABASES=("${DB_AUTH_NAME:-acore_auth}" "${DB_WORLD_NAME:-acore_world}" "${DB_CHARACTERS_NAME:-acore_characters}")
# Create daily backup directory
DAILY_DIR="$BACKUP_DIR/daily"
mkdir -p $DAILY_DIR
# Generate timestamp
TIMESTAMP=$(date +$DATE_FORMAT)
BACKUP_SUBDIR="$DAILY_DIR/$TIMESTAMP"
mkdir -p $BACKUP_SUBDIR
echo "[$TIMESTAMP] Starting AzerothCore daily backup..."
# Backup each database with additional options for daily backups
for db in "${DATABASES[@]}"; do
echo "[$TIMESTAMP] Backing up database: $db"
mysqldump -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD \
--single-transaction --routines --triggers --events \
--hex-blob --quick --lock-tables=false \
--add-drop-database --databases $db \
--master-data=2 --flush-logs \
| gzip > $BACKUP_SUBDIR/${db}.sql.gz
if [ $? -eq 0 ]; then
SIZE=$(du -h $BACKUP_SUBDIR/${db}.sql.gz | cut -f1)
echo "[$TIMESTAMP] ✅ Successfully backed up $db ($SIZE)"
else
echo "[$TIMESTAMP] ❌ Failed to backup $db"
exit 1
fi
done
# Create comprehensive backup manifest for daily backups
BACKUP_SIZE=$(du -sh $BACKUP_SUBDIR | cut -f1)
MYSQL_VERSION=$(mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'SELECT VERSION();' -s -N)
# Build a proper JSON array of database names for the manifest
DB_LIST=$(printf '"%s", ' "${DATABASES[@]}")
DB_LIST="[${DB_LIST%, }]"
cat > $BACKUP_SUBDIR/manifest.json <<EOF
{
"timestamp": "$TIMESTAMP",
"type": "daily",
"databases": $DB_LIST,
"backup_size": "$BACKUP_SIZE",
"retention_days": $RETENTION_DAYS,
"mysql_version": "$MYSQL_VERSION",
"backup_method": "mysqldump with master-data and flush-logs",
"created_by": "acore-compose2 backup system"
}
EOF
# Create database statistics for daily backups
echo "[$TIMESTAMP] Generating database statistics..."
for db in "${DATABASES[@]}"; do
echo "[$TIMESTAMP] Statistics for $db:"
mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -e "
SELECT
TABLE_SCHEMA as 'Database',
COUNT(*) as 'Tables',
ROUND(SUM(DATA_LENGTH + INDEX_LENGTH) / 1024 / 1024, 2) as 'Size_MB'
FROM information_schema.TABLES
WHERE TABLE_SCHEMA = '$db'
GROUP BY TABLE_SCHEMA;
" >> $BACKUP_SUBDIR/database_stats.txt
done
# Clean up old daily backups (keep only last N days)
echo "[$TIMESTAMP] Cleaning up daily backups older than $RETENTION_DAYS days..."
find $DAILY_DIR -type d -name "[0-9]*" -mtime +$RETENTION_DAYS -exec rm -rf {} + 2>/dev/null || true
# Log backup completion
echo "[$TIMESTAMP] ✅ Daily backup completed successfully"
echo "[$TIMESTAMP] Backup location: $BACKUP_SUBDIR"
echo "[$TIMESTAMP] Backup size: $BACKUP_SIZE"
echo "[$TIMESTAMP] Current daily backups:"
ls -la $DAILY_DIR/ | tail -n +2

scripts/backup-hourly.sh Executable file

@@ -0,0 +1,65 @@
#!/bin/bash
# Abort on any error, including failures inside the mysqldump | gzip pipelines
set -eo pipefail
# Configuration from environment variables
MYSQL_HOST=${MYSQL_HOST:-ac-mysql}
MYSQL_PORT=${MYSQL_PORT:-3306}
MYSQL_USER=${MYSQL_USER:-root}
MYSQL_PASSWORD=${MYSQL_PASSWORD:-password}
BACKUP_DIR=${BACKUP_DIR:-/backups}
RETENTION_HOURS=${BACKUP_RETENTION_HOURS:-6}
DATE_FORMAT="%Y%m%d_%H%M%S"
# Database names from environment variables
DATABASES=("${DB_AUTH_NAME:-acore_auth}" "${DB_WORLD_NAME:-acore_world}" "${DB_CHARACTERS_NAME:-acore_characters}")
# Create hourly backup directory
HOURLY_DIR="$BACKUP_DIR/hourly"
mkdir -p $HOURLY_DIR
# Generate timestamp
TIMESTAMP=$(date +$DATE_FORMAT)
BACKUP_SUBDIR="$HOURLY_DIR/$TIMESTAMP"
mkdir -p $BACKUP_SUBDIR
echo "[$TIMESTAMP] Starting AzerothCore hourly backup..."
# Backup each database
for db in "${DATABASES[@]}"; do
echo "[$TIMESTAMP] Backing up database: $db"
mysqldump -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD \
--single-transaction --routines --triggers --events \
--hex-blob --quick --lock-tables=false \
--add-drop-database --databases $db \
| gzip > $BACKUP_SUBDIR/${db}.sql.gz
if [ $? -eq 0 ]; then
SIZE=$(du -h $BACKUP_SUBDIR/${db}.sql.gz | cut -f1)
echo "[$TIMESTAMP] ✅ Successfully backed up $db ($SIZE)"
else
echo "[$TIMESTAMP] ❌ Failed to backup $db"
exit 1
fi
done
# Create backup manifest
# Build a proper JSON array of database names for the manifest
DB_LIST=$(printf '"%s", ' "${DATABASES[@]}")
DB_LIST="[${DB_LIST%, }]"
cat > $BACKUP_SUBDIR/manifest.json <<EOF
{
"timestamp": "$TIMESTAMP",
"type": "hourly",
"databases": $DB_LIST,
"backup_size": "$(du -sh $BACKUP_SUBDIR | cut -f1)",
"retention_hours": $RETENTION_HOURS,
"mysql_version": "$(mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'SELECT VERSION();' -s -N)"
}
EOF
# Clean up old hourly backups (keep only last N hours)
echo "[$TIMESTAMP] Cleaning up hourly backups older than $RETENTION_HOURS hours..."
find $HOURLY_DIR -type d -name "[0-9]*" -mmin +$((RETENTION_HOURS * 60)) -exec rm -rf {} + 2>/dev/null || true
# Log backup completion
echo "[$TIMESTAMP] ✅ Hourly backup completed successfully"
echo "[$TIMESTAMP] Backup location: $BACKUP_SUBDIR"
echo "[$TIMESTAMP] Current hourly backups:"
ls -la $HOURLY_DIR/ | tail -n +2